repo_name
string
path
string
copies
string
size
string
content
string
license
string
a1d3s/linux-bpi
drivers/misc/mic/bus/scif_bus.c
393
5171
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Intel Symmetric Communications Interface Bus driver.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/dma-mapping.h>

#include "scif_bus.h"

/* sysfs attribute: hardware device id of this SCIF device. */
static ssize_t device_show(struct device *d,
			   struct device_attribute *attr, char *buf)
{
	struct scif_hw_dev *dev = dev_to_scif(d);

	return sprintf(buf, "0x%04x\n", dev->id.device);
}
static DEVICE_ATTR_RO(device);

/* sysfs attribute: vendor id of this SCIF device. */
static ssize_t vendor_show(struct device *d,
			   struct device_attribute *attr, char *buf)
{
	struct scif_hw_dev *dev = dev_to_scif(d);

	return sprintf(buf, "0x%04x\n", dev->id.vendor);
}
static DEVICE_ATTR_RO(vendor);

/* sysfs attribute: modalias string used for module autoloading. */
static ssize_t modalias_show(struct device *d,
			     struct device_attribute *attr, char *buf)
{
	struct scif_hw_dev *dev = dev_to_scif(d);

	return sprintf(buf, "scif:d%08Xv%08X\n",
		       dev->id.device, dev->id.vendor);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *scif_dev_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_vendor.attr,
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(scif_dev);

/*
 * Return non-zero when @id matches @dev. SCIF_DEV_ANY_ID in either
 * field of @id acts as a wildcard.
 */
static inline int scif_id_match(const struct scif_hw_dev *dev,
				const struct scif_hw_dev_id *id)
{
	if (id->device != dev->id.device && id->device != SCIF_DEV_ANY_ID)
		return 0;

	return id->vendor == SCIF_DEV_ANY_ID || id->vendor == dev->id.vendor;
}

/*
 * This looks through all the IDs a driver claims to support. If any of them
 * match, we return 1 and the kernel will call scif_dev_probe(). The id_table
 * is terminated by an entry with a zero device field.
 */
static int scif_dev_match(struct device *dv, struct device_driver *dr)
{
	unsigned int i;
	struct scif_hw_dev *dev = dev_to_scif(dv);
	const struct scif_hw_dev_id *ids;

	ids = drv_to_scif(dr)->id_table;
	for (i = 0; ids[i].device; i++)
		if (scif_id_match(dev, &ids[i]))
			return 1;
	return 0;
}

/* Emit MODALIAS= so udev/kmod can autoload the matching driver. */
static int scif_uevent(struct device *dv, struct kobj_uevent_env *env)
{
	struct scif_hw_dev *dev = dev_to_scif(dv);

	return add_uevent_var(env, "MODALIAS=scif:d%08Xv%08X",
			      dev->id.device, dev->id.vendor);
}

/* Bus probe hook: forward to the matched scif_driver's probe(). */
static int scif_dev_probe(struct device *d)
{
	struct scif_hw_dev *dev = dev_to_scif(d);
	struct scif_driver *drv = drv_to_scif(dev->dev.driver);

	return drv->probe(dev);
}

/* Bus remove hook: forward to the scif_driver's remove(). */
static int scif_dev_remove(struct device *d)
{
	struct scif_hw_dev *dev = dev_to_scif(d);
	struct scif_driver *drv = drv_to_scif(dev->dev.driver);

	drv->remove(dev);
	return 0;
}

static struct bus_type scif_bus = {
	.name		= "scif_bus",
	.match		= scif_dev_match,
	.dev_groups	= scif_dev_groups,
	.uevent		= scif_uevent,
	.probe		= scif_dev_probe,
	.remove		= scif_dev_remove,
};

/**
 * scif_register_driver - register a driver on the SCIF bus
 * @driver: driver to register
 *
 * Return: 0 on success, negative errno from driver_register() otherwise.
 */
int scif_register_driver(struct scif_driver *driver)
{
	driver->driver.bus = &scif_bus;
	return driver_register(&driver->driver);
}
EXPORT_SYMBOL_GPL(scif_register_driver);

/**
 * scif_unregister_driver - unregister a driver from the SCIF bus
 * @driver: driver previously registered with scif_register_driver()
 */
void scif_unregister_driver(struct scif_driver *driver)
{
	driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(scif_unregister_driver);

/* Device release callback: final put frees the scif_hw_dev. */
static void scif_release_dev(struct device *d)
{
	struct scif_hw_dev *sdev = dev_to_scif(d);

	kfree(sdev);
}

/**
 * scif_register_device - allocate and register a SCIF hardware device
 * @pdev: parent device
 * @id: SCIF device id
 * @dma_ops: DMA operations for this device
 * @hw_ops: hardware operations backing this device
 * @dnode: destination node id
 * @snode: source node id
 * @mmio: MMIO window
 * @aper: aperture window
 * @dp: virtual address of local device page
 * @rdp: remote device page mapping
 * @chan: array of DMA channels
 * @num_chan: number of entries in @chan
 *
 * Return: the new device on success, ERR_PTR() on failure. The device is
 * freed via its release callback once the last reference is dropped.
 */
struct scif_hw_dev *
scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
		     struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
		     struct mic_mw *mmio, struct mic_mw *aper, void *dp,
		     void __iomem *rdp, struct dma_chan **chan, int num_chan)
{
	int ret;
	struct scif_hw_dev *sdev;

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev)
		return ERR_PTR(-ENOMEM);

	sdev->dev.parent = pdev;
	sdev->id.device = id;
	sdev->id.vendor = SCIF_DEV_ANY_ID;
	sdev->dev.archdata.dma_ops = dma_ops;
	sdev->dev.release = scif_release_dev;
	sdev->hw_ops = hw_ops;
	sdev->dnode = dnode;
	sdev->snode = snode;
	dev_set_drvdata(&sdev->dev, sdev);
	sdev->dev.bus = &scif_bus;
	sdev->mmio = mmio;
	sdev->aper = aper;
	sdev->dp = dp;
	sdev->rdp = rdp;
	sdev->dev.dma_mask = &sdev->dev.coherent_dma_mask;
	/* Best effort; SCIF hardware is 64-bit DMA capable. */
	dma_set_mask(&sdev->dev, DMA_BIT_MASK(64));
	sdev->dma_ch = chan;
	sdev->num_dma_ch = num_chan;
	dev_set_name(&sdev->dev, "scif-dev%u", sdev->dnode);
	/*
	 * device_register() causes the bus infrastructure to look for a
	 * matching driver.
	 */
	ret = device_register(&sdev->dev);
	if (ret)
		goto put_dev;
	return sdev;
put_dev:
	/*
	 * Never kfree() a device directly after device_register() has been
	 * called, even when it fails: the kobject holds a reference that
	 * must be dropped with put_device(), which ends up freeing sdev
	 * through scif_release_dev().
	 */
	put_device(&sdev->dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(scif_register_device);

/**
 * scif_unregister_device - unregister a SCIF hardware device
 * @sdev: device previously returned by scif_register_device()
 */
void scif_unregister_device(struct scif_hw_dev *sdev)
{
	device_unregister(&sdev->dev);
}
EXPORT_SYMBOL_GPL(scif_unregister_device);

static int __init scif_init(void)
{
	return bus_register(&scif_bus);
}

static void __exit scif_exit(void)
{
	bus_unregister(&scif_bus);
}

core_initcall(scif_init);
module_exit(scif_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) SCIF Bus driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
janarthananfit/android_kernel_msm_beni
drivers/acpi/acpica/tbutils.c
905
23441
/******************************************************************************
 *
 * Module Name: tbutils - table utilities
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2010, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"

#define _COMPONENT          ACPI_TABLES
ACPI_MODULE_NAME("tbutils")

/* Local prototypes */
static void acpi_tb_fix_string(char *string, acpi_size length);

static void
acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
			     struct acpi_table_header *header);

static acpi_physical_address
acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);

/*
 * acpi_tb_check_xsdt - sanity check the XSDT at a physical address.
 *
 * Maps the XSDT header first to learn the length, then remaps the whole
 * table and walks its 64-bit entries looking for NULL pointers (some
 * BIOSes ship such broken XSDTs; the caller then falls back to the RSDT).
 *
 * Returns: AE_OK           - XSDT is usable
 *          AE_NO_MEMORY    - could not map the table
 *          AE_INVALID_TABLE_LENGTH - header-reported length too small
 *          AE_NULL_ENTRY   - at least one table entry is NULL
 *
 * NOTE(review): the loop counter 'i' is a signed int compared against the
 * u32 'table_count'; fine for real-world table counts, but worth confirming
 * against a hostile/corrupt length field.
 */
static acpi_status acpi_tb_check_xsdt(acpi_physical_address address)
{
	struct acpi_table_header *table;
	u32 length;
	u64 xsdt_entry_address;
	u8 *table_entry;
	u32 table_count;
	int i;

	/* Map only the header to read the table length */

	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
	if (!table)
		return AE_NO_MEMORY;

	length = table->length;
	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
	if (length < sizeof(struct acpi_table_header))
		return AE_INVALID_TABLE_LENGTH;

	/* Remap the entire table now that the length is known */

	table = acpi_os_map_memory(address, length);
	if (!table)
		return AE_NO_MEMORY;

	/* Calculate the number of tables described in XSDT */
	table_count =
	    (u32) ((table->length -
		    sizeof(struct acpi_table_header)) / sizeof(u64));
	table_entry =
	    ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
	for (i = 0; i < table_count; i++) {
		/* Entries may be unaligned; copy via ACPI_MOVE_64_TO_64 */
		ACPI_MOVE_64_TO_64(&xsdt_entry_address, table_entry);
		if (!xsdt_entry_address) {
			/* XSDT has NULL entry */
			break;
		}
		table_entry += sizeof(u64);
	}
	acpi_os_unmap_memory(table, length);

	/* Early loop exit (i < table_count) means a NULL entry was found */

	if (i < table_count)
		return AE_NULL_ENTRY;
	else
		return AE_OK;
}

/*
 * acpi_tb_initialize_facs - create a permanent mapping for the FACS and
 * save it in acpi_gbl_FACS, for access to the Global Lock and the
 * Firmware Waking Vector.
 */
acpi_status acpi_tb_initialize_facs(void)
{
	acpi_status status;

	status =
	    acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
				    ACPI_CAST_INDIRECT_PTR(struct
							   acpi_table_header,
							   &acpi_gbl_FACS));
	return status;
}

/*
 * acpi_tb_tables_loaded - TRUE if the minimum required ACPI tables
 * (FADT, FACS, DSDT - the first three root-table slots) are present.
 */
u8 acpi_tb_tables_loaded(void)
{
	if (acpi_gbl_root_table_list.current_table_count >= 3) {
		return (TRUE);
	}

	return (FALSE);
}

/*
 * acpi_tb_fix_string - replace every non-printable byte within the first
 * 'length' bytes of 'string' with a question mark. Stops early at NUL.
 */
static void acpi_tb_fix_string(char *string, acpi_size length)
{

	while (length && *string) {
		if (!ACPI_IS_PRINT(*string)) {
			*string = '?';
		}
		string++;
		length--;
	}
}

/*
 * acpi_tb_cleanup_table_header - copy 'header' into 'out_header' and
 * sanitize all the fixed-width "string" fields so they are printable.
 */
static void
acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
			     struct acpi_table_header *header)
{

	ACPI_MEMCPY(out_header, header, sizeof(struct acpi_table_header));

	acpi_tb_fix_string(out_header->signature, ACPI_NAME_SIZE);
	acpi_tb_fix_string(out_header->oem_id, ACPI_OEM_ID_SIZE);
	acpi_tb_fix_string(out_header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
	acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAME_SIZE);
}

/*
 * acpi_tb_print_table_header - log an ACPI table header. FACS and RSDP
 * need special-casing because they do not carry the full common header.
 */
void
acpi_tb_print_table_header(acpi_physical_address address,
			   struct acpi_table_header *header)
{
	struct acpi_table_header local_header;

	/*
	 * The reason that the Address is cast to a void pointer is so that we
	 * can use %p which will work properly on both 32-bit and 64-bit hosts.
	 */
	if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {

		/* FACS only has signature and length fields */

		ACPI_INFO((AE_INFO, "%4.4s %p %05X",
			   header->signature, ACPI_CAST_PTR(void, address),
			   header->length));
	} else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {

		/* RSDP has no common fields */

		ACPI_MEMCPY(local_header.oem_id,
			    ACPI_CAST_PTR(struct acpi_table_rsdp,
					  header)->oem_id, ACPI_OEM_ID_SIZE);
		acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);

		/* Revision 0 RSDPs are 20 bytes and have no length field */

		ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
			   ACPI_CAST_PTR(void, address),
			   (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
			    revision >
			    0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
					       header)->length : 20,
			   ACPI_CAST_PTR(struct acpi_table_rsdp,
					 header)->revision,
			   local_header.oem_id));
	} else {
		/* Standard ACPI table with full common header */

		acpi_tb_cleanup_table_header(&local_header, header);

		ACPI_INFO((AE_INFO,
			   "%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
			   local_header.signature, ACPI_CAST_PTR(void, address),
			   local_header.length, local_header.revision,
			   local_header.oem_id, local_header.oem_table_id,
			   local_header.oem_revision,
			   local_header.asl_compiler_id,
			   local_header.asl_compiler_revision));
	}
}

/*
 * acpi_tb_verify_checksum - verify that the table checksums to zero.
 * Returns AE_BAD_CHECKSUM only when ACPI_CHECKSUM_ABORT is configured;
 * otherwise a bad checksum produces a warning and AE_OK.
 */
acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
{
	u8 checksum;

	/* Compute the checksum on the table */

	checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);

	/* Checksum ok? (should be zero) */

	if (checksum) {
		ACPI_WARNING((AE_INFO,
			      "Incorrect checksum in table [%4.4s] - 0x%2.2X, should be 0x%2.2X",
			      table->signature, table->checksum,
			      (u8) (table->checksum - checksum)));

#if (ACPI_CHECKSUM_ABORT)

		return (AE_BAD_CHECKSUM);
#endif
	}

	return (AE_OK);
}

/*
 * acpi_tb_checksum - 8-bit wraparound sum of 'length' bytes at 'buffer'.
 */
u8 acpi_tb_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end) {
		sum = (u8) (sum + *(buffer++));
	}

	return sum;
}

/*
 * acpi_tb_check_dsdt_header - quick compare of the saved DSDT header
 * against the live one, to detect a DSDT that was corrupted or replaced
 * from outside the OS. On mismatch it logs both headers, then copies the
 * live values into the saved header so the error is reported only once.
 */
void acpi_tb_check_dsdt_header(void)
{

	/* Compare original length and checksum to current values */

	if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
	    acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
		ACPI_ERROR((AE_INFO,
			    "The DSDT has been corrupted or replaced - old, new headers below"));
		acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header);
		acpi_tb_print_table_header(0, acpi_gbl_DSDT);

		ACPI_ERROR((AE_INFO,
			    "Please send DMI info to linux-acpi@vger.kernel.org\n"
			    "If system does not work as expected, please boot with acpi=copy_dsdt"));

		/* Disable further error messages */

		acpi_gbl_original_dsdt_header.length = acpi_gbl_DSDT->length;
		acpi_gbl_original_dsdt_header.checksum =
		    acpi_gbl_DSDT->checksum;
	}
}

/*
 * acpi_tb_copy_dsdt - copy the DSDT at root-table slot 'table_index' into
 * locally allocated memory and make the table descriptor point at the
 * copy (origin becomes ACPI_TABLE_ORIGIN_ALLOCATED). Works around BIOSes
 * known to corrupt or replace the mapped DSDT at runtime.
 *
 * Returns the new table, or NULL on allocation failure.
 */
struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
{
	struct acpi_table_header *new_table;
	struct acpi_table_desc *table_desc;

	table_desc = &acpi_gbl_root_table_list.tables[table_index];

	new_table = ACPI_ALLOCATE(table_desc->length);
	if (!new_table) {
		ACPI_ERROR((AE_INFO, "Could not copy DSDT of length 0x%X",
			    table_desc->length));
		return (NULL);
	}

	ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length);
	/* Drop the original mapping before repointing the descriptor */
	acpi_tb_delete_table(table_desc);
	table_desc->pointer = new_table;
	table_desc->flags = ACPI_TABLE_ORIGIN_ALLOCATED;

	ACPI_INFO((AE_INFO,
		   "Forced DSDT copy: length 0x%05X copied locally, original unmapped",
		   new_table->length));

	return (new_table);
}

/*
 * acpi_tb_install_table - install one ACPI table into the global root
 * table array at 'table_index'. 'signature' is non-NULL only for tables
 * that must match a known signature (DSDT/FACS). Before installation the
 * host OS is given a chance to override the table via
 * acpi_os_table_override().
 */
void
acpi_tb_install_table(acpi_physical_address address,
		      char *signature, u32 table_index)
{
	u8 flags;
	acpi_status status;
	struct acpi_table_header *table_to_install;
	struct acpi_table_header *mapped_table;
	struct acpi_table_header *override_table = NULL;

	if (!address) {
		ACPI_ERROR((AE_INFO,
			    "Null physical address for ACPI table [%s]",
			    signature));
		return;
	}

	/* Map just the table header */

	mapped_table =
	    acpi_os_map_memory(address, sizeof(struct acpi_table_header));
	if (!mapped_table) {
		return;
	}

	/* If a particular signature is expected (DSDT/FACS), it must match */

	if (signature && !ACPI_COMPARE_NAME(mapped_table->signature, signature)) {
		ACPI_ERROR((AE_INFO,
			    "Invalid signature 0x%X for ACPI table, expected [%s]",
			    *ACPI_CAST_PTR(u32, mapped_table->signature),
			    signature));
		goto unmap_and_exit;
	}

	/*
	 * ACPI Table Override:
	 *
	 * Before we install the table, let the host OS override it with a new
	 * one if desired. Any table within the RSDT/XSDT can be replaced,
	 * including the DSDT which is pointed to by the FADT.
	 */
	status = acpi_os_table_override(mapped_table, &override_table);
	if (ACPI_SUCCESS(status) && override_table) {
		ACPI_INFO((AE_INFO,
			   "%4.4s @ 0x%p Table override, replaced with:",
			   mapped_table->signature, ACPI_CAST_PTR(void,
								  address)));

		acpi_gbl_root_table_list.tables[table_index].pointer =
		    override_table;
		address = ACPI_PTR_TO_PHYSADDR(override_table);

		table_to_install = override_table;
		flags = ACPI_TABLE_ORIGIN_OVERRIDE;
	} else {
		table_to_install = mapped_table;
		flags = ACPI_TABLE_ORIGIN_MAPPED;
	}

	/* Initialize the table entry */

	acpi_gbl_root_table_list.tables[table_index].address = address;
	acpi_gbl_root_table_list.tables[table_index].length =
	    table_to_install->length;
	acpi_gbl_root_table_list.tables[table_index].flags = flags;

	ACPI_MOVE_32_TO_32(&
			   (acpi_gbl_root_table_list.tables[table_index].
			    signature), table_to_install->signature);

	acpi_tb_print_table_header(address, table_to_install);

	if (table_index == ACPI_TABLE_INDEX_DSDT) {

		/* Global integer width is based upon revision of the DSDT */

		acpi_ut_set_integer_width(table_to_install->revision);
	}

      unmap_and_exit:
	/* Only the header mapping remains; unmap it on every path */
	acpi_os_unmap_memory(mapped_table, sizeof(struct acpi_table_header));
}

/*
 * acpi_tb_get_root_table_entry - extract one physical address from an
 * RSDT (32-bit entries) or XSDT (64-bit entries) entry at 'table_entry'.
 * Handles all four 32/64-bit platform/table combinations, warning when a
 * 64-bit XSDT address must be truncated on a 32-bit platform.
 *
 * NOTE: acpi_physical_address is 32-bit on 32-bit platforms, 64-bit on
 * 64-bit platforms.
 */
static acpi_physical_address
acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
{
	u64 address64;

	/*
	 * Get the table physical address (32-bit for RSDT, 64-bit for XSDT):
	 * Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT
	 */
	if (table_entry_size == sizeof(u32)) {
		/*
		 * 32-bit platform, RSDT: Return 32-bit table entry
		 * 64-bit platform, RSDT: Expand 32-bit to 64-bit and return
		 */
		return ((acpi_physical_address)
			(*ACPI_CAST_PTR(u32, table_entry)));
	} else {
		/*
		 * 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return
		 * 64-bit platform, XSDT: Move (unaligned) 64-bit to local,
		 *  return 64-bit
		 */
		ACPI_MOVE_64_TO_64(&address64, table_entry);

#if ACPI_MACHINE_WIDTH == 32
		if (address64 > ACPI_UINT32_MAX) {

			/* Will truncate 64-bit address to 32 bits, issue warning */

			ACPI_WARNING((AE_INFO,
				      "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
				      " truncating",
				      ACPI_FORMAT_UINT64(address64)));
		}
#endif
		return ((acpi_physical_address) (address64));
	}
}

/*
 * acpi_tb_parse_root_table - parse the Root System Description Table
 * (RSDT or XSDT) found via the RSDP at 'rsdp_address' and populate the
 * global root table array.
 *
 * NOTE: Tables are mapped (not copied) for efficiency. The FACS must
 * be mapped and cannot be copied because it contains the actual
 * memory location of the ACPI Global Lock.
 *
 * Mappings are kept one-at-a-time throughout (some environments cannot
 * map more than one region simultaneously).
 */
acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
{
	struct acpi_table_rsdp *rsdp;
	u32 table_entry_size;
	u32 i;
	u32 table_count;
	struct acpi_table_header *table;
	acpi_physical_address address;
	acpi_physical_address uninitialized_var(rsdt_address);
	u32 length;
	u8 *table_entry;
	acpi_status status;

	ACPI_FUNCTION_TRACE(tb_parse_root_table);

	/*
	 * Map the entire RSDP and extract the address of the RSDT or XSDT
	 */
	rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp));
	if (!rsdp) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	acpi_tb_print_table_header(rsdp_address,
				   ACPI_CAST_PTR(struct acpi_table_header,
						 rsdp));

	/* Differentiate between RSDT and XSDT root tables */

	if (rsdp->revision > 1 && rsdp->xsdt_physical_address
	    && !acpi_rsdt_forced) {
		/*
		 * Root table is an XSDT (64-bit physical addresses). We must use the
		 * XSDT if the revision is > 1 and the XSDT pointer is present, as per
		 * the ACPI specification.
		 */
		address = (acpi_physical_address) rsdp->xsdt_physical_address;
		table_entry_size = sizeof(u64);
		/* Remember the RSDT as a fallback if the XSDT is broken */
		rsdt_address = (acpi_physical_address)
		    rsdp->rsdt_physical_address;
	} else {
		/* Root table is an RSDT (32-bit physical addresses) */

		address = (acpi_physical_address) rsdp->rsdt_physical_address;
		table_entry_size = sizeof(u32);
	}

	/*
	 * It is not possible to map more than one entry in some environments,
	 * so unmap the RSDP here before mapping other tables
	 */
	acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp));

	if (table_entry_size == sizeof(u64)) {
		if (acpi_tb_check_xsdt(address) == AE_NULL_ENTRY) {
			/* XSDT has NULL entry, RSDT is used */
			address = rsdt_address;
			table_entry_size = sizeof(u32);
			ACPI_WARNING((AE_INFO, "BIOS XSDT has NULL entry, "
				      "using RSDT"));
		}
	}

	/* Map the RSDT/XSDT table header to get the full table length */

	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
	if (!table) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	acpi_tb_print_table_header(address, table);

	/* Get the length of the full table, verify length and map entire table */

	length = table->length;
	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));

	if (length < sizeof(struct acpi_table_header)) {
		ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT",
			    length));
		return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
	}

	table = acpi_os_map_memory(address, length);
	if (!table) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Validate the root table checksum */

	status = acpi_tb_verify_checksum(table, length);
	if (ACPI_FAILURE(status)) {
		acpi_os_unmap_memory(table, length);
		return_ACPI_STATUS(status);
	}

	/* Calculate the number of tables described in the root table */

	table_count = (u32)((table->length - sizeof(struct acpi_table_header)) /
			    table_entry_size);
	/*
	 * First two entries in the table array are reserved for the DSDT
	 * and FACS, which are not actually present in the RSDT/XSDT - they
	 * come from the FADT
	 */
	table_entry =
	    ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
	acpi_gbl_root_table_list.current_table_count = 2;

	/*
	 * Initialize the root table array from the RSDT/XSDT
	 */
	for (i = 0; i < table_count; i++) {
		if (acpi_gbl_root_table_list.current_table_count >=
		    acpi_gbl_root_table_list.max_table_count) {

			/* There is no more room in the root table array, attempt resize */

			status = acpi_tb_resize_root_table_list();
			if (ACPI_FAILURE(status)) {
				ACPI_WARNING((AE_INFO,
					      "Truncating %u table entries!",
					      (unsigned) (table_count -
							  (acpi_gbl_root_table_list.
							   current_table_count
							   - 2))));
				break;
			}
		}

		/* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */

		acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
						current_table_count].address =
		    acpi_tb_get_root_table_entry(table_entry, table_entry_size);

		table_entry += table_entry_size;
		acpi_gbl_root_table_list.current_table_count++;
	}

	/*
	 * It is not possible to map more than one entry in some environments,
	 * so unmap the root table here before mapping other tables
	 */
	acpi_os_unmap_memory(table, length);

	/*
	 * Complete the initialization of the root table array by examining
	 * the header of each table
	 */
	for (i = 2; i < acpi_gbl_root_table_list.current_table_count; i++) {
		acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
				      address, NULL, i);

		/* Special case for FADT - get the DSDT and FACS */

		if (ACPI_COMPARE_NAME
		    (&acpi_gbl_root_table_list.tables[i].signature,
		     ACPI_SIG_FADT)) {
			acpi_tb_parse_fadt(i);
		}
	}

	return_ACPI_STATUS(AE_OK);
}
gpl-2.0
markolino631/lge_kernel_msm7x27
drivers/usb/gadget/lh7a40x_udc.c
905
50769
/* * linux/drivers/usb/gadget/lh7a40x_udc.c * Sharp LH7A40x on-chip full speed USB device controllers * * Copyright (C) 2004 Mikko Lahteenmaki, Nordic ID * Copyright (C) 2004 Bo Henriksen, Nordic ID * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/platform_device.h> #include <linux/slab.h> #include "lh7a40x_udc.h" //#define DEBUG printk //#define DEBUG_EP0 printk //#define DEBUG_SETUP printk #ifndef DEBUG_EP0 # define DEBUG_EP0(fmt,args...) #endif #ifndef DEBUG_SETUP # define DEBUG_SETUP(fmt,args...) #endif #ifndef DEBUG # define NO_STATES # define DEBUG(fmt,args...) #endif #define DRIVER_DESC "LH7A40x USB Device Controller" #define DRIVER_VERSION __DATE__ #ifndef _BIT /* FIXME - what happended to _BIT in 2.6.7bk18? */ #define _BIT(x) (1<<(x)) #endif struct lh7a40x_udc *the_controller; static const char driver_name[] = "lh7a40x_udc"; static const char driver_desc[] = DRIVER_DESC; static const char ep0name[] = "ep0-control"; /* Local definintions. */ #ifndef NO_STATES static char *state_names[] = { "WAIT_FOR_SETUP", "DATA_STATE_XMIT", "DATA_STATE_NEED_ZLP", "WAIT_FOR_OUT_STATUS", "DATA_STATE_RECV" }; #endif /* Local declarations. 
*/
static int lh7a40x_ep_enable(struct usb_ep *ep,
			     const struct usb_endpoint_descriptor *);
static int lh7a40x_ep_disable(struct usb_ep *ep);
static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, gfp_t);
static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *);
static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, gfp_t);
static int lh7a40x_dequeue(struct usb_ep *ep, struct usb_request *);
static int lh7a40x_set_halt(struct usb_ep *ep, int);
static int lh7a40x_fifo_status(struct usb_ep *ep);
static void lh7a40x_fifo_flush(struct usb_ep *ep);
static void lh7a40x_ep0_kick(struct lh7a40x_udc *dev, struct lh7a40x_ep *ep);
static void lh7a40x_handle_ep0(struct lh7a40x_udc *dev, u32 intr);

static void done(struct lh7a40x_ep *ep, struct lh7a40x_request *req,
		 int status);
static void pio_irq_enable(int bEndpointAddress);
static void pio_irq_disable(int bEndpointAddress);
static void stop_activity(struct lh7a40x_udc *dev,
			  struct usb_gadget_driver *driver);
static void flush(struct lh7a40x_ep *ep);
static void udc_enable(struct lh7a40x_udc *dev);
static void udc_set_address(struct lh7a40x_udc *dev, unsigned char address);

/* Per-endpoint operations exposed to the gadget layer via ep->ops. */
static struct usb_ep_ops lh7a40x_ep_ops = {
	.enable = lh7a40x_ep_enable,
	.disable = lh7a40x_ep_disable,

	.alloc_request = lh7a40x_alloc_request,
	.free_request = lh7a40x_free_request,

	.queue = lh7a40x_queue,
	.dequeue = lh7a40x_dequeue,

	.set_halt = lh7a40x_set_halt,
	.fifo_status = lh7a40x_fifo_status,
	.fifo_flush = lh7a40x_fifo_flush,
};

/* Inline code */

/*
 * write_packet - copy up to @max bytes of @req's remaining payload into
 * the endpoint FIFO, one byte per 32-bit FIFO write (the data register
 * takes a byte in its low bits).  Advances req->req.actual and returns
 * the number of bytes written.  Caller must have set the INDEX register.
 */
static __inline__ int write_packet(struct lh7a40x_ep *ep,
				   struct lh7a40x_request *req, int max)
{
	u8 *buf;
	int length, count;
	volatile u32 *fifo = (volatile u32 *)ep->fifo;

	buf = req->req.buf + req->req.actual;
	prefetch(buf);

	length = req->req.length - req->req.actual;
	length = min(length, max);
	req->req.actual += length;

	DEBUG("Write %d (max %d), fifo %p\n", length, max, fifo);

	count = length;
	while (count--) {
		*fifo = *buf++;
	}

	return length;
}

/* Select which endpoint the indexed CSR/FIFO registers refer to. */
static __inline__ void usb_set_index(u32 ep)
{
	*(volatile u32 *)io_p2v(USB_INDEX) = ep;
}

/* Raw 32-bit MMIO read of a USB controller register. */
static __inline__ u32 usb_read(u32 port)
{
	return *(volatile u32 *)io_p2v(port);
}

/* Raw 32-bit MMIO write of a USB controller register. */
static __inline__ void usb_write(u32 val, u32 port)
{
	*(volatile u32 *)io_p2v(port) = val;
}

/* Read-modify-write: set @val bits in register @port.
 * NOTE(review): not atomic versus other contexts touching the same
 * register — callers appear to rely on the driver's spinlock/irq-off
 * sections; confirm before reuse. */
static __inline__ void usb_set(u32 val, u32 port)
{
	volatile u32 *ioport = (volatile u32 *)io_p2v(port);
	u32 after = (*ioport) | val;
	*ioport = after;
}

/* Read-modify-write: clear @val bits in register @port. */
static __inline__ void usb_clear(u32 val, u32 port)
{
	volatile u32 *ioport = (volatile u32 *)io_p2v(port);
	u32 after = (*ioport) & ~val;
	*ioport = after;
}

/*-------------------------------------------------------------------------*/

#define GPIO_PORTC_DR 	(0x80000E08)
#define GPIO_PORTC_DDR 	(0x80000E18)
#define GPIO_PORTC_PDR 	(0x80000E70)

/* get port C pin data register */
#define get_portc_pdr(bit) 		((usb_read(GPIO_PORTC_PDR) & _BIT(bit)) != 0)
/* get port C data direction register */
#define get_portc_ddr(bit) 		((usb_read(GPIO_PORTC_DDR) & _BIT(bit)) != 0)
/* set port C data register */
#define set_portc_dr(bit, val) 	(val ? usb_set(_BIT(bit), GPIO_PORTC_DR) : usb_clear(_BIT(bit), GPIO_PORTC_DR))
/* set port C data direction register */
#define set_portc_ddr(bit, val) 	(val ? usb_set(_BIT(bit), GPIO_PORTC_DDR) : usb_clear(_BIT(bit), GPIO_PORTC_DDR))

/*
 * LPD7A404 GPIO's:
 * Port C bit 1 = USB Port 1 Power Enable
 * Port C bit 2 = USB Port 1 Data Carrier Detect
 */
#define is_usb_connected() 		get_portc_pdr(2)

#ifdef CONFIG_USB_GADGET_DEBUG_FILES

static const char proc_node_name[] = "driver/udc";

/*
 * udc_proc_read - /proc read callback: dump driver, GPIO and DCP-pullup
 * status into @page.  Only whole-buffer reads are supported (off != 0
 * returns 0); runs with local IRQs disabled while sampling registers.
 */
static int udc_proc_read(char *page, char **start, off_t off, int count,
			 int *eof, void *_dev)
{
	char *buf = page;
	struct lh7a40x_udc *dev = _dev;
	char *next = buf;
	unsigned size = count;
	unsigned long flags;
	int t;

	if (off != 0)
		return 0;

	local_irq_save(flags);

	/* basic device status */
	t = scnprintf(next, size,
		      DRIVER_DESC "\n"
		      "%s version: %s\n"
		      "Gadget driver: %s\n"
		      "Host: %s\n\n",
		      driver_name, DRIVER_VERSION,
		      dev->driver ? dev->driver->driver.name : "(none)",
		      is_usb_connected()? "full speed" : "disconnected");
	size -= t;
	next += t;

	t = scnprintf(next, size,
		      "GPIO:\n"
		      " Port C bit 1: %d, dir %d\n"
		      " Port C bit 2: %d, dir %d\n\n",
		      get_portc_pdr(1), get_portc_ddr(1),
		      get_portc_pdr(2), get_portc_ddr(2)
	    );
	size -= t;
	next += t;

	t = scnprintf(next, size, "DCP pullup: %d\n\n",
		      (usb_read(USB_PM) & PM_USB_DCP) != 0);
	size -= t;
	next += t;

	local_irq_restore(flags);
	*eof = 1;
	return count - size;
}

#define create_proc_files() 	create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
#define remove_proc_files() 	remove_proc_entry(proc_node_name, NULL)

#else	/* !CONFIG_USB_GADGET_DEBUG_FILES */

#define create_proc_files() do {} while (0)
#define remove_proc_files() do {} while (0)

#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */

/*
 * 	udc_disable - disable USB device controller
 *
 * Masks all interrupt sources, disables the controller via USB_PM,
 * drops board USB power (LH7A404) and resets the software state to
 * "no address / unknown speed / waiting for SETUP".
 */
static void udc_disable(struct lh7a40x_udc *dev)
{
	DEBUG("%s, %p\n", __func__, dev);

	udc_set_address(dev, 0);

	/* Disable interrupts */
	usb_write(0, USB_IN_INT_EN);
	usb_write(0, USB_OUT_INT_EN);
	usb_write(0, USB_INT_EN);

	/* Disable the USB */
	usb_write(0, USB_PM);

#ifdef CONFIG_ARCH_LH7A404
	/* Disable USB power */
	set_portc_dr(1, 0);
#endif

	/* if hardware supports it, disconnect from usb */
	/* make_usb_disappear(); */

	dev->ep0state = WAIT_FOR_SETUP;
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->usb_address = 0;
}

/*
 * 	udc_reinit - initialize software state
 *
 * Rebuilds the gadget's endpoint lists and clears per-endpoint request
 * queues; does not touch the hardware.
 */
static void udc_reinit(struct lh7a40x_udc *dev)
{
	u32 i;

	DEBUG("%s, %p\n", __func__, dev);

	/* device/ep0 records init */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
	dev->ep0state = WAIT_FOR_SETUP;

	/* basic endpoint records init */
	for (i = 0; i < UDC_MAX_ENDPOINTS; i++) {
		struct lh7a40x_ep *ep = &dev->ep[i];

		/* ep0 is the gadget's ep0, not on the general ep_list */
		if (i != 0)
			list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->desc = 0;
		ep->stopped = 0;
		INIT_LIST_HEAD(&ep->queue);
		ep->pio_irqs = 0;
	}

	/* the rest was statically initialized, and is read-only */
}

/* MAXP registers hold the packet size in units of 8 bytes */
#define BYTES2MAXP(x)	(x / 8)
#define MAXP2BYTES(x)	(x * 8)

/* until it's
enabled, this UDC should be completely invisible * to any USB host. */ static void udc_enable(struct lh7a40x_udc *dev) { int ep; DEBUG("%s, %p\n", __func__, dev); dev->gadget.speed = USB_SPEED_UNKNOWN; #ifdef CONFIG_ARCH_LH7A404 /* Set Port C bit 1 & 2 as output */ set_portc_ddr(1, 1); set_portc_ddr(2, 1); /* Enable USB power */ set_portc_dr(1, 0); #endif /* * C.f Chapter 18.1.3.1 Initializing the USB */ /* Disable the USB */ usb_clear(PM_USB_ENABLE, USB_PM); /* Reset APB & I/O sides of the USB */ usb_set(USB_RESET_APB | USB_RESET_IO, USB_RESET); mdelay(5); usb_clear(USB_RESET_APB | USB_RESET_IO, USB_RESET); /* Set MAXP values for each */ for (ep = 0; ep < UDC_MAX_ENDPOINTS; ep++) { struct lh7a40x_ep *ep_reg = &dev->ep[ep]; u32 csr; usb_set_index(ep); switch (ep_reg->ep_type) { case ep_bulk_in: case ep_interrupt: usb_clear(USB_IN_CSR2_USB_DMA_EN | USB_IN_CSR2_AUTO_SET, ep_reg->csr2); /* Fall through */ case ep_control: usb_write(BYTES2MAXP(ep_maxpacket(ep_reg)), USB_IN_MAXP); break; case ep_bulk_out: usb_clear(USB_OUT_CSR2_USB_DMA_EN | USB_OUT_CSR2_AUTO_CLR, ep_reg->csr2); usb_write(BYTES2MAXP(ep_maxpacket(ep_reg)), USB_OUT_MAXP); break; } /* Read & Write CSR1, just in case */ csr = usb_read(ep_reg->csr1); usb_write(csr, ep_reg->csr1); flush(ep_reg); } /* Disable interrupts */ usb_write(0, USB_IN_INT_EN); usb_write(0, USB_OUT_INT_EN); usb_write(0, USB_INT_EN); /* Enable interrupts */ usb_set(USB_IN_INT_EP0, USB_IN_INT_EN); usb_set(USB_INT_RESET_INT | USB_INT_RESUME_INT, USB_INT_EN); /* Dont enable rest of the interrupts */ /* usb_set(USB_IN_INT_EP3 | USB_IN_INT_EP1 | USB_IN_INT_EP0, USB_IN_INT_EN); usb_set(USB_OUT_INT_EP2, USB_OUT_INT_EN); */ /* Enable SUSPEND */ usb_set(PM_ENABLE_SUSPEND, USB_PM); /* Enable the USB */ usb_set(PM_USB_ENABLE, USB_PM); #ifdef CONFIG_ARCH_LH7A404 /* NOTE: DOES NOT WORK! */ /* Let host detect UDC: * Software must write a 0 to the PMR:DCP_CTRL bit to turn this * transistor on and pull the USBDP pin HIGH. 
*/ /* usb_clear(PM_USB_DCP, USB_PM); usb_set(PM_USB_DCP, USB_PM); */ #endif } /* Register entry point for the peripheral controller driver. */ int usb_gadget_register_driver(struct usb_gadget_driver *driver) { struct lh7a40x_udc *dev = the_controller; int retval; DEBUG("%s: %s\n", __func__, driver->driver.name); if (!driver || driver->speed != USB_SPEED_FULL || !driver->bind || !driver->disconnect || !driver->setup) return -EINVAL; if (!dev) return -ENODEV; if (dev->driver) return -EBUSY; /* first hook up the driver ... */ dev->driver = driver; dev->gadget.dev.driver = &driver->driver; device_add(&dev->gadget.dev); retval = driver->bind(&dev->gadget); if (retval) { printk(KERN_WARNING "%s: bind to driver %s --> error %d\n", dev->gadget.name, driver->driver.name, retval); device_del(&dev->gadget.dev); dev->driver = 0; dev->gadget.dev.driver = 0; return retval; } /* ... then enable host detection and ep0; and we're ready * for set_configuration as well as eventual disconnect. * NOTE: this shouldn't power up until later. */ printk(KERN_WARNING "%s: registered gadget driver '%s'\n", dev->gadget.name, driver->driver.name); udc_enable(dev); return 0; } EXPORT_SYMBOL(usb_gadget_register_driver); /* Unregister entry point for the peripheral controller driver. 
*/
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct lh7a40x_udc *dev = the_controller;
	unsigned long flags;

	if (!dev)
		return -ENODEV;
	if (!driver || driver != dev->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	/* clear dev->driver first so no new requests are accepted while
	 * outstanding ones are being killed */
	dev->driver = 0;
	stop_activity(dev, driver);
	spin_unlock_irqrestore(&dev->lock, flags);

	driver->unbind(&dev->gadget);
	dev->gadget.dev.driver = NULL;
	device_del(&dev->gadget.dev);

	udc_disable(dev);

	DEBUG("unregistered gadget driver '%s'\n", driver->driver.name);
	return 0;
}

EXPORT_SYMBOL(usb_gadget_unregister_driver);

/*-------------------------------------------------------------------------*/

/** Write request to FIFO (max write == maxp size)
 *  Return:  0 = still running, 1 = completed, negative = errno
 *  NOTE: INDEX register must be set for EP
 *
 *  Writes at most one packet; on the final packet the request is
 *  completed via done() and the endpoint's PIO IRQ is disabled when
 *  the queue drains.
 */
static int write_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
{
	u32 max;
	u32 csr;

	max = le16_to_cpu(ep->desc->wMaxPacketSize);

	csr = usb_read(ep->csr1);
	DEBUG("CSR: %x %d\n", csr, csr & USB_IN_CSR1_FIFO_NOT_EMPTY);

	if (!(csr & USB_IN_CSR1_FIFO_NOT_EMPTY)) {
		unsigned count;
		int is_last, is_short;

		count = write_packet(ep, req, max);
		usb_set(USB_IN_CSR1_IN_PKT_RDY, ep->csr1);

		/* last packet is usually short (or a zlp) */
		if (unlikely(count != max))
			is_last = is_short = 1;
		else {
			if (likely(req->req.length != req->req.actual)
			    || req->req.zero)
				is_last = 0;
			else
				is_last = 1;
			/* interrupt/iso maxpacket may not fill the fifo */
			is_short = unlikely(max < ep_maxpacket(ep));
		}

		DEBUG("%s: wrote %s %d bytes%s%s %d left %p\n", __func__,
		      ep->ep.name, count,
		      is_last ? "/L" : "", is_short ? "/S" : "",
		      req->req.length - req->req.actual, req);

		/* requests complete when all IN data is in the FIFO */
		if (is_last) {
			done(ep, req, 0);
			if (list_empty(&ep->queue)) {
				pio_irq_disable(ep_index(ep));
			}
			return 1;
		}
	} else {
		DEBUG("Hmm.. %d ep FIFO is not empty!\n", ep_index(ep));
	}

	return 0;
}

/** Read to request from FIFO (max read == bytes in fifo)
 *  Return:  0 = still running, 1 = completed, negative = errno
 *  NOTE: INDEX register must be set for EP
 *
 *  Drains exactly the packet the hardware reports via USB_OUT_FIFO_WC1;
 *  bytes beyond the request buffer are discarded with -EOVERFLOW.
 */
static int read_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
{
	u32 csr;
	u8 *buf;
	unsigned bufferspace, count, is_short;
	volatile u32 *fifo = (volatile u32 *)ep->fifo;

	/* make sure there's a packet in the FIFO. */
	csr = usb_read(ep->csr1);
	if (!(csr & USB_OUT_CSR1_OUT_PKT_RDY)) {
		DEBUG("%s: Packet NOT ready!\n", __func__);
		return -EINVAL;
	}

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);
	bufferspace = req->req.length - req->req.actual;

	/* read all bytes from this packet */
	count = usb_read(USB_OUT_FIFO_WC1);
	req->req.actual += min(count, bufferspace);

	is_short = (count < ep->ep.maxpacket);
	DEBUG("read %s %02x, %d bytes%s req %p %d/%d\n",
	      ep->ep.name, csr, count,
	      is_short ? "/S" : "", req, req->req.actual, req->req.length);

	while (likely(count-- != 0)) {
		u8 byte = (u8) (*fifo & 0xff);

		if (unlikely(bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				printk(KERN_WARNING "%s overflow %d\n",
				       ep->ep.name, count);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			bufferspace--;
		}
	}

	usb_clear(USB_OUT_CSR1_OUT_PKT_RDY, ep->csr1);

	/* completion */
	if (is_short || req->req.actual == req->req.length) {
		done(ep, req, 0);
		usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1);

		if (list_empty(&ep->queue))
			pio_irq_disable(ep_index(ep));
		return 1;
	}

	/* finished that packet.  the next one may be waiting... */
	return 0;
}

/*
 *	done - retire a request; caller blocked irqs
 *  INDEX register is preserved to keep same
 *
 *  Removes @req from its queue, finalizes its status and invokes the
 *  completion callback with the device lock dropped.  The INDEX
 *  register is saved/restored because the callback may queue another
 *  request and re-point the indexed registers at a different endpoint.
 */
static void done(struct lh7a40x_ep *ep, struct lh7a40x_request *req, int status)
{
	unsigned int stopped = ep->stopped;
	u32 index;

	DEBUG("%s, %p\n", __func__, ep);
	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	if (status && status != -ESHUTDOWN)
		DEBUG("complete %s req %p stat %d len %u/%u\n",
		      ep->ep.name, &req->req, status,
		      req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	/* Read current index (completion may modify it) */
	index = usb_read(USB_INDEX);

	spin_unlock(&ep->dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&ep->dev->lock);

	/* Restore index */
	usb_set_index(index);
	ep->stopped = stopped;
}

/** Enable EP interrupt */
static void pio_irq_enable(int ep)
{
	DEBUG("%s: %d\n", __func__, ep);

	switch (ep) {
	case 1:
		usb_set(USB_IN_INT_EP1, USB_IN_INT_EN);
		break;
	case 2:
		usb_set(USB_OUT_INT_EP2, USB_OUT_INT_EN);
		break;
	case 3:
		usb_set(USB_IN_INT_EP3, USB_IN_INT_EN);
		break;
	default:
		DEBUG("Unknown endpoint: %d\n", ep);
		break;
	}
}

/** Disable EP interrupt */
static void pio_irq_disable(int ep)
{
	DEBUG("%s: %d\n", __func__, ep);

	switch (ep) {
	case 1:
		usb_clear(USB_IN_INT_EP1, USB_IN_INT_EN);
		break;
	case 2:
		usb_clear(USB_OUT_INT_EP2, USB_OUT_INT_EN);
		break;
	case 3:
		usb_clear(USB_IN_INT_EP3, USB_IN_INT_EN);
		break;
	default:
		DEBUG("Unknown endpoint: %d\n", ep);
		break;
	}
}

/*
 * 	nuke - dequeue ALL requests
 *
 *  Flushes the endpoint FIFO and retires every queued request with
 *  @status.  Called with IRQs blocked and INDEX set to the endpoint.
 */
void nuke(struct lh7a40x_ep *ep, int status)
{
	struct lh7a40x_request *req;

	DEBUG("%s, %p\n", __func__, ep);

	/* Flush FIFO */
	flush(ep);

	/* called with irqs blocked */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
		done(ep, req, status);
	}

	/* Disable IRQ if EP is enabled (has descriptor) */
	if (ep->desc)
		pio_irq_disable(ep_index(ep));
}

/* void
nuke_all(struct lh7a40x_udc *dev)
{
	int n;
	for(n=0; n<UDC_MAX_ENDPOINTS; n++) {
		struct lh7a40x_ep *ep = &dev->ep[n];
		usb_set_index(n);
		nuke(ep, 0);
	}
}*/

/*
static void flush_all(struct lh7a40x_udc *dev)
{
	int n;
	for (n = 0; n < UDC_MAX_ENDPOINTS; n++)
	{
		struct lh7a40x_ep *ep = &dev->ep[n];
		flush(ep);
	}
}
*/

/** Flush EP FIFO
 * NOTE: INDEX register must be set before this call
 * (ep0 has no explicit flush bit; see 15.1.2.11)
 */
static void flush(struct lh7a40x_ep *ep)
{
	DEBUG("%s, %p\n", __func__, ep);

	switch (ep->ep_type) {
	case ep_control:
		/* check, by implication c.f. 15.1.2.11 */
		break;

	case ep_bulk_in:
	case ep_interrupt:
		/* if(csr & USB_IN_CSR1_IN_PKT_RDY) */
		usb_set(USB_IN_CSR1_FIFO_FLUSH, ep->csr1);
		break;

	case ep_bulk_out:
		/* if(csr & USB_OUT_CSR1_OUT_PKT_RDY) */
		usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1);
		break;
	}
}

/**
 * lh7a40x_in_epn - handle IN interrupt
 *
 * Clears a sent-stall condition if present, otherwise feeds the next
 * packet of the head request into the FIFO via write_fifo().
 */
static void lh7a40x_in_epn(struct lh7a40x_udc *dev, u32 ep_idx, u32 intr)
{
	u32 csr;
	struct lh7a40x_ep *ep = &dev->ep[ep_idx];
	struct lh7a40x_request *req;

	usb_set_index(ep_idx);

	csr = usb_read(ep->csr1);
	DEBUG("%s: %d, csr %x\n", __func__, ep_idx, csr);

	if (csr & USB_IN_CSR1_SENT_STALL) {
		DEBUG("USB_IN_CSR1_SENT_STALL\n");
		usb_set(USB_IN_CSR1_SENT_STALL /*|USB_IN_CSR1_SEND_STALL */ ,
			ep->csr1);
		return;
	}

	if (!ep->desc) {
		DEBUG("%s: NO EP DESC\n", __func__);
		return;
	}

	if (list_empty(&ep->queue))
		req = 0;
	else
		req = list_entry(ep->queue.next, struct lh7a40x_request, queue);

	DEBUG("req: %p\n", req);

	if (!req)
		return;

	write_fifo(ep, req);
}

/* ********************************************************************************************* */
/* Bulk OUT (recv)
 *
 * Loops while the hardware reports OUT_PKT_RDY or SENT_STALL:
 * stalls are flushed, ready packets are drained into the head request.
 * With no descriptor (endpoint disabled) the packet is thrown away.
 */

static void lh7a40x_out_epn(struct lh7a40x_udc *dev, u32 ep_idx, u32 intr)
{
	struct lh7a40x_ep *ep = &dev->ep[ep_idx];
	struct lh7a40x_request *req;

	DEBUG("%s: %d\n", __func__, ep_idx);

	usb_set_index(ep_idx);

	if (ep->desc) {
		u32 csr;
		csr = usb_read(ep->csr1);

		while ((csr =
			usb_read(ep->
				 csr1)) & (USB_OUT_CSR1_OUT_PKT_RDY |
					   USB_OUT_CSR1_SENT_STALL)) {
			DEBUG("%s: %x\n", __func__, csr);

			if (csr & USB_OUT_CSR1_SENT_STALL) {
				DEBUG("%s: stall sent, flush fifo\n",
				      __func__);
				/* usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1); */
				flush(ep);
			} else if (csr & USB_OUT_CSR1_OUT_PKT_RDY) {
				if (list_empty(&ep->queue))
					req = 0;
				else
					req =
					    list_entry(ep->queue.next,
						       struct lh7a40x_request,
						       queue);

				if (!req) {
					printk(KERN_WARNING
					       "%s: NULL REQ %d\n",
					       __func__, ep_idx);
					flush(ep);
					break;
				} else {
					read_fifo(ep, req);
				}
			}

		}
	} else {
		/* Throw packet away.. */
		printk(KERN_WARNING "%s: No descriptor?!?\n", __func__);
		flush(ep);
	}
}

/*
 * stop_activity - shut down every endpoint and notify the gadget driver.
 * Called with the device lock held; drops it around disconnect().
 */
static void stop_activity(struct lh7a40x_udc *dev,
			  struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect drivers more than once */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = 0;
	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* prevent new request submissions, kill any outstanding requests  */
	for (i = 0; i < UDC_MAX_ENDPOINTS; i++) {
		struct lh7a40x_ep *ep = &dev->ep[i];
		ep->stopped = 1;

		usb_set_index(i);
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	/* re-init driver-visible data structures */
	udc_reinit(dev);
}

/** Handle USB RESET interrupt
 * The heavy re-enable sequence is compiled out (see #if 0); the handler
 * currently only records that the bus is running at full speed.
 */
static void lh7a40x_reset_intr(struct lh7a40x_udc *dev)
{
#if 0				/* def CONFIG_ARCH_LH7A404 */
	/* Does not work always... */

	DEBUG("%s: %d\n", __func__, dev->usb_address);

	if (!dev->usb_address) {
		/*usb_set(USB_RESET_IO, USB_RESET);
		   mdelay(5);
		   usb_clear(USB_RESET_IO, USB_RESET);
		 */
		return;
	}
	/* Put the USB controller into reset. */
	usb_set(USB_RESET_IO, USB_RESET);

	/* Set Device ID to 0 */
	udc_set_address(dev, 0);

	/* Let PLL2 settle down */
	mdelay(5);

	/* Release the USB controller from reset */
	usb_clear(USB_RESET_IO, USB_RESET);

	/* Re-enable UDC */
	udc_enable(dev);

#endif
	dev->gadget.speed = USB_SPEED_FULL;
}

/*
 *	lh7a40x usb client interrupt handler.
*/
static irqreturn_t lh7a40x_udc_irq(int irq, void *_dev)
{
	struct lh7a40x_udc *dev = _dev;

	DEBUG("\n\n");

	spin_lock(&dev->lock);

	for (;;) {
		u32 intr_in = usb_read(USB_IN_INT);
		u32 intr_out = usb_read(USB_OUT_INT);
		u32 intr_int = usb_read(USB_INT);

		/* Test also against enable bits.. (lh7a40x errata).. Sigh.. */
		u32 in_en = usb_read(USB_IN_INT_EN);
		u32 out_en = usb_read(USB_OUT_INT_EN);

		if (!intr_out && !intr_in && !intr_int)
			break;

		DEBUG("%s (on state %s)\n", __func__,
		      state_names[dev->ep0state]);
		DEBUG("intr_out = %x\n", intr_out);
		DEBUG("intr_in  = %x\n", intr_in);
		DEBUG("intr_int = %x\n", intr_int);

		if (intr_in) {
			/* ack first, then service */
			usb_write(intr_in, USB_IN_INT);

			if ((intr_in & USB_IN_INT_EP1)
			    && (in_en & USB_IN_INT_EP1)) {
				DEBUG("USB_IN_INT_EP1\n");
				lh7a40x_in_epn(dev, 1, intr_in);
			}
			if ((intr_in & USB_IN_INT_EP3)
			    && (in_en & USB_IN_INT_EP3)) {
				DEBUG("USB_IN_INT_EP3\n");
				lh7a40x_in_epn(dev, 3, intr_in);
			}
			if (intr_in & USB_IN_INT_EP0) {
				DEBUG("USB_IN_INT_EP0 (control)\n");
				lh7a40x_handle_ep0(dev, intr_in);
			}
		}

		if (intr_out) {
			usb_write(intr_out, USB_OUT_INT);

			if ((intr_out & USB_OUT_INT_EP2)
			    && (out_en & USB_OUT_INT_EP2)) {
				DEBUG("USB_OUT_INT_EP2\n");
				lh7a40x_out_epn(dev, 2, intr_out);
			}
		}

		if (intr_int) {
			usb_write(intr_int, USB_INT);

			if (intr_int & USB_INT_RESET_INT) {
				lh7a40x_reset_intr(dev);
			}

			if (intr_int & USB_INT_RESUME_INT) {
				DEBUG("USB resume\n");

				if (dev->gadget.speed != USB_SPEED_UNKNOWN
				    && dev->driver
				    && dev->driver->resume
				    && is_usb_connected()) {
					dev->driver->resume(&dev->gadget);
				}
			}

			if (intr_int & USB_INT_SUSPEND_INT) {
				DEBUG("USB suspend%s\n",
				      is_usb_connected()? "" : "+disconnect");
				/* suspend with cable gone == disconnect */
				if (!is_usb_connected()) {
					stop_activity(dev, dev->driver);
				} else if (dev->gadget.speed !=
					   USB_SPEED_UNKNOWN && dev->driver
					   && dev->driver->suspend) {
					dev->driver->suspend(&dev->gadget);
				}
			}

		}
	}

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}

/*
 * lh7a40x_ep_enable - activate an endpoint per @desc.
 * Validates address/type/maxpacket against the static endpoint table,
 * then clears the halt state (which also flushes the FIFO).
 */
static int lh7a40x_ep_enable(struct usb_ep *_ep,
			     const struct usb_endpoint_descriptor *desc)
{
	struct lh7a40x_ep *ep;
	struct lh7a40x_udc *dev;
	unsigned long flags;

	DEBUG("%s, %p\n", __func__, _ep);

	ep = container_of(_ep, struct lh7a40x_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
	    || desc->bDescriptorType != USB_DT_ENDPOINT
	    || ep->bEndpointAddress != desc->bEndpointAddress
	    || ep_maxpacket(ep) < le16_to_cpu(desc->wMaxPacketSize)) {
		DEBUG("%s, bad ep or descriptor\n", __func__);
		return -EINVAL;
	}

	/* xfer types must match, except that interrupt ~= bulk */
	if (ep->bmAttributes != desc->bmAttributes
	    && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
	    && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
		DEBUG("%s, %s type mismatch\n", __func__, _ep->name);
		return -EINVAL;
	}

	/* hardware _could_ do smaller, but driver doesn't */
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
	     && le16_to_cpu(desc->wMaxPacketSize) != ep_maxpacket(ep))
	    || !desc->wMaxPacketSize) {
		DEBUG("%s, bad %s maxpacket\n", __func__, _ep->name);
		return -ERANGE;
	}

	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
		DEBUG("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);

	ep->stopped = 0;
	ep->desc = desc;
	ep->pio_irqs = 0;
	ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);

	spin_unlock_irqrestore(&ep->dev->lock, flags);

	/* Reset halt state (does flush) */
	lh7a40x_set_halt(_ep, 0);

	DEBUG("%s: enabled %s\n", __func__, _ep->name);
	return 0;
}

/** Disable EP
 *  NOTE: Sets INDEX register
 *
 *  Kills all pending requests with -ESHUTDOWN and masks the endpoint's
 *  PIO interrupt.
 */
static int lh7a40x_ep_disable(struct usb_ep *_ep)
{
	struct lh7a40x_ep *ep;
	unsigned long flags;

	DEBUG("%s, %p\n", __func__, _ep);

	ep = container_of(_ep, struct lh7a40x_ep, ep);
	if (!_ep || !ep->desc) {
		DEBUG("%s, %s not enabled\n", __func__,
		      _ep ? ep->ep.name : NULL);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);

	usb_set_index(ep_index(ep));

	/* Nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	/* Disable ep IRQ */
	pio_irq_disable(ep_index(ep));

	ep->desc = 0;
	ep->stopped = 1;

	spin_unlock_irqrestore(&ep->dev->lock, flags);

	DEBUG("%s: disabled %s\n", __func__, _ep->name);
	return 0;
}

/* Allocate a zeroed request wrapper; freed by lh7a40x_free_request(). */
static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep,
						 gfp_t gfp_flags)
{
	struct lh7a40x_request *req;

	DEBUG("%s, %p\n", __func__, ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return 0;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

/* Release a request previously returned by lh7a40x_alloc_request(). */
static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *_req)
{
	struct lh7a40x_request *req;

	DEBUG("%s, %p\n", __func__, ep);

	req = container_of(_req, struct lh7a40x_request, req);
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}

/** Queue one request
 *  Kickstart transfer if needed
 *  NOTE: Sets INDEX register
 *
 *  If the endpoint queue is empty and not stopped, starts the transfer
 *  immediately (EP0 via ep0_kick, IN eps via write_fifo, OUT eps via
 *  read_fifo); otherwise the request waits for the PIO IRQ handler.
 */
static int lh7a40x_queue(struct usb_ep *_ep, struct usb_request *_req,
			 gfp_t gfp_flags)
{
	struct lh7a40x_request *req;
	struct lh7a40x_ep *ep;
	struct lh7a40x_udc *dev;
	unsigned long flags;

	DEBUG("\n\n\n%s, %p\n", __func__, _ep);

	req = container_of(_req, struct lh7a40x_request, req);
	if (unlikely
	    (!_req || !_req->complete || !_req->buf
	     || !list_empty(&req->queue))) {
		DEBUG("%s, bad params\n", __func__);
		return -EINVAL;
	}

	ep = container_of(_ep, struct lh7a40x_ep, ep);
	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
		DEBUG("%s, bad ep\n", __func__);
		return -EINVAL;
	}

	dev = ep->dev;
	if (unlikely(!dev->driver
		     || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		DEBUG("%s, bogus device state %p\n", __func__, dev->driver);
		return -ESHUTDOWN;
	}

	DEBUG("%s queue req %p, len %d buf %p\n", _ep->name, _req, _req->length,
	      _req->buf);

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	DEBUG("Add to %d Q %d %d\n", ep_index(ep), list_empty(&ep->queue),
	      ep->stopped);
	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
		u32 csr;

		if (unlikely(ep_index(ep) == 0)) {
			/* EP0 */
			list_add_tail(&req->queue, &ep->queue);
			lh7a40x_ep0_kick(dev, ep);
			req = 0;
		} else if (ep_is_in(ep)) {
			/* EP1 & EP3 */
			usb_set_index(ep_index(ep));
			csr = usb_read(ep->csr1);
			pio_irq_enable(ep_index(ep));
			if ((csr & USB_IN_CSR1_FIFO_NOT_EMPTY) == 0) {
				if (write_fifo(ep, req) == 1)
					req = 0;
			}
		} else {
			/* EP2 */
			usb_set_index(ep_index(ep));
			csr = usb_read(ep->csr1);
			pio_irq_enable(ep_index(ep));
			if (!(csr & USB_OUT_CSR1_FIFO_FULL)) {
				if (read_fifo(ep, req) == 1)
					req = 0;
			}
		}
	}

	/* pio or dma irq handler advances the queue. */
	if (likely(req != 0))
		list_add_tail(&req->queue, &ep->queue);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/* dequeue JUST ONE request */
static int lh7a40x_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct lh7a40x_ep *ep;
	struct lh7a40x_request *req;
	unsigned long flags;

	DEBUG("%s, %p\n", __func__, _ep);

	ep = container_of(_ep, struct lh7a40x_ep, ep);
	if (!_ep || ep->ep.name == ep0name)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		return -EINVAL;
	}

	done(ep, req, -ECONNRESET);

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/** Halt specific EP
 *  Return 0 if success
 *  NOTE: Sets INDEX register to EP !
*/
static int lh7a40x_set_halt(struct usb_ep *_ep, int value)
{
	struct lh7a40x_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct lh7a40x_ep, ep);
	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
		DEBUG("%s, bad ep\n", __func__);
		return -EINVAL;
	}

	usb_set_index(ep_index(ep));

	DEBUG("%s, ep %d, val %d\n", __func__, ep_index(ep), value);

	spin_lock_irqsave(&ep->dev->lock, flags);

	if (ep_index(ep) == 0) {
		/* EP0 */
		usb_set(EP0_SEND_STALL, ep->csr1);
	} else if (ep_is_in(ep)) {
		u32 csr = usb_read(ep->csr1);
		if (value && ((csr & USB_IN_CSR1_FIFO_NOT_EMPTY)
			      || !list_empty(&ep->queue))) {
			/*
			 * Attempts to halt IN endpoints will fail (returning -EAGAIN)
			 * if any transfer requests are still queued, or if the controller
			 * FIFO still holds bytes that the host hasn't collected.
			 */
			spin_unlock_irqrestore(&ep->dev->lock, flags);
			DEBUG
			    ("Attempt to halt IN endpoint failed (returning -EAGAIN) %d %d\n",
			     (csr & USB_IN_CSR1_FIFO_NOT_EMPTY),
			     !list_empty(&ep->queue));
			return -EAGAIN;
		}
		flush(ep);
		if (value)
			usb_set(USB_IN_CSR1_SEND_STALL, ep->csr1);
		else {
			usb_clear(USB_IN_CSR1_SEND_STALL, ep->csr1);
			usb_set(USB_IN_CSR1_CLR_DATA_TOGGLE, ep->csr1);
		}

	} else {
		flush(ep);
		if (value)
			usb_set(USB_OUT_CSR1_SEND_STALL, ep->csr1);
		else {
			usb_clear(USB_OUT_CSR1_SEND_STALL, ep->csr1);
			usb_set(USB_OUT_CSR1_CLR_DATA_REG, ep->csr1);
		}
	}

	if (value) {
		ep->stopped = 1;
	} else {
		ep->stopped = 0;
	}

	spin_unlock_irqrestore(&ep->dev->lock, flags);

	DEBUG("%s %s halted\n", _ep->name, value == 0 ? "NOT" : "IS");

	return 0;
}

/** Return bytes in EP FIFO
 *  NOTE: Sets INDEX register to EP
 */
static int lh7a40x_fifo_status(struct usb_ep *_ep)
{
	u32 csr;
	int count = 0;
	struct lh7a40x_ep *ep;

	ep = container_of(_ep, struct lh7a40x_ep, ep);
	if (!_ep) {
		DEBUG("%s, bad ep\n", __func__);
		return -ENODEV;
	}

	DEBUG("%s, %d\n", __func__, ep_index(ep));

	/* LPD can't report unclaimed bytes from IN fifos */
	if (ep_is_in(ep))
		return -EOPNOTSUPP;

	usb_set_index(ep_index(ep));

	csr = usb_read(ep->csr1);
	if (ep->dev->gadget.speed != USB_SPEED_UNKNOWN ||
	    csr & USB_OUT_CSR1_OUT_PKT_RDY) {
		count = usb_read(USB_OUT_FIFO_WC1);
	}

	return count;
}

/** Flush EP FIFO
 *  NOTE: Sets INDEX register to EP
 */
static void lh7a40x_fifo_flush(struct usb_ep *_ep)
{
	struct lh7a40x_ep *ep;

	ep = container_of(_ep, struct lh7a40x_ep, ep);
	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
		DEBUG("%s, bad ep\n", __func__);
		return;
	}

	usb_set_index(ep_index(ep));
	flush(ep);
}

/****************************************************************/
/* End Point 0 related functions                                */
/****************************************************************/

/* return:  0 = still running, 1 = completed, negative = errno
 * (EP0 variant of write_fifo: no short-packet bookkeeping, the
 * caller drives the EP0 CSR handshake bits)
 */
static int write_fifo_ep0(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
{
	u32 max;
	unsigned count;
	int is_last;

	max = ep_maxpacket(ep);

	DEBUG_EP0("%s\n", __func__);

	count = write_packet(ep, req, max);

	/* last packet is usually short (or a zlp) */
	if (unlikely(count != max))
		is_last = 1;
	else {
		if (likely(req->req.length != req->req.actual) || req->req.zero)
			is_last = 0;
		else
			is_last = 1;
	}

	DEBUG_EP0("%s: wrote %s %d bytes%s %d left %p\n", __func__,
		  ep->ep.name, count,
		  is_last ? "/L" : "", req->req.length - req->req.actual, req);

	/* requests complete when all IN data is in the FIFO */
	if (is_last) {
		done(ep, req, 0);
		return 1;
	}

	return 0;
}

/* Drain up to @max bytes from the EP0 FIFO into @cp; returns the
 * number of bytes actually copied (bounded by the FIFO word count). */
static __inline__ int lh7a40x_fifo_read(struct lh7a40x_ep *ep,
					unsigned char *cp, int max)
{
	int bytes;
	int count = usb_read(USB_OUT_FIFO_WC1);
	volatile u32 *fifo = (volatile u32 *)ep->fifo;

	if (count > max)
		count = max;
	bytes = count;
	while (count--)
		*cp++ = *fifo & 0xFF;
	return bytes;
}

/* Push @count bytes into the EP0 FIFO, one byte per 32-bit write. */
static __inline__ void lh7a40x_fifo_write(struct lh7a40x_ep *ep,
					  unsigned char *cp, int count)
{
	volatile u32 *fifo = (volatile u32 *)ep->fifo;
	DEBUG_EP0("fifo_write: %d %d\n", ep_index(ep), count);
	while (count--)
		*fifo = *cp++;
}

/* EP0 variant of read_fifo: returns 1 when the request completed
 * (short packet or buffer full), 0 otherwise; excess data from the
 * host is discarded with -EOVERFLOW. */
static int read_fifo_ep0(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
{
	u32 csr;
	u8 *buf;
	unsigned bufferspace, count, is_short;
	volatile u32 *fifo = (volatile u32 *)ep->fifo;

	DEBUG_EP0("%s\n", __func__);

	csr = usb_read(USB_EP0_CSR);
	if (!(csr & USB_OUT_CSR1_OUT_PKT_RDY))
		return 0;

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);
	bufferspace = req->req.length - req->req.actual;

	/* read all bytes from this packet */
	if (likely(csr & EP0_OUT_PKT_RDY)) {
		count = usb_read(USB_OUT_FIFO_WC1);
		req->req.actual += min(count, bufferspace);
	} else			/* zlp */
		count = 0;

	is_short = (count < ep->ep.maxpacket);
	DEBUG_EP0("read %s %02x, %d bytes%s req %p %d/%d\n",
		  ep->ep.name, csr, count,
		  is_short ? "/S" : "", req, req->req.actual, req->req.length);

	while (likely(count-- != 0)) {
		u8 byte = (u8) (*fifo & 0xff);

		if (unlikely(bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				DEBUG_EP0("%s overflow %d\n", ep->ep.name,
					  count);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			bufferspace--;
		}
	}

	/* completion */
	if (is_short || req->req.actual == req->req.length) {
		done(ep, req, 0);
		return 1;
	}

	/* finished that packet.  the next one may be waiting... */
	return 0;
}

/**
 * udc_set_address - set the USB address for this device
 * @address:
 *
 * Called from control endpoint function after it decodes a set address setup packet.
 * Writes the address with ADDR_UPDATE so the hardware latches it after
 * the status stage (c.f. 15.1.2.2 Table 15-4: address used after
 * DATA_END is set).
 */
static void udc_set_address(struct lh7a40x_udc *dev, unsigned char address)
{
	DEBUG_EP0("%s: %d\n", __func__, address);

	dev->usb_address = address;
	usb_set((address & USB_FA_FUNCTION_ADDR), USB_FA);
	usb_set(USB_FA_ADDR_UPDATE | (address & USB_FA_FUNCTION_ADDR), USB_FA);
	/* usb_read(USB_FA); */
}

/*
 * DATA_STATE_RECV (OUT_PKT_RDY)
 *      - if error
 *              set EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL bits
 *      - else
 *              set EP0_CLR_OUT bit
 *				if last set EP0_DATA_END bit
 */
static void lh7a40x_ep0_out(struct lh7a40x_udc *dev, u32 csr)
{
	struct lh7a40x_request *req;
	struct lh7a40x_ep *ep = &dev->ep[0];
	int ret;

	DEBUG_EP0("%s: %x\n", __func__, csr);

	if (list_empty(&ep->queue))
		req = 0;
	else
		req = list_entry(ep->queue.next, struct lh7a40x_request, queue);

	if (req) {

		if (req->req.length == 0) {
			DEBUG_EP0("ZERO LENGTH OUT!\n");
			usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
			dev->ep0state = WAIT_FOR_SETUP;
			return;
		}
		ret = read_fifo_ep0(ep, req);
		if (ret) {
			/* Done! */
			DEBUG_EP0("%s: finished, waiting for status\n",
				  __func__);

			usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
			dev->ep0state = WAIT_FOR_SETUP;
		} else {
			/* Not done yet.. */
			DEBUG_EP0("%s: not finished\n", __func__);
			usb_set(EP0_CLR_OUT, USB_EP0_CSR);
		}
	} else {
		DEBUG_EP0("NO REQ??!\n");
	}
}

/*
 * DATA_STATE_XMIT
 *
 * Transmits the next EP0 IN packet; arms a trailing ZLP when the
 * transfer length is an exact multiple of the packet size.
 */
static int lh7a40x_ep0_in(struct lh7a40x_udc *dev, u32 csr)
{
	struct lh7a40x_request *req;
	struct lh7a40x_ep *ep = &dev->ep[0];
	int ret, need_zlp = 0;

	DEBUG_EP0("%s: %x\n", __func__, csr);

	if (list_empty(&ep->queue))
		req = 0;
	else
		req = list_entry(ep->queue.next, struct lh7a40x_request, queue);

	if (!req) {
		DEBUG_EP0("%s: NULL REQ\n", __func__);
		return 0;
	}

	if (req->req.length == 0) {

		usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
		dev->ep0state = WAIT_FOR_SETUP;
		return 1;
	}

	if (req->req.length - req->req.actual == EP0_PACKETSIZE) {
		/* Next write will end with the packet size, */
		/* so we need Zero-length-packet */
		need_zlp = 1;
	}

	ret = write_fifo_ep0(ep, req);

	if (ret == 1 && !need_zlp) {
		/* Last packet */
		DEBUG_EP0("%s: finished, waiting for status\n", __func__);

		usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
		dev->ep0state = WAIT_FOR_SETUP;
	} else {
		DEBUG_EP0("%s: not finished\n", __func__);
		usb_set(EP0_IN_PKT_RDY, USB_EP0_CSR);
	}

	if (need_zlp) {
		DEBUG_EP0("%s: Need ZLP!\n", __func__);
		usb_set(EP0_IN_PKT_RDY, USB_EP0_CSR);
		dev->ep0state = DATA_STATE_NEED_ZLP;
	}

	return 1;
}

/*
 * Answer a GET_STATUS request in hardware: reads the relevant stall
 * bit (device/endpoint) and pushes the two status bytes straight into
 * the EP0 FIFO.  Returns 0 on success, -EOPNOTSUPP otherwise.
 */
static int lh7a40x_handle_get_status(struct lh7a40x_udc *dev,
				     struct usb_ctrlrequest *ctrl)
{
	struct lh7a40x_ep *ep0 = &dev->ep[0];
	struct lh7a40x_ep *qep;
	int reqtype = (ctrl->bRequestType & USB_RECIP_MASK);
	u16 val = 0;

	if (reqtype == USB_RECIP_INTERFACE) {
		/* This is not supported.
		 * And according to the USB spec, this one does nothing..
		 * Just return 0
		 */
		DEBUG_SETUP("GET_STATUS: USB_RECIP_INTERFACE\n");
	} else if (reqtype == USB_RECIP_DEVICE) {
		DEBUG_SETUP("GET_STATUS: USB_RECIP_DEVICE\n");
		val |= (1 << 0);	/* Self powered */
		/*val |= (1<<1); *//* Remote wakeup */
	} else if (reqtype == USB_RECIP_ENDPOINT) {
		int ep_num = (ctrl->wIndex & ~USB_DIR_IN);

		DEBUG_SETUP
		    ("GET_STATUS: USB_RECIP_ENDPOINT (%d), ctrl->wLength = %d\n",
		     ep_num, ctrl->wLength);

		if (ctrl->wLength > 2 || ep_num > 3)
			return -EOPNOTSUPP;

		qep = &dev->ep[ep_num];
		if (ep_is_in(qep) != ((ctrl->wIndex & USB_DIR_IN) ? 1 : 0)
		    && ep_index(qep) != 0) {
			return -EOPNOTSUPP;
		}

		usb_set_index(ep_index(qep));

		/* Return status on next IN token */
		switch (qep->ep_type) {
		case ep_control:
			val =
			    (usb_read(qep->csr1) & EP0_SEND_STALL) ==
			    EP0_SEND_STALL;
			break;
		case ep_bulk_in:
		case ep_interrupt:
			val =
			    (usb_read(qep->csr1) & USB_IN_CSR1_SEND_STALL) ==
			    USB_IN_CSR1_SEND_STALL;
			break;
		case ep_bulk_out:
			val =
			    (usb_read(qep->csr1) & USB_OUT_CSR1_SEND_STALL) ==
			    USB_OUT_CSR1_SEND_STALL;
			break;
		}

		/* Back to EP0 index */
		usb_set_index(0);

		DEBUG_SETUP("GET_STATUS, ep: %d (%x), val = %d\n", ep_num,
			    ctrl->wIndex, val);
	} else {
		DEBUG_SETUP("Unknown REQ TYPE: %d\n", reqtype);
		return -EOPNOTSUPP;
	}

	/* Clear "out packet ready" */
	usb_set((EP0_CLR_OUT), USB_EP0_CSR);
	/* Put status to FIFO */
	lh7a40x_fifo_write(ep0, (u8 *) & val, sizeof(val));
	/* Issue "In packet ready" */
	usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);

	return 0;
}

/*
 * WAIT_FOR_SETUP (OUT_PKT_RDY)
 *      - read data packet from EP0 FIFO
 *      - decode command
 *      - if error
 *              set EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL bits
 *      - else
 *              set EP0_CLR_OUT | EP0_DATA_END bits
 */
static void lh7a40x_ep0_setup(struct lh7a40x_udc *dev, u32 csr)
{
	struct lh7a40x_ep *ep = &dev->ep[0];
	struct usb_ctrlrequest ctrl;
	int i, bytes, is_in;

	DEBUG_SETUP("%s: %x\n", __func__, csr);

	/* Nuke all previous transfers */
	nuke(ep, -EPROTO);

	/* read control req from fifo (8 bytes) */
	bytes = lh7a40x_fifo_read(ep, (unsigned char *)&ctrl, 8);

	DEBUG_SETUP("Read CTRL REQ %d bytes\n", bytes);
	DEBUG_SETUP("CTRL.bRequestType = %d (is_in %d)\n", ctrl.bRequestType,
		    ctrl.bRequestType == USB_DIR_IN);
	DEBUG_SETUP("CTRL.bRequest = %d\n", ctrl.bRequest);
	DEBUG_SETUP("CTRL.wLength = %d\n", ctrl.wLength);
	DEBUG_SETUP("CTRL.wValue = %d (%d)\n", ctrl.wValue, ctrl.wValue >> 8);
	DEBUG_SETUP("CTRL.wIndex = %d\n", ctrl.wIndex);

	/* Set direction of EP0 */
	if (likely(ctrl.bRequestType & USB_DIR_IN)) {
		ep->bEndpointAddress |= USB_DIR_IN;
		is_in = 1;
	} else {
		ep->bEndpointAddress &= ~USB_DIR_IN;
		is_in = 0;
	}

	dev->req_pending = 1;

	/* Handle some SETUP packets ourselves */
	switch (ctrl.bRequest) {
	case USB_REQ_SET_ADDRESS:
		if (ctrl.bRequestType != (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
			break;

		DEBUG_SETUP("USB_REQ_SET_ADDRESS (%d)\n", ctrl.wValue);
		udc_set_address(dev, ctrl.wValue);
		usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
		return;

	case USB_REQ_GET_STATUS:{
			if (lh7a40x_handle_get_status(dev, &ctrl) == 0)
				return;

	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
			/* NOTE(review): CLEAR/SET_FEATURE deliberately sit
			 * inside GET_STATUS's braces so a failed GET_STATUS
			 * falls through to the feature handling below —
			 * fragile but intentional in the original. */
			if (ctrl.bRequestType == USB_RECIP_ENDPOINT) {
				struct lh7a40x_ep *qep;
				int ep_num = (ctrl.wIndex & 0x0f);

				/* Support only HALT feature */
				if (ctrl.wValue != 0 || ctrl.wLength != 0
				    || ep_num > 3 || ep_num < 1)
					break;

				qep = &dev->ep[ep_num];
				spin_unlock(&dev->lock);
				if (ctrl.bRequest == USB_REQ_SET_FEATURE) {
					DEBUG_SETUP("SET_FEATURE (%d)\n",
						    ep_num);
					lh7a40x_set_halt(&qep->ep, 1);
				} else {
					DEBUG_SETUP("CLR_FEATURE (%d)\n",
						    ep_num);
					lh7a40x_set_halt(&qep->ep, 0);
				}
				spin_lock(&dev->lock);
				usb_set_index(0);

				/* Reply with a ZLP on next IN token */
				usb_set((EP0_CLR_OUT | EP0_DATA_END),
					USB_EP0_CSR);
				return;
			}
			break;
		}

	default:
		break;
	}

	if (likely(dev->driver)) {
		/* device-2-host (IN) or no data setup command, process immediately */
		spin_unlock(&dev->lock);
		i = dev->driver->setup(&dev->gadget, &ctrl);
		spin_lock(&dev->lock);

		if (i < 0) {
			/* setup processing failed, force stall */
			DEBUG_SETUP
			    ("  --> ERROR: gadget setup FAILED (stalling), setup returned %d\n",
			     i);
			usb_set_index(0);
			usb_set((EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL),
				USB_EP0_CSR);

			/* ep->stopped = 1; */
			dev->ep0state = WAIT_FOR_SETUP;
		}
	}
}

/*
 * DATA_STATE_NEED_ZLP
 */
static void lh7a40x_ep0_in_zlp(struct lh7a40x_udc *dev, u32 csr)
{
	DEBUG_EP0("%s: %x\n", __func__, csr);

	/* c.f. Table 15-14 */
	usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
	dev->ep0state = WAIT_FOR_SETUP;
}

/*
 * handle ep0 interrupt
 */
static void lh7a40x_handle_ep0(struct lh7a40x_udc *dev, u32 intr)
{
	struct lh7a40x_ep *ep = &dev->ep[0];
	u32 csr;

	/* Set index 0 */
	usb_set_index(0);
	csr = usb_read(USB_EP0_CSR);

	DEBUG_EP0("%s: csr = %x\n", __func__, csr);

	/*
	 * For overview of what we should be doing see c.f. Chapter 18.1.2.4
	 * We will follow that outline here modified by our own global state
	 * indication which provides hints as to what we think should be
	 * happening..
	 */

	/*
	 * if SENT_STALL is set
	 *      - clear the SENT_STALL bit
	 */
	if (csr & EP0_SENT_STALL) {
		DEBUG_EP0("%s: EP0_SENT_STALL is set: %x\n", __func__, csr);
		usb_clear((EP0_SENT_STALL | EP0_SEND_STALL), USB_EP0_CSR);
		nuke(ep, -ECONNABORTED);
		dev->ep0state = WAIT_FOR_SETUP;
		return;
	}

	/*
	 * if a transfer is in progress && IN_PKT_RDY and OUT_PKT_RDY are clear
	 *      - fill EP0 FIFO
	 *      - if last packet
	 *              -       set IN_PKT_RDY | DATA_END
	 *      - else
	 *              set IN_PKT_RDY
	 */
	if (!(csr & (EP0_IN_PKT_RDY | EP0_OUT_PKT_RDY))) {
		DEBUG_EP0("%s: IN_PKT_RDY and OUT_PKT_RDY are clear\n",
			  __func__);

		switch (dev->ep0state) {
		case DATA_STATE_XMIT:
			DEBUG_EP0("continue with DATA_STATE_XMIT\n");
			lh7a40x_ep0_in(dev, csr);
			return;
		case DATA_STATE_NEED_ZLP:
			DEBUG_EP0("continue with DATA_STATE_NEED_ZLP\n");
			lh7a40x_ep0_in_zlp(dev, csr);
			return;
		default:
			/* Stall? */
			DEBUG_EP0("Odd state!!
state = %s\n", state_names[dev->ep0state]); dev->ep0state = WAIT_FOR_SETUP; /* nuke(ep, 0); */ /* usb_set(EP0_SEND_STALL, ep->csr1); */ break; } } /* * if SETUP_END is set * - abort the last transfer * - set SERVICED_SETUP_END_BIT */ if (csr & EP0_SETUP_END) { DEBUG_EP0("%s: EP0_SETUP_END is set: %x\n", __func__, csr); usb_set(EP0_CLR_SETUP_END, USB_EP0_CSR); nuke(ep, 0); dev->ep0state = WAIT_FOR_SETUP; } /* * if EP0_OUT_PKT_RDY is set * - read data packet from EP0 FIFO * - decode command * - if error * set SERVICED_OUT_PKT_RDY | DATA_END bits | SEND_STALL * - else * set SERVICED_OUT_PKT_RDY | DATA_END bits */ if (csr & EP0_OUT_PKT_RDY) { DEBUG_EP0("%s: EP0_OUT_PKT_RDY is set: %x\n", __func__, csr); switch (dev->ep0state) { case WAIT_FOR_SETUP: DEBUG_EP0("WAIT_FOR_SETUP\n"); lh7a40x_ep0_setup(dev, csr); break; case DATA_STATE_RECV: DEBUG_EP0("DATA_STATE_RECV\n"); lh7a40x_ep0_out(dev, csr); break; default: /* send stall? */ DEBUG_EP0("strange state!! 2. send stall? state = %d\n", dev->ep0state); break; } } } static void lh7a40x_ep0_kick(struct lh7a40x_udc *dev, struct lh7a40x_ep *ep) { u32 csr; usb_set_index(0); csr = usb_read(USB_EP0_CSR); DEBUG_EP0("%s: %x\n", __func__, csr); /* Clear "out packet ready" */ usb_set(EP0_CLR_OUT, USB_EP0_CSR); if (ep_is_in(ep)) { dev->ep0state = DATA_STATE_XMIT; lh7a40x_ep0_in(dev, csr); } else { dev->ep0state = DATA_STATE_RECV; lh7a40x_ep0_out(dev, csr); } } /* --------------------------------------------------------------------------- * device-scoped parts of the api to the usb controller hardware * --------------------------------------------------------------------------- */ static int lh7a40x_udc_get_frame(struct usb_gadget *_gadget) { u32 frame1 = usb_read(USB_FRM_NUM1); /* Least significant 8 bits */ u32 frame2 = usb_read(USB_FRM_NUM2); /* Most significant 3 bits */ DEBUG("%s, %p\n", __func__, _gadget); return ((frame2 & 0x07) << 8) | (frame1 & 0xff); } static int lh7a40x_udc_wakeup(struct usb_gadget *_gadget) { /* host may 
not have enabled remote wakeup */ /*if ((UDCCS0 & UDCCS0_DRWF) == 0) return -EHOSTUNREACH; udc_set_mask_UDCCR(UDCCR_RSM); */ return -ENOTSUPP; } static const struct usb_gadget_ops lh7a40x_udc_ops = { .get_frame = lh7a40x_udc_get_frame, .wakeup = lh7a40x_udc_wakeup, /* current versions must always be self-powered */ }; static void nop_release(struct device *dev) { DEBUG("%s %s\n", __func__, dev_name(dev)); } static struct lh7a40x_udc memory = { .usb_address = 0, .gadget = { .ops = &lh7a40x_udc_ops, .ep0 = &memory.ep[0].ep, .name = driver_name, .dev = { .init_name = "gadget", .release = nop_release, }, }, /* control endpoint */ .ep[0] = { .ep = { .name = ep0name, .ops = &lh7a40x_ep_ops, .maxpacket = EP0_PACKETSIZE, }, .dev = &memory, .bEndpointAddress = 0, .bmAttributes = 0, .ep_type = ep_control, .fifo = io_p2v(USB_EP0_FIFO), .csr1 = USB_EP0_CSR, .csr2 = USB_EP0_CSR, }, /* first group of endpoints */ .ep[1] = { .ep = { .name = "ep1in-bulk", .ops = &lh7a40x_ep_ops, .maxpacket = 64, }, .dev = &memory, .bEndpointAddress = USB_DIR_IN | 1, .bmAttributes = USB_ENDPOINT_XFER_BULK, .ep_type = ep_bulk_in, .fifo = io_p2v(USB_EP1_FIFO), .csr1 = USB_IN_CSR1, .csr2 = USB_IN_CSR2, }, .ep[2] = { .ep = { .name = "ep2out-bulk", .ops = &lh7a40x_ep_ops, .maxpacket = 64, }, .dev = &memory, .bEndpointAddress = 2, .bmAttributes = USB_ENDPOINT_XFER_BULK, .ep_type = ep_bulk_out, .fifo = io_p2v(USB_EP2_FIFO), .csr1 = USB_OUT_CSR1, .csr2 = USB_OUT_CSR2, }, .ep[3] = { .ep = { .name = "ep3in-int", .ops = &lh7a40x_ep_ops, .maxpacket = 64, }, .dev = &memory, .bEndpointAddress = USB_DIR_IN | 3, .bmAttributes = USB_ENDPOINT_XFER_INT, .ep_type = ep_interrupt, .fifo = io_p2v(USB_EP3_FIFO), .csr1 = USB_IN_CSR1, .csr2 = USB_IN_CSR2, }, }; /* * probe - binds to the platform device */ static int lh7a40x_udc_probe(struct platform_device *pdev) { struct lh7a40x_udc *dev = &memory; int retval; DEBUG("%s: %p\n", __func__, pdev); spin_lock_init(&dev->lock); dev->dev = &pdev->dev; 
device_initialize(&dev->gadget.dev); dev->gadget.dev.parent = &pdev->dev; the_controller = dev; platform_set_drvdata(pdev, dev); udc_disable(dev); udc_reinit(dev); /* irq setup after old hardware state is cleaned up */ retval = request_irq(IRQ_USBINTR, lh7a40x_udc_irq, IRQF_DISABLED, driver_name, dev); if (retval != 0) { DEBUG(KERN_ERR "%s: can't get irq %i, err %d\n", driver_name, IRQ_USBINTR, retval); return -EBUSY; } create_proc_files(); return retval; } static int lh7a40x_udc_remove(struct platform_device *pdev) { struct lh7a40x_udc *dev = platform_get_drvdata(pdev); DEBUG("%s: %p\n", __func__, pdev); if (dev->driver) return -EBUSY; udc_disable(dev); remove_proc_files(); free_irq(IRQ_USBINTR, dev); platform_set_drvdata(pdev, 0); the_controller = 0; return 0; } /*-------------------------------------------------------------------------*/ static struct platform_driver udc_driver = { .probe = lh7a40x_udc_probe, .remove = lh7a40x_udc_remove, /* FIXME power management support */ /* .suspend = ... disable UDC */ /* .resume = ... re-enable UDC */ .driver = { .name = (char *)driver_name, .owner = THIS_MODULE, }, }; static int __init udc_init(void) { DEBUG("%s: %s version %s\n", __func__, driver_name, DRIVER_VERSION); return platform_driver_register(&udc_driver); } static void __exit udc_exit(void) { platform_driver_unregister(&udc_driver); } module_init(udc_init); module_exit(udc_exit); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Mikko Lahteenmaki, Bo Henriksen"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:lh7a40x_udc");
gpl-2.0
weizhenwei/mi1_kernel
drivers/tty/vt/vt_ioctl.c
1161
33451
/* * Copyright (C) 1992 obz under the linux copyright * * Dynamic diacritical handling - aeb@cwi.nl - Dec 1993 * Dynamic keymap and string allocation - aeb@cwi.nl - May 1994 * Restrict VT switching via ioctl() - grif@cs.ucr.edu - Dec 1995 * Some code moved for less code duplication - Andi Kleen - Mar 1997 * Check put/get_user, cleanups - acme@conectiva.com.br - Jun 2001 */ #include <linux/types.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/tty.h> #include <linux/timer.h> #include <linux/kernel.h> #include <linux/compat.h> #include <linux/module.h> #include <linux/kd.h> #include <linux/vt.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/console.h> #include <linux/consolemap.h> #include <linux/signal.h> #include <linux/timex.h> #include <asm/io.h> #include <asm/uaccess.h> #include <linux/kbd_kern.h> #include <linux/vt_kern.h> #include <linux/kbd_diacr.h> #include <linux/selection.h> char vt_dont_switch; extern struct tty_driver *console_driver; #define VT_IS_IN_USE(i) (console_driver->ttys[i] && console_driver->ttys[i]->count) #define VT_BUSY(i) (VT_IS_IN_USE(i) || i == fg_console || vc_cons[i].d == sel_cons) /* * Console (vt and kd) routines, as defined by USL SVR4 manual, and by * experimentation and study of X386 SYSV handling. * * One point of difference: SYSV vt's are /dev/vtX, which X >= 0, and * /dev/console is a separate ttyp. Under Linux, /dev/tty0 is /dev/console, * and the vc start at /dev/ttyX, X >= 1. We maintain that here, so we will * always treat our set of vt as numbered 1..MAX_NR_CONSOLES (corresponding to * ttys 0..MAX_NR_CONSOLES-1). Explicitly naming VT 0 is illegal, but using * /dev/tty0 (fg_console) as a target is legal, since an implicit aliasing * to the current console is done by the main ioctl code. 
*/ #ifdef CONFIG_X86 #include <linux/syscalls.h> #endif static void complete_change_console(struct vc_data *vc); /* * User space VT_EVENT handlers */ struct vt_event_wait { struct list_head list; struct vt_event event; int done; }; static LIST_HEAD(vt_events); static DEFINE_SPINLOCK(vt_event_lock); static DECLARE_WAIT_QUEUE_HEAD(vt_event_waitqueue); /** * vt_event_post * @event: the event that occurred * @old: old console * @new: new console * * Post an VT event to interested VT handlers */ void vt_event_post(unsigned int event, unsigned int old, unsigned int new) { struct list_head *pos, *head; unsigned long flags; int wake = 0; spin_lock_irqsave(&vt_event_lock, flags); head = &vt_events; list_for_each(pos, head) { struct vt_event_wait *ve = list_entry(pos, struct vt_event_wait, list); if (!(ve->event.event & event)) continue; ve->event.event = event; /* kernel view is consoles 0..n-1, user space view is console 1..n with 0 meaning current, so we must bias */ ve->event.oldev = old + 1; ve->event.newev = new + 1; wake = 1; ve->done = 1; } spin_unlock_irqrestore(&vt_event_lock, flags); if (wake) wake_up_interruptible(&vt_event_waitqueue); } /** * vt_event_wait - wait for an event * @vw: our event * * Waits for an event to occur which completes our vt_event_wait * structure. On return the structure has wv->done set to 1 for success * or 0 if some event such as a signal ended the wait. 
*/ static void vt_event_wait(struct vt_event_wait *vw) { unsigned long flags; /* Prepare the event */ INIT_LIST_HEAD(&vw->list); vw->done = 0; /* Queue our event */ spin_lock_irqsave(&vt_event_lock, flags); list_add(&vw->list, &vt_events); spin_unlock_irqrestore(&vt_event_lock, flags); /* Wait for it to pass */ wait_event_interruptible(vt_event_waitqueue, vw->done); /* Dequeue it */ spin_lock_irqsave(&vt_event_lock, flags); list_del(&vw->list); spin_unlock_irqrestore(&vt_event_lock, flags); } /** * vt_event_wait_ioctl - event ioctl handler * @arg: argument to ioctl * * Implement the VT_WAITEVENT ioctl using the VT event interface */ static int vt_event_wait_ioctl(struct vt_event __user *event) { struct vt_event_wait vw; if (copy_from_user(&vw.event, event, sizeof(struct vt_event))) return -EFAULT; /* Highest supported event for now */ if (vw.event.event & ~VT_MAX_EVENT) return -EINVAL; vt_event_wait(&vw); /* If it occurred report it */ if (vw.done) { if (copy_to_user(event, &vw.event, sizeof(struct vt_event))) return -EFAULT; return 0; } return -EINTR; } /** * vt_waitactive - active console wait * @event: event code * @n: new console * * Helper for event waits. Used to implement the legacy * event waiting ioctls in terms of events */ int vt_waitactive(int n) { struct vt_event_wait vw; do { if (n == fg_console + 1) break; vw.event.event = VT_EVENT_SWITCH; vt_event_wait(&vw); if (vw.done == 0) return -EINTR; } while (vw.event.newev != n); return 0; } /* * these are the valid i/o ports we're allowed to change. 
they map all the * video ports */ #define GPFIRST 0x3b4 #define GPLAST 0x3df #define GPNUM (GPLAST - GPFIRST + 1) static inline int do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op) { struct consolefontdesc cfdarg; int i; if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc))) return -EFAULT; switch (cmd) { case PIO_FONTX: if (!perm) return -EPERM; op->op = KD_FONT_OP_SET; op->flags = KD_FONT_FLAG_OLD; op->width = 8; op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = cfdarg.chardata; return con_font_op(vc_cons[fg_console].d, op); case GIO_FONTX: { op->op = KD_FONT_OP_GET; op->flags = KD_FONT_FLAG_OLD; op->width = 8; op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = cfdarg.chardata; i = con_font_op(vc_cons[fg_console].d, op); if (i) return i; cfdarg.charheight = op->height; cfdarg.charcount = op->charcount; if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc))) return -EFAULT; return 0; } } return -EINVAL; } static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_data *vc) { struct unimapdesc tmp; if (copy_from_user(&tmp, user_ud, sizeof tmp)) return -EFAULT; if (tmp.entries) if (!access_ok(VERIFY_WRITE, tmp.entries, tmp.entry_ct*sizeof(struct unipair))) return -EFAULT; switch (cmd) { case PIO_UNIMAP: if (!perm) return -EPERM; return con_set_unimap(vc, tmp.entry_ct, tmp.entries); case GIO_UNIMAP: if (!perm && fg_console != vc->vc_num) return -EPERM; return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct), tmp.entries); } return 0; } /* * We handle the console-specific ioctl's here. We allow the * capability to modify any console, not just the fg_console. 
*/ int vt_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct vc_data *vc = tty->driver_data; struct console_font_op op; /* used in multiple places here */ unsigned int console; unsigned char ucval; unsigned int uival; void __user *up = (void __user *)arg; int i, perm; int ret = 0; console = vc->vc_num; if (!vc_cons_allocated(console)) { /* impossible? */ ret = -ENOIOCTLCMD; goto out; } /* * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. */ perm = 0; if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG)) perm = 1; switch (cmd) { case TIOCLINUX: ret = tioclinux(tty, arg); break; case KIOCSOUND: if (!perm) return -EPERM; /* * The use of PIT_TICK_RATE is historic, it used to be * the platform-dependent CLOCK_TICK_RATE between 2.6.12 * and 2.6.36, which was a minor but unfortunate ABI * change. kd_mksound is locked by the input layer. */ if (arg) arg = PIT_TICK_RATE / arg; kd_mksound(arg, 0); break; case KDMKTONE: if (!perm) return -EPERM; { unsigned int ticks, count; /* * Generate the tone for the appropriate number of ticks. * If the time is zero, turn off sound ourselves. */ ticks = HZ * ((arg >> 16) & 0xffff) / 1000; count = ticks ? (arg & 0xffff) : 0; if (count) count = PIT_TICK_RATE / count; kd_mksound(count, ticks); break; } case KDGKBTYPE: /* * this is naïve. */ ucval = KB_101; ret = put_user(ucval, (char __user *)arg); break; /* * These cannot be implemented on any machine that implements * ioperm() in user level (such as Alpha PCs) or not at all. * * XXX: you should never use these, just call ioperm directly.. */ #ifdef CONFIG_X86 case KDADDIO: case KDDELIO: /* * KDADDIO and KDDELIO may be able to add ports beyond what * we reject here, but to be safe... * * These are locked internally via sys_ioperm */ if (arg < GPFIRST || arg > GPLAST) { ret = -EINVAL; break; } ret = sys_ioperm(arg, 1, (cmd == KDADDIO)) ? 
-ENXIO : 0; break; case KDENABIO: case KDDISABIO: ret = sys_ioperm(GPFIRST, GPNUM, (cmd == KDENABIO)) ? -ENXIO : 0; break; #endif /* Linux m68k/i386 interface for setting the keyboard delay/repeat rate */ case KDKBDREP: { struct kbd_repeat kbrep; if (!capable(CAP_SYS_TTY_CONFIG)) return -EPERM; if (copy_from_user(&kbrep, up, sizeof(struct kbd_repeat))) { ret = -EFAULT; break; } ret = kbd_rate(&kbrep); if (ret) break; if (copy_to_user(up, &kbrep, sizeof(struct kbd_repeat))) ret = -EFAULT; break; } case KDSETMODE: /* * currently, setting the mode from KD_TEXT to KD_GRAPHICS * doesn't do a whole lot. i'm not sure if it should do any * restoration of modes or what... * * XXX It should at least call into the driver, fbdev's definitely * need to restore their engine state. --BenH */ if (!perm) return -EPERM; switch (arg) { case KD_GRAPHICS: break; case KD_TEXT0: case KD_TEXT1: arg = KD_TEXT; case KD_TEXT: break; default: ret = -EINVAL; goto out; } /* FIXME: this needs the console lock extending */ if (vc->vc_mode == (unsigned char) arg) break; vc->vc_mode = (unsigned char) arg; if (console != fg_console) break; /* * explicitly blank/unblank the screen if switching modes */ console_lock(); if (arg == KD_TEXT) do_unblank_screen(1); else do_blank_screen(1); console_unlock(); break; case KDGETMODE: uival = vc->vc_mode; goto setint; case KDMAPDISP: case KDUNMAPDISP: /* * these work like a combination of mmap and KDENABIO. * this could be easily finished. 
*/ ret = -EINVAL; break; case KDSKBMODE: if (!perm) return -EPERM; ret = vt_do_kdskbmode(console, arg); if (ret == 0) tty_ldisc_flush(tty); break; case KDGKBMODE: uival = vt_do_kdgkbmode(console); ret = put_user(uival, (int __user *)arg); break; /* this could be folded into KDSKBMODE, but for compatibility reasons it is not so easy to fold KDGKBMETA into KDGKBMODE */ case KDSKBMETA: ret = vt_do_kdskbmeta(console, arg); break; case KDGKBMETA: /* FIXME: should review whether this is worth locking */ uival = vt_do_kdgkbmeta(console); setint: ret = put_user(uival, (int __user *)arg); break; case KDGETKEYCODE: case KDSETKEYCODE: if(!capable(CAP_SYS_TTY_CONFIG)) perm = 0; ret = vt_do_kbkeycode_ioctl(cmd, up, perm); break; case KDGKBENT: case KDSKBENT: ret = vt_do_kdsk_ioctl(cmd, up, perm, console); break; case KDGKBSENT: case KDSKBSENT: ret = vt_do_kdgkb_ioctl(cmd, up, perm); break; /* Diacritical processing. Handled in keyboard.c as it has to operate on the keyboard locks and structures */ case KDGKBDIACR: case KDGKBDIACRUC: case KDSKBDIACR: case KDSKBDIACRUC: ret = vt_do_diacrit(cmd, up, perm); break; /* the ioctls below read/set the flags usually shown in the leds */ /* don't use them - they will go away without warning */ case KDGKBLED: case KDSKBLED: case KDGETLED: case KDSETLED: ret = vt_do_kdskled(console, cmd, arg, perm); break; /* * A process can indicate its willingness to accept signals * generated by pressing an appropriate key combination. * Thus, one can have a daemon that e.g. spawns a new console * upon a keypress and then changes to it. * See also the kbrequest field of inittab(5). 
*/ case KDSIGACCEPT: { if (!perm || !capable(CAP_KILL)) return -EPERM; if (!valid_signal(arg) || arg < 1 || arg == SIGKILL) ret = -EINVAL; else { spin_lock_irq(&vt_spawn_con.lock); put_pid(vt_spawn_con.pid); vt_spawn_con.pid = get_pid(task_pid(current)); vt_spawn_con.sig = arg; spin_unlock_irq(&vt_spawn_con.lock); } break; } case VT_SETMODE: { struct vt_mode tmp; if (!perm) return -EPERM; if (copy_from_user(&tmp, up, sizeof(struct vt_mode))) { ret = -EFAULT; goto out; } if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) { ret = -EINVAL; goto out; } console_lock(); vc->vt_mode = tmp; /* the frsig is ignored, so we set it to 0 */ vc->vt_mode.frsig = 0; put_pid(vc->vt_pid); vc->vt_pid = get_pid(task_pid(current)); /* no switch is required -- saw@shade.msu.ru */ vc->vt_newvt = -1; console_unlock(); break; } case VT_GETMODE: { struct vt_mode tmp; int rc; console_lock(); memcpy(&tmp, &vc->vt_mode, sizeof(struct vt_mode)); console_unlock(); rc = copy_to_user(up, &tmp, sizeof(struct vt_mode)); if (rc) ret = -EFAULT; break; } /* * Returns global vt state. Note that VT 0 is always open, since * it's an alias for the current VT, and people can't use it here. * We cannot return state for more than 16 VTs, since v_state is short. */ case VT_GETSTATE: { struct vt_stat __user *vtstat = up; unsigned short state, mask; /* Review: FIXME: Console lock ? */ if (put_user(fg_console + 1, &vtstat->v_active)) ret = -EFAULT; else { state = 1; /* /dev/tty0 is always open */ for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask; ++i, mask <<= 1) if (VT_IS_IN_USE(i)) state |= mask; ret = put_user(state, &vtstat->v_state); } break; } /* * Returns the first available (non-opened) console. */ case VT_OPENQRY: /* FIXME: locking ? - but then this is a stupid API */ for (i = 0; i < MAX_NR_CONSOLES; ++i) if (! VT_IS_IN_USE(i)) break; uival = i < MAX_NR_CONSOLES ? 
(i+1) : -1; goto setint; /* * ioctl(fd, VT_ACTIVATE, num) will cause us to switch to vt # num, * with num >= 1 (switches to vt 0, our console, are not allowed, just * to preserve sanity). */ case VT_ACTIVATE: if (!perm) return -EPERM; if (arg == 0 || arg > MAX_NR_CONSOLES) ret = -ENXIO; else { arg--; console_lock(); ret = vc_allocate(arg); console_unlock(); if (ret) break; set_console(arg); } break; case VT_SETACTIVATE: { struct vt_setactivate vsa; if (!perm) return -EPERM; if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg, sizeof(struct vt_setactivate))) { ret = -EFAULT; goto out; } if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) ret = -ENXIO; else { vsa.console--; console_lock(); ret = vc_allocate(vsa.console); if (ret == 0) { struct vc_data *nvc; /* This is safe providing we don't drop the console sem between vc_allocate and finishing referencing nvc */ nvc = vc_cons[vsa.console].d; nvc->vt_mode = vsa.mode; nvc->vt_mode.frsig = 0; put_pid(nvc->vt_pid); nvc->vt_pid = get_pid(task_pid(current)); } console_unlock(); if (ret) break; /* Commence switch and lock */ /* Review set_console locks */ set_console(vsa.console); } break; } /* * wait until the specified VT has been activated */ case VT_WAITACTIVE: if (!perm) return -EPERM; if (arg == 0 || arg > MAX_NR_CONSOLES) ret = -ENXIO; else ret = vt_waitactive(arg); break; /* * If a vt is under process control, the kernel will not switch to it * immediately, but postpone the operation until the process calls this * ioctl, allowing the switch to complete. * * According to the X sources this is the behavior: * 0: pending switch-from not OK * 1: pending switch-from OK * 2: completed switch-to OK */ case VT_RELDISP: if (!perm) return -EPERM; console_lock(); if (vc->vt_mode.mode != VT_PROCESS) { console_unlock(); ret = -EINVAL; break; } /* * Switching-from response */ if (vc->vt_newvt >= 0) { if (arg == 0) /* * Switch disallowed, so forget we were trying * to do it. 
*/ vc->vt_newvt = -1; else { /* * The current vt has been released, so * complete the switch. */ int newvt; newvt = vc->vt_newvt; vc->vt_newvt = -1; ret = vc_allocate(newvt); if (ret) { console_unlock(); break; } /* * When we actually do the console switch, * make sure we are atomic with respect to * other console switches.. */ complete_change_console(vc_cons[newvt].d); } } else { /* * Switched-to response */ /* * If it's just an ACK, ignore it */ if (arg != VT_ACKACQ) ret = -EINVAL; } console_unlock(); break; /* * Disallocate memory associated to VT (but leave VT1) */ case VT_DISALLOCATE: if (arg > MAX_NR_CONSOLES) { ret = -ENXIO; break; } if (arg == 0) { /* deallocate all unused consoles, but leave 0 */ console_lock(); for (i=1; i<MAX_NR_CONSOLES; i++) if (! VT_BUSY(i)) vc_deallocate(i); console_unlock(); } else { /* deallocate a single console, if possible */ arg--; if (VT_BUSY(arg)) ret = -EBUSY; else if (arg) { /* leave 0 */ console_lock(); vc_deallocate(arg); console_unlock(); } } break; case VT_RESIZE: { struct vt_sizes __user *vtsizes = up; struct vc_data *vc; ushort ll,cc; if (!perm) return -EPERM; if (get_user(ll, &vtsizes->v_rows) || get_user(cc, &vtsizes->v_cols)) ret = -EFAULT; else { console_lock(); for (i = 0; i < MAX_NR_CONSOLES; i++) { vc = vc_cons[i].d; if (vc) { vc->vc_resize_user = 1; /* FIXME: review v tty lock */ vc_resize(vc_cons[i].d, cc, ll); } } console_unlock(); } break; } case VT_RESIZEX: { struct vt_consize __user *vtconsize = up; ushort ll,cc,vlin,clin,vcol,ccol; if (!perm) return -EPERM; if (!access_ok(VERIFY_READ, vtconsize, sizeof(struct vt_consize))) { ret = -EFAULT; break; } /* FIXME: Should check the copies properly */ __get_user(ll, &vtconsize->v_rows); __get_user(cc, &vtconsize->v_cols); __get_user(vlin, &vtconsize->v_vlin); __get_user(clin, &vtconsize->v_clin); __get_user(vcol, &vtconsize->v_vcol); __get_user(ccol, &vtconsize->v_ccol); vlin = vlin ? 
vlin : vc->vc_scan_lines; if (clin) { if (ll) { if (ll != vlin/clin) { /* Parameters don't add up */ ret = -EINVAL; break; } } else ll = vlin/clin; } if (vcol && ccol) { if (cc) { if (cc != vcol/ccol) { ret = -EINVAL; break; } } else cc = vcol/ccol; } if (clin > 32) { ret = -EINVAL; break; } for (i = 0; i < MAX_NR_CONSOLES; i++) { if (!vc_cons[i].d) continue; console_lock(); if (vlin) vc_cons[i].d->vc_scan_lines = vlin; if (clin) vc_cons[i].d->vc_font.height = clin; vc_cons[i].d->vc_resize_user = 1; vc_resize(vc_cons[i].d, cc, ll); console_unlock(); } break; } case PIO_FONT: { if (!perm) return -EPERM; op.op = KD_FONT_OP_SET; op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */ op.width = 8; op.height = 0; op.charcount = 256; op.data = up; ret = con_font_op(vc_cons[fg_console].d, &op); break; } case GIO_FONT: { op.op = KD_FONT_OP_GET; op.flags = KD_FONT_FLAG_OLD; op.width = 8; op.height = 32; op.charcount = 256; op.data = up; ret = con_font_op(vc_cons[fg_console].d, &op); break; } case PIO_CMAP: if (!perm) ret = -EPERM; else ret = con_set_cmap(up); break; case GIO_CMAP: ret = con_get_cmap(up); break; case PIO_FONTX: case GIO_FONTX: ret = do_fontx_ioctl(cmd, up, perm, &op); break; case PIO_FONTRESET: { if (!perm) return -EPERM; #ifdef BROKEN_GRAPHICS_PROGRAMS /* With BROKEN_GRAPHICS_PROGRAMS defined, the default font is not saved. 
*/ ret = -ENOSYS; break; #else { op.op = KD_FONT_OP_SET_DEFAULT; op.data = NULL; ret = con_font_op(vc_cons[fg_console].d, &op); if (ret) break; con_set_default_unimap(vc_cons[fg_console].d); break; } #endif } case KDFONTOP: { if (copy_from_user(&op, up, sizeof(op))) { ret = -EFAULT; break; } if (!perm && op.op != KD_FONT_OP_GET) return -EPERM; ret = con_font_op(vc, &op); if (ret) break; if (copy_to_user(up, &op, sizeof(op))) ret = -EFAULT; break; } case PIO_SCRNMAP: if (!perm) ret = -EPERM; else { tty_lock(); ret = con_set_trans_old(up); tty_unlock(); } break; case GIO_SCRNMAP: tty_lock(); ret = con_get_trans_old(up); tty_unlock(); break; case PIO_UNISCRNMAP: if (!perm) ret = -EPERM; else { tty_lock(); ret = con_set_trans_new(up); tty_unlock(); } break; case GIO_UNISCRNMAP: tty_lock(); ret = con_get_trans_new(up); tty_unlock(); break; case PIO_UNIMAPCLR: { struct unimapinit ui; if (!perm) return -EPERM; ret = copy_from_user(&ui, up, sizeof(struct unimapinit)); if (ret) ret = -EFAULT; else { tty_lock(); con_clear_unimap(vc, &ui); tty_unlock(); } break; } case PIO_UNIMAP: case GIO_UNIMAP: tty_lock(); ret = do_unimap_ioctl(cmd, up, perm, vc); tty_unlock(); break; case VT_LOCKSWITCH: if (!capable(CAP_SYS_TTY_CONFIG)) return -EPERM; vt_dont_switch = 1; break; case VT_UNLOCKSWITCH: if (!capable(CAP_SYS_TTY_CONFIG)) return -EPERM; vt_dont_switch = 0; break; case VT_GETHIFONTMASK: ret = put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg); break; case VT_WAITEVENT: ret = vt_event_wait_ioctl((struct vt_event __user *)arg); break; default: ret = -ENOIOCTLCMD; } out: return ret; } void reset_vc(struct vc_data *vc) { vc->vc_mode = KD_TEXT; vt_reset_unicode(vc->vc_num); vc->vt_mode.mode = VT_AUTO; vc->vt_mode.waitv = 0; vc->vt_mode.relsig = 0; vc->vt_mode.acqsig = 0; vc->vt_mode.frsig = 0; put_pid(vc->vt_pid); vc->vt_pid = NULL; vc->vt_newvt = -1; if (!in_interrupt()) /* Via keyboard.c:SAK() - akpm */ reset_palette(vc); } void vc_SAK(struct work_struct *work) { struct vc 
/*
 * NOTE(review): this chunk begins mid-way through the SAK workqueue
 * handler; the "struct vc " head of the vc_con declaration lies before
 * this view.
 */
*vc_con = container_of(work, struct vc, SAK_work);
	struct vc_data *vc;
	struct tty_struct *tty;

	console_lock();
	vc = vc_con->d;
	if (vc) {
		/* FIXME: review tty ref counting */
		tty = vc->port.tty;
		/*
		 * SAK should also work in all raw modes and reset
		 * them properly.
		 */
		if (tty)
			__do_SAK(tty);
		reset_vc(vc);
	}
	console_unlock();
}

#ifdef CONFIG_COMPAT

/* 32-bit layout of the legacy consolefontdesc passed by compat userspace. */
struct compat_consolefontdesc {
	unsigned short charcount;	/* characters in font (256 or 512) */
	unsigned short charheight;	/* scan lines per character (1-32) */
	compat_caddr_t chardata;	/* font data in expanded form */
};

/*
 * Translate a compat PIO_FONTX/GIO_FONTX request into a native
 * console_font_op and forward it to con_font_op() on the foreground
 * console.  Returns 0 or a negative errno.
 */
static inline int
compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
		   int perm, struct console_font_op *op)
{
	struct compat_consolefontdesc cfdarg;
	int i;

	if (copy_from_user(&cfdarg, user_cfd, sizeof(struct compat_consolefontdesc)))
		return -EFAULT;

	switch (cmd) {
	case PIO_FONTX:
		/* Setting a font requires tty ownership or CAP_SYS_TTY_CONFIG. */
		if (!perm)
			return -EPERM;
		op->op = KD_FONT_OP_SET;
		op->flags = KD_FONT_FLAG_OLD;
		op->width = 8;
		op->height = cfdarg.charheight;
		op->charcount = cfdarg.charcount;
		op->data = compat_ptr(cfdarg.chardata);
		return con_font_op(vc_cons[fg_console].d, op);
	case GIO_FONTX:
		op->op = KD_FONT_OP_GET;
		op->flags = KD_FONT_FLAG_OLD;
		op->width = 8;
		op->height = cfdarg.charheight;
		op->charcount = cfdarg.charcount;
		op->data = compat_ptr(cfdarg.chardata);
		i = con_font_op(vc_cons[fg_console].d, op);
		if (i)
			return i;
		/* Report back the actual font geometry obtained. */
		cfdarg.charheight = op->height;
		cfdarg.charcount = op->charcount;
		if (copy_to_user(user_cfd, &cfdarg, sizeof(struct compat_consolefontdesc)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

/* 32-bit layout of console_font_op for compat KDFONTOP. */
struct compat_console_font_op {
	compat_uint_t op;		/* operation code KD_FONT_OP_* */
	compat_uint_t flags;		/* KD_FONT_FLAG_* */
	compat_uint_t width, height;	/* font size */
	compat_uint_t charcount;
	compat_caddr_t data;		/* font data with height fixed to 32 */
};

/*
 * Handle a compat KDFONTOP: the 32-bit structure is copied in, its
 * compat_caddr_t data pointer is widened in place, and the result is
 * copied back after narrowing the pointer again.  The in-place casts
 * rely on the compat struct being a prefix-compatible overlay of
 * console_font_op.
 */
static inline int
compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop,
		      int perm, struct console_font_op *op, struct vc_data *vc)
{
	int i;

	if (copy_from_user(op, fontop, sizeof(struct compat_console_font_op)))
		return -EFAULT;
	/* Only GET is allowed without tty ownership/CAP_SYS_TTY_CONFIG. */
	if (!perm && op->op != KD_FONT_OP_GET)
		return -EPERM;
	op->data = compat_ptr(((struct compat_console_font_op *)op)->data);
	i = con_font_op(vc, op);
	if (i)
		return i;
	((struct compat_console_font_op *)op)->data = (unsigned long)op->data;
	if (copy_to_user(fontop, op, sizeof(struct compat_console_font_op)))
		return -EFAULT;
	return 0;
}

/* 32-bit layout of unimapdesc for compat PIO_UNIMAP/GIO_UNIMAP. */
struct compat_unimapdesc {
	unsigned short entry_ct;
	compat_caddr_t entries;
};

/*
 * Handle compat PIO_UNIMAP/GIO_UNIMAP: widen the compat entries pointer
 * and forward to con_set_unimap()/con_get_unimap().
 */
static inline int
compat_unimap_ioctl(unsigned int cmd, struct compat_unimapdesc __user *user_ud,
		    int perm, struct vc_data *vc)
{
	struct compat_unimapdesc tmp;
	struct unipair __user *tmp_entries;

	if (copy_from_user(&tmp, user_ud, sizeof tmp))
		return -EFAULT;
	tmp_entries = compat_ptr(tmp.entries);
	if (tmp_entries)
		if (!access_ok(VERIFY_WRITE, tmp_entries,
			       tmp.entry_ct*sizeof(struct unipair)))
			return -EFAULT;
	switch (cmd) {
	case PIO_UNIMAP:
		if (!perm)
			return -EPERM;
		return con_set_unimap(vc, tmp.entry_ct, tmp_entries);
	case GIO_UNIMAP:
		/* Reading another VT's map still needs permission. */
		if (!perm && fg_console != vc->vc_num)
			return -EPERM;
		return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct), tmp_entries);
	}
	return 0;
}

/*
 * compat_ioctl entry point for the VT driver.  Commands with
 * incompatible 32-bit structures get dedicated translators above;
 * integer-argument commands fall straight through to vt_ioctl();
 * everything else has its arg widened from compat_ptr first.
 */
long vt_compat_ioctl(struct tty_struct *tty,
		     unsigned int cmd, unsigned long arg)
{
	struct vc_data *vc = tty->driver_data;
	struct console_font_op op;	/* used in multiple places here */
	unsigned int console;
	void __user *up = (void __user *)arg;
	int perm;
	int ret = 0;

	console = vc->vc_num;

	if (!vc_cons_allocated(console)) {	/* impossible? */
		ret = -ENOIOCTLCMD;
		goto out;
	}

	/*
	 * To have permissions to do most of the vt ioctls, we either have
	 * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
	 */
	perm = 0;
	if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG))
		perm = 1;

	switch (cmd) {
	/*
	 * these need special handlers for incompatible data structures
	 */
	case PIO_FONTX:
	case GIO_FONTX:
		ret = compat_fontx_ioctl(cmd, up, perm, &op);
		break;

	case KDFONTOP:
		ret = compat_kdfontop_ioctl(up, perm, &op, vc);
		break;

	case PIO_UNIMAP:
	case GIO_UNIMAP:
		tty_lock();
		ret = compat_unimap_ioctl(cmd, up, perm, vc);
		tty_unlock();
		break;

	/*
	 * all these treat 'arg' as an integer
	 */
	case KIOCSOUND:
	case KDMKTONE:
#ifdef CONFIG_X86
	case KDADDIO:
	case KDDELIO:
#endif
	case KDSETMODE:
	case KDMAPDISP:
	case KDUNMAPDISP:
	case KDSKBMODE:
	case KDSKBMETA:
	case KDSKBLED:
	case KDSETLED:
	case KDSIGACCEPT:
	case VT_ACTIVATE:
	case VT_WAITACTIVE:
	case VT_RELDISP:
	case VT_DISALLOCATE:
	case VT_RESIZE:
	case VT_RESIZEX:
		goto fallback;

	/*
	 * the rest has a compatible data structure behind arg,
	 * but we have to convert it to a proper 64 bit pointer.
	 */
	default:
		arg = (unsigned long)compat_ptr(arg);
		goto fallback;
	}
out:
	return ret;

fallback:
	return vt_ioctl(tty, cmd, arg);
}

#endif /* CONFIG_COMPAT */

/*
 * Performs the back end of a vt switch. Called under the console
 * semaphore.
 */
static void complete_change_console(struct vc_data *vc)
{
	unsigned char old_vc_mode;
	int old = fg_console;

	last_console = fg_console;

	/*
	 * If we're switching, we could be going from KD_GRAPHICS to
	 * KD_TEXT mode or vice versa, which means we need to blank or
	 * unblank the screen later.
	 */
	old_vc_mode = vc_cons[fg_console].d->vc_mode;
	switch_screen(vc);

	/*
	 * This can't appear below a successful kill_pid().  If it did,
	 * then the *blank_screen operation could occur while X, having
	 * received acqsig, is waking up on another processor.  This
	 * condition can lead to overlapping accesses to the VGA range
	 * and the framebuffer (causing system lockups).
	 *
	 * To account for this we duplicate this code below only if the
	 * controlling process is gone and we've called reset_vc.
	 */
	if (old_vc_mode != vc->vc_mode) {
		if (vc->vc_mode == KD_TEXT)
			do_unblank_screen(1);
		else
			do_blank_screen(1);
	}

	/*
	 * If this new console is under process control, send it a signal
	 * telling it that it has acquired. Also check if it has died and
	 * clean up (similar to logic employed in change_console())
	 */
	if (vc->vt_mode.mode == VT_PROCESS) {
		/*
		 * Send the signal as privileged - kill_pid() will
		 * tell us if the process has gone or something else
		 * is awry
		 */
		if (kill_pid(vc->vt_pid, vc->vt_mode.acqsig, 1) != 0) {
			/*
			 * The controlling process has died, so we revert back to
			 * normal operation. In this case, we'll also change back
			 * to KD_TEXT mode. I'm not sure if this is strictly correct
			 * but it saves the agony when the X server dies and the screen
			 * remains blanked due to KD_GRAPHICS! It would be nice to do
			 * this outside of VT_PROCESS but there is no single process
			 * to account for and tracking tty count may be undesirable.
			 */
			reset_vc(vc);

			if (old_vc_mode != vc->vc_mode) {
				if (vc->vc_mode == KD_TEXT)
					do_unblank_screen(1);
				else
					do_blank_screen(1);
			}
		}
	}

	/*
	 * Wake anyone waiting for their VT to activate
	 */
	vt_event_post(VT_EVENT_SWITCH, old, vc->vc_num);
	return;
}

/*
 * Performs the front-end of a vt switch
 */
void change_console(struct vc_data *new_vc)
{
	struct vc_data *vc;

	if (!new_vc || new_vc->vc_num == fg_console || vt_dont_switch)
		return;

	/*
	 * If this vt is in process mode, then we need to handshake with
	 * that process before switching. Essentially, we store where that
	 * vt wants to switch to and wait for it to tell us when it's done
	 * (via VT_RELDISP ioctl).
	 *
	 * We also check to see if the controlling process still exists.
	 * If it doesn't, we reset this vt to auto mode and continue.
	 * This is a cheap way to track process control. The worst thing
	 * that can happen is: we send a signal to a process, it dies, and
	 * the switch gets "lost" waiting for a response; hopefully, the
	 * user will try again, we'll detect the process is gone (unless
	 * the user waits just the right amount of time :-) and revert the
	 * vt to auto control.
	 */
	vc = vc_cons[fg_console].d;
	if (vc->vt_mode.mode == VT_PROCESS) {
		/*
		 * Send the signal as privileged - kill_pid() will
		 * tell us if the process has gone or something else
		 * is awry.
		 *
		 * We need to set vt_newvt *before* sending the signal or we
		 * have a race.
		 */
		vc->vt_newvt = new_vc->vc_num;
		if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
			/*
			 * It worked. Mark the vt to switch to and
			 * return. The process needs to send us a
			 * VT_RELDISP ioctl to complete the switch.
			 */
			return;
		}

		/*
		 * The controlling process has died, so we revert back to
		 * normal operation. In this case, we'll also change back
		 * to KD_TEXT mode. I'm not sure if this is strictly correct
		 * but it saves the agony when the X server dies and the screen
		 * remains blanked due to KD_GRAPHICS! It would be nice to do
		 * this outside of VT_PROCESS but there is no single process
		 * to account for and tracking tty count may be undesirable.
		 */
		reset_vc(vc);

		/*
		 * Fall through to normal (VT_AUTO) handling of the switch...
		 */
	}

	/*
	 * Ignore all switches in KD_GRAPHICS+VT_AUTO mode
	 */
	if (vc->vc_mode == KD_GRAPHICS)
		return;

	complete_change_console(new_vc);
}

/* Perform a kernel triggered VT switch for suspend/resume */

static int disable_vt_switch;

/*
 * Switch the foreground console to @vt for suspend/resume, optionally
 * allocating it first.  Returns the previous console number on success,
 * 0 if kernel VT switching is disabled, or a negative errno.
 */
int vt_move_to_console(unsigned int vt, int alloc)
{
	int prev;

	console_lock();
	/* Graphics mode - up to X */
	if (disable_vt_switch) {
		console_unlock();
		return 0;
	}
	prev = fg_console;

	if (alloc && vc_allocate(vt)) {
		/* we can't have a free VC for now. Too bad,
		 * we don't want to mess the screen for now. */
		console_unlock();
		return -ENOSPC;
	}

	if (set_console(vt)) {
		/*
		 * We're unable to switch to the SUSPEND_CONSOLE.
		 * Let the calling function know so it can decide
		 * what to do.
		 */
		console_unlock();
		return -EIO;
	}
	console_unlock();
	/* vt_waitactive() is 1-based while vt numbers here are 0-based. */
	if (vt_waitactive(vt + 1)) {
		pr_debug("Suspend: Can't switch VCs.");
		return -EINTR;
	}
	return prev;
}

/*
 * Normally during a suspend, we allocate a new console and switch to it.
 * When we resume, we switch back to the original console.  This switch
 * can be slow, so on systems where the framebuffer can handle restoration
 * of video registers anyways, there's little point in doing the console
 * switch.  This function allows you to disable it by passing it '0'.
 */
void pm_set_vt_switch(int do_switch)
{
	console_lock();
	disable_vt_switch = !do_switch;
	console_unlock();
}
EXPORT_SYMBOL(pm_set_vt_switch);
gpl-2.0
BigBrother1984/android_kernel_samsung_tuna
arch/powerpc/platforms/86xx/mpc8610_hpcd.c
2185
10493
/*
 * MPC8610 HPCD board specific routines
 *
 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
 * Recode: Jason Jin <jason.jin@freescale.com>
 *         York Sun <yorksun@freescale.com>
 *
 * Rewrite the interrupt routing. remove the 8259PIC support,
 * All the integrated device in ULI use sideband interrupt.
 *
 * Copyright 2008 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of.h>

#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>

#include <asm/mpic.h>

#include <linux/of_platform.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/simple_gpio.h>

#include "mpc86xx.h"

/* Device-tree node and ioremapped registers of the PIXIS board FPGA. */
static struct device_node *pixis_node;
static unsigned char *pixis_bdcfg0, *pixis_arch;

#ifdef CONFIG_SUSPEND
/* Wakeup IRQ handler for the PIXIS sw9 event; nothing to do but ack. */
static irqreturn_t mpc8610_sw9_irq(int irq, void *data)
{
	pr_debug("%s: PIXIS' event (sw9/wakeup) IRQ handled\n", __func__);
	return IRQ_HANDLED;
}

/* Map and request the PIXIS event IRQ and enable it as a wakeup source. */
static void __init mpc8610_suspend_init(void)
{
	int irq;
	int ret;

	if (!pixis_node)
		return;

	irq = irq_of_parse_and_map(pixis_node, 0);
	if (!irq) {
		pr_err("%s: can't map pixis event IRQ.\n", __func__);
		return;
	}

	ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9:wakeup", NULL);
	if (ret) {
		pr_err("%s: can't request pixis event IRQ: %d\n",
		       __func__, ret);
		irq_dispose_mapping(irq);
	}

	enable_irq_wake(irq);
}
#else
static inline void mpc8610_suspend_init(void) { }
#endif /* CONFIG_SUSPEND */

static struct of_device_id __initdata mpc8610_ids[] = {
	{ .compatible = "fsl,mpc8610-immr", },
	{ .compatible = "fsl,mpc8610-guts", },
	{ .compatible = "simple-bus", },
	/* So that the DMA channel nodes can be probed individually: */
	{ .compatible = "fsl,eloplus-dma", },
	{}
};

/* Register board platform devices; runs as a machine device initcall. */
static int __init mpc8610_declare_of_platform_devices(void)
{
	/* Firstly, register PIXIS GPIOs. */
	simple_gpiochip_init("fsl,fpga-pixis-gpio-bank");

	/* Enable wakeup on PIXIS' event IRQ. */
	mpc8610_suspend_init();

	/* Without this call, the SSI device driver won't get probed. */
	of_platform_bus_probe(NULL, mpc8610_ids, NULL);

	return 0;
}
machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);

#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)

/*
 * DIU Area Descriptor
 *
 * The MPC8610 reference manual shows the bits of the AD register in
 * little-endian order, which causes the BLUE_C field to be split into two
 * parts. To simplify the definition of the MAKE_AD() macro, we define the
 * fields in big-endian order and byte-swap the result.
 *
 * So even though the registers don't look like they're in the
 * same bit positions as they are on the P1022, the same value is written to
 * the AD register on the MPC8610 and on the P1022.
 */
#define AD_BYTE_F		0x10000000
#define AD_ALPHA_C_MASK		0x0E000000
#define AD_ALPHA_C_SHIFT	25
#define AD_BLUE_C_MASK		0x01800000
#define AD_BLUE_C_SHIFT		23
#define AD_GREEN_C_MASK		0x00600000
#define AD_GREEN_C_SHIFT	21
#define AD_RED_C_MASK		0x00180000
#define AD_RED_C_SHIFT		19
#define AD_PALETTE		0x00040000
#define AD_PIXEL_S_MASK		0x00030000
#define AD_PIXEL_S_SHIFT	16
#define AD_COMP_3_MASK		0x0000F000
#define AD_COMP_3_SHIFT		12
#define AD_COMP_2_MASK		0x00000F00
#define AD_COMP_2_SHIFT		8
#define AD_COMP_1_MASK		0x000000F0
#define AD_COMP_1_SHIFT		4
#define AD_COMP_0_MASK		0x0000000F
#define AD_COMP_0_SHIFT		0

#define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \
	cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \
	(blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \
	(red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \
	(c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \
	(c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT))

/*
 * Return the DIU Area Descriptor for the given pixel depth and monitor
 * port, accounting for the rev-1 DVI mis-wiring.  Returns 0 for an
 * unsupported depth.
 */
unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel,
					  int monitor_port)
{
	static const unsigned long pixelformat[][3] = {
		{MAKE_AD(3, 0, 2, 1, 3, 8, 8, 8, 8),
		 MAKE_AD(4, 2, 0, 1, 2, 8, 8, 8, 0),
		 MAKE_AD(4, 0, 2, 1, 1, 5, 6, 5, 0)},
		{MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8),
		 MAKE_AD(4, 0, 2, 1, 2, 8, 8, 8, 0),
		 MAKE_AD(4, 2, 0, 1, 1, 5, 6, 5, 0)},
	};
	unsigned int arch_monitor;

	/* The DVI port is mis-wired on revision 1 of this board. */
	arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0)) ? 0 : 1;

	switch (bits_per_pixel) {
	case 32:
		return pixelformat[arch_monitor][0];
	case 24:
		return pixelformat[arch_monitor][1];
	case 16:
		return pixelformat[arch_monitor][2];
	default:
		pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel);
		return 0;
	}
}

/*
 * For dual-link LVDS, rotate each 8-bit gamma entry left by two bits
 * (hardware expects the table in that bit order).
 */
void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base)
{
	int i;

	if (monitor_port == 2) {		/* dual link LVDS */
		for (i = 0; i < 256*3; i++)
			gamma_table_base[i] = (gamma_table_base[i] << 2) |
					 ((gamma_table_base[i] >> 6) & 0x03);
	}
}

#define PX_BRDCFG0_DVISEL	(1 << 3)
#define PX_BRDCFG0_DLINK	(1 << 4)
#define PX_BRDCFG0_DIU_MASK	(PX_BRDCFG0_DVISEL | PX_BRDCFG0_DLINK)

/* Route the DIU output to DVI (0), single-link LVDS (1) or dual LVDS (2). */
void mpc8610hpcd_set_monitor_port(int monitor_port)
{
	static const u8 bdcfg[] = {
		PX_BRDCFG0_DVISEL | PX_BRDCFG0_DLINK,
		PX_BRDCFG0_DLINK,
		0,
	};

	if (monitor_port < 3)
		clrsetbits_8(pixis_bdcfg0, PX_BRDCFG0_DIU_MASK,
			     bdcfg[monitor_port]);
}

/*
 * Program the GUTS CLKDVDR pixel-clock divider to the value that best
 * approximates @pixclock (given in picoseconds), searching divider-1,
 * divider and divider+1 within a 5% tolerance window.
 */
void mpc8610hpcd_set_pixel_clock(unsigned int pixclock)
{
	u32 __iomem *clkdvdr;
	u32 temp;
	/* variables for pixel clock calcs */
	ulong bestval, bestfreq, speed_ccb, minpixclock, maxpixclock;
	ulong pixval;
	long err;
	int i;

	clkdvdr = ioremap(get_immrbase() + 0xe0800, sizeof(u32));
	if (!clkdvdr) {
		printk(KERN_ERR "Err: can't map clock divider register!\n");
		return;
	}

	/* Pixel Clock configuration */
	speed_ccb = fsl_get_sys_freq();

	/* Calculate the pixel clock with the smallest error */
	/* calculate the following in steps to avoid overflow */
	pr_debug("DIU pixclock in ps - %d\n", pixclock);
	temp = 1000000000/pixclock;
	temp *= 1000;
	pixclock = temp;
	pr_debug("DIU pixclock freq - %u\n", pixclock);

	temp = pixclock * 5 / 100;
	pr_debug("deviation = %d\n", temp);
	minpixclock = pixclock - temp;
	maxpixclock = pixclock + temp;
	pr_debug("DIU minpixclock - %lu\n", minpixclock);
	pr_debug("DIU maxpixclock - %lu\n", maxpixclock);
	pixval = speed_ccb/pixclock;
	pr_debug("DIU pixval = %lu\n", pixval);

	err = 100000000;
	bestval = pixval;
	pr_debug("DIU bestval = %lu\n", bestval);

	bestfreq = 0;
	for (i = -1; i <= 1; i++) {
		temp = speed_ccb / ((pixval+i) + 1);
		pr_debug("DIU test pixval i= %d, pixval=%lu, temp freq. = %u\n",
			 i, pixval, temp);
		if ((temp < minpixclock) || (temp > maxpixclock))
			pr_debug("DIU exceeds monitor range (%lu to %lu)\n",
				 minpixclock, maxpixclock);
		else if (abs(temp - pixclock) < err) {
			pr_debug("Entered the else if block %d\n", i);
			err = abs(temp - pixclock);
			bestval = pixval+i;
			bestfreq = temp;
		}
	}

	pr_debug("DIU chose = %lx\n", bestval);
	pr_debug("DIU error = %ld\n NomPixClk ", err);
	pr_debug("DIU: Best Freq = %lx\n", bestfreq);
	/* Modify PXCLK in GUTS CLKDVDR */
	pr_debug("DIU: Current value of CLKDVDR = 0x%08x\n", (*clkdvdr));
	temp = (*clkdvdr) & 0x2000FFFF;
	*clkdvdr = temp;				/* turn off clock */
	*clkdvdr = temp | 0x80000000 | (((bestval) & 0x1F) << 16);
	pr_debug("DIU: Modified value of CLKDVDR = 0x%08x\n", (*clkdvdr));
	iounmap(clkdvdr);
}

/* sysfs helper: list the monitor ports, marking the active one with '*'. */
ssize_t mpc8610hpcd_show_monitor_port(int monitor_port, char *buf)
{
	return snprintf(buf, PAGE_SIZE,
			"%c0 - DVI\n"
			"%c1 - Single link LVDS\n"
			"%c2 - Dual link LVDS\n",
			monitor_port == 0 ? '*' : ' ',
			monitor_port == 1 ? '*' : ' ',
			monitor_port == 2 ? '*' : ' ');
}

/* sysfs helper: clamp an out-of-range monitor-port selection to 0. */
int mpc8610hpcd_set_sysfs_monitor_port(int val)
{
	return val < 3 ? val : 0;
}

#endif

/* Board setup: PCI bridges, DIU ops hooks, and PIXIS FPGA register map. */
static void __init mpc86xx_hpcd_setup_arch(void)
{
	struct resource r;
	struct device_node *np;
	unsigned char *pixis;

	if (ppc_md.progress)
		ppc_md.progress("mpc86xx_hpcd_setup_arch()", 0);

#ifdef CONFIG_PCI
	for_each_node_by_type(np, "pci") {
		if (of_device_is_compatible(np, "fsl,mpc8610-pci")
		    || of_device_is_compatible(np, "fsl,mpc8641-pcie")) {
			struct resource rsrc;
			of_address_to_resource(np, 0, &rsrc);
			if ((rsrc.start & 0xfffff) == 0xa000)
				fsl_add_bridge(np, 1);
			else
				fsl_add_bridge(np, 0);
		}
	}
#endif
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
	diu_ops.get_pixel_format	= mpc8610hpcd_get_pixel_format;
	diu_ops.set_gamma_table		= mpc8610hpcd_set_gamma_table;
	diu_ops.set_monitor_port	= mpc8610hpcd_set_monitor_port;
	diu_ops.set_pixel_clock		= mpc8610hpcd_set_pixel_clock;
	diu_ops.show_monitor_port	= mpc8610hpcd_show_monitor_port;
	diu_ops.set_sysfs_monitor_port	= mpc8610hpcd_set_sysfs_monitor_port;
#endif

	pixis_node = of_find_compatible_node(NULL, NULL, "fsl,fpga-pixis");
	if (pixis_node) {
		of_address_to_resource(pixis_node, 0, &r);
		of_node_put(pixis_node);
		pixis = ioremap(r.start, 32);
		if (!pixis) {
			printk(KERN_ERR "Err: can't map FPGA cfg register!\n");
			return;
		}
		pixis_bdcfg0 = pixis + 8;
		pixis_arch = pixis + 1;
	} else
		printk(KERN_ERR "Err: "
				"can't find device node 'fsl,fpga-pixis'\n");

	printk("MPC86xx HPCD board from Freescale Semiconductor\n");
}

/*
 * Called very early, device-tree isn't unflattened
 */
static int __init mpc86xx_hpcd_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (of_flat_dt_is_compatible(root, "fsl,MPC8610HPCD"))
		return 1;	/* Looks good */

	return 0;
}

/* Zero the time base and enable it via HID0 before the decrementer runs. */
static long __init mpc86xx_time_init(void)
{
	unsigned int temp;

	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	temp = mfspr(SPRN_HID0);
	temp |= HID0_TBEN;
	mtspr(SPRN_HID0, temp);
	asm volatile("isync");

	return 0;
}

define_machine(mpc86xx_hpcd) {
	.name			= "MPC86xx HPCD",
	.probe			= mpc86xx_hpcd_probe,
	.setup_arch		= mpc86xx_hpcd_setup_arch,
	.init_IRQ		= mpc86xx_init_irq,
	.get_irq		= mpic_get_irq,
	.restart		= fsl_rstcr_restart,
	.time_init		= mpc86xx_time_init,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= udbg_progress,
	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
};
gpl-2.0
CoderSherlock/Mondroid
kernelsource/msm/net/l2tp/l2tp_netlink.c
2185
23087
/*
 * L2TP netlink layer, for management
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * Partly based on the IrDA nelink implementation
 * (see net/irda/irnetlink.c) which is:
 * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org>
 * which is in turn partly based on the wireless netlink code:
 * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <net/sock.h>
#include <net/genetlink.h>
#include <net/udp.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <linux/list.h>
#include <net/net_namespace.h>

#include <linux/l2tp.h>

#include "l2tp_core.h"

static struct genl_family l2tp_nl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= L2TP_GENL_NAME,
	.version	= L2TP_GENL_VERSION,
	.hdrsize	= 0,
	.maxattr	= L2TP_ATTR_MAX,
	.netnsok	= true,
};

/* Accessed under genl lock */
static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];

/*
 * Look up the session addressed by a netlink request: by interface name
 * if L2TP_ATTR_IFNAME is present, otherwise by tunnel + session id.
 * Returns NULL when not found or insufficiently addressed.
 */
static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
{
	u32 tunnel_id;
	u32 session_id;
	char *ifname;
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session = NULL;
	struct net *net = genl_info_net(info);

	if (info->attrs[L2TP_ATTR_IFNAME]) {
		ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
		session = l2tp_session_find_by_ifname(net, ifname);
	} else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
		   (info->attrs[L2TP_ATTR_CONN_ID])) {
		tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
		session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
		tunnel = l2tp_tunnel_find(net, tunnel_id);
		if (tunnel)
			session = l2tp_session_find(net, tunnel, session_id);
	}

	return session;
}

/* L2TP_CMD_NOOP: reply with an empty NOOP message (liveness check). */
static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int ret = -ENOBUFS;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto out;
	}

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &l2tp_nl_family, 0, L2TP_CMD_NOOP);
	if (!hdr) {
		ret = -EMSGSIZE;
		goto err_out;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_out:
	nlmsg_free(msg);

out:
	return ret;
}

/*
 * L2TP_CMD_TUNNEL_CREATE: validate mandatory attributes, build an
 * l2tp_tunnel_cfg from either a passed-in socket fd or explicit
 * IPv4/IPv6 endpoint addresses, and create the tunnel.
 */
static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
{
	u32 tunnel_id;
	u32 peer_tunnel_id;
	int proto_version;
	int fd;
	int ret = 0;
	struct l2tp_tunnel_cfg cfg = { 0, };
	struct l2tp_tunnel *tunnel;
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);

	if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]);

	if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) {
		ret = -EINVAL;
		goto out;
	}
	proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]);

	if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) {
		ret = -EINVAL;
		goto out;
	}
	cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]);

	fd = -1;
	if (info->attrs[L2TP_ATTR_FD]) {
		fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
	} else {
		/* No socket fd: endpoint addresses must be given instead. */
#if IS_ENABLED(CONFIG_IPV6)
		if (info->attrs[L2TP_ATTR_IP6_SADDR] &&
		    info->attrs[L2TP_ATTR_IP6_DADDR]) {
			cfg.local_ip6 = nla_data(
				info->attrs[L2TP_ATTR_IP6_SADDR]);
			cfg.peer_ip6 = nla_data(
				info->attrs[L2TP_ATTR_IP6_DADDR]);
		} else
#endif
		if (info->attrs[L2TP_ATTR_IP_SADDR] &&
		    info->attrs[L2TP_ATTR_IP_DADDR]) {
			cfg.local_ip.s_addr = nla_get_be32(
				info->attrs[L2TP_ATTR_IP_SADDR]);
			cfg.peer_ip.s_addr = nla_get_be32(
				info->attrs[L2TP_ATTR_IP_DADDR]);
		} else {
			ret = -EINVAL;
			goto out;
		}
		if (info->attrs[L2TP_ATTR_UDP_SPORT])
			cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
		if (info->attrs[L2TP_ATTR_UDP_DPORT])
			cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
		if (info->attrs[L2TP_ATTR_UDP_CSUM])
			cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);
	}

	if (info->attrs[L2TP_ATTR_DEBUG])
		cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (tunnel != NULL) {
		ret = -EEXIST;
		goto out;
	}

	ret = -EINVAL;
	switch (cfg.encap) {
	case L2TP_ENCAPTYPE_UDP:
	case L2TP_ENCAPTYPE_IP:
		ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id,
					 peer_tunnel_id, &cfg, &tunnel);
		break;
	}

out:
	return ret;
}

/* L2TP_CMD_TUNNEL_DELETE: delete the tunnel identified by CONN_ID. */
static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info)
{
	struct l2tp_tunnel *tunnel;
	u32 tunnel_id;
	int ret = 0;
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (tunnel == NULL) {
		ret = -ENODEV;
		goto out;
	}

	(void) l2tp_tunnel_delete(tunnel);

out:
	return ret;
}

/* L2TP_CMD_TUNNEL_MODIFY: only the debug mask is currently mutable. */
static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info)
{
	struct l2tp_tunnel *tunnel;
	u32 tunnel_id;
	int ret = 0;
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (tunnel == NULL) {
		ret = -ENODEV;
		goto out;
	}

	if (info->attrs[L2TP_ATTR_DEBUG])
		tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);

out:
	return ret;
}

/*
 * Fill an skb with a L2TP_CMD_TUNNEL_GET reply for @tunnel: ids,
 * encap type, nested stats, and the socket addressing info when the
 * tunnel socket is still present.
 */
static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			       struct l2tp_tunnel *tunnel)
{
	void *hdr;
	struct nlattr *nest;
	struct sock *sk = NULL;
	struct inet_sock *inet;
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6_pinfo *np = NULL;
#endif

	hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
			  L2TP_CMD_TUNNEL_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
	    nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
	    nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
		goto nla_put_failure;

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
		    atomic_long_read(&tunnel->stats.tx_packets)) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
		    atomic_long_read(&tunnel->stats.tx_bytes)) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
		    atomic_long_read(&tunnel->stats.tx_errors)) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
		    atomic_long_read(&tunnel->stats.rx_packets)) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
		    atomic_long_read(&tunnel->stats.rx_bytes)) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
		    atomic_long_read(&tunnel->stats.rx_seq_discards)) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
		    atomic_long_read(&tunnel->stats.rx_oos_packets)) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
		    atomic_long_read(&tunnel->stats.rx_errors)))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	sk = tunnel->sock;
	if (!sk)
		goto out;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		np = inet6_sk(sk);
#endif

	inet = inet_sk(sk);

	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
		    nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
		    nla_put_u8(skb, L2TP_ATTR_UDP_CSUM,
			       (sk->sk_no_check != UDP_CSUM_NOXMIT)))
			goto nla_put_failure;
		/* NOBREAK */
	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (np) {
			if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
				    &np->saddr) ||
			    nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr),
				    &np->daddr))
				goto nla_put_failure;
		} else
#endif
		if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
		    nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
			goto nla_put_failure;
		break;
	}

out:
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}

/* L2TP_CMD_TUNNEL_GET: unicast one tunnel's details to the requester. */
static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
{
	struct l2tp_tunnel *tunnel;
	struct sk_buff *msg;
	u32 tunnel_id;
	int ret = -ENOBUFS;
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}

	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (tunnel == NULL) {
		ret = -ENODEV;
		goto out;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
				  NLM_F_ACK, tunnel);
	if (ret < 0)
		goto err_out;

	return genlmsg_unicast(net, msg, info->snd_portid);

err_out:
	nlmsg_free(msg);

out:
	return ret;
}

/* Dump callback: iterate tunnels from cb->args[0] until the skb fills. */
static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int ti = cb->args[0];
	struct l2tp_tunnel *tunnel;
	struct net *net = sock_net(skb->sk);

	for (;;) {
		tunnel = l2tp_tunnel_find_nth(net, ti);
		if (tunnel == NULL)
			goto out;

		if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI,
					tunnel) <= 0)
			goto out;

		ti++;
	}

out:
	cb->args[0] = ti;

	return skb->len;
}

/*
 * L2TP_CMD_SESSION_CREATE: validate tunnel/session ids and pseudowire
 * type before creating a session.
 *
 * NOTE(review): this chunk ends mid-way through this function; the
 * remainder of its body lies beyond this view.
 */
static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info)
{
	u32 tunnel_id = 0;
	u32 session_id;
	u32 peer_session_id;
	int ret = 0;
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	struct l2tp_session_cfg cfg = { 0, };
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (!tunnel) {
		ret = -ENODEV;
		goto out;
	}

	if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
		ret = -EINVAL;
		goto out;
	}
	session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
	session = l2tp_session_find(net, tunnel, session_id);
	if (session) {
		ret = -EEXIST;
		goto out;
	}

	if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
		ret = -EINVAL;
		goto out;
	}
	peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);

	if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
		ret = -EINVAL;
		goto out;
	}
	cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
	if (cfg.pw_type >= __L2TP_PWTYPE_MAX)
{ ret = -EINVAL; goto out; } if (tunnel->version > 2) { if (info->attrs[L2TP_ATTR_OFFSET]) cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]); if (info->attrs[L2TP_ATTR_DATA_SEQ]) cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT; if (info->attrs[L2TP_ATTR_L2SPEC_TYPE]) cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]); cfg.l2specific_len = 4; if (info->attrs[L2TP_ATTR_L2SPEC_LEN]) cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]); if (info->attrs[L2TP_ATTR_COOKIE]) { u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]); if (len > 8) { ret = -EINVAL; goto out; } cfg.cookie_len = len; memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len); } if (info->attrs[L2TP_ATTR_PEER_COOKIE]) { u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]); if (len > 8) { ret = -EINVAL; goto out; } cfg.peer_cookie_len = len; memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len); } if (info->attrs[L2TP_ATTR_IFNAME]) cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); if (info->attrs[L2TP_ATTR_VLAN_ID]) cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]); } if (info->attrs[L2TP_ATTR_DEBUG]) cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); if (info->attrs[L2TP_ATTR_RECV_SEQ]) cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); if (info->attrs[L2TP_ATTR_SEND_SEQ]) cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); if (info->attrs[L2TP_ATTR_LNS_MODE]) cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); if (info->attrs[L2TP_ATTR_MTU]) cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); if (info->attrs[L2TP_ATTR_MRU]) cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]); if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) || (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) { ret = -EPROTONOSUPPORT; goto out; } /* Check 
that pseudowire-specific params are present */ switch (cfg.pw_type) { case L2TP_PWTYPE_NONE: break; case L2TP_PWTYPE_ETH_VLAN: if (!info->attrs[L2TP_ATTR_VLAN_ID]) { ret = -EINVAL; goto out; } break; case L2TP_PWTYPE_ETH: break; case L2TP_PWTYPE_PPP: case L2TP_PWTYPE_PPP_AC: break; case L2TP_PWTYPE_IP: default: ret = -EPROTONOSUPPORT; break; } ret = -EPROTONOSUPPORT; if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create) ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id, session_id, peer_session_id, &cfg); out: return ret; } static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info) { int ret = 0; struct l2tp_session *session; u16 pw_type; session = l2tp_nl_session_find(info); if (session == NULL) { ret = -ENODEV; goto out; } pw_type = session->pwtype; if (pw_type < __L2TP_PWTYPE_MAX) if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); out: return ret; } static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info) { int ret = 0; struct l2tp_session *session; session = l2tp_nl_session_find(info); if (session == NULL) { ret = -ENODEV; goto out; } if (info->attrs[L2TP_ATTR_DEBUG]) session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); if (info->attrs[L2TP_ATTR_DATA_SEQ]) session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); if (info->attrs[L2TP_ATTR_RECV_SEQ]) session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); if (info->attrs[L2TP_ATTR_SEND_SEQ]) session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); if (info->attrs[L2TP_ATTR_LNS_MODE]) session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); if (info->attrs[L2TP_ATTR_MTU]) session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); if (info->attrs[L2TP_ATTR_MRU]) session->mru = 
nla_get_u16(info->attrs[L2TP_ATTR_MRU]); out: return ret; } static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int flags, struct l2tp_session *session) { void *hdr; struct nlattr *nest; struct l2tp_tunnel *tunnel = session->tunnel; struct sock *sk = NULL; sk = tunnel->sock; hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); if (!hdr) return -EMSGSIZE; if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) || nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) || nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id) || nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) || nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) || nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) || (session->mru && nla_put_u16(skb, L2TP_ATTR_MRU, session->mru))) goto nla_put_failure; if ((session->ifname[0] && nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) || (session->cookie_len && nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0])) || (session->peer_cookie_len && nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0])) || nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) || nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) || nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) || #ifdef CONFIG_XFRM (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) && nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) || #endif (session->reorder_timeout && nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout))) goto nla_put_failure; nest = nla_nest_start(skb, L2TP_ATTR_STATS); if (nest == NULL) goto nla_put_failure; if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, atomic_long_read(&session->stats.tx_packets)) || nla_put_u64(skb, L2TP_ATTR_TX_BYTES, atomic_long_read(&session->stats.tx_bytes)) || nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, atomic_long_read(&session->stats.tx_errors)) || 
nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, atomic_long_read(&session->stats.rx_packets)) || nla_put_u64(skb, L2TP_ATTR_RX_BYTES, atomic_long_read(&session->stats.rx_bytes)) || nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, atomic_long_read(&session->stats.rx_seq_discards)) || nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, atomic_long_read(&session->stats.rx_oos_packets)) || nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, atomic_long_read(&session->stats.rx_errors))) goto nla_put_failure; nla_nest_end(skb, nest); return genlmsg_end(skb, hdr); nla_put_failure: genlmsg_cancel(skb, hdr); return -1; } static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info) { struct l2tp_session *session; struct sk_buff *msg; int ret; session = l2tp_nl_session_find(info); if (session == NULL) { ret = -ENODEV; goto out; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto out; } ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 0, session); if (ret < 0) goto err_out; return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); err_out: nlmsg_free(msg); out: return ret; } static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; int ti = cb->args[0]; int si = cb->args[1]; for (;;) { if (tunnel == NULL) { tunnel = l2tp_tunnel_find_nth(net, ti); if (tunnel == NULL) goto out; } session = l2tp_session_find_nth(tunnel, si); if (session == NULL) { ti++; tunnel = NULL; si = 0; continue; } if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, session) <= 0) break; si++; } out: cb->args[0] = ti; cb->args[1] = si; return skb->len; } static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = { [L2TP_ATTR_NONE] = { .type = NLA_UNSPEC, }, [L2TP_ATTR_PW_TYPE] = { .type = NLA_U16, }, [L2TP_ATTR_ENCAP_TYPE] = { .type = NLA_U16, }, [L2TP_ATTR_OFFSET] = { .type = NLA_U16, }, 
[L2TP_ATTR_DATA_SEQ] = { .type = NLA_U8, }, [L2TP_ATTR_L2SPEC_TYPE] = { .type = NLA_U8, }, [L2TP_ATTR_L2SPEC_LEN] = { .type = NLA_U8, }, [L2TP_ATTR_PROTO_VERSION] = { .type = NLA_U8, }, [L2TP_ATTR_CONN_ID] = { .type = NLA_U32, }, [L2TP_ATTR_PEER_CONN_ID] = { .type = NLA_U32, }, [L2TP_ATTR_SESSION_ID] = { .type = NLA_U32, }, [L2TP_ATTR_PEER_SESSION_ID] = { .type = NLA_U32, }, [L2TP_ATTR_UDP_CSUM] = { .type = NLA_U8, }, [L2TP_ATTR_VLAN_ID] = { .type = NLA_U16, }, [L2TP_ATTR_DEBUG] = { .type = NLA_U32, }, [L2TP_ATTR_RECV_SEQ] = { .type = NLA_U8, }, [L2TP_ATTR_SEND_SEQ] = { .type = NLA_U8, }, [L2TP_ATTR_LNS_MODE] = { .type = NLA_U8, }, [L2TP_ATTR_USING_IPSEC] = { .type = NLA_U8, }, [L2TP_ATTR_RECV_TIMEOUT] = { .type = NLA_MSECS, }, [L2TP_ATTR_FD] = { .type = NLA_U32, }, [L2TP_ATTR_IP_SADDR] = { .type = NLA_U32, }, [L2TP_ATTR_IP_DADDR] = { .type = NLA_U32, }, [L2TP_ATTR_UDP_SPORT] = { .type = NLA_U16, }, [L2TP_ATTR_UDP_DPORT] = { .type = NLA_U16, }, [L2TP_ATTR_MTU] = { .type = NLA_U16, }, [L2TP_ATTR_MRU] = { .type = NLA_U16, }, [L2TP_ATTR_STATS] = { .type = NLA_NESTED, }, [L2TP_ATTR_IP6_SADDR] = { .type = NLA_BINARY, .len = sizeof(struct in6_addr), }, [L2TP_ATTR_IP6_DADDR] = { .type = NLA_BINARY, .len = sizeof(struct in6_addr), }, [L2TP_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1, }, [L2TP_ATTR_COOKIE] = { .type = NLA_BINARY, .len = 8, }, [L2TP_ATTR_PEER_COOKIE] = { .type = NLA_BINARY, .len = 8, }, }; static struct genl_ops l2tp_nl_ops[] = { { .cmd = L2TP_CMD_NOOP, .doit = l2tp_nl_cmd_noop, .policy = l2tp_nl_policy, /* can be retrieved by unprivileged users */ }, { .cmd = L2TP_CMD_TUNNEL_CREATE, .doit = l2tp_nl_cmd_tunnel_create, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_TUNNEL_DELETE, .doit = l2tp_nl_cmd_tunnel_delete, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_TUNNEL_MODIFY, .doit = l2tp_nl_cmd_tunnel_modify, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = 
L2TP_CMD_TUNNEL_GET, .doit = l2tp_nl_cmd_tunnel_get, .dumpit = l2tp_nl_cmd_tunnel_dump, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_CREATE, .doit = l2tp_nl_cmd_session_create, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_DELETE, .doit = l2tp_nl_cmd_session_delete, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_MODIFY, .doit = l2tp_nl_cmd_session_modify, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_GET, .doit = l2tp_nl_cmd_session_get, .dumpit = l2tp_nl_cmd_session_dump, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, }; int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops) { int ret; ret = -EINVAL; if (pw_type >= __L2TP_PWTYPE_MAX) goto err; genl_lock(); ret = -EBUSY; if (l2tp_nl_cmd_ops[pw_type]) goto out; l2tp_nl_cmd_ops[pw_type] = ops; ret = 0; out: genl_unlock(); err: return ret; } EXPORT_SYMBOL_GPL(l2tp_nl_register_ops); void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type) { if (pw_type < __L2TP_PWTYPE_MAX) { genl_lock(); l2tp_nl_cmd_ops[pw_type] = NULL; genl_unlock(); } } EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops); static int l2tp_nl_init(void) { int err; pr_info("L2TP netlink interface\n"); err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops, ARRAY_SIZE(l2tp_nl_ops)); return err; } static void l2tp_nl_cleanup(void) { genl_unregister_family(&l2tp_nl_family); } module_init(l2tp_nl_init); module_exit(l2tp_nl_cleanup); MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); MODULE_DESCRIPTION("L2TP netlink"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0"); MODULE_ALIAS_GENL_FAMILY("l2tp");
gpl-2.0
zarboz/HTC-Jewel-Kernel-OC
drivers/net/can/flexcan.c
2697
26146
/* * flexcan.c - FLEXCAN CAN controller driver * * Copyright (c) 2005-2006 Varma Electronics Oy * Copyright (c) 2009 Sascha Hauer, Pengutronix * Copyright (c) 2010 Marc Kleine-Budde, Pengutronix * * Based on code originally by Andrey Volkov <avolkov@varma-el.com> * * LICENCE: * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/netdevice.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/platform/flexcan.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/platform_device.h> #include <mach/clock.h> #define DRV_NAME "flexcan" /* 8 for RX fifo and 2 error handling */ #define FLEXCAN_NAPI_WEIGHT (8 + 2) /* FLEXCAN module configuration register (CANMCR) bits */ #define FLEXCAN_MCR_MDIS BIT(31) #define FLEXCAN_MCR_FRZ BIT(30) #define FLEXCAN_MCR_FEN BIT(29) #define FLEXCAN_MCR_HALT BIT(28) #define FLEXCAN_MCR_NOT_RDY BIT(27) #define FLEXCAN_MCR_WAK_MSK BIT(26) #define FLEXCAN_MCR_SOFTRST BIT(25) #define FLEXCAN_MCR_FRZ_ACK BIT(24) #define FLEXCAN_MCR_SUPV BIT(23) #define FLEXCAN_MCR_SLF_WAK BIT(22) #define FLEXCAN_MCR_WRN_EN BIT(21) #define FLEXCAN_MCR_LPM_ACK BIT(20) #define FLEXCAN_MCR_WAK_SRC BIT(19) #define FLEXCAN_MCR_DOZE BIT(18) #define FLEXCAN_MCR_SRX_DIS BIT(17) #define FLEXCAN_MCR_BCC BIT(16) #define FLEXCAN_MCR_LPRIO_EN BIT(13) #define FLEXCAN_MCR_AEN BIT(12) #define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf) #define FLEXCAN_MCR_IDAM_A (0 
<< 8) #define FLEXCAN_MCR_IDAM_B (1 << 8) #define FLEXCAN_MCR_IDAM_C (2 << 8) #define FLEXCAN_MCR_IDAM_D (3 << 8) /* FLEXCAN control register (CANCTRL) bits */ #define FLEXCAN_CTRL_PRESDIV(x) (((x) & 0xff) << 24) #define FLEXCAN_CTRL_RJW(x) (((x) & 0x03) << 22) #define FLEXCAN_CTRL_PSEG1(x) (((x) & 0x07) << 19) #define FLEXCAN_CTRL_PSEG2(x) (((x) & 0x07) << 16) #define FLEXCAN_CTRL_BOFF_MSK BIT(15) #define FLEXCAN_CTRL_ERR_MSK BIT(14) #define FLEXCAN_CTRL_CLK_SRC BIT(13) #define FLEXCAN_CTRL_LPB BIT(12) #define FLEXCAN_CTRL_TWRN_MSK BIT(11) #define FLEXCAN_CTRL_RWRN_MSK BIT(10) #define FLEXCAN_CTRL_SMP BIT(7) #define FLEXCAN_CTRL_BOFF_REC BIT(6) #define FLEXCAN_CTRL_TSYN BIT(5) #define FLEXCAN_CTRL_LBUF BIT(4) #define FLEXCAN_CTRL_LOM BIT(3) #define FLEXCAN_CTRL_PROPSEG(x) ((x) & 0x07) #define FLEXCAN_CTRL_ERR_BUS (FLEXCAN_CTRL_ERR_MSK) #define FLEXCAN_CTRL_ERR_STATE \ (FLEXCAN_CTRL_TWRN_MSK | FLEXCAN_CTRL_RWRN_MSK | \ FLEXCAN_CTRL_BOFF_MSK) #define FLEXCAN_CTRL_ERR_ALL \ (FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE) /* FLEXCAN error and status register (ESR) bits */ #define FLEXCAN_ESR_TWRN_INT BIT(17) #define FLEXCAN_ESR_RWRN_INT BIT(16) #define FLEXCAN_ESR_BIT1_ERR BIT(15) #define FLEXCAN_ESR_BIT0_ERR BIT(14) #define FLEXCAN_ESR_ACK_ERR BIT(13) #define FLEXCAN_ESR_CRC_ERR BIT(12) #define FLEXCAN_ESR_FRM_ERR BIT(11) #define FLEXCAN_ESR_STF_ERR BIT(10) #define FLEXCAN_ESR_TX_WRN BIT(9) #define FLEXCAN_ESR_RX_WRN BIT(8) #define FLEXCAN_ESR_IDLE BIT(7) #define FLEXCAN_ESR_TXRX BIT(6) #define FLEXCAN_EST_FLT_CONF_SHIFT (4) #define FLEXCAN_ESR_FLT_CONF_MASK (0x3 << FLEXCAN_EST_FLT_CONF_SHIFT) #define FLEXCAN_ESR_FLT_CONF_ACTIVE (0x0 << FLEXCAN_EST_FLT_CONF_SHIFT) #define FLEXCAN_ESR_FLT_CONF_PASSIVE (0x1 << FLEXCAN_EST_FLT_CONF_SHIFT) #define FLEXCAN_ESR_BOFF_INT BIT(2) #define FLEXCAN_ESR_ERR_INT BIT(1) #define FLEXCAN_ESR_WAK_INT BIT(0) #define FLEXCAN_ESR_ERR_BUS \ (FLEXCAN_ESR_BIT1_ERR | FLEXCAN_ESR_BIT0_ERR | \ FLEXCAN_ESR_ACK_ERR | FLEXCAN_ESR_CRC_ERR | \ 
FLEXCAN_ESR_FRM_ERR | FLEXCAN_ESR_STF_ERR) #define FLEXCAN_ESR_ERR_STATE \ (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT) #define FLEXCAN_ESR_ERR_ALL \ (FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE) /* FLEXCAN interrupt flag register (IFLAG) bits */ #define FLEXCAN_TX_BUF_ID 8 #define FLEXCAN_IFLAG_BUF(x) BIT(x) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) #define FLEXCAN_IFLAG_DEFAULT \ (FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | \ FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID)) /* FLEXCAN message buffers */ #define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) #define FLEXCAN_MB_CNT_SRR BIT(22) #define FLEXCAN_MB_CNT_IDE BIT(21) #define FLEXCAN_MB_CNT_RTR BIT(20) #define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16) #define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff) #define FLEXCAN_MB_CODE_MASK (0xf0ffffff) /* Structure of the message buffer */ struct flexcan_mb { u32 can_ctrl; u32 can_id; u32 data[2]; }; /* Structure of the hardware registers */ struct flexcan_regs { u32 mcr; /* 0x00 */ u32 ctrl; /* 0x04 */ u32 timer; /* 0x08 */ u32 _reserved1; /* 0x0c */ u32 rxgmask; /* 0x10 */ u32 rx14mask; /* 0x14 */ u32 rx15mask; /* 0x18 */ u32 ecr; /* 0x1c */ u32 esr; /* 0x20 */ u32 imask2; /* 0x24 */ u32 imask1; /* 0x28 */ u32 iflag2; /* 0x2c */ u32 iflag1; /* 0x30 */ u32 _reserved2[19]; struct flexcan_mb cantxfg[64]; }; struct flexcan_priv { struct can_priv can; struct net_device *dev; struct napi_struct napi; void __iomem *base; u32 reg_esr; u32 reg_ctrl_default; struct clk *clk; struct flexcan_platform_data *pdata; }; static struct can_bittiming_const flexcan_bittiming_const = { .name = DRV_NAME, .tseg1_min = 4, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; /* * Swtich transceiver on or off */ static void flexcan_transceiver_switch(const struct flexcan_priv *priv, int on) { if 
(priv->pdata && priv->pdata->transceiver_switch) priv->pdata->transceiver_switch(on); } static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, u32 reg_esr) { return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && (reg_esr & FLEXCAN_ESR_ERR_BUS); } static inline void flexcan_chip_enable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->base; u32 reg; reg = readl(&regs->mcr); reg &= ~FLEXCAN_MCR_MDIS; writel(reg, &regs->mcr); udelay(10); } static inline void flexcan_chip_disable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->base; u32 reg; reg = readl(&regs->mcr); reg |= FLEXCAN_MCR_MDIS; writel(reg, &regs->mcr); } static int flexcan_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->base; u32 reg = readl(&regs->ecr); bec->txerr = (reg >> 0) & 0xff; bec->rxerr = (reg >> 8) & 0xff; return 0; } static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct flexcan_regs __iomem *regs = priv->base; struct can_frame *cf = (struct can_frame *)skb->data; u32 can_id; u32 ctrl = FLEXCAN_MB_CNT_CODE(0xc) | (cf->can_dlc << 16); if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); if (cf->can_id & CAN_EFF_FLAG) { can_id = cf->can_id & CAN_EFF_MASK; ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR; } else { can_id = (cf->can_id & CAN_SFF_MASK) << 18; } if (cf->can_id & CAN_RTR_FLAG) ctrl |= FLEXCAN_MB_CNT_RTR; if (cf->can_dlc > 0) { u32 data = be32_to_cpup((__be32 *)&cf->data[0]); writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[0]); } if (cf->can_dlc > 3) { u32 data = be32_to_cpup((__be32 *)&cf->data[4]); writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[1]); } writel(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id); writel(ctrl, 
&regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); kfree_skb(skb); /* tx_packets is incremented in flexcan_irq */ stats->tx_bytes += cf->can_dlc; return NETDEV_TX_OK; } static void do_bus_err(struct net_device *dev, struct can_frame *cf, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); int rx_errors = 0, tx_errors = 0; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; if (reg_esr & FLEXCAN_ESR_BIT1_ERR) { dev_dbg(dev->dev.parent, "BIT1_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT1; tx_errors = 1; } if (reg_esr & FLEXCAN_ESR_BIT0_ERR) { dev_dbg(dev->dev.parent, "BIT0_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT0; tx_errors = 1; } if (reg_esr & FLEXCAN_ESR_ACK_ERR) { dev_dbg(dev->dev.parent, "ACK_ERR irq\n"); cf->can_id |= CAN_ERR_ACK; cf->data[3] |= CAN_ERR_PROT_LOC_ACK; tx_errors = 1; } if (reg_esr & FLEXCAN_ESR_CRC_ERR) { dev_dbg(dev->dev.parent, "CRC_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT; cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; rx_errors = 1; } if (reg_esr & FLEXCAN_ESR_FRM_ERR) { dev_dbg(dev->dev.parent, "FRM_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_FORM; rx_errors = 1; } if (reg_esr & FLEXCAN_ESR_STF_ERR) { dev_dbg(dev->dev.parent, "STF_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_STUFF; rx_errors = 1; } priv->can.can_stats.bus_error++; if (rx_errors) dev->stats.rx_errors++; if (tx_errors) dev->stats.tx_errors++; } static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) { struct sk_buff *skb; struct can_frame *cf; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return 0; do_bus_err(dev, cf, reg_esr); netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; return 1; } static void do_state(struct net_device *dev, struct can_frame *cf, enum can_state new_state) { struct flexcan_priv *priv = netdev_priv(dev); struct can_berr_counter bec; flexcan_get_berr_counter(dev, &bec); switch (priv->can.state) { case CAN_STATE_ERROR_ACTIVE: /* * from: ERROR_ACTIVE * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF * => : there 
was a warning int */ if (new_state >= CAN_STATE_ERROR_WARNING && new_state <= CAN_STATE_BUS_OFF) { dev_dbg(dev->dev.parent, "Error Warning IRQ\n"); priv->can.can_stats.error_warning++; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (bec.txerr > bec.rxerr) ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } case CAN_STATE_ERROR_WARNING: /* fallthrough */ /* * from: ERROR_ACTIVE, ERROR_WARNING * to : ERROR_PASSIVE, BUS_OFF * => : error passive int */ if (new_state >= CAN_STATE_ERROR_PASSIVE && new_state <= CAN_STATE_BUS_OFF) { dev_dbg(dev->dev.parent, "Error Passive IRQ\n"); priv->can.can_stats.error_passive++; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (bec.txerr > bec.rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } break; case CAN_STATE_BUS_OFF: dev_err(dev->dev.parent, "BUG! hardware recovered automatically from BUS_OFF\n"); break; default: break; } /* process state changes depending on the new state */ switch (new_state) { case CAN_STATE_ERROR_ACTIVE: dev_dbg(dev->dev.parent, "Error Active\n"); cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; break; case CAN_STATE_BUS_OFF: cf->can_id |= CAN_ERR_BUSOFF; can_bus_off(dev); break; default: break; } } static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct can_frame *cf; enum can_state new_state; int flt; flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { if (likely(!(reg_esr & (FLEXCAN_ESR_TX_WRN | FLEXCAN_ESR_RX_WRN)))) new_state = CAN_STATE_ERROR_ACTIVE; else new_state = CAN_STATE_ERROR_WARNING; } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) new_state = CAN_STATE_ERROR_PASSIVE; else new_state = CAN_STATE_BUS_OFF; /* state hasn't changed */ if (likely(new_state == priv->can.state)) return 0; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return 0; do_state(dev, cf, new_state); priv->can.state = new_state; netif_receive_skb(skb); 
dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; return 1; } static void flexcan_read_fifo(const struct net_device *dev, struct can_frame *cf) { const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->base; struct flexcan_mb __iomem *mb = &regs->cantxfg[0]; u32 reg_ctrl, reg_id; reg_ctrl = readl(&mb->can_ctrl); reg_id = readl(&mb->can_id); if (reg_ctrl & FLEXCAN_MB_CNT_IDE) cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; else cf->can_id = (reg_id >> 18) & CAN_SFF_MASK; if (reg_ctrl & FLEXCAN_MB_CNT_RTR) cf->can_id |= CAN_RTR_FLAG; cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf); *(__be32 *)(cf->data + 0) = cpu_to_be32(readl(&mb->data[0])); *(__be32 *)(cf->data + 4) = cpu_to_be32(readl(&mb->data[1])); /* mark as read */ writel(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1); readl(&regs->timer); } static int flexcan_read_frame(struct net_device *dev) { struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; skb = alloc_can_skb(dev, &cf); if (unlikely(!skb)) { stats->rx_dropped++; return 0; } flexcan_read_fifo(dev, cf); netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; return 1; } static int flexcan_poll(struct napi_struct *napi, int quota) { struct net_device *dev = napi->dev; const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->base; u32 reg_iflag1, reg_esr; int work_done = 0; /* * The error bits are cleared on read, * use saved value from irq handler. 
*/ reg_esr = readl(&regs->esr) | priv->reg_esr; /* handle state changes */ work_done += flexcan_poll_state(dev, reg_esr); /* handle RX-FIFO */ reg_iflag1 = readl(&regs->iflag1); while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE && work_done < quota) { work_done += flexcan_read_frame(dev); reg_iflag1 = readl(&regs->iflag1); } /* report bus errors */ if (flexcan_has_and_handle_berr(priv, reg_esr) && work_done < quota) work_done += flexcan_poll_bus_err(dev, reg_esr); if (work_done < quota) { napi_complete(napi); /* enable IRQs */ writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1); writel(priv->reg_ctrl_default, &regs->ctrl); } return work_done; } static irqreturn_t flexcan_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; struct net_device_stats *stats = &dev->stats; struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->base; u32 reg_iflag1, reg_esr; reg_iflag1 = readl(&regs->iflag1); reg_esr = readl(&regs->esr); writel(FLEXCAN_ESR_ERR_INT, &regs->esr); /* ACK err IRQ */ /* * schedule NAPI in case of: * - rx IRQ * - state change IRQ * - bus error IRQ and bus error reporting is activated */ if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) || (reg_esr & FLEXCAN_ESR_ERR_STATE) || flexcan_has_and_handle_berr(priv, reg_esr)) { /* * The error bits are cleared on read, * save them for later use. 
*/ priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS; writel(FLEXCAN_IFLAG_DEFAULT & ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->imask1); writel(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, &regs->ctrl); napi_schedule(&priv->napi); } /* FIFO overflow */ if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) { writel(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1); dev->stats.rx_over_errors++; dev->stats.rx_errors++; } /* transmission complete interrupt */ if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) { /* tx_bytes is incremented in flexcan_start_xmit */ stats->tx_packets++; writel((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1); netif_wake_queue(dev); } return IRQ_HANDLED; } static void flexcan_set_bittiming(struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; struct flexcan_regs __iomem *regs = priv->base; u32 reg; reg = readl(&regs->ctrl); reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) | FLEXCAN_CTRL_RJW(0x3) | FLEXCAN_CTRL_PSEG1(0x7) | FLEXCAN_CTRL_PSEG2(0x7) | FLEXCAN_CTRL_PROPSEG(0x7) | FLEXCAN_CTRL_LPB | FLEXCAN_CTRL_SMP | FLEXCAN_CTRL_LOM); reg |= FLEXCAN_CTRL_PRESDIV(bt->brp - 1) | FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) | FLEXCAN_CTRL_PSEG2(bt->phase_seg2 - 1) | FLEXCAN_CTRL_RJW(bt->sjw - 1) | FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1); if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) reg |= FLEXCAN_CTRL_LPB; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) reg |= FLEXCAN_CTRL_LOM; if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) reg |= FLEXCAN_CTRL_SMP; dev_info(dev->dev.parent, "writing ctrl=0x%08x\n", reg); writel(reg, &regs->ctrl); /* print chip status */ dev_dbg(dev->dev.parent, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__, readl(&regs->mcr), readl(&regs->ctrl)); } /* * flexcan_chip_start * * this functions is entered with clocks enabled * */ static int flexcan_chip_start(struct net_device *dev) { struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->base; unsigned int i; int 
err; u32 reg_mcr, reg_ctrl; /* enable module */ flexcan_chip_enable(priv); /* soft reset */ writel(FLEXCAN_MCR_SOFTRST, &regs->mcr); udelay(10); reg_mcr = readl(&regs->mcr); if (reg_mcr & FLEXCAN_MCR_SOFTRST) { dev_err(dev->dev.parent, "Failed to softreset can module (mcr=0x%08x)\n", reg_mcr); err = -ENODEV; goto out; } flexcan_set_bittiming(dev); /* * MCR * * enable freeze * enable fifo * halt now * only supervisor access * enable warning int * choose format C * */ reg_mcr = readl(&regs->mcr); reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IDAM_C; dev_dbg(dev->dev.parent, "%s: writing mcr=0x%08x", __func__, reg_mcr); writel(reg_mcr, &regs->mcr); /* * CTRL * * disable timer sync feature * * disable auto busoff recovery * transmit lowest buffer first * * enable tx and rx warning interrupt * enable bus off interrupt * (== FLEXCAN_CTRL_ERR_STATE) * * _note_: we enable the "error interrupt" * (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any * warning or bus passive interrupts. 
*/
	/*
	 * Tail of flexcan_chip_start(): runs with the controller held in
	 * freeze mode (the function head is outside this chunk).
	 */
	reg_ctrl = readl(&regs->ctrl);
	reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
	reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
		FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK;

	/* save for later use (error-IRQ handling restores this value) */
	priv->reg_ctrl_default = reg_ctrl;

	dev_dbg(dev->dev.parent, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
	writel(reg_ctrl, &regs->ctrl);

	/* clear all message buffers, then hand each one to the RX queue */
	for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
		writel(0, &regs->cantxfg[i].can_ctrl);
		writel(0, &regs->cantxfg[i].can_id);
		writel(0, &regs->cantxfg[i].data[0]);
		writel(0, &regs->cantxfg[i].data[1]);

		/* put MB into rx queue */
		writel(FLEXCAN_MB_CNT_CODE(0x4), &regs->cantxfg[i].can_ctrl);
	}

	/* acceptance mask/acceptance code (accept everything) */
	writel(0x0, &regs->rxgmask);
	writel(0x0, &regs->rx14mask);
	writel(0x0, &regs->rx15mask);

	flexcan_transceiver_switch(priv, 1);

	/* leave halt mode: synchronize with the can bus */
	reg_mcr = readl(&regs->mcr);
	reg_mcr &= ~FLEXCAN_MCR_HALT;
	writel(reg_mcr, &regs->mcr);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* enable FIFO interrupts */
	writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);

	/* print chip status */
	dev_dbg(dev->dev.parent, "%s: reading mcr=0x%08x ctrl=0x%08x\n",
		__func__, readl(&regs->mcr), readl(&regs->ctrl));

	return 0;

 out:
	flexcan_chip_disable(priv);
	return err;
}

/*
 * flexcan_chip_stop
 *
 * This function is entered with clocks enabled.  Masks all interrupts
 * and puts the controller into disabled + halted state.
 */
static void flexcan_chip_stop(struct net_device *dev)
{
	struct flexcan_priv *priv = netdev_priv(dev);
	struct flexcan_regs __iomem *regs = priv->base;
	u32 reg;

	/* Disable all interrupts */
	writel(0, &regs->imask1);

	/* Disable + halt module */
	reg = readl(&regs->mcr);
	reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
	writel(reg, &regs->mcr);

	flexcan_transceiver_switch(priv, 0);
	priv->can.state = CAN_STATE_STOPPED;

	return;
}

/*
 * ndo_open callback: enable the clock, register the IRQ handler and
 * start the chip.  On any failure the previously acquired resources
 * are released in reverse order via the goto-cleanup labels.
 */
static int flexcan_open(struct net_device *dev)
{
	struct flexcan_priv *priv = netdev_priv(dev);
	int err;

	clk_enable(priv->clk);

	err = open_candev(dev);
	if (err)
		goto out;

	err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
	if (err)
		goto out_close;

	/* start chip and queuing */
	err = flexcan_chip_start(dev);
	if (err)
		goto out_close;
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

 out_close:
	close_candev(dev);
 out:
	clk_disable(priv->clk);

	return err;
}

/*
 * ndo_stop callback: quiesce the queue and NAPI, stop the chip, then
 * release IRQ, clock and candev in reverse order of flexcan_open().
 */
static int flexcan_close(struct net_device *dev)
{
	struct flexcan_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	flexcan_chip_stop(dev);

	free_irq(dev->irq, dev);
	clk_disable(priv->clk);

	close_candev(dev);

	return 0;
}

/*
 * can_priv::do_set_mode callback.  Only CAN_MODE_START is supported:
 * (re)starts the chip, e.g. after bus-off.
 */
static int flexcan_set_mode(struct net_device *dev, enum can_mode mode)
{
	int err;

	switch (mode) {
	case CAN_MODE_START:
		err = flexcan_chip_start(dev);
		if (err)
			return err;

		netif_wake_queue(dev);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct net_device_ops flexcan_netdev_ops = {
	.ndo_open	= flexcan_open,
	.ndo_stop	= flexcan_close,
	.ndo_start_xmit	= flexcan_start_xmit,
};

/*
 * Probe-time hardware sanity check and candev registration: selects the
 * bus clock, freezes the core, enables the RX FIFO and verifies that
 * the core actually supports it (older Coldfire-era cores do not).
 * Leaves the chip disabled and the clock off on return.
 */
static int __devinit register_flexcandev(struct net_device *dev)
{
	struct flexcan_priv *priv = netdev_priv(dev);
	struct flexcan_regs __iomem *regs = priv->base;
	/*
	 * NOTE(review): err is declared u32 but holds -ENODEV and is
	 * returned as int — works via conversion, but int would be the
	 * cleaner type; confirm before changing.
	 */
	u32 reg, err;

	clk_enable(priv->clk);

	/* select "bus clock", chip must be disabled */
	flexcan_chip_disable(priv);
	reg = readl(&regs->ctrl);
	reg |= FLEXCAN_CTRL_CLK_SRC;
	writel(reg, &regs->ctrl);
	flexcan_chip_enable(priv);

	/* set freeze, halt and activate FIFO, restrict register access */
	reg = readl(&regs->mcr);
	reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
		FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
	writel(reg, &regs->mcr);

	/*
	 * Currently we only support newer versions of this core
	 * featuring a RX FIFO. Older cores found on some Coldfire
	 * derivates are not yet supported.
	 */
	reg = readl(&regs->mcr);
	if (!(reg & FLEXCAN_MCR_FEN)) {
		dev_err(dev->dev.parent,
			"Could not enable RX FIFO, unsupported core\n");
		err = -ENODEV;
		goto out;
	}

	err = register_candev(dev);

 out:
	/* disable core and turn off clocks */
	flexcan_chip_disable(priv);
	clk_disable(priv->clk);

	return err;
}

static void __devexit unregister_flexcandev(struct net_device *dev)
{
	unregister_candev(dev);
}

/*
 * Platform driver probe: acquire clock, MMIO region and IRQ, allocate
 * the candev, wire up the CAN framework callbacks and register the
 * device.  Error paths unwind in strict reverse order of acquisition.
 */
static int __devinit flexcan_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct flexcan_priv *priv;
	struct resource *mem;
	struct clk *clk;
	void __iomem *base;
	resource_size_t mem_size;
	int err, irq;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "no clock defined\n");
		err = PTR_ERR(clk);
		goto failed_clock;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!mem || irq <= 0) {
		err = -ENODEV;
		goto failed_get;
	}

	mem_size = resource_size(mem);
	if (!request_mem_region(mem->start, mem_size, pdev->name)) {
		err = -EBUSY;
		goto failed_get;
	}

	base = ioremap(mem->start, mem_size);
	if (!base) {
		err = -ENOMEM;
		goto failed_map;
	}

	dev = alloc_candev(sizeof(struct flexcan_priv), 0);
	if (!dev) {
		err = -ENOMEM;
		goto failed_alloc;
	}

	dev->netdev_ops = &flexcan_netdev_ops;
	dev->irq = irq;
	dev->flags |= IFF_ECHO; /* we support local echo in hardware */

	priv = netdev_priv(dev);
	priv->can.clock.freq = clk_get_rate(clk);
	priv->can.bittiming_const = &flexcan_bittiming_const;
	priv->can.do_set_mode = flexcan_set_mode;
	priv->can.do_get_berr_counter = flexcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY	| CAN_CTRLMODE_3_SAMPLES |
		CAN_CTRLMODE_BERR_REPORTING;
	priv->base = base;
	priv->dev = dev;
	priv->clk = clk;
	priv->pdata = pdev->dev.platform_data;

	netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);

	dev_set_drvdata(&pdev->dev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = register_flexcandev(dev);
	if (err) {
		dev_err(&pdev->dev, "registering netdev failed\n");
		goto failed_register;
	}

	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
		 priv->base, dev->irq);

	return 0;

 failed_register:
	free_candev(dev);
 failed_alloc:
	iounmap(base);
 failed_map:
	release_mem_region(mem->start, mem_size);
 failed_get:
	clk_put(clk);
 failed_clock:
	return err;
}

/* Platform driver remove: tear down everything flexcan_probe() set up. */
static int __devexit flexcan_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct flexcan_priv *priv = netdev_priv(dev);
	struct resource *mem;

	unregister_flexcandev(dev);
	platform_set_drvdata(pdev, NULL);
	iounmap(priv->base);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(mem->start, resource_size(mem));

	clk_put(priv->clk);

	free_candev(dev);

	return 0;
}

static struct platform_driver flexcan_driver = {
	.driver.name = DRV_NAME,
	.probe = flexcan_probe,
	.remove = __devexit_p(flexcan_remove),
};

static int __init flexcan_init(void)
{
	pr_info("%s netdevice driver\n", DRV_NAME);
	return platform_driver_register(&flexcan_driver);
}

static void __exit flexcan_exit(void)
{
	platform_driver_unregister(&flexcan_driver);
	pr_info("%s: driver removed\n", DRV_NAME);
}

module_init(flexcan_init);
module_exit(flexcan_exit);

MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, "
	      "Marc Kleine-Budde <kernel@pengutronix.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN port driver for flexcan based chip");
gpl-2.0
G5Devs/android_kernel_lge_msm8996
drivers/mtd/maps/dc21285.c
2953
5539
/* * MTD map driver for flash on the DC21285 (the StrongARM-110 companion chip) * * (C) 2000 Nicolas Pitre <nico@fluxnic.net> * * This code is GPL */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <asm/io.h> #include <asm/hardware/dec21285.h> #include <asm/mach-types.h> static struct mtd_info *dc21285_mtd; #ifdef CONFIG_ARCH_NETWINDER /* * This is really ugly, but it seams to be the only * realiable way to do it, as the cpld state machine * is unpredictible. So we have a 25us penalty per * write access. */ static void nw_en_write(void) { unsigned long flags; /* * we want to write a bit pattern XXX1 to Xilinx to enable * the write gate, which will be open for about the next 2ms. */ spin_lock_irqsave(&nw_gpio_lock, flags); nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE); spin_unlock_irqrestore(&nw_gpio_lock, flags); /* * let the ISA bus to catch on... 
*/ udelay(25); } #else #define nw_en_write() do { } while (0) #endif static map_word dc21285_read8(struct map_info *map, unsigned long ofs) { map_word val; val.x[0] = *(uint8_t*)(map->virt + ofs); return val; } static map_word dc21285_read16(struct map_info *map, unsigned long ofs) { map_word val; val.x[0] = *(uint16_t*)(map->virt + ofs); return val; } static map_word dc21285_read32(struct map_info *map, unsigned long ofs) { map_word val; val.x[0] = *(uint32_t*)(map->virt + ofs); return val; } static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { memcpy(to, (void*)(map->virt + from), len); } static void dc21285_write8(struct map_info *map, const map_word d, unsigned long adr) { if (machine_is_netwinder()) nw_en_write(); *CSR_ROMWRITEREG = adr & 3; adr &= ~3; *(uint8_t*)(map->virt + adr) = d.x[0]; } static void dc21285_write16(struct map_info *map, const map_word d, unsigned long adr) { if (machine_is_netwinder()) nw_en_write(); *CSR_ROMWRITEREG = adr & 3; adr &= ~3; *(uint16_t*)(map->virt + adr) = d.x[0]; } static void dc21285_write32(struct map_info *map, const map_word d, unsigned long adr) { if (machine_is_netwinder()) nw_en_write(); *(uint32_t*)(map->virt + adr) = d.x[0]; } static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len) { while (len > 0) { map_word d; d.x[0] = *((uint32_t*)from); dc21285_write32(map, d, to); from += 4; to += 4; len -= 4; } } static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len) { while (len > 0) { map_word d; d.x[0] = *((uint16_t*)from); dc21285_write16(map, d, to); from += 2; to += 2; len -= 2; } } static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len) { map_word d; d.x[0] = *((uint8_t*)from); dc21285_write8(map, d, to); from++; to++; len--; } static struct map_info dc21285_map = { .name = "DC21285 flash", .phys = NO_XIP, .size = 16*1024*1024, .copy_from 
= dc21285_copy_from, }; /* Partition stuff */ static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; static int __init init_dc21285(void) { /* Determine bankwidth */ switch (*CSR_SA110_CNTL & (3<<14)) { case SA110_CNTL_ROMWIDTH_8: dc21285_map.bankwidth = 1; dc21285_map.read = dc21285_read8; dc21285_map.write = dc21285_write8; dc21285_map.copy_to = dc21285_copy_to_8; break; case SA110_CNTL_ROMWIDTH_16: dc21285_map.bankwidth = 2; dc21285_map.read = dc21285_read16; dc21285_map.write = dc21285_write16; dc21285_map.copy_to = dc21285_copy_to_16; break; case SA110_CNTL_ROMWIDTH_32: dc21285_map.bankwidth = 4; dc21285_map.read = dc21285_read32; dc21285_map.write = dc21285_write32; dc21285_map.copy_to = dc21285_copy_to_32; break; default: printk (KERN_ERR "DC21285 flash: undefined bankwidth\n"); return -ENXIO; } printk (KERN_NOTICE "DC21285 flash support (%d-bit bankwidth)\n", dc21285_map.bankwidth*8); /* Let's map the flash area */ dc21285_map.virt = ioremap(DC21285_FLASH, 16*1024*1024); if (!dc21285_map.virt) { printk("Failed to ioremap\n"); return -EIO; } if (machine_is_ebsa285()) { dc21285_mtd = do_map_probe("cfi_probe", &dc21285_map); } else { dc21285_mtd = do_map_probe("jedec_probe", &dc21285_map); } if (!dc21285_mtd) { iounmap(dc21285_map.virt); return -ENXIO; } dc21285_mtd->owner = THIS_MODULE; mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0); if(machine_is_ebsa285()) { /* * Flash timing is determined with bits 19-16 of the * CSR_SA110_CNTL. The value is the number of wait cycles, or * 0 for 16 cycles (the default). Cycles are 20 ns. * Here we use 7 for 140 ns flash chips. 
*/ /* access time */ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x000f0000) | (7 << 16)); /* burst time */ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x00f00000) | (7 << 20)); /* tristate time */ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x0f000000) | (7 << 24)); } return 0; } static void __exit cleanup_dc21285(void) { mtd_device_unregister(dc21285_mtd); map_destroy(dc21285_mtd); iounmap(dc21285_map.virt); } module_init(init_dc21285); module_exit(cleanup_dc21285); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>"); MODULE_DESCRIPTION("MTD map driver for DC21285 boards");
gpl-2.0
hausdorff/linux
arch/arm/mach-sa1100/pleb.c
3209
2944
/*
 * linux/arch/arm/mach-sa1100/pleb.c
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/io.h>

#include <linux/mtd/partitions.h>

#include <mach/hardware.h>
#include <asm/setup.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>
#include <mach/irqs.h>

#include "generic.h"


/*
 * Ethernet IRQ mappings
 */

#define PLEB_ETH0_P		(0x20000300)	/* Ethernet 0 in PCMCIA0 IO */
#define PLEB_ETH0_V		(0xf6000300)

#define GPIO_ETH0_IRQ		GPIO_GPIO(21)
#define GPIO_ETH0_EN		GPIO_GPIO(26)

#define IRQ_GPIO_ETH0_IRQ	IRQ_GPIO21

static struct resource smc91x_resources[] = {
	[0] = DEFINE_RES_MEM(PLEB_ETH0_P, 0x04000000),
#if 0 /* Autoprobe instead, to get rising/falling edge characteristic right */
	[1] = DEFINE_RES_IRQ(IRQ_GPIO_ETH0_IRQ),
#endif
};


static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(smc91x_resources),
	.resource	= smc91x_resources,
};

static struct platform_device *devices[] __initdata = {
	&smc91x_device,
};


/*
 * Pleb's memory map
 * has flash memory (typically 4 or 8 meg) selected by
 * the two SA1100 lowest chip select outputs.
 */
static struct resource pleb_flash_resources[] = {
	[0] = DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_8M),
	[1] = DEFINE_RES_MEM(SA1100_CS1_PHYS, SZ_8M),
};

/* Static MTD partition layout: bootloader, kernel, root filesystem. */
static struct mtd_partition pleb_partitions[] = {
	{
		.name		= "blob",
		.offset		= 0,
		.size		= 0x00020000,
	}, {
		.name		= "kernel",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 0x000e0000,
	}, {
		.name		= "rootfs",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 0x00300000,
	}
};


static struct flash_platform_data pleb_flash_data = {
	.map_name = "cfi_probe",
	.parts = pleb_partitions,
	.nr_parts = ARRAY_SIZE(pleb_partitions),
};


/* Board init: register the flash banks and the SMC91x ethernet device. */
static void __init pleb_init(void)
{
	sa11x0_register_mtd(&pleb_flash_data, pleb_flash_resources,
			    ARRAY_SIZE(pleb_flash_resources));

	platform_add_devices(devices, ARRAY_SIZE(devices));
}


/*
 * Early I/O setup: map the SoC registers, configure the UARTs'
 * GPIO alternate functions, and wire up the SMC ethernet GPIOs.
 */
static void __init pleb_map_io(void)
{
	sa1100_map_io();

	sa1100_register_uart(0, 3);
	sa1100_register_uart(1, 1);

	GAFR |= (GPIO_UART_TXD | GPIO_UART_RXD);

	GPDR |= GPIO_UART_TXD;	/* TXD is an output */
	GPDR &= ~GPIO_UART_RXD;	/* RXD is an input */
	PPAR |= PPAR_UPR;

	/*
	 * Fix expansion memory timing for network card
	 */
	MECR = ((2<<10) | (2<<5) | (2<<0));

	/*
	 * Enable the SMC ethernet controller
	 */
	GPDR |= GPIO_ETH0_EN;	/* set to output */
	GPCR  = GPIO_ETH0_EN;	/* clear MCLK (enable smc) */

	GPDR &= ~GPIO_ETH0_IRQ;

	irq_set_irq_type(GPIO_ETH0_IRQ, IRQ_TYPE_EDGE_FALLING);
}

MACHINE_START(PLEB, "PLEB")
	.map_io		= pleb_map_io,
	.nr_irqs	= SA1100_NR_IRQS,
	.init_irq	= sa1100_init_irq,
	.init_time	= sa1100_timer_init,
	.init_machine   = pleb_init,
	.init_late	= sa11x0_init_late,
	.restart	= sa11x0_restart,
MACHINE_END
gpl-2.0
nel82/android_zenfone4_kernel
drivers/gpu/drm/i2c/ch7006_mode.c
4233
15869
/* * Copyright (C) 2009 Francisco Jerez. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include "ch7006_priv.h" char *ch7006_tv_norm_names[] = { [TV_NORM_PAL] = "PAL", [TV_NORM_PAL_M] = "PAL-M", [TV_NORM_PAL_N] = "PAL-N", [TV_NORM_PAL_NC] = "PAL-Nc", [TV_NORM_PAL_60] = "PAL-60", [TV_NORM_NTSC_M] = "NTSC-M", [TV_NORM_NTSC_J] = "NTSC-J", }; #define NTSC_LIKE_TIMINGS .vrefresh = 60 * fixed1/1.001, \ .vdisplay = 480, \ .vtotal = 525, \ .hvirtual = 660 #define PAL_LIKE_TIMINGS .vrefresh = 50 * fixed1, \ .vdisplay = 576, \ .vtotal = 625, \ .hvirtual = 810 struct ch7006_tv_norm_info ch7006_tv_norms[] = { [TV_NORM_NTSC_M] = { NTSC_LIKE_TIMINGS, .black_level = 0.339 * fixed1, .subc_freq = 3579545 * fixed1, .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC), .voffset = 0, }, [TV_NORM_NTSC_J] = { NTSC_LIKE_TIMINGS, .black_level = 0.286 * fixed1, .subc_freq = 3579545 * fixed1, .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC_J), .voffset = 0, }, [TV_NORM_PAL] = { PAL_LIKE_TIMINGS, .black_level = 0.3 * fixed1, .subc_freq = 4433618.75 * fixed1, .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL), .voffset = 0, }, [TV_NORM_PAL_M] = { NTSC_LIKE_TIMINGS, .black_level = 0.339 * fixed1, .subc_freq = 3575611.433 * fixed1, .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M), .voffset = 16, }, /* The following modes seem to work right but they're * undocumented */ [TV_NORM_PAL_N] = { PAL_LIKE_TIMINGS, .black_level = 0.339 * fixed1, .subc_freq = 4433618.75 * fixed1, .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL), .voffset = 0, }, [TV_NORM_PAL_NC] = { PAL_LIKE_TIMINGS, .black_level = 0.3 * fixed1, .subc_freq = 3582056.25 * fixed1, .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL), .voffset = 0, }, [TV_NORM_PAL_60] = { NTSC_LIKE_TIMINGS, .black_level = 0.3 * fixed1, .subc_freq = 4433618.75 * fixed1, .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M), .voffset = 16, }, }; #define __MODE(f, hd, vd, ht, vt, hsynp, vsynp, \ subc, scale, scale_mask, norm_mask, e_hd, e_vd) { \ .mode = { \ .name = #hd "x" #vd, \ .status = 0, \ .type = DRM_MODE_TYPE_DRIVER, \ .clock = 
f, \ .hdisplay = hd, \ .hsync_start = e_hd + 16, \ .hsync_end = e_hd + 80, \ .htotal = ht, \ .hskew = 0, \ .vdisplay = vd, \ .vsync_start = vd + 10, \ .vsync_end = vd + 26, \ .vtotal = vt, \ .vscan = 0, \ .flags = DRM_MODE_FLAG_##hsynp##HSYNC | \ DRM_MODE_FLAG_##vsynp##VSYNC, \ .vrefresh = 0, \ }, \ .enc_hdisp = e_hd, \ .enc_vdisp = e_vd, \ .subc_coeff = subc * fixed1, \ .dispmode = bitfs(CH7006_DISPMODE_SCALING_RATIO, scale) | \ bitfs(CH7006_DISPMODE_INPUT_RES, e_hd##x##e_vd), \ .valid_scales = scale_mask, \ .valid_norms = norm_mask \ } #define MODE(f, hd, vd, ht, vt, hsynp, vsynp, \ subc, scale, scale_mask, norm_mask) \ __MODE(f, hd, vd, ht, vt, hsynp, vsynp, subc, scale, \ scale_mask, norm_mask, hd, vd) #define NTSC_LIKE (1 << TV_NORM_NTSC_M | 1 << TV_NORM_NTSC_J | \ 1 << TV_NORM_PAL_M | 1 << TV_NORM_PAL_60) #define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC) struct ch7006_mode ch7006_modes[] = { MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE), MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE), MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE), MODE(24671, 512, 384, 784, 525, N, N, 174.0874153, 1_1, 0x3, NTSC_LIKE), MODE(28125, 720, 400, 1125, 500, N, N, 135.742176298, 5_4, 0x6, PAL_LIKE), MODE(34875, 720, 400, 1116, 625, N, N, 109.469496898, 1_1, 0x1, PAL_LIKE), MODE(23790, 720, 400, 945, 420, N, N, 160.475642016, 5_4, 0x4, NTSC_LIKE), MODE(29455, 720, 400, 936, 525, N, N, 129.614941843, 1_1, 0x3, NTSC_LIKE), MODE(25000, 640, 400, 1000, 500, N, N, 152.709948279, 5_4, 0x6, PAL_LIKE), MODE(31500, 640, 400, 1008, 625, N, N, 121.198371646, 1_1, 0x1, PAL_LIKE), MODE(21147, 640, 400, 840, 420, N, N, 180.535097338, 5_4, 0x4, NTSC_LIKE), MODE(26434, 640, 400, 840, 525, N, N, 144.42807787, 1_1, 0x2, NTSC_LIKE), MODE(30210, 640, 400, 840, 600, N, N, 126.374568276, 7_8, 0x1, NTSC_LIKE), MODE(21000, 640, 480, 840, 500, N, N, 181.797557582, 5_4, 0x4, PAL_LIKE), MODE(26250, 
640, 480, 840, 625, N, N, 145.438046066, 1_1, 0x2, PAL_LIKE), MODE(31500, 640, 480, 840, 750, N, N, 121.198371646, 5_6, 0x1, PAL_LIKE), MODE(24671, 640, 480, 784, 525, N, N, 174.0874153, 1_1, 0x4, NTSC_LIKE), MODE(28196, 640, 480, 784, 600, N, N, 152.326488422, 7_8, 0x2, NTSC_LIKE), MODE(30210, 640, 480, 800, 630, N, N, 142.171389101, 5_6, 0x1, NTSC_LIKE), __MODE(29500, 720, 576, 944, 625, P, P, 145.592111636, 1_1, 0x7, PAL_LIKE, 800, 600), MODE(36000, 800, 600, 960, 750, P, P, 119.304647022, 5_6, 0x6, PAL_LIKE), MODE(39000, 800, 600, 936, 836, P, P, 110.127366499, 3_4, 0x1, PAL_LIKE), MODE(39273, 800, 600, 1040, 630, P, P, 145.816809399, 5_6, 0x4, NTSC_LIKE), MODE(43636, 800, 600, 1040, 700, P, P, 131.235128487, 3_4, 0x2, NTSC_LIKE), MODE(47832, 800, 600, 1064, 750, P, P, 119.723275165, 7_10, 0x1, NTSC_LIKE), {} }; struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder, const struct drm_display_mode *drm_mode) { struct ch7006_priv *priv = to_ch7006_priv(encoder); struct ch7006_mode *mode; for (mode = ch7006_modes; mode->mode.clock; mode++) { if (~mode->valid_norms & 1<<priv->norm) continue; if (mode->mode.hdisplay != drm_mode->hdisplay || mode->mode.vdisplay != drm_mode->vdisplay || mode->mode.vtotal != drm_mode->vtotal || mode->mode.htotal != drm_mode->htotal || mode->mode.clock != drm_mode->clock) continue; return mode; } return NULL; } /* Some common HW state calculation code */ void ch7006_setup_levels(struct drm_encoder *encoder) { struct i2c_client *client = drm_i2c_encoder_get_client(encoder); struct ch7006_priv *priv = to_ch7006_priv(encoder); uint8_t *regs = priv->state.regs; struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm]; int gain; int black_level; /* Set DAC_GAIN if the voltage drop between white and black is * high enough. 
*/ if (norm->black_level < 339*fixed1/1000) { gain = 76; regs[CH7006_INPUT_FORMAT] |= CH7006_INPUT_FORMAT_DAC_GAIN; } else { gain = 71; regs[CH7006_INPUT_FORMAT] &= ~CH7006_INPUT_FORMAT_DAC_GAIN; } black_level = round_fixed(norm->black_level*26625)/gain; /* Correct it with the specified brightness. */ black_level = interpolate(90, black_level, 208, priv->brightness); regs[CH7006_BLACK_LEVEL] = bitf(CH7006_BLACK_LEVEL_0, black_level); ch7006_dbg(client, "black level: %d\n", black_level); } void ch7006_setup_subcarrier(struct drm_encoder *encoder) { struct i2c_client *client = drm_i2c_encoder_get_client(encoder); struct ch7006_priv *priv = to_ch7006_priv(encoder); struct ch7006_state *state = &priv->state; struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm]; struct ch7006_mode *mode = priv->mode; uint32_t subc_inc; subc_inc = round_fixed((mode->subc_coeff >> 8) * (norm->subc_freq >> 24)); setbitf(state, CH7006_SUBC_INC0, 28, subc_inc); setbitf(state, CH7006_SUBC_INC1, 24, subc_inc); setbitf(state, CH7006_SUBC_INC2, 20, subc_inc); setbitf(state, CH7006_SUBC_INC3, 16, subc_inc); setbitf(state, CH7006_SUBC_INC4, 12, subc_inc); setbitf(state, CH7006_SUBC_INC5, 8, subc_inc); setbitf(state, CH7006_SUBC_INC6, 4, subc_inc); setbitf(state, CH7006_SUBC_INC7, 0, subc_inc); ch7006_dbg(client, "subcarrier inc: %u\n", subc_inc); } void ch7006_setup_pll(struct drm_encoder *encoder) { struct i2c_client *client = drm_i2c_encoder_get_client(encoder); struct ch7006_priv *priv = to_ch7006_priv(encoder); uint8_t *regs = priv->state.regs; struct ch7006_mode *mode = priv->mode; int n, best_n = 0; int m, best_m = 0; int freq, best_freq = 0; for (n = 0; n < CH7006_MAXN; n++) { for (m = 0; m < CH7006_MAXM; m++) { freq = CH7006_FREQ0*(n+2)/(m+2); if (abs(freq - mode->mode.clock) < abs(best_freq - mode->mode.clock)) { best_freq = freq; best_n = n; best_m = m; } } } regs[CH7006_PLLOV] = bitf(CH7006_PLLOV_N_8, best_n) | bitf(CH7006_PLLOV_M_8, best_m); regs[CH7006_PLLM] = 
bitf(CH7006_PLLM_0, best_m); regs[CH7006_PLLN] = bitf(CH7006_PLLN_0, best_n); if (best_n < 108) regs[CH7006_PLL_CONTROL] |= CH7006_PLL_CONTROL_CAPACITOR; else regs[CH7006_PLL_CONTROL] &= ~CH7006_PLL_CONTROL_CAPACITOR; ch7006_dbg(client, "n=%d m=%d f=%d c=%d\n", best_n, best_m, best_freq, best_n < 108); } void ch7006_setup_power_state(struct drm_encoder *encoder) { struct ch7006_priv *priv = to_ch7006_priv(encoder); uint8_t *power = &priv->state.regs[CH7006_POWER]; int subconnector; subconnector = priv->select_subconnector ? priv->select_subconnector : priv->subconnector; *power = CH7006_POWER_RESET; if (priv->last_dpms == DRM_MODE_DPMS_ON) { switch (subconnector) { case DRM_MODE_SUBCONNECTOR_SVIDEO: *power |= bitfs(CH7006_POWER_LEVEL, CVBS_OFF); break; case DRM_MODE_SUBCONNECTOR_Composite: *power |= bitfs(CH7006_POWER_LEVEL, SVIDEO_OFF); break; case DRM_MODE_SUBCONNECTOR_SCART: *power |= bitfs(CH7006_POWER_LEVEL, NORMAL) | CH7006_POWER_SCART; break; } } else { if (priv->chip_version >= 0x20) *power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF); else *power |= bitfs(CH7006_POWER_LEVEL, POWER_OFF); } } void ch7006_setup_properties(struct drm_encoder *encoder) { struct i2c_client *client = drm_i2c_encoder_get_client(encoder); struct ch7006_priv *priv = to_ch7006_priv(encoder); struct ch7006_state *state = &priv->state; struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm]; struct ch7006_mode *ch_mode = priv->mode; struct drm_display_mode *mode = &ch_mode->mode; uint8_t *regs = state->regs; int flicker, contrast, hpos, vpos; uint64_t scale, aspect; flicker = interpolate(0, 2, 3, priv->flicker); regs[CH7006_FFILTER] = bitf(CH7006_FFILTER_TEXT, flicker) | bitf(CH7006_FFILTER_LUMA, flicker) | bitf(CH7006_FFILTER_CHROMA, 1); contrast = interpolate(0, 5, 7, priv->contrast); regs[CH7006_CONTRAST] = bitf(CH7006_CONTRAST_0, contrast); scale = norm->vtotal*fixed1; do_div(scale, mode->vtotal); aspect = ch_mode->enc_hdisp*fixed1; do_div(aspect, ch_mode->enc_vdisp); hpos 
= round_fixed((norm->hvirtual * aspect - mode->hdisplay * scale) * priv->hmargin * mode->vtotal) / norm->vtotal / 100 / 4; setbitf(state, CH7006_POV, HPOS_8, hpos); setbitf(state, CH7006_HPOS, 0, hpos); vpos = max(0, norm->vdisplay - round_fixed(mode->vdisplay*scale) + norm->voffset) * priv->vmargin / 100 / 2; setbitf(state, CH7006_POV, VPOS_8, vpos); setbitf(state, CH7006_VPOS, 0, vpos); ch7006_dbg(client, "hpos: %d, vpos: %d\n", hpos, vpos); } /* HW access functions */ void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val) { uint8_t buf[] = {addr, val}; int ret; ret = i2c_master_send(client, buf, ARRAY_SIZE(buf)); if (ret < 0) ch7006_err(client, "Error %d writing to subaddress 0x%x\n", ret, addr); } uint8_t ch7006_read(struct i2c_client *client, uint8_t addr) { uint8_t val; int ret; ret = i2c_master_send(client, &addr, sizeof(addr)); if (ret < 0) goto fail; ret = i2c_master_recv(client, &val, sizeof(val)); if (ret < 0) goto fail; return val; fail: ch7006_err(client, "Error %d reading from subaddress 0x%x\n", ret, addr); return 0; } void ch7006_state_load(struct i2c_client *client, struct ch7006_state *state) { ch7006_load_reg(client, state, CH7006_POWER); ch7006_load_reg(client, state, CH7006_DISPMODE); ch7006_load_reg(client, state, CH7006_FFILTER); ch7006_load_reg(client, state, CH7006_BWIDTH); ch7006_load_reg(client, state, CH7006_INPUT_FORMAT); ch7006_load_reg(client, state, CH7006_CLKMODE); ch7006_load_reg(client, state, CH7006_START_ACTIVE); ch7006_load_reg(client, state, CH7006_POV); ch7006_load_reg(client, state, CH7006_BLACK_LEVEL); ch7006_load_reg(client, state, CH7006_HPOS); ch7006_load_reg(client, state, CH7006_VPOS); ch7006_load_reg(client, state, CH7006_INPUT_SYNC); ch7006_load_reg(client, state, CH7006_DETECT); ch7006_load_reg(client, state, CH7006_CONTRAST); ch7006_load_reg(client, state, CH7006_PLLOV); ch7006_load_reg(client, state, CH7006_PLLM); ch7006_load_reg(client, state, CH7006_PLLN); ch7006_load_reg(client, state, 
CH7006_BCLKOUT); ch7006_load_reg(client, state, CH7006_SUBC_INC0); ch7006_load_reg(client, state, CH7006_SUBC_INC1); ch7006_load_reg(client, state, CH7006_SUBC_INC2); ch7006_load_reg(client, state, CH7006_SUBC_INC3); ch7006_load_reg(client, state, CH7006_SUBC_INC4); ch7006_load_reg(client, state, CH7006_SUBC_INC5); ch7006_load_reg(client, state, CH7006_SUBC_INC6); ch7006_load_reg(client, state, CH7006_SUBC_INC7); ch7006_load_reg(client, state, CH7006_PLL_CONTROL); ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0); } void ch7006_state_save(struct i2c_client *client, struct ch7006_state *state) { ch7006_save_reg(client, state, CH7006_POWER); ch7006_save_reg(client, state, CH7006_DISPMODE); ch7006_save_reg(client, state, CH7006_FFILTER); ch7006_save_reg(client, state, CH7006_BWIDTH); ch7006_save_reg(client, state, CH7006_INPUT_FORMAT); ch7006_save_reg(client, state, CH7006_CLKMODE); ch7006_save_reg(client, state, CH7006_START_ACTIVE); ch7006_save_reg(client, state, CH7006_POV); ch7006_save_reg(client, state, CH7006_BLACK_LEVEL); ch7006_save_reg(client, state, CH7006_HPOS); ch7006_save_reg(client, state, CH7006_VPOS); ch7006_save_reg(client, state, CH7006_INPUT_SYNC); ch7006_save_reg(client, state, CH7006_DETECT); ch7006_save_reg(client, state, CH7006_CONTRAST); ch7006_save_reg(client, state, CH7006_PLLOV); ch7006_save_reg(client, state, CH7006_PLLM); ch7006_save_reg(client, state, CH7006_PLLN); ch7006_save_reg(client, state, CH7006_BCLKOUT); ch7006_save_reg(client, state, CH7006_SUBC_INC0); ch7006_save_reg(client, state, CH7006_SUBC_INC1); ch7006_save_reg(client, state, CH7006_SUBC_INC2); ch7006_save_reg(client, state, CH7006_SUBC_INC3); ch7006_save_reg(client, state, CH7006_SUBC_INC4); ch7006_save_reg(client, state, CH7006_SUBC_INC5); ch7006_save_reg(client, state, CH7006_SUBC_INC6); ch7006_save_reg(client, state, CH7006_SUBC_INC7); ch7006_save_reg(client, state, CH7006_PLL_CONTROL); ch7006_save_reg(client, state, CH7006_CALC_SUBC_INC0); 
state->regs[CH7006_FFILTER] = (state->regs[CH7006_FFILTER] & 0xf0) | (state->regs[CH7006_FFILTER] & 0x0c) >> 2 | (state->regs[CH7006_FFILTER] & 0x03) << 2; }
gpl-2.0
lyapota/m8_gpe_lollipop
net/irda/ircomm/ircomm_tty.c
4489
39081
/********************************************************************* * * Filename: ircomm_tty.c * Version: 1.0 * Description: IrCOMM serial TTY driver * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sun Jun 6 21:00:56 1999 * Modified at: Wed Feb 23 00:09:02 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * Sources: serial.c and previous IrCOMM work by Takahide Higuchi * * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved. * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/init.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/termios.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/interrupt.h> #include <linux/device.h> /* for MODULE_ALIAS_CHARDEV_MAJOR */ #include <asm/uaccess.h> #include <net/irda/irda.h> #include <net/irda/irmod.h> #include <net/irda/ircomm_core.h> #include <net/irda/ircomm_param.h> #include <net/irda/ircomm_tty_attach.h> #include <net/irda/ircomm_tty.h> static int ircomm_tty_open(struct tty_struct *tty, struct file *filp); static void ircomm_tty_close(struct tty_struct * tty, struct file *filp); static int ircomm_tty_write(struct tty_struct * tty, const unsigned char *buf, int count); static int ircomm_tty_write_room(struct tty_struct *tty); static void ircomm_tty_throttle(struct tty_struct *tty); static void ircomm_tty_unthrottle(struct tty_struct *tty); static int ircomm_tty_chars_in_buffer(struct tty_struct *tty); static void ircomm_tty_flush_buffer(struct tty_struct *tty); static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); static void ircomm_tty_hangup(struct tty_struct *tty); static void ircomm_tty_do_softint(struct work_struct *work); static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); static void ircomm_tty_stop(struct tty_struct *tty); static int ircomm_tty_data_indication(void *instance, void *sap, struct sk_buff *skb); static int ircomm_tty_control_indication(void *instance, void *sap, struct sk_buff *skb); static void ircomm_tty_flow_indication(void *instance, void *sap, LOCAL_FLOW cmd); #ifdef 
CONFIG_PROC_FS static const struct file_operations ircomm_tty_proc_fops; #endif /* CONFIG_PROC_FS */ static struct tty_driver *driver; static hashbin_t *ircomm_tty = NULL; static const struct tty_operations ops = { .open = ircomm_tty_open, .close = ircomm_tty_close, .write = ircomm_tty_write, .write_room = ircomm_tty_write_room, .chars_in_buffer = ircomm_tty_chars_in_buffer, .flush_buffer = ircomm_tty_flush_buffer, .ioctl = ircomm_tty_ioctl, /* ircomm_tty_ioctl.c */ .tiocmget = ircomm_tty_tiocmget, /* ircomm_tty_ioctl.c */ .tiocmset = ircomm_tty_tiocmset, /* ircomm_tty_ioctl.c */ .throttle = ircomm_tty_throttle, .unthrottle = ircomm_tty_unthrottle, .send_xchar = ircomm_tty_send_xchar, .set_termios = ircomm_tty_set_termios, .stop = ircomm_tty_stop, .start = ircomm_tty_start, .hangup = ircomm_tty_hangup, .wait_until_sent = ircomm_tty_wait_until_sent, #ifdef CONFIG_PROC_FS .proc_fops = &ircomm_tty_proc_fops, #endif /* CONFIG_PROC_FS */ }; /* * Function ircomm_tty_init() * * Init IrCOMM TTY layer/driver * */ static int __init ircomm_tty_init(void) { driver = alloc_tty_driver(IRCOMM_TTY_PORTS); if (!driver) return -ENOMEM; ircomm_tty = hashbin_new(HB_LOCK); if (ircomm_tty == NULL) { IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__); put_tty_driver(driver); return -ENOMEM; } driver->driver_name = "ircomm"; driver->name = "ircomm"; driver->major = IRCOMM_TTY_MAJOR; driver->minor_start = IRCOMM_TTY_MINOR; driver->type = TTY_DRIVER_TYPE_SERIAL; driver->subtype = SERIAL_TYPE_NORMAL; driver->init_termios = tty_std_termios; driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(driver, &ops); if (tty_register_driver(driver)) { IRDA_ERROR("%s(): Couldn't register serial driver\n", __func__); put_tty_driver(driver); return -1; } return 0; } static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self) { IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic 
== IRCOMM_TTY_MAGIC, return;); ircomm_tty_shutdown(self); self->magic = 0; kfree(self); } /* * Function ircomm_tty_cleanup () * * Remove IrCOMM TTY layer/driver * */ static void __exit ircomm_tty_cleanup(void) { int ret; IRDA_DEBUG(4, "%s()\n", __func__ ); ret = tty_unregister_driver(driver); if (ret) { IRDA_ERROR("%s(), failed to unregister driver\n", __func__); return; } hashbin_delete(ircomm_tty, (FREE_FUNC) __ircomm_tty_cleanup); put_tty_driver(driver); } /* * Function ircomm_startup (self) * * * */ static int ircomm_tty_startup(struct ircomm_tty_cb *self) { notify_t notify; int ret = -ENODEV; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); /* Check if already open */ if (test_and_set_bit(ASYNC_B_INITIALIZED, &self->flags)) { IRDA_DEBUG(2, "%s(), already open so break out!\n", __func__ ); return 0; } /* Register with IrCOMM */ irda_notify_init(&notify); /* These callbacks we must handle ourselves */ notify.data_indication = ircomm_tty_data_indication; notify.udata_indication = ircomm_tty_control_indication; notify.flow_indication = ircomm_tty_flow_indication; /* Use the ircomm_tty interface for these ones */ notify.disconnect_indication = ircomm_tty_disconnect_indication; notify.connect_confirm = ircomm_tty_connect_confirm; notify.connect_indication = ircomm_tty_connect_indication; strlcpy(notify.name, "ircomm_tty", sizeof(notify.name)); notify.instance = self; if (!self->ircomm) { self->ircomm = ircomm_open(&notify, self->service_type, self->line); } if (!self->ircomm) goto err; self->slsap_sel = self->ircomm->slsap_sel; /* Connect IrCOMM link with remote device */ ret = ircomm_tty_attach_cable(self); if (ret < 0) { IRDA_ERROR("%s(), error attaching cable!\n", __func__); goto err; } return 0; err: clear_bit(ASYNC_B_INITIALIZED, &self->flags); return ret; } /* * Function ircomm_block_til_ready (self, filp) * * * */ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb 
*self, struct file *filp) { DECLARE_WAITQUEUE(wait, current); int retval; int do_clocal = 0, extra_count = 0; unsigned long flags; struct tty_struct *tty; IRDA_DEBUG(2, "%s()\n", __func__ ); tty = self->tty; /* * If non-blocking mode is set, or the port is not enabled, * then make the check up front and then exit. */ if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ /* nonblock mode is set or port is not enabled */ self->flags |= ASYNC_NORMAL_ACTIVE; IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); return 0; } if (tty->termios->c_cflag & CLOCAL) { IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __func__ ); do_clocal = 1; } /* Wait for carrier detect and the line to become * free (i.e., not in use by the callout). While we are in * this loop, self->open_count is dropped by one, so that * mgsl_close() knows when to free things. We restore it upon * exit, either normal or abnormal. */ retval = 0; add_wait_queue(&self->open_wait, &wait); IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n", __FILE__,__LINE__, tty->driver->name, self->open_count ); /* As far as I can see, we protect open_count - Jean II */ spin_lock_irqsave(&self->spinlock, flags); if (!tty_hung_up_p(filp)) { extra_count = 1; self->open_count--; } spin_unlock_irqrestore(&self->spinlock, flags); self->blocked_open++; while (1) { if (tty->termios->c_cflag & CBAUD) { /* Here, we use to lock those two guys, but * as ircomm_param_request() does it itself, * I don't see the point (and I see the deadlock). * Jean II */ self->settings.dte |= IRCOMM_RTS + IRCOMM_DTR; ircomm_param_request(self, IRCOMM_DTE, TRUE); } current->state = TASK_INTERRUPTIBLE; if (tty_hung_up_p(filp) || !test_bit(ASYNC_B_INITIALIZED, &self->flags)) { retval = (self->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS; break; } /* * Check if link is ready now. 
Even if CLOCAL is * specified, we cannot return before the IrCOMM link is * ready */ if (!test_bit(ASYNC_B_CLOSING, &self->flags) && (do_clocal || (self->settings.dce & IRCOMM_CD)) && self->state == IRCOMM_TTY_READY) { break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n", __FILE__,__LINE__, tty->driver->name, self->open_count ); schedule(); } __set_current_state(TASK_RUNNING); remove_wait_queue(&self->open_wait, &wait); if (extra_count) { /* ++ is not atomic, so this should be protected - Jean II */ spin_lock_irqsave(&self->spinlock, flags); self->open_count++; spin_unlock_irqrestore(&self->spinlock, flags); } self->blocked_open--; IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", __FILE__,__LINE__, tty->driver->name, self->open_count); if (!retval) self->flags |= ASYNC_NORMAL_ACTIVE; return retval; } /* * Function ircomm_tty_open (tty, filp) * * This routine is called when a particular tty device is opened. This * routine is mandatory; if this routine is not filled in, the attempted * open will fail with ENODEV. 
*/ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) { struct ircomm_tty_cb *self; unsigned int line = tty->index; unsigned long flags; int ret; IRDA_DEBUG(2, "%s()\n", __func__ ); /* Check if instance already exists */ self = hashbin_lock_find(ircomm_tty, line, NULL); if (!self) { /* No, so make new instance */ self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); if (self == NULL) { IRDA_ERROR("%s(), kmalloc failed!\n", __func__); return -ENOMEM; } self->magic = IRCOMM_TTY_MAGIC; self->flow = FLOW_STOP; self->line = line; INIT_WORK(&self->tqueue, ircomm_tty_do_softint); self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; self->close_delay = 5*HZ/10; self->closing_wait = 30*HZ; /* Init some important stuff */ init_timer(&self->watchdog_timer); init_waitqueue_head(&self->open_wait); init_waitqueue_head(&self->close_wait); spin_lock_init(&self->spinlock); /* * Force TTY into raw mode by default which is usually what * we want for IrCOMM and IrLPT. This way applications will * not have to twiddle with printcap etc. * * Note this is completely usafe and doesn't work properly */ tty->termios->c_iflag = 0; tty->termios->c_oflag = 0; /* Insert into hash */ hashbin_insert(ircomm_tty, (irda_queue_t *) self, line, NULL); } /* ++ is not atomic, so this should be protected - Jean II */ spin_lock_irqsave(&self->spinlock, flags); self->open_count++; tty->driver_data = self; self->tty = tty; spin_unlock_irqrestore(&self->spinlock, flags); IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, self->line, self->open_count); /* Not really used by us, but lets do it anyway */ self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0; /* * If the port is the middle of closing, bail out now */ if (tty_hung_up_p(filp) || test_bit(ASYNC_B_CLOSING, &self->flags)) { /* Hm, why are we blocking on ASYNC_CLOSING if we * do return -EAGAIN/-ERESTARTSYS below anyway? 
* IMHO it's either not needed in the first place * or for some reason we need to make sure the async * closing has been finished - if so, wouldn't we * probably better sleep uninterruptible? */ if (wait_event_interruptible(self->close_wait, !test_bit(ASYNC_B_CLOSING, &self->flags))) { IRDA_WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n", __func__); return -ERESTARTSYS; } #ifdef SERIAL_DO_RESTART return (self->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS; #else return -EAGAIN; #endif } /* Check if this is a "normal" ircomm device, or an irlpt device */ if (line < 0x10) { self->service_type = IRCOMM_3_WIRE | IRCOMM_9_WIRE; self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */ /* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */ self->settings.dce = IRCOMM_CTS | IRCOMM_CD | IRCOMM_DSR | IRCOMM_RI; /* Default line settings */ IRDA_DEBUG(2, "%s(), IrCOMM device\n", __func__ ); } else { IRDA_DEBUG(2, "%s(), IrLPT device\n", __func__ ); self->service_type = IRCOMM_3_WIRE_RAW; self->settings.service_type = IRCOMM_3_WIRE_RAW; /* Default */ } ret = ircomm_tty_startup(self); if (ret) return ret; ret = ircomm_tty_block_til_ready(self, filp); if (ret) { IRDA_DEBUG(2, "%s(), returning after block_til_ready with %d\n", __func__ , ret); return ret; } return 0; } /* * Function ircomm_tty_close (tty, filp) * * This routine is called when a particular tty device is closed. * */ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long flags; IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); spin_lock_irqsave(&self->spinlock, flags); if (tty_hung_up_p(filp)) { spin_unlock_irqrestore(&self->spinlock, flags); IRDA_DEBUG(0, "%s(), returning 1\n", __func__ ); return; } if ((tty->count == 1) && (self->open_count != 1)) { /* * Uh, oh. 
tty->count is 1, which means that the tty * structure will be freed. state->count should always * be one in these conditions. If it's greater than * one, we've got real problems, since it means the * serial port won't be shutdown. */ IRDA_DEBUG(0, "%s(), bad serial port count; " "tty->count is 1, state->count is %d\n", __func__ , self->open_count); self->open_count = 1; } if (--self->open_count < 0) { IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n", __func__, self->line, self->open_count); self->open_count = 0; } if (self->open_count) { spin_unlock_irqrestore(&self->spinlock, flags); IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ ); return; } /* Hum... Should be test_and_set_bit ??? - Jean II */ set_bit(ASYNC_B_CLOSING, &self->flags); /* We need to unlock here (we were unlocking at the end of this * function), because tty_wait_until_sent() may schedule. * I don't know if the rest should be protected somehow, * so someone should check. - Jean II */ spin_unlock_irqrestore(&self->spinlock, flags); /* * Now we wait for the transmit buffer to clear; and we notify * the line discipline to only process XON/XOFF characters. 
*/ tty->closing = 1; if (self->closing_wait != ASYNC_CLOSING_WAIT_NONE) tty_wait_until_sent_from_close(tty, self->closing_wait); ircomm_tty_shutdown(self); tty_driver_flush_buffer(tty); tty_ldisc_flush(tty); tty->closing = 0; self->tty = NULL; if (self->blocked_open) { if (self->close_delay) schedule_timeout_interruptible(self->close_delay); wake_up_interruptible(&self->open_wait); } self->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); wake_up_interruptible(&self->close_wait); } /* * Function ircomm_tty_flush_buffer (tty) * * * */ static void ircomm_tty_flush_buffer(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); /* * Let do_softint() do this to avoid race condition with * do_softint() ;-) */ schedule_work(&self->tqueue); } /* * Function ircomm_tty_do_softint (work) * * We use this routine to give the write wakeup to the user at at a * safe time (as fast as possible after write have completed). This * can be compared to the Tx interrupt. */ static void ircomm_tty_do_softint(struct work_struct *work) { struct ircomm_tty_cb *self = container_of(work, struct ircomm_tty_cb, tqueue); struct tty_struct *tty; unsigned long flags; struct sk_buff *skb, *ctrl_skb; IRDA_DEBUG(2, "%s()\n", __func__ ); if (!self || self->magic != IRCOMM_TTY_MAGIC) return; tty = self->tty; if (!tty) return; /* Unlink control buffer */ spin_lock_irqsave(&self->spinlock, flags); ctrl_skb = self->ctrl_skb; self->ctrl_skb = NULL; spin_unlock_irqrestore(&self->spinlock, flags); /* Flush control buffer if any */ if(ctrl_skb) { if(self->flow == FLOW_START) ircomm_control_request(self->ircomm, ctrl_skb); /* Drop reference count - see ircomm_ttp_data_request(). 
*/ dev_kfree_skb(ctrl_skb); } if (tty->hw_stopped) return; /* Unlink transmit buffer */ spin_lock_irqsave(&self->spinlock, flags); skb = self->tx_skb; self->tx_skb = NULL; spin_unlock_irqrestore(&self->spinlock, flags); /* Flush transmit buffer if any */ if (skb) { ircomm_tty_do_event(self, IRCOMM_TTY_DATA_REQUEST, skb, NULL); /* Drop reference count - see ircomm_ttp_data_request(). */ dev_kfree_skb(skb); } /* Check if user (still) wants to be waken up */ tty_wakeup(tty); } /* * Function ircomm_tty_write (tty, buf, count) * * This routine is called by the kernel to write a series of characters * to the tty device. The characters may come from user space or kernel * space. This routine will return the number of characters actually * accepted for writing. This routine is mandatory. */ static int ircomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long flags; struct sk_buff *skb; int tailroom = 0; int len = 0; int size; IRDA_DEBUG(2, "%s(), count=%d, hw_stopped=%d\n", __func__ , count, tty->hw_stopped); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); /* We may receive packets from the TTY even before we have finished * our setup. Not cool. * The problem is that we don't know the final header and data size * to create the proper skb, so any skb we would create would have * bogus header and data size, so need care. * We use a bogus header size to safely detect this condition. * Another problem is that hw_stopped was set to 0 way before it * should be, so we would drop this skb. It should now be fixed. * One option is to not accept data until we are properly setup. * But, I suspect that when it happens, the ppp line discipline * just "drops" the data, which might screw up connect scripts. * The second option is to create a "safe skb", with large header * and small size (see ircomm_tty_open() for values). 
* We just need to make sure that when the real values get filled, * we don't mess up the original "safe skb" (see tx_data_size). * Jean II */ if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) { IRDA_DEBUG(1, "%s() : not initialised\n", __func__); #ifdef IRCOMM_NO_TX_BEFORE_INIT /* We didn't consume anything, TTY will retry */ return 0; #endif } if (count < 1) return 0; /* Protect our manipulation of self->tx_skb and related */ spin_lock_irqsave(&self->spinlock, flags); /* Fetch current transmit buffer */ skb = self->tx_skb; /* * Send out all the data we get, possibly as multiple fragmented * frames, but this will only happen if the data is larger than the * max data size. The normal case however is just the opposite, and * this function may be called multiple times, and will then actually * defragment the data and send it out as one packet as soon as * possible, but at a safer point in time */ while (count) { size = count; /* Adjust data size to the max data size */ if (size > self->max_data_size) size = self->max_data_size; /* * Do we already have a buffer ready for transmit, or do * we need to allocate a new frame */ if (skb) { /* * Any room for more data at the end of the current * transmit buffer? Cannot use skb_tailroom, since * dev_alloc_skb gives us a larger skb than we * requested * Note : use tx_data_size, because max_data_size * may have changed and we don't want to overwrite * the skb. 
- Jean II */ if ((tailroom = (self->tx_data_size - skb->len)) > 0) { /* Adjust data to tailroom */ if (size > tailroom) size = tailroom; } else { /* * Current transmit frame is full, so break * out, so we can send it as soon as possible */ break; } } else { /* Prepare a full sized frame */ skb = alloc_skb(self->max_data_size+ self->max_header_size, GFP_ATOMIC); if (!skb) { spin_unlock_irqrestore(&self->spinlock, flags); return -ENOBUFS; } skb_reserve(skb, self->max_header_size); self->tx_skb = skb; /* Remember skb size because max_data_size may * change later on - Jean II */ self->tx_data_size = self->max_data_size; } /* Copy data */ memcpy(skb_put(skb,size), buf + len, size); count -= size; len += size; } spin_unlock_irqrestore(&self->spinlock, flags); /* * Schedule a new thread which will transmit the frame as soon * as possible, but at a safe point in time. We do this so the * "user" can give us data multiple times, as PPP does (because of * its 256 byte tx buffer). We will then defragment and send out * all this data as one single packet. */ schedule_work(&self->tqueue); return len; } /* * Function ircomm_tty_write_room (tty) * * This routine returns the numbers of characters the tty driver will * accept for queuing to be written. This number is subject to change as * output buffers get emptied, or if the output flow control is acted. */ static int ircomm_tty_write_room(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long flags; int ret; IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); #ifdef IRCOMM_NO_TX_BEFORE_INIT /* max_header_size tells us if the channel is initialised or not. */ if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) /* Don't bother us yet */ return 0; #endif /* Check if we are allowed to transmit any data. * hw_stopped is the regular flow control. 
* Jean II */ if (tty->hw_stopped) ret = 0; else { spin_lock_irqsave(&self->spinlock, flags); if (self->tx_skb) ret = self->tx_data_size - self->tx_skb->len; else ret = self->max_data_size; spin_unlock_irqrestore(&self->spinlock, flags); } IRDA_DEBUG(2, "%s(), ret=%d\n", __func__ , ret); return ret; } /* * Function ircomm_tty_wait_until_sent (tty, timeout) * * This routine waits until the device has written out all of the * characters in its transmitter FIFO. */ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long orig_jiffies, poll_time; unsigned long flags; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); orig_jiffies = jiffies; /* Set poll time to 200 ms */ poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200)); spin_lock_irqsave(&self->spinlock, flags); while (self->tx_skb && self->tx_skb->len) { spin_unlock_irqrestore(&self->spinlock, flags); schedule_timeout_interruptible(poll_time); spin_lock_irqsave(&self->spinlock, flags); if (signal_pending(current)) break; if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } spin_unlock_irqrestore(&self->spinlock, flags); current->state = TASK_RUNNING; } /* * Function ircomm_tty_throttle (tty) * * This routine notifies the tty driver that input buffers for the line * discipline are close to full, and it should somehow signal that no * more characters should be sent to the tty. */ static void ircomm_tty_throttle(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); /* Software flow control? */ if (I_IXOFF(tty)) ircomm_tty_send_xchar(tty, STOP_CHAR(tty)); /* Hardware flow control? 
*/ if (tty->termios->c_cflag & CRTSCTS) { self->settings.dte &= ~IRCOMM_RTS; self->settings.dte |= IRCOMM_DELTA_RTS; ircomm_param_request(self, IRCOMM_DTE, TRUE); } ircomm_flow_request(self->ircomm, FLOW_STOP); } /* * Function ircomm_tty_unthrottle (tty) * * This routine notifies the tty drivers that it should signals that * characters can now be sent to the tty without fear of overrunning the * input buffers of the line disciplines. */ static void ircomm_tty_unthrottle(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); /* Using software flow control? */ if (I_IXOFF(tty)) { ircomm_tty_send_xchar(tty, START_CHAR(tty)); } /* Using hardware flow control? */ if (tty->termios->c_cflag & CRTSCTS) { self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS); ircomm_param_request(self, IRCOMM_DTE, TRUE); IRDA_DEBUG(1, "%s(), FLOW_START\n", __func__ ); } ircomm_flow_request(self->ircomm, FLOW_START); } /* * Function ircomm_tty_chars_in_buffer (tty) * * Indicates if there are any data in the buffer * */ static int ircomm_tty_chars_in_buffer(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long flags; int len = 0; IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); spin_lock_irqsave(&self->spinlock, flags); if (self->tx_skb) len = self->tx_skb->len; spin_unlock_irqrestore(&self->spinlock, flags); return len; } static void ircomm_tty_shutdown(struct ircomm_tty_cb *self) { unsigned long flags; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); IRDA_DEBUG(0, "%s()\n", __func__ ); if (!test_and_clear_bit(ASYNC_B_INITIALIZED, &self->flags)) return; ircomm_tty_detach_cable(self); spin_lock_irqsave(&self->spinlock, flags); del_timer(&self->watchdog_timer); /* Free parameter 
buffer */ if (self->ctrl_skb) { dev_kfree_skb(self->ctrl_skb); self->ctrl_skb = NULL; } /* Free transmit buffer */ if (self->tx_skb) { dev_kfree_skb(self->tx_skb); self->tx_skb = NULL; } if (self->ircomm) { ircomm_close(self->ircomm); self->ircomm = NULL; } spin_unlock_irqrestore(&self->spinlock, flags); } /* * Function ircomm_tty_hangup (tty) * * This routine notifies the tty driver that it should hangup the tty * device. * */ static void ircomm_tty_hangup(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long flags; IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); /* ircomm_tty_flush_buffer(tty); */ ircomm_tty_shutdown(self); /* I guess we need to lock here - Jean II */ spin_lock_irqsave(&self->spinlock, flags); self->flags &= ~ASYNC_NORMAL_ACTIVE; self->tty = NULL; self->open_count = 0; spin_unlock_irqrestore(&self->spinlock, flags); wake_up_interruptible(&self->open_wait); } /* * Function ircomm_tty_send_xchar (tty, ch) * * This routine is used to send a high-priority XON/XOFF character to * the device. */ static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch) { IRDA_DEBUG(0, "%s(), not impl\n", __func__ ); } /* * Function ircomm_tty_start (tty) * * This routine notifies the tty driver that it resume sending * characters to the tty device. */ void ircomm_tty_start(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; ircomm_flow_request(self->ircomm, FLOW_START); } /* * Function ircomm_tty_stop (tty) * * This routine notifies the tty driver that it should stop outputting * characters to the tty device. 
*/ static void ircomm_tty_stop(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); ircomm_flow_request(self->ircomm, FLOW_STOP); } /* * Function ircomm_check_modem_status (self) * * Check for any changes in the DCE's line settings. This function should * be called whenever the dce parameter settings changes, to update the * flow control settings and other things */ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) { struct tty_struct *tty; int status; IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); tty = self->tty; status = self->settings.dce; if (status & IRCOMM_DCE_DELTA_ANY) { /*wake_up_interruptible(&self->delta_msr_wait);*/ } if ((self->flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) { IRDA_DEBUG(2, "%s(), ircomm%d CD now %s...\n", __func__ , self->line, (status & IRCOMM_CD) ? 
"on" : "off"); if (status & IRCOMM_CD) { wake_up_interruptible(&self->open_wait); } else { IRDA_DEBUG(2, "%s(), Doing serial hangup..\n", __func__ ); if (tty) tty_hangup(tty); /* Hangup will remote the tty, so better break out */ return; } } if (self->flags & ASYNC_CTS_FLOW) { if (tty->hw_stopped) { if (status & IRCOMM_CTS) { IRDA_DEBUG(2, "%s(), CTS tx start...\n", __func__ ); tty->hw_stopped = 0; /* Wake up processes blocked on open */ wake_up_interruptible(&self->open_wait); schedule_work(&self->tqueue); return; } } else { if (!(status & IRCOMM_CTS)) { IRDA_DEBUG(2, "%s(), CTS tx stop...\n", __func__ ); tty->hw_stopped = 1; } } } } /* * Function ircomm_tty_data_indication (instance, sap, skb) * * Handle incoming data, and deliver it to the line discipline * */ static int ircomm_tty_data_indication(void *instance, void *sap, struct sk_buff *skb) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); if (!self->tty) { IRDA_DEBUG(0, "%s(), no tty!\n", __func__ ); return 0; } /* * If we receive data when hardware is stopped then something is wrong. * We try to poll the peers line settings to check if we are up todate. * Devices like WinCE can do this, and since they don't send any * params, we can just as well declare the hardware for running. 
*/ if (self->tty->hw_stopped && (self->flow == FLOW_START)) { IRDA_DEBUG(0, "%s(), polling for line settings!\n", __func__ ); ircomm_param_request(self, IRCOMM_POLL, TRUE); /* We can just as well declare the hardware for running */ ircomm_tty_send_initial_parameters(self); ircomm_tty_link_established(self); } /* * Use flip buffer functions since the code may be called from interrupt * context */ tty_insert_flip_string(self->tty, skb->data, skb->len); tty_flip_buffer_push(self->tty); /* No need to kfree_skb - see ircomm_ttp_data_indication() */ return 0; } /* * Function ircomm_tty_control_indication (instance, sap, skb) * * Parse all incoming parameters (easy!) * */ static int ircomm_tty_control_indication(void *instance, void *sap, struct sk_buff *skb) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; int clen; IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); clen = skb->data[0]; irda_param_extract_all(self, skb->data+1, IRDA_MIN(skb->len-1, clen), &ircomm_param_info); /* No need to kfree_skb - see ircomm_control_indication() */ return 0; } /* * Function ircomm_tty_flow_indication (instance, sap, cmd) * * This function is called by IrTTP when it wants us to slow down the * transmission of data. We just mark the hardware as stopped, and wait * for IrTTP to notify us that things are OK again. 
*/ static void ircomm_tty_flow_indication(void *instance, void *sap, LOCAL_FLOW cmd) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; struct tty_struct *tty; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); tty = self->tty; switch (cmd) { case FLOW_START: IRDA_DEBUG(2, "%s(), hw start!\n", __func__ ); tty->hw_stopped = 0; /* ircomm_tty_do_softint will take care of the rest */ schedule_work(&self->tqueue); break; default: /* If we get here, something is very wrong, better stop */ case FLOW_STOP: IRDA_DEBUG(2, "%s(), hw stopped!\n", __func__ ); tty->hw_stopped = 1; break; } self->flow = cmd; } #ifdef CONFIG_PROC_FS static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m) { char sep; seq_printf(m, "State: %s\n", ircomm_tty_state[self->state]); seq_puts(m, "Service type: "); if (self->service_type & IRCOMM_9_WIRE) seq_puts(m, "9_WIRE"); else if (self->service_type & IRCOMM_3_WIRE) seq_puts(m, "3_WIRE"); else if (self->service_type & IRCOMM_3_WIRE_RAW) seq_puts(m, "3_WIRE_RAW"); else seq_puts(m, "No common service type!\n"); seq_putc(m, '\n'); seq_printf(m, "Port name: %s\n", self->settings.port_name); seq_printf(m, "DTE status:"); sep = ' '; if (self->settings.dte & IRCOMM_RTS) { seq_printf(m, "%cRTS", sep); sep = '|'; } if (self->settings.dte & IRCOMM_DTR) { seq_printf(m, "%cDTR", sep); sep = '|'; } seq_putc(m, '\n'); seq_puts(m, "DCE status:"); sep = ' '; if (self->settings.dce & IRCOMM_CTS) { seq_printf(m, "%cCTS", sep); sep = '|'; } if (self->settings.dce & IRCOMM_DSR) { seq_printf(m, "%cDSR", sep); sep = '|'; } if (self->settings.dce & IRCOMM_CD) { seq_printf(m, "%cCD", sep); sep = '|'; } if (self->settings.dce & IRCOMM_RI) { seq_printf(m, "%cRI", sep); sep = '|'; } seq_putc(m, '\n'); seq_puts(m, "Configuration: "); if (!self->settings.null_modem) seq_puts(m, "DTE <-> DCE\n"); else seq_puts(m, "DTE <-> DTE (null modem emulation)\n"); seq_printf(m, "Data rate: %d\n", 
self->settings.data_rate); seq_puts(m, "Flow control:"); sep = ' '; if (self->settings.flow_control & IRCOMM_XON_XOFF_IN) { seq_printf(m, "%cXON_XOFF_IN", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_XON_XOFF_OUT) { seq_printf(m, "%cXON_XOFF_OUT", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_RTS_CTS_IN) { seq_printf(m, "%cRTS_CTS_IN", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_RTS_CTS_OUT) { seq_printf(m, "%cRTS_CTS_OUT", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_DSR_DTR_IN) { seq_printf(m, "%cDSR_DTR_IN", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_DSR_DTR_OUT) { seq_printf(m, "%cDSR_DTR_OUT", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_ENQ_ACK_IN) { seq_printf(m, "%cENQ_ACK_IN", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_ENQ_ACK_OUT) { seq_printf(m, "%cENQ_ACK_OUT", sep); sep = '|'; } seq_putc(m, '\n'); seq_puts(m, "Flags:"); sep = ' '; if (self->flags & ASYNC_CTS_FLOW) { seq_printf(m, "%cASYNC_CTS_FLOW", sep); sep = '|'; } if (self->flags & ASYNC_CHECK_CD) { seq_printf(m, "%cASYNC_CHECK_CD", sep); sep = '|'; } if (self->flags & ASYNC_INITIALIZED) { seq_printf(m, "%cASYNC_INITIALIZED", sep); sep = '|'; } if (self->flags & ASYNC_LOW_LATENCY) { seq_printf(m, "%cASYNC_LOW_LATENCY", sep); sep = '|'; } if (self->flags & ASYNC_CLOSING) { seq_printf(m, "%cASYNC_CLOSING", sep); sep = '|'; } if (self->flags & ASYNC_NORMAL_ACTIVE) { seq_printf(m, "%cASYNC_NORMAL_ACTIVE", sep); sep = '|'; } seq_putc(m, '\n'); seq_printf(m, "Role: %s\n", self->client ? "client" : "server"); seq_printf(m, "Open count: %d\n", self->open_count); seq_printf(m, "Max data size: %d\n", self->max_data_size); seq_printf(m, "Max header size: %d\n", self->max_header_size); if (self->tty) seq_printf(m, "Hardware: %s\n", self->tty->hw_stopped ? 
"Stopped" : "Running"); } static int ircomm_tty_proc_show(struct seq_file *m, void *v) { struct ircomm_tty_cb *self; unsigned long flags; spin_lock_irqsave(&ircomm_tty->hb_spinlock, flags); self = (struct ircomm_tty_cb *) hashbin_get_first(ircomm_tty); while (self != NULL) { if (self->magic != IRCOMM_TTY_MAGIC) break; ircomm_tty_line_info(self, m); self = (struct ircomm_tty_cb *) hashbin_get_next(ircomm_tty); } spin_unlock_irqrestore(&ircomm_tty->hb_spinlock, flags); return 0; } static int ircomm_tty_proc_open(struct inode *inode, struct file *file) { return single_open(file, ircomm_tty_proc_show, NULL); } static const struct file_operations ircomm_tty_proc_fops = { .owner = THIS_MODULE, .open = ircomm_tty_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif /* CONFIG_PROC_FS */ MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); MODULE_DESCRIPTION("IrCOMM serial TTY driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(IRCOMM_TTY_MAJOR); module_init(ircomm_tty_init); module_exit(ircomm_tty_cleanup);
gpl-2.0
anryl/shooteru_HTC
tools/perf/util/usage.c
4745
1690
/* * GIT - The information manager from hell * * Copyright (C) Linus Torvalds, 2005 */ #include "util.h" static void report(const char *prefix, const char *err, va_list params) { char msg[1024]; vsnprintf(msg, sizeof(msg), err, params); fprintf(stderr, " %s%s\n", prefix, msg); } static NORETURN void usage_builtin(const char *err) { fprintf(stderr, "\n Usage: %s\n", err); exit(129); } static NORETURN void die_builtin(const char *err, va_list params) { report(" Fatal: ", err, params); exit(128); } static void error_builtin(const char *err, va_list params) { report(" Error: ", err, params); } static void warn_builtin(const char *warn, va_list params) { report(" Warning: ", warn, params); } /* If we are in a dlopen()ed .so write to a global variable would segfault * (ugh), so keep things static. */ static void (*usage_routine)(const char *err) NORETURN = usage_builtin; static void (*die_routine)(const char *err, va_list params) NORETURN = die_builtin; static void (*error_routine)(const char *err, va_list params) = error_builtin; static void (*warn_routine)(const char *err, va_list params) = warn_builtin; void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN) { die_routine = routine; } void usage(const char *err) { usage_routine(err); } void die(const char *err, ...) { va_list params; va_start(params, err); die_routine(err, params); va_end(params); } int error(const char *err, ...) { va_list params; va_start(params, err); error_routine(err, params); va_end(params); return -1; } void warning(const char *warn, ...) { va_list params; va_start(params, warn); warn_routine(warn, params); va_end(params); }
gpl-2.0
messi2050/android_kernel_huawei_msm8610
arch/arm/mach-pxa/tavorevb3.c
5001
3033
/* * linux/arch/arm/mach-pxa/tavorevb3.c * * Support for the Marvell EVB3 Development Platform. * * Copyright: (C) Copyright 2008-2010 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * publishhed by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/i2c/pxa-i2c.h> #include <linux/gpio.h> #include <linux/mfd/88pm860x.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/pxa930.h> #include "devices.h" #include "generic.h" #define TAVOREVB3_NR_IRQS (IRQ_BOARD_START + 24) static mfp_cfg_t evb3_mfp_cfg[] __initdata = { /* UART */ GPIO53_UART1_TXD, GPIO54_UART1_RXD, /* PMIC */ PMIC_INT_GPIO83, }; #if defined(CONFIG_I2C_PXA) || defined(CONFIG_I2C_PXA_MODULE) static struct pm860x_touch_pdata evb3_touch = { .gpadc_prebias = 1, .slot_cycle = 1, .tsi_prebias = 6, .pen_prebias = 16, .pen_prechg = 2, .res_x = 300, }; static struct pm860x_backlight_pdata evb3_backlight[] = { { .id = PM8606_ID_BACKLIGHT, .iset = PM8606_WLED_CURRENT(24), .flags = PM8606_BACKLIGHT1, }, {}, }; static struct pm860x_led_pdata evb3_led[] = { { .id = PM8606_ID_LED, .iset = PM8606_LED_CURRENT(12), .flags = PM8606_LED1_RED, }, { .id = PM8606_ID_LED, .iset = PM8606_LED_CURRENT(12), .flags = PM8606_LED1_GREEN, }, { .id = PM8606_ID_LED, .iset = PM8606_LED_CURRENT(12), .flags = PM8606_LED1_BLUE, }, { .id = PM8606_ID_LED, .iset = PM8606_LED_CURRENT(12), .flags = PM8606_LED2_RED, }, { .id = PM8606_ID_LED, .iset = PM8606_LED_CURRENT(12), .flags = PM8606_LED2_GREEN, }, { .id = PM8606_ID_LED, .iset = PM8606_LED_CURRENT(12), .flags = PM8606_LED2_BLUE, }, }; static struct pm860x_platform_data evb3_pm8607_info = { .touch = &evb3_touch, .backlight = &evb3_backlight[0], .led = &evb3_led[0], .companion_addr = 0x10, .irq_mode = 0, .irq_base = 
IRQ_BOARD_START, .i2c_port = GI2C_PORT, }; static struct i2c_board_info evb3_i2c_info[] = { { .type = "88PM860x", .addr = 0x34, .platform_data = &evb3_pm8607_info, .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO83)), }, }; static void __init evb3_init_i2c(void) { pxa_set_i2c_info(NULL); i2c_register_board_info(0, ARRAY_AND_SIZE(evb3_i2c_info)); } #else static inline void evb3_init_i2c(void) {} #endif static void __init evb3_init(void) { /* initialize MFP configurations */ pxa3xx_mfp_config(ARRAY_AND_SIZE(evb3_mfp_cfg)); pxa_set_ffuart_info(NULL); evb3_init_i2c(); } MACHINE_START(TAVOREVB3, "PXA950 Evaluation Board (aka TavorEVB3)") .atag_offset = 0x100, .map_io = pxa3xx_map_io, .nr_irqs = TAVOREVB3_NR_IRQS, .init_irq = pxa3xx_init_irq, .handle_irq = pxa3xx_handle_irq, .timer = &pxa_timer, .init_machine = evb3_init, .restart = pxa_restart, MACHINE_END
gpl-2.0
jamison904/kernel_m919
drivers/gpu/drm/nouveau/nv50_crtc.c
5257
22474
/* * Copyright (C) 2008 Maarten Maathuis. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include "drmP.h" #include "drm_mode.h" #include "drm_crtc_helper.h" #define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) #include "nouveau_reg.h" #include "nouveau_drv.h" #include "nouveau_hw.h" #include "nouveau_encoder.h" #include "nouveau_crtc.h" #include "nouveau_fb.h" #include "nouveau_connector.h" #include "nv50_display.h" static void nv50_crtc_lut_load(struct drm_crtc *crtc) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); int i; NV_DEBUG_KMS(crtc->dev, "\n"); for (i = 0; i < 256; i++) { writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0); writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2); writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4); } if (nv_crtc->lut.depth == 30) { writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0); writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2); writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4); } } int nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked) { struct drm_device *dev = nv_crtc->base.dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *evo = nv50_display(dev)->master; int index = nv_crtc->index, ret; NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked"); if (blanked) { nv_crtc->cursor.hide(nv_crtc, false); ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 
7 : 5); if (ret) { NV_ERROR(dev, "no space while blanking crtc\n"); return ret; } BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK); OUT_RING(evo, 0); if (dev_priv->chipset != 0x50) { BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE); } BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE); } else { if (nv_crtc->cursor.visible) nv_crtc->cursor.show(nv_crtc, false); else nv_crtc->cursor.hide(nv_crtc, false); ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8); if (ret) { NV_ERROR(dev, "no space while unblanking crtc\n"); return ret; } BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); OUT_RING(evo, nv_crtc->lut.depth == 8 ? NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON); OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8); if (dev_priv->chipset != 0x50) { BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); OUT_RING(evo, NvEvoVRAM); } BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2); OUT_RING(evo, nv_crtc->fb.offset >> 8); OUT_RING(evo, 0); BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); if (dev_priv->chipset != 0x50) if (nv_crtc->fb.tile_flags == 0x7a00 || nv_crtc->fb.tile_flags == 0xfe00) OUT_RING(evo, NvEvoFB32); else if (nv_crtc->fb.tile_flags == 0x7000) OUT_RING(evo, NvEvoFB16); else OUT_RING(evo, NvEvoVRAM_LP); else OUT_RING(evo, NvEvoVRAM_LP); } nv_crtc->fb.blanked = blanked; return 0; } static int nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) { struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master; struct nouveau_connector *nv_connector; struct drm_connector *connector; int head = nv_crtc->index, ret; u32 mode = 0x00; nv_connector = nouveau_crtc_connector_get(nv_crtc); connector = &nv_connector->base; if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) { if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3) mode = 
DITHERING_MODE_DYNAMIC2X2; } else { mode = nv_connector->dithering_mode; } if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) { if (connector->display_info.bpc >= 8) mode |= DITHERING_DEPTH_8BPC; } else { mode |= nv_connector->dithering_depth; } ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); if (ret == 0) { BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1); OUT_RING (evo, mode); if (update) { BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); OUT_RING (evo, 0); FIRE_RING (evo); } } return ret; } static int nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) { struct drm_device *dev = nv_crtc->base.dev; struct nouveau_channel *evo = nv50_display(dev)->master; int ret; int adj; u32 hue, vib; NV_DEBUG_KMS(dev, "vibrance = %i, hue = %i\n", nv_crtc->color_vibrance, nv_crtc->vibrant_hue); ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); if (ret) { NV_ERROR(dev, "no space while setting color vibrance\n"); return ret; } adj = (nv_crtc->color_vibrance > 0) ? 50 : 0; vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff; hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff; BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1); OUT_RING (evo, (hue << 20) | (vib << 8)); if (update) { BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); OUT_RING (evo, 0); FIRE_RING (evo); } return 0; } struct nouveau_connector * nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) { struct drm_device *dev = nv_crtc->base.dev; struct drm_connector *connector; struct drm_crtc *crtc = to_drm_crtc(nv_crtc); /* The safest approach is to find an encoder with the right crtc, that * is also linked to a connector. 
*/ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder) if (connector->encoder->crtc == crtc) return nouveau_connector(connector); } return NULL; } static int nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) { struct nouveau_connector *nv_connector; struct drm_crtc *crtc = &nv_crtc->base; struct drm_device *dev = crtc->dev; struct nouveau_channel *evo = nv50_display(dev)->master; struct drm_display_mode *umode = &crtc->mode; struct drm_display_mode *omode; int scaling_mode, ret; u32 ctrl = 0, oX, oY; NV_DEBUG_KMS(dev, "\n"); nv_connector = nouveau_crtc_connector_get(nv_crtc); if (!nv_connector || !nv_connector->native_mode) { NV_ERROR(dev, "no native mode, forcing panel scaling\n"); scaling_mode = DRM_MODE_SCALE_NONE; } else { scaling_mode = nv_connector->scaling_mode; } /* start off at the resolution we programmed the crtc for, this * effectively handles NONE/FULL scaling */ if (scaling_mode != DRM_MODE_SCALE_NONE) omode = nv_connector->native_mode; else omode = umode; oX = omode->hdisplay; oY = omode->vdisplay; if (omode->flags & DRM_MODE_FLAG_DBLSCAN) oY *= 2; /* add overscan compensation if necessary, will keep the aspect * ratio the same as the backend mode unless overridden by the * user setting both hborder and vborder properties. 
*/ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON || (nv_connector->underscan == UNDERSCAN_AUTO && nv_connector->edid && drm_detect_hdmi_monitor(nv_connector->edid)))) { u32 bX = nv_connector->underscan_hborder; u32 bY = nv_connector->underscan_vborder; u32 aspect = (oY << 19) / oX; if (bX) { oX -= (bX * 2); if (bY) oY -= (bY * 2); else oY = ((oX * aspect) + (aspect / 2)) >> 19; } else { oX -= (oX >> 4) + 32; if (bY) oY -= (bY * 2); else oY = ((oX * aspect) + (aspect / 2)) >> 19; } } /* handle CENTER/ASPECT scaling, taking into account the areas * removed already for overscan compensation */ switch (scaling_mode) { case DRM_MODE_SCALE_CENTER: oX = min((u32)umode->hdisplay, oX); oY = min((u32)umode->vdisplay, oY); /* fall-through */ case DRM_MODE_SCALE_ASPECT: if (oY < oX) { u32 aspect = (umode->hdisplay << 19) / umode->vdisplay; oX = ((oY * aspect) + (aspect / 2)) >> 19; } else { u32 aspect = (umode->vdisplay << 19) / umode->hdisplay; oY = ((oX * aspect) + (aspect / 2)) >> 19; } break; default: break; } if (umode->hdisplay != oX || umode->vdisplay != oY || umode->flags & DRM_MODE_FLAG_INTERLACE || umode->flags & DRM_MODE_FLAG_DBLSCAN) ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE; ret = RING_SPACE(evo, 5); if (ret) return ret; BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1); OUT_RING (evo, ctrl); BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2); OUT_RING (evo, oY << 16 | oX); OUT_RING (evo, oY << 16 | oX); if (update) { nv50_display_flip_stop(crtc); nv50_display_sync(dev); nv50_display_flip_next(crtc, crtc->fb, NULL); } return 0; } int nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct pll_lims pll; uint32_t reg1, reg2; int ret, N1, M1, N2, M2, P; ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll); if (ret) return ret; if (pll.vco2.maxfreq) { ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P); if (ret <= 0) return 0; NV_DEBUG(dev, "pclk 
%d out %d NM1 %d %d NM2 %d %d P %d\n", pclk, ret, N1, M1, N2, M2, P); reg1 = nv_rd32(dev, pll.reg + 4) & 0xff00ff00; reg2 = nv_rd32(dev, pll.reg + 8) & 0x8000ff00; nv_wr32(dev, pll.reg + 0, 0x10000611); nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1); nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); } else if (dev_priv->chipset < NV_C0) { ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P); if (ret <= 0) return 0; NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n", pclk, ret, N1, N2, M1, P); reg1 = nv_rd32(dev, pll.reg + 4) & 0xffc00000; nv_wr32(dev, pll.reg + 0, 0x50000610); nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); nv_wr32(dev, pll.reg + 8, N2); } else { ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P); if (ret <= 0) return 0; NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n", pclk, ret, N1, N2, M1, P); nv_mask(dev, pll.reg + 0x0c, 0x00000000, 0x00000100); nv_wr32(dev, pll.reg + 0x04, (P << 16) | (N1 << 8) | M1); nv_wr32(dev, pll.reg + 0x10, N2 << 16); } return 0; } static void nv50_crtc_destroy(struct drm_crtc *crtc) { struct drm_device *dev; struct nouveau_crtc *nv_crtc; if (!crtc) return; dev = crtc->dev; nv_crtc = nouveau_crtc(crtc); NV_DEBUG_KMS(dev, "\n"); drm_crtc_cleanup(&nv_crtc->base); nouveau_bo_unmap(nv_crtc->lut.nvbo); nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); nouveau_bo_unmap(nv_crtc->cursor.nvbo); nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); kfree(nv_crtc); } int nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t buffer_handle, uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nouveau_bo *cursor = NULL; struct drm_gem_object *gem; int ret = 0, i; if (!buffer_handle) { nv_crtc->cursor.hide(nv_crtc, true); return 0; } if (width != 64 || height != 64) return -EINVAL; gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); if (!gem) return -ENOENT; cursor = 
nouveau_gem_object(gem); ret = nouveau_bo_map(cursor); if (ret) goto out; /* The simple will do for now. */ for (i = 0; i < 64 * 64; i++) nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i)); nouveau_bo_unmap(cursor); nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset); nv_crtc->cursor.show(nv_crtc, true); out: drm_gem_object_unreference_unlocked(gem); return ret; } int nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); nv_crtc->cursor.set_pos(nv_crtc, x, y); return 0; } static void nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size) { int end = (start + size > 256) ? 256 : start + size, i; struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); for (i = start; i < end; i++) { nv_crtc->lut.r[i] = r[i]; nv_crtc->lut.g[i] = g[i]; nv_crtc->lut.b[i] = b[i]; } /* We need to know the depth before we upload, but it's possible to * get called before a framebuffer is bound. 
If this is the case, * mark the lut values as dirty by setting depth==0, and it'll be * uploaded on the first mode_set_base() */ if (!nv_crtc->base.fb) { nv_crtc->lut.depth = 0; return; } nv50_crtc_lut_load(crtc); } static void nv50_crtc_save(struct drm_crtc *crtc) { NV_ERROR(crtc->dev, "!!\n"); } static void nv50_crtc_restore(struct drm_crtc *crtc) { NV_ERROR(crtc->dev, "!!\n"); } static const struct drm_crtc_funcs nv50_crtc_funcs = { .save = nv50_crtc_save, .restore = nv50_crtc_restore, .cursor_set = nv50_crtc_cursor_set, .cursor_move = nv50_crtc_cursor_move, .gamma_set = nv50_crtc_gamma_set, .set_config = drm_crtc_helper_set_config, .page_flip = nouveau_crtc_page_flip, .destroy = nv50_crtc_destroy, }; static void nv50_crtc_dpms(struct drm_crtc *crtc, int mode) { } static void nv50_crtc_prepare(struct drm_crtc *crtc) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct drm_device *dev = crtc->dev; NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); nv50_display_flip_stop(crtc); drm_vblank_pre_modeset(dev, nv_crtc->index); nv50_crtc_blank(nv_crtc, true); } static void nv50_crtc_commit(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); nv50_crtc_blank(nv_crtc, false); drm_vblank_post_modeset(dev, nv_crtc->index); nv50_display_sync(dev); nv50_display_flip_next(crtc, crtc->fb, NULL); } static bool nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } static int nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, struct drm_framebuffer *passed_fb, int x, int y, bool atomic) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct drm_device *dev = nv_crtc->base.dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *evo = nv50_display(dev)->master; struct drm_framebuffer *drm_fb; struct nouveau_framebuffer *fb; int ret; NV_DEBUG_KMS(dev, "index 
%d\n", nv_crtc->index); /* no fb bound */ if (!atomic && !crtc->fb) { NV_DEBUG_KMS(dev, "No FB bound\n"); return 0; } /* If atomic, we want to switch to the fb we were passed, so * now we update pointers to do that. (We don't pin; just * assume we're already pinned and update the base address.) */ if (atomic) { drm_fb = passed_fb; fb = nouveau_framebuffer(passed_fb); } else { drm_fb = crtc->fb; fb = nouveau_framebuffer(crtc->fb); /* If not atomic, we can go ahead and pin, and unpin the * old fb we were passed. */ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); if (ret) return ret; if (passed_fb) { struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); nouveau_bo_unpin(ofb->nvbo); } } nv_crtc->fb.offset = fb->nvbo->bo.offset; nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { ret = RING_SPACE(evo, 2); if (ret) return ret; BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1); OUT_RING (evo, fb->r_dma); } ret = RING_SPACE(evo, 12); if (ret) return ret; BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5); OUT_RING (evo, nv_crtc->fb.offset >> 8); OUT_RING (evo, 0); OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width); OUT_RING (evo, fb->r_pitch); OUT_RING (evo, fb->r_format); BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1); OUT_RING (evo, fb->base.depth == 8 ? 
NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON); BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1); OUT_RING (evo, (y << 16) | x); if (nv_crtc->lut.depth != fb->base.depth) { nv_crtc->lut.depth = fb->base.depth; nv50_crtc_lut_load(crtc); } return 0; } static int nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct nouveau_channel *evo = nv50_display(dev)->master; struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); u32 head = nv_crtc->index * 0x400; u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1; u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1; u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; u32 vblan2e = 0, vblan2s = 1; int ret; /* hw timing description looks like this: * * <sync> <back porch> <---------display---------> <front porch> * ______ * |____________|---------------------------|____________| * * ^ synce ^ blanke ^ blanks ^ active * * interlaced modes also have 2 additional values pointing at the end * and start of the next field's blanking period. 
*/ hactive = mode->htotal; hsynce = mode->hsync_end - mode->hsync_start - 1; hbackp = mode->htotal - mode->hsync_end; hblanke = hsynce + hbackp; hfrontp = mode->hsync_start - mode->hdisplay; hblanks = mode->htotal - hfrontp - 1; vactive = mode->vtotal * vscan / ilace; vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1; vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; vblanke = vsynce + vbackp; vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; vblanks = vactive - vfrontp - 1; if (mode->flags & DRM_MODE_FLAG_INTERLACE) { vblan2e = vactive + vsynce + vbackp; vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); vactive = (vactive * 2) + 1; } ret = RING_SPACE(evo, 18); if (ret == 0) { BEGIN_RING(evo, 0, 0x0804 + head, 2); OUT_RING (evo, 0x00800000 | mode->clock); OUT_RING (evo, (ilace == 2) ? 2 : 0); BEGIN_RING(evo, 0, 0x0810 + head, 6); OUT_RING (evo, 0x00000000); /* border colour */ OUT_RING (evo, (vactive << 16) | hactive); OUT_RING (evo, ( vsynce << 16) | hsynce); OUT_RING (evo, (vblanke << 16) | hblanke); OUT_RING (evo, (vblanks << 16) | hblanks); OUT_RING (evo, (vblan2e << 16) | vblan2s); BEGIN_RING(evo, 0, 0x082c + head, 1); OUT_RING (evo, 0x00000000); BEGIN_RING(evo, 0, 0x0900 + head, 1); OUT_RING (evo, 0x00000311); /* makes sync channel work */ BEGIN_RING(evo, 0, 0x08c8 + head, 1); OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay); BEGIN_RING(evo, 0, 0x08d4 + head, 1); OUT_RING (evo, 0x00000000); /* screen position */ } nv_crtc->set_dither(nv_crtc, false); nv_crtc->set_scale(nv_crtc, false); nv_crtc->set_color_vibrance(nv_crtc, false); return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false); } static int nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { int ret; nv50_display_flip_stop(crtc); ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false); if (ret) return ret; ret = nv50_display_sync(crtc->dev); if (ret) return ret; return 
nv50_display_flip_next(crtc, crtc->fb, NULL); } static int nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, enum mode_set_atomic state) { int ret; nv50_display_flip_stop(crtc); ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true); if (ret) return ret; return nv50_display_sync(crtc->dev); } static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = { .dpms = nv50_crtc_dpms, .prepare = nv50_crtc_prepare, .commit = nv50_crtc_commit, .mode_fixup = nv50_crtc_mode_fixup, .mode_set = nv50_crtc_mode_set, .mode_set_base = nv50_crtc_mode_set_base, .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, .load_lut = nv50_crtc_lut_load, }; int nv50_crtc_create(struct drm_device *dev, int index) { struct nouveau_crtc *nv_crtc = NULL; int ret, i; NV_DEBUG_KMS(dev, "\n"); nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); if (!nv_crtc) return -ENOMEM; nv_crtc->color_vibrance = 50; nv_crtc->vibrant_hue = 0; /* Default CLUT parameters, will be activated on the hw upon * first mode set. 
*/ for (i = 0; i < 256; i++) { nv_crtc->lut.r[i] = i << 8; nv_crtc->lut.g[i] = i << 8; nv_crtc->lut.b[i] = i << 8; } nv_crtc->lut.depth = 0; ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM, 0, 0x0000, &nv_crtc->lut.nvbo); if (!ret) { ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); if (!ret) ret = nouveau_bo_map(nv_crtc->lut.nvbo); if (ret) nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); } if (ret) { kfree(nv_crtc); return ret; } nv_crtc->index = index; /* set function pointers */ nv_crtc->set_dither = nv50_crtc_set_dither; nv_crtc->set_scale = nv50_crtc_set_scale; nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance; drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs); drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs); drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, 0, 0x0000, &nv_crtc->cursor.nvbo); if (!ret) { ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); if (!ret) ret = nouveau_bo_map(nv_crtc->cursor.nvbo); if (ret) nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); } nv50_cursor_init(nv_crtc); return 0; }
gpl-2.0
GAXUSXX/GaXusKernel
arch/frv/mm/kmap.c
9097
1295
/* kmap.c: ioremapping handlers * * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - Derived from arch/m68k/mm/kmap.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/mm.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/types.h> #include <linux/vmalloc.h> #include <asm/setup.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/io.h> #undef DEBUG /*****************************************************************************/ /* * Map some physical address range into the kernel address space. */ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag) { return (void __iomem *)physaddr; } /* * Unmap a ioremap()ed region again */ void iounmap(void volatile __iomem *addr) { } /* * Set new cache mode for some kernel address space. * The caller must push data for that range itself, if such data may already * be in the cache. */ void kernel_set_cachemode(void *addr, unsigned long size, int cmode) { }
gpl-2.0
fhasovic/LG-G2-D802-Kernel
arch/frv/mm/kmap.c
9097
1295
/* kmap.c: ioremapping handlers * * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - Derived from arch/m68k/mm/kmap.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/mm.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/types.h> #include <linux/vmalloc.h> #include <asm/setup.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/io.h> #undef DEBUG /*****************************************************************************/ /* * Map some physical address range into the kernel address space. */ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag) { return (void __iomem *)physaddr; } /* * Unmap a ioremap()ed region again */ void iounmap(void volatile __iomem *addr) { } /* * Set new cache mode for some kernel address space. * The caller must push data for that range itself, if such data may already * be in the cache. */ void kernel_set_cachemode(void *addr, unsigned long size, int cmode) { }
gpl-2.0
wimpknocker/lge-kernel-lproj
drivers/leds/leds-locomo.c
9353
2370
/* * linux/drivers/leds/leds-locomo.c * * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/leds.h> #include <mach/hardware.h> #include <asm/hardware/locomo.h> static void locomoled_brightness_set(struct led_classdev *led_cdev, enum led_brightness value, int offset) { struct locomo_dev *locomo_dev = LOCOMO_DEV(led_cdev->dev->parent); unsigned long flags; local_irq_save(flags); if (value) locomo_writel(LOCOMO_LPT_TOFH, locomo_dev->mapbase + offset); else locomo_writel(LOCOMO_LPT_TOFL, locomo_dev->mapbase + offset); local_irq_restore(flags); } static void locomoled_brightness_set0(struct led_classdev *led_cdev, enum led_brightness value) { locomoled_brightness_set(led_cdev, value, LOCOMO_LPT0); } static void locomoled_brightness_set1(struct led_classdev *led_cdev, enum led_brightness value) { locomoled_brightness_set(led_cdev, value, LOCOMO_LPT1); } static struct led_classdev locomo_led0 = { .name = "locomo:amber:charge", .default_trigger = "main-battery-charging", .brightness_set = locomoled_brightness_set0, }; static struct led_classdev locomo_led1 = { .name = "locomo:green:mail", .default_trigger = "nand-disk", .brightness_set = locomoled_brightness_set1, }; static int locomoled_probe(struct locomo_dev *ldev) { int ret; ret = led_classdev_register(&ldev->dev, &locomo_led0); if (ret < 0) return ret; ret = led_classdev_register(&ldev->dev, &locomo_led1); if (ret < 0) led_classdev_unregister(&locomo_led0); return ret; } static int locomoled_remove(struct locomo_dev *dev) { led_classdev_unregister(&locomo_led0); led_classdev_unregister(&locomo_led1); return 0; } static struct locomo_driver locomoled_driver = { .drv = { .name = "locomoled" }, .devid = LOCOMO_DEVID_LED, 
.probe = locomoled_probe, .remove = locomoled_remove, }; static int __init locomoled_init(void) { return locomo_driver_register(&locomoled_driver); } module_init(locomoled_init); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>"); MODULE_DESCRIPTION("Locomo LED driver"); MODULE_LICENSE("GPL");
gpl-2.0
BrickedGrouperCandy/kernel_asus_grouper
drivers/net/skfp/pcmplc.c
12681
49275
/****************************************************************************** * * (C)Copyright 1998,1999 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. * * See the file "skfddi.c" for further information. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The information in this file is provided "AS IS" without warranty. * ******************************************************************************/ /* PCM Physical Connection Management */ /* * Hardware independent state machine implemantation * The following external SMT functions are referenced : * * queue_event() * smt_timer_start() * smt_timer_stop() * * The following external HW dependent functions are referenced : * sm_pm_control() * sm_ph_linestate() * sm_pm_ls_latch() * * The following HW dependent events are required : * PC_QLS * PC_ILS * PC_HLS * PC_MLS * PC_NSE * PC_LEM * */ #include "h/types.h" #include "h/fddi.h" #include "h/smc.h" #include "h/supern_2.h" #define KERNEL #include "h/smtstate.h" #ifndef lint static const char ID_sccs[] = "@(#)pcmplc.c 2.55 99/08/05 (C) SK " ; #endif #ifdef FDDI_MIB extern int snmp_fddi_trap( #ifdef ANSIC struct s_smc * smc, int type, int index #endif ); #endif #ifdef CONCENTRATOR extern int plc_is_installed( #ifdef ANSIC struct s_smc *smc , int p #endif ) ; #endif /* * FSM Macros */ #define AFLAG (0x20) #define GO_STATE(x) (mib->fddiPORTPCMState = (x)|AFLAG) #define ACTIONS_DONE() (mib->fddiPORTPCMState &= ~AFLAG) #define ACTIONS(x) (x|AFLAG) /* * PCM states */ #define PC0_OFF 0 #define PC1_BREAK 1 #define PC2_TRACE 2 #define PC3_CONNECT 3 #define PC4_NEXT 4 #define PC5_SIGNAL 5 #define PC6_JOIN 6 #define PC7_VERIFY 7 #define PC8_ACTIVE 8 #define PC9_MAINT 9 #ifdef DEBUG /* * symbolic state names */ static const char * const pcm_states[] = 
{ "PC0_OFF","PC1_BREAK","PC2_TRACE","PC3_CONNECT","PC4_NEXT", "PC5_SIGNAL","PC6_JOIN","PC7_VERIFY","PC8_ACTIVE","PC9_MAINT" } ; /* * symbolic event names */ static const char * const pcm_events[] = { "NONE","PC_START","PC_STOP","PC_LOOP","PC_JOIN","PC_SIGNAL", "PC_REJECT","PC_MAINT","PC_TRACE","PC_PDR", "PC_ENABLE","PC_DISABLE", "PC_QLS","PC_ILS","PC_MLS","PC_HLS","PC_LS_PDR","PC_LS_NONE", "PC_TIMEOUT_TB_MAX","PC_TIMEOUT_TB_MIN", "PC_TIMEOUT_C_MIN","PC_TIMEOUT_T_OUT", "PC_TIMEOUT_TL_MIN","PC_TIMEOUT_T_NEXT","PC_TIMEOUT_LCT", "PC_NSE","PC_LEM" } ; #endif #ifdef MOT_ELM /* * PCL-S control register * this register in the PLC-S controls the scrambling parameters */ #define PLCS_CONTROL_C_U 0 #define PLCS_CONTROL_C_S (PL_C_SDOFF_ENABLE | PL_C_SDON_ENABLE | \ PL_C_CIPHER_ENABLE) #define PLCS_FASSERT_U 0 #define PLCS_FASSERT_S 0xFd76 /* 52.0 us */ #define PLCS_FDEASSERT_U 0 #define PLCS_FDEASSERT_S 0 #else /* nMOT_ELM */ /* * PCL-S control register * this register in the PLC-S controls the scrambling parameters * can be patched for ANSI compliance if standard changes */ static const u_char plcs_control_c_u[17] = "PLC_CNTRL_C_U=\0\0" ; static const u_char plcs_control_c_s[17] = "PLC_CNTRL_C_S=\01\02" ; #define PLCS_CONTROL_C_U (plcs_control_c_u[14] | (plcs_control_c_u[15]<<8)) #define PLCS_CONTROL_C_S (plcs_control_c_s[14] | (plcs_control_c_s[15]<<8)) #endif /* nMOT_ELM */ /* * external vars */ /* struct definition see 'cmtdef.h' (also used by CFM) */ #define PS_OFF 0 #define PS_BIT3 1 #define PS_BIT4 2 #define PS_BIT7 3 #define PS_LCT 4 #define PS_BIT8 5 #define PS_JOIN 6 #define PS_ACTIVE 7 #define LCT_LEM_MAX 255 /* * PLC timing parameter */ #define PLC_MS(m) ((int)((0x10000L-(m*100000L/2048)))) #define SLOW_TL_MIN PLC_MS(6) #define SLOW_C_MIN PLC_MS(10) static const struct plt { int timer ; /* relative plc timer address */ int para ; /* default timing parameters */ } pltm[] = { { PL_C_MIN, SLOW_C_MIN }, /* min t. 
					   to remain Connect State */
	{ PL_TL_MIN,	SLOW_TL_MIN },	/* min t. to transmit a Line State */
	{ PL_TB_MIN,	TP_TB_MIN },	/* min break time */
	{ PL_T_OUT,	TP_T_OUT },	/* Signaling timeout */
	{ PL_LC_LENGTH,	TP_LC_LENGTH },	/* Link Confidence Test Time */
	{ PL_T_SCRUB,	TP_T_SCRUB },	/* Scrub Time == MAC TVX time ! */
	{ PL_NS_MAX,	TP_NS_MAX },	/* max t. that noise is tolerated */
	{ 0,0 }				/* timer == 0 terminates the table
					 * (see the pltm loop in plc_init()) */
} ;

/*
 * interrupt mask
 */
#ifdef	SUPERNET_3
/*
 * Do we need the EBUF error during signaling, too, to detect SUPERNET_3
 * PLL bug?
 */
static const int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
			PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;
#else	/* SUPERNET_3 */
/*
 * We do NOT need the elasticity buffer error during signaling.
 *
 * NOTE(review): this variant is non-const while the SUPERNET_3 variant
 * above and plc_imsk_act below are const; nothing visible in this file
 * writes it — looks like it could be const too, verify before changing.
 */
static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
			PL_PCM_ENABLED | PL_SELF_TEST ;
#endif	/* SUPERNET_3 */
/* interrupt mask programmed when the port enters the ACTIVE state */
static const int plc_imsk_act = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
			PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;

/* internal functions */
static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd);
static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy);
static void pc_tcode_actions(struct s_smc *smc, const int bit,
			     struct s_phy *phy);
static void reset_lem_struct(struct s_phy *phy);
static void plc_init(struct s_smc *smc, int p);
static void sm_ph_lem_start(struct s_smc *smc, int np, int threshold);
static void sm_ph_lem_stop(struct s_smc *smc, int np);
static void sm_ph_linestate(struct s_smc *smc, int phy, int ls);
static void real_init_plc(struct s_smc *smc);

/*
 * SMT timer interface
 *	start PCM timer 0
 *
 * value - timeout handed to smt_timer_start()
 * event - PCM event raised on expiry; encoded together with the PHY
 *	   index as EV_TOKEN(EVENT_PCM+phy->np,event)
 * phy   - PHY owning the timer
 */
static void start_pcm_timer0(struct s_smc *smc, u_long value, int event,
			     struct s_phy *phy)
{
	phy->timer0_exp = FALSE ;	/* clear timer event flag */

	smt_timer_start(smc,&phy->pcm_timer0,value,
		EV_TOKEN(EVENT_PCM+phy->np,event)) ;
}
/*
 * SMT timer interface
 *	stop PCM timer 0 (no-op if the timer is not currently active)
 */
static void stop_pcm_timer0(struct s_smc *smc, struct s_phy *phy)
{
	if (phy->pcm_timer0.tm_active)
smt_timer_stop(smc,&phy->pcm_timer0) ; } /* init PCM state machine (called by driver) clear all PCM vars and flags */ void pcm_init(struct s_smc *smc) { int i ; int np ; struct s_phy *phy ; struct fddi_mib_p *mib ; for (np = 0,phy = smc->y ; np < NUMPHYS ; np++,phy++) { /* Indicates the type of PHY being used */ mib = phy->mib ; mib->fddiPORTPCMState = ACTIONS(PC0_OFF) ; phy->np = np ; switch (smc->s.sas) { #ifdef CONCENTRATOR case SMT_SAS : mib->fddiPORTMy_Type = (np == PS) ? TS : TM ; break ; case SMT_DAS : mib->fddiPORTMy_Type = (np == PA) ? TA : (np == PB) ? TB : TM ; break ; case SMT_NAC : mib->fddiPORTMy_Type = TM ; break; #else case SMT_SAS : mib->fddiPORTMy_Type = (np == PS) ? TS : TNONE ; mib->fddiPORTHardwarePresent = (np == PS) ? TRUE : FALSE ; #ifndef SUPERNET_3 smc->y[PA].mib->fddiPORTPCMState = PC0_OFF ; #else smc->y[PB].mib->fddiPORTPCMState = PC0_OFF ; #endif break ; case SMT_DAS : mib->fddiPORTMy_Type = (np == PB) ? TB : TA ; break ; #endif } /* * set PMD-type */ phy->pmd_scramble = 0 ; switch (phy->pmd_type[PMD_SK_PMD]) { case 'P' : mib->fddiPORTPMDClass = MIB_PMDCLASS_MULTI ; break ; case 'L' : mib->fddiPORTPMDClass = MIB_PMDCLASS_LCF ; break ; case 'D' : mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ; break ; case 'S' : mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ; phy->pmd_scramble = TRUE ; break ; case 'U' : mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ; phy->pmd_scramble = TRUE ; break ; case '1' : mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE1 ; break ; case '2' : mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE2 ; break ; case '3' : mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE2 ; break ; case '4' : mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE1 ; break ; case 'H' : mib->fddiPORTPMDClass = MIB_PMDCLASS_UNKNOWN ; break ; case 'I' : mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ; break ; case 'G' : mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ; break ; default: mib->fddiPORTPMDClass = MIB_PMDCLASS_UNKNOWN ; break ; } /* * A and B port can be on primary and secondary path */ 
switch (mib->fddiPORTMy_Type) { case TA : mib->fddiPORTAvailablePaths |= MIB_PATH_S ; mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ; mib->fddiPORTRequestedPaths[2] = MIB_P_PATH_LOCAL | MIB_P_PATH_CON_ALTER | MIB_P_PATH_SEC_PREFER ; mib->fddiPORTRequestedPaths[3] = MIB_P_PATH_LOCAL | MIB_P_PATH_CON_ALTER | MIB_P_PATH_SEC_PREFER | MIB_P_PATH_THRU ; break ; case TB : mib->fddiPORTAvailablePaths |= MIB_PATH_S ; mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ; mib->fddiPORTRequestedPaths[2] = MIB_P_PATH_LOCAL | MIB_P_PATH_PRIM_PREFER ; mib->fddiPORTRequestedPaths[3] = MIB_P_PATH_LOCAL | MIB_P_PATH_PRIM_PREFER | MIB_P_PATH_CON_PREFER | MIB_P_PATH_THRU ; break ; case TS : mib->fddiPORTAvailablePaths |= MIB_PATH_S ; mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ; mib->fddiPORTRequestedPaths[2] = MIB_P_PATH_LOCAL | MIB_P_PATH_CON_ALTER | MIB_P_PATH_PRIM_PREFER ; mib->fddiPORTRequestedPaths[3] = MIB_P_PATH_LOCAL | MIB_P_PATH_CON_ALTER | MIB_P_PATH_PRIM_PREFER ; break ; case TM : mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ; mib->fddiPORTRequestedPaths[2] = MIB_P_PATH_LOCAL | MIB_P_PATH_SEC_ALTER | MIB_P_PATH_PRIM_ALTER ; mib->fddiPORTRequestedPaths[3] = 0 ; break ; } phy->pc_lem_fail = FALSE ; mib->fddiPORTPCMStateX = mib->fddiPORTPCMState ; mib->fddiPORTLCTFail_Ct = 0 ; mib->fddiPORTBS_Flag = 0 ; mib->fddiPORTCurrentPath = MIB_PATH_ISOLATED ; mib->fddiPORTNeighborType = TNONE ; phy->ls_flag = 0 ; phy->rc_flag = 0 ; phy->tc_flag = 0 ; phy->td_flag = 0 ; if (np >= PM) phy->phy_name = '0' + np - PM ; else phy->phy_name = 'A' + np ; phy->wc_flag = FALSE ; /* set by SMT */ memset((char *)&phy->lem,0,sizeof(struct lem_counter)) ; reset_lem_struct(phy) ; memset((char *)&phy->plc,0,sizeof(struct s_plc)) ; phy->plc.p_state = PS_OFF ; for (i = 0 ; i < NUMBITS ; i++) { phy->t_next[i] = 0 ; } } real_init_plc(smc) ; } void init_plc(struct s_smc *smc) { SK_UNUSED(smc) ; /* * dummy * this is an obsolete public entry point that has to remain * for compat. 
It is used by various drivers. * the work is now done in real_init_plc() * which is called from pcm_init() ; */ } static void real_init_plc(struct s_smc *smc) { int p ; for (p = 0 ; p < NUMPHYS ; p++) plc_init(smc,p) ; } static void plc_init(struct s_smc *smc, int p) { int i ; #ifndef MOT_ELM int rev ; /* Revision of PLC-x */ #endif /* MOT_ELM */ /* transit PCM state machine to MAINT state */ outpw(PLC(p,PL_CNTRL_B),0) ; outpw(PLC(p,PL_CNTRL_B),PL_PCM_STOP) ; outpw(PLC(p,PL_CNTRL_A),0) ; /* * if PLC-S then set control register C */ #ifndef MOT_ELM rev = inpw(PLC(p,PL_STATUS_A)) & PLC_REV_MASK ; if (rev != PLC_REVISION_A) #endif /* MOT_ELM */ { if (smc->y[p].pmd_scramble) { outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_S) ; #ifdef MOT_ELM outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_S) ; outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_S) ; #endif /* MOT_ELM */ } else { outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_U) ; #ifdef MOT_ELM outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_U) ; outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_U) ; #endif /* MOT_ELM */ } } /* * set timer register */ for ( i = 0 ; pltm[i].timer; i++) /* set timer parameter reg */ outpw(PLC(p,pltm[i].timer),pltm[i].para) ; (void)inpw(PLC(p,PL_INTR_EVENT)) ; /* clear interrupt event reg */ plc_clear_irq(smc,p) ; outpw(PLC(p,PL_INTR_MASK),plc_imsk_na); /* enable non active irq's */ /* * if PCM is configured for class s, it will NOT go to the * REMOVE state if offline (page 3-36;) * in the concentrator, all inactive PHYS always must be in * the remove state * there's no real need to use this feature at all .. 
*/ #ifndef CONCENTRATOR if ((smc->s.sas == SMT_SAS) && (p == PS)) { outpw(PLC(p,PL_CNTRL_B),PL_CLASS_S) ; } #endif } /* * control PCM state machine */ static void plc_go_state(struct s_smc *smc, int p, int state) { HW_PTR port ; int val ; SK_UNUSED(smc) ; port = (HW_PTR) (PLC(p,PL_CNTRL_B)) ; val = inpw(port) & ~(PL_PCM_CNTRL | PL_MAINT) ; outpw(port,val) ; outpw(port,val | state) ; } /* * read current line state (called by ECM & PCM) */ int sm_pm_get_ls(struct s_smc *smc, int phy) { int state ; #ifdef CONCENTRATOR if (!plc_is_installed(smc,phy)) return PC_QLS; #endif state = inpw(PLC(phy,PL_STATUS_A)) & PL_LINE_ST ; switch(state) { case PL_L_QLS: state = PC_QLS ; break ; case PL_L_MLS: state = PC_MLS ; break ; case PL_L_HLS: state = PC_HLS ; break ; case PL_L_ILS4: case PL_L_ILS16: state = PC_ILS ; break ; case PL_L_ALS: state = PC_LS_PDR ; break ; default : state = PC_LS_NONE ; } return state; } static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len) { int np = phy->np ; /* PHY index */ int n ; int i ; SK_UNUSED(smc) ; /* create bit vector */ for (i = len-1,n = 0 ; i >= 0 ; i--) { n = (n<<1) | phy->t_val[phy->bitn+i] ; } if (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL) { #if 0 printf("PL_PCM_SIGNAL is set\n") ; #endif return 1; } /* write bit[n] & length = 1 to regs */ outpw(PLC(np,PL_VECTOR_LEN),len-1) ; /* len=nr-1 */ outpw(PLC(np,PL_XMIT_VECTOR),n) ; #ifdef DEBUG #if 1 #ifdef DEBUG_BRD if (smc->debug.d_plc & 0x80) #else if (debug.d_plc & 0x80) #endif printf("SIGNALING bit %d .. 
%d\n",phy->bitn,phy->bitn+len-1) ; #endif #endif return 0; } /* * config plc muxes */ void plc_config_mux(struct s_smc *smc, int mux) { if (smc->s.sas != SMT_DAS) return ; if (mux == MUX_WRAPB) { SETMASK(PLC(PA,PL_CNTRL_B),PL_CONFIG_CNTRL,PL_CONFIG_CNTRL) ; SETMASK(PLC(PA,PL_CNTRL_A),PL_SC_REM_LOOP,PL_SC_REM_LOOP) ; } else { CLEAR(PLC(PA,PL_CNTRL_B),PL_CONFIG_CNTRL) ; CLEAR(PLC(PA,PL_CNTRL_A),PL_SC_REM_LOOP) ; } CLEAR(PLC(PB,PL_CNTRL_B),PL_CONFIG_CNTRL) ; CLEAR(PLC(PB,PL_CNTRL_A),PL_SC_REM_LOOP) ; } /* PCM state machine called by dispatcher & fddi_init() (driver) do display state change process event until SM is stable */ void pcm(struct s_smc *smc, const int np, int event) { int state ; int oldstate ; struct s_phy *phy ; struct fddi_mib_p *mib ; #ifndef CONCENTRATOR /* * ignore 2nd PHY if SAS */ if ((np != PS) && (smc->s.sas == SMT_SAS)) return ; #endif phy = &smc->y[np] ; mib = phy->mib ; oldstate = mib->fddiPORTPCMState ; do { DB_PCM("PCM %c: state %s", phy->phy_name, (mib->fddiPORTPCMState & AFLAG) ? 
"ACTIONS " : "") ; DB_PCM("%s, event %s\n", pcm_states[mib->fddiPORTPCMState & ~AFLAG], pcm_events[event]) ; state = mib->fddiPORTPCMState ; pcm_fsm(smc,phy,event) ; event = 0 ; } while (state != mib->fddiPORTPCMState) ; /* * because the PLC does the bit signaling for us, * we're always in SIGNAL state * the MIB want's to see CONNECT * we therefore fake an entry in the MIB */ if (state == PC5_SIGNAL) mib->fddiPORTPCMStateX = PC3_CONNECT ; else mib->fddiPORTPCMStateX = state ; #ifndef SLIM_SMT /* * path change */ if ( mib->fddiPORTPCMState != oldstate && ((oldstate == PC8_ACTIVE) || (mib->fddiPORTPCMState == PC8_ACTIVE))) { smt_srf_event(smc,SMT_EVENT_PORT_PATH_CHANGE, (int) (INDEX_PORT+ phy->np),0) ; } #endif #ifdef FDDI_MIB /* check whether a snmp-trap has to be sent */ if ( mib->fddiPORTPCMState != oldstate ) { /* a real state change took place */ DB_SNMP ("PCM from %d to %d\n", oldstate, mib->fddiPORTPCMState); if ( mib->fddiPORTPCMState == PC0_OFF ) { /* send first trap */ snmp_fddi_trap (smc, 1, (int) mib->fddiPORTIndex ); } else if ( oldstate == PC0_OFF ) { /* send second trap */ snmp_fddi_trap (smc, 2, (int) mib->fddiPORTIndex ); } else if ( mib->fddiPORTPCMState != PC2_TRACE && oldstate == PC8_ACTIVE ) { /* send third trap */ snmp_fddi_trap (smc, 3, (int) mib->fddiPORTIndex ); } else if ( mib->fddiPORTPCMState == PC8_ACTIVE ) { /* send fourth trap */ snmp_fddi_trap (smc, 4, (int) mib->fddiPORTIndex ); } } #endif pcm_state_change(smc,np,state) ; } /* * PCM state machine */ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd) { int i ; int np = phy->np ; /* PHY index */ struct s_plc *plc ; struct fddi_mib_p *mib ; #ifndef MOT_ELM u_short plc_rev ; /* Revision of the plc */ #endif /* nMOT_ELM */ plc = &phy->plc ; mib = phy->mib ; /* * general transitions independent of state */ switch (cmd) { case PC_STOP : /*PC00-PC80*/ if (mib->fddiPORTPCMState != PC9_MAINT) { GO_STATE(PC0_OFF) ; AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) 
FDDI_PORT_EVENT, (u_long) FDDI_PORT_STOP, smt_get_port_event_word(smc)); } return ; case PC_START : /*PC01-PC81*/ if (mib->fddiPORTPCMState != PC9_MAINT) GO_STATE(PC1_BREAK) ; return ; case PC_DISABLE : /* PC09-PC99 */ GO_STATE(PC9_MAINT) ; AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) FDDI_PORT_EVENT, (u_long) FDDI_PORT_DISABLED, smt_get_port_event_word(smc)); return ; case PC_TIMEOUT_LCT : /* if long or extended LCT */ stop_pcm_timer0(smc,phy) ; CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ; /* end of LCT is indicate by PCM_CODE (initiate PCM event) */ return ; } switch(mib->fddiPORTPCMState) { case ACTIONS(PC0_OFF) : stop_pcm_timer0(smc,phy) ; outpw(PLC(np,PL_CNTRL_A),0) ; CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ; CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ; sm_ph_lem_stop(smc,np) ; /* disable LEM */ phy->cf_loop = FALSE ; phy->cf_join = FALSE ; queue_event(smc,EVENT_CFM,CF_JOIN+np) ; plc_go_state(smc,np,PL_PCM_STOP) ; mib->fddiPORTConnectState = PCM_DISABLED ; ACTIONS_DONE() ; break ; case PC0_OFF: /*PC09*/ if (cmd == PC_MAINT) { GO_STATE(PC9_MAINT) ; break ; } break ; case ACTIONS(PC1_BREAK) : /* Stop the LCT timer if we came from Signal state */ stop_pcm_timer0(smc,phy) ; ACTIONS_DONE() ; plc_go_state(smc,np,0) ; CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ; CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ; sm_ph_lem_stop(smc,np) ; /* disable LEM */ /* * if vector is already loaded, go to OFF to clear PCM_SIGNAL */ #if 0 if (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL) { plc_go_state(smc,np,PL_PCM_STOP) ; /* TB_MIN ? */ } #endif /* * Go to OFF state in any case. 
*/ plc_go_state(smc,np,PL_PCM_STOP) ; if (mib->fddiPORTPC_Withhold == PC_WH_NONE) mib->fddiPORTConnectState = PCM_CONNECTING ; phy->cf_loop = FALSE ; phy->cf_join = FALSE ; queue_event(smc,EVENT_CFM,CF_JOIN+np) ; phy->ls_flag = FALSE ; phy->pc_mode = PM_NONE ; /* needed by CFM */ phy->bitn = 0 ; /* bit signaling start bit */ for (i = 0 ; i < 3 ; i++) pc_tcode_actions(smc,i,phy) ; /* Set the non-active interrupt mask register */ outpw(PLC(np,PL_INTR_MASK),plc_imsk_na) ; /* * If the LCT was stopped. There might be a * PCM_CODE interrupt event present. * This must be cleared. */ (void)inpw(PLC(np,PL_INTR_EVENT)) ; #ifndef MOT_ELM /* Get the plc revision for revision dependent code */ plc_rev = inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK ; if (plc_rev != PLC_REV_SN3) #endif /* MOT_ELM */ { /* * No supernet III PLC, so set Xmit verctor and * length BEFORE starting the state machine. */ if (plc_send_bits(smc,phy,3)) { return ; } } /* * Now give the Start command. * - The start command shall be done before setting the bits * to be signaled. (In PLC-S description and PLCS in SN3. * - The start command shall be issued AFTER setting the * XMIT vector and the XMIT length register. * * We do it exactly according this specs for the old PLC and * the new PLCS inside the SN3. * For the usual PLCS we try it the way it is done for the * old PLC and set the XMIT registers again, if the PLC is * not in SIGNAL state. This is done according to an PLCS * errata workaround. */ plc_go_state(smc,np,PL_PCM_START) ; /* * workaround for PLC-S eng. sample errata */ #ifdef MOT_ELM if (!(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL)) #else /* nMOT_ELM */ if (((inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK) != PLC_REVISION_A) && !(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL)) #endif /* nMOT_ELM */ { /* * Set register again (PLCS errata) or the first time * (new SN3 PLCS). 
*/ (void) plc_send_bits(smc,phy,3) ; } /* * end of workaround */ GO_STATE(PC5_SIGNAL) ; plc->p_state = PS_BIT3 ; plc->p_bits = 3 ; plc->p_start = 0 ; break ; case PC1_BREAK : break ; case ACTIONS(PC2_TRACE) : plc_go_state(smc,np,PL_PCM_TRACE) ; ACTIONS_DONE() ; break ; case PC2_TRACE : break ; case PC3_CONNECT : /* these states are done by hardware */ case PC4_NEXT : break ; case ACTIONS(PC5_SIGNAL) : ACTIONS_DONE() ; case PC5_SIGNAL : if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT)) break ; switch (plc->p_state) { case PS_BIT3 : for (i = 0 ; i <= 2 ; i++) pc_rcode_actions(smc,i,phy) ; pc_tcode_actions(smc,3,phy) ; plc->p_state = PS_BIT4 ; plc->p_bits = 1 ; plc->p_start = 3 ; phy->bitn = 3 ; if (plc_send_bits(smc,phy,1)) { return ; } break ; case PS_BIT4 : pc_rcode_actions(smc,3,phy) ; for (i = 4 ; i <= 6 ; i++) pc_tcode_actions(smc,i,phy) ; plc->p_state = PS_BIT7 ; plc->p_bits = 3 ; plc->p_start = 4 ; phy->bitn = 4 ; if (plc_send_bits(smc,phy,3)) { return ; } break ; case PS_BIT7 : for (i = 3 ; i <= 6 ; i++) pc_rcode_actions(smc,i,phy) ; plc->p_state = PS_LCT ; plc->p_bits = 0 ; plc->p_start = 7 ; phy->bitn = 7 ; sm_ph_lem_start(smc,np,(int)smc->s.lct_short) ; /* enable LEM */ /* start LCT */ i = inpw(PLC(np,PL_CNTRL_B)) & ~PL_PC_LOOP ; outpw(PLC(np,PL_CNTRL_B),i) ; /* must be cleared */ outpw(PLC(np,PL_CNTRL_B),i | PL_RLBP) ; break ; case PS_LCT : /* check for local LCT failure */ pc_tcode_actions(smc,7,phy) ; /* * set tval[7] */ plc->p_state = PS_BIT8 ; plc->p_bits = 1 ; plc->p_start = 7 ; phy->bitn = 7 ; if (plc_send_bits(smc,phy,1)) { return ; } break ; case PS_BIT8 : /* check for remote LCT failure */ pc_rcode_actions(smc,7,phy) ; if (phy->t_val[7] || phy->r_val[7]) { plc_go_state(smc,np,PL_PCM_STOP) ; GO_STATE(PC1_BREAK) ; break ; } for (i = 8 ; i <= 9 ; i++) pc_tcode_actions(smc,i,phy) ; plc->p_state = PS_JOIN ; plc->p_bits = 2 ; plc->p_start = 8 ; phy->bitn = 8 ; if (plc_send_bits(smc,phy,2)) { return ; } break ; case PS_JOIN : for (i = 8 ; i <= 9 ; i++) 
pc_rcode_actions(smc,i,phy) ; plc->p_state = PS_ACTIVE ; GO_STATE(PC6_JOIN) ; break ; } break ; case ACTIONS(PC6_JOIN) : /* * prevent mux error when going from WRAP_A to WRAP_B */ if (smc->s.sas == SMT_DAS && np == PB && (smc->y[PA].pc_mode == PM_TREE || smc->y[PB].pc_mode == PM_TREE)) { SETMASK(PLC(np,PL_CNTRL_A), PL_SC_REM_LOOP,PL_SC_REM_LOOP) ; SETMASK(PLC(np,PL_CNTRL_B), PL_CONFIG_CNTRL,PL_CONFIG_CNTRL) ; } SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ; SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ; ACTIONS_DONE() ; cmd = 0 ; /* fall thru */ case PC6_JOIN : switch (plc->p_state) { case PS_ACTIVE: /*PC88b*/ if (!phy->cf_join) { phy->cf_join = TRUE ; queue_event(smc,EVENT_CFM,CF_JOIN+np) ; } if (cmd == PC_JOIN) GO_STATE(PC8_ACTIVE) ; /*PC82*/ if (cmd == PC_TRACE) { GO_STATE(PC2_TRACE) ; break ; } break ; } break ; case PC7_VERIFY : break ; case ACTIONS(PC8_ACTIVE) : /* * start LEM for SMT */ sm_ph_lem_start(smc,(int)phy->np,LCT_LEM_MAX) ; phy->tr_flag = FALSE ; mib->fddiPORTConnectState = PCM_ACTIVE ; /* Set the active interrupt mask register */ outpw(PLC(np,PL_INTR_MASK),plc_imsk_act) ; ACTIONS_DONE() ; break ; case PC8_ACTIVE : /*PC81 is done by PL_TNE_EXPIRED irq */ /*PC82*/ if (cmd == PC_TRACE) { GO_STATE(PC2_TRACE) ; break ; } /*PC88c: is done by TRACE_PROP irq */ break ; case ACTIONS(PC9_MAINT) : stop_pcm_timer0(smc,phy) ; CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ; CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ; CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ; /* disable LEM int. 
							 */
		sm_ph_lem_stop(smc,np) ;	/* disable LEM */
		phy->cf_loop = FALSE ;
		phy->cf_join = FALSE ;
		queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
		plc_go_state(smc,np,PL_PCM_STOP) ;
		mib->fddiPORTConnectState = PCM_DISABLED ;
		SETMASK(PLC(np,PL_CNTRL_B),PL_MAINT,PL_MAINT) ;
		/* force the administratively configured maintenance
		 * line state onto the PHY output */
		sm_ph_linestate(smc,np,(int) MIB2LS(mib->fddiPORTMaint_LS)) ;
		outpw(PLC(np,PL_CNTRL_A),PL_SC_BYPASS) ;
		ACTIONS_DONE() ;
		break ;
	case PC9_MAINT :
		DB_PCMN(1,"PCM %c : MAINT\n",phy->phy_name,0) ;
		/*PC90*/
		if (cmd == PC_ENABLE) {
			GO_STATE(PC0_OFF) ;
			break ;
		}
		break ;

	default:
		SMT_PANIC(smc,SMT_E0118, SMT_E0118_MSG) ;
		break ;
	}
}

/*
 * force line state on a PHY output (only in MAINT state)
 *
 * phy - PHY index
 * ls  - line state to force: PC_QLS, PC_MLS, PC_HLS, PC_ILS or
 *	 PC_LS_PDR (repeat filter); any other value acts like PC_ILS
 */
static void sm_ph_linestate(struct s_smc *smc, int phy, int ls)
{
	int	cntrl ;

	SK_UNUSED(smc) ;

	/* keep the PCM stopped and in maintenance mode while forcing */
	cntrl = (inpw(PLC(phy,PL_CNTRL_B)) & ~PL_MAINT_LS) |
						PL_PCM_STOP | PL_MAINT ;
	switch(ls) {
	case PC_QLS: 		/* Force Quiet */
		cntrl |= PL_M_QUI0 ;
		break ;
	case PC_MLS: 		/* Force Master */
		cntrl |= PL_M_MASTR ;
		break ;
	case PC_HLS: 		/* Force Halt */
		cntrl |= PL_M_HALT ;
		break ;
	default :
	case PC_ILS: 		/* Force Idle */
		cntrl |= PL_M_IDLE ;
		break ;
	case PC_LS_PDR: 	/* Enable repeat filter */
		cntrl |= PL_M_TPDR ;
		break ;
	}
	outpw(PLC(phy,PL_CNTRL_B),cntrl) ;
}

/*
 * reset the LEM state of a PHY: start the BER estimate at its best
 * value, exponent 15 (i.e. 10E-15); lem_float_ber is the estimate
 * scaled by 100 for the weighted average in lem_evaluate()
 */
static void reset_lem_struct(struct s_phy *phy)
{
	struct lem_counter *lem = &phy->lem ;

	phy->mib->fddiPORTLer_Estimate = 15 ;
	lem->lem_float_ber = 15 * 100 ;
}

/*
 * link error monitor
 *
 * read the PLC link error counter, update the MIB error counts and
 * recompute the smoothed BER estimate; may cut the port off when the
 * estimate reaches fddiPORTLer_Cutoff (see below)
 */
static void lem_evaluate(struct s_smc *smc, struct s_phy *phy)
{
	int ber ;
	u_long errors ;
	struct lem_counter *lem = &phy->lem ;
	struct fddi_mib_p *mib ;
	int cond ;

	mib = phy->mib ;

	if (!lem->lem_on)
		return ;

	errors = inpw(PLC(((int) phy->np),PL_LINK_ERR_CTR)) ;
	lem->lem_errors += errors ;
	mib->fddiPORTLem_Ct += errors ;

	errors = lem->lem_errors ;
	/*
	 * calculation is called on an interval of 8 seconds
	 *	-> this means, that one error in 8 sec.
is one of 8*125*10E6 * the same as BER = 10E-9 * Please note: * -> 9 errors in 8 seconds mean: * BER = 9 * 10E-9 and this is * < 10E-8, so the limit of 10E-8 is not reached! */ if (!errors) ber = 15 ; else if (errors <= 9) ber = 9 ; else if (errors <= 99) ber = 8 ; else if (errors <= 999) ber = 7 ; else if (errors <= 9999) ber = 6 ; else if (errors <= 99999) ber = 5 ; else if (errors <= 999999) ber = 4 ; else if (errors <= 9999999) ber = 3 ; else if (errors <= 99999999) ber = 2 ; else if (errors <= 999999999) ber = 1 ; else ber = 0 ; /* * weighted average */ ber *= 100 ; lem->lem_float_ber = lem->lem_float_ber * 7 + ber * 3 ; lem->lem_float_ber /= 10 ; mib->fddiPORTLer_Estimate = lem->lem_float_ber / 100 ; if (mib->fddiPORTLer_Estimate < 4) { mib->fddiPORTLer_Estimate = 4 ; } if (lem->lem_errors) { DB_PCMN(1,"LEM %c :\n",phy->np == PB? 'B' : 'A',0) ; DB_PCMN(1,"errors : %ld\n",lem->lem_errors,0) ; DB_PCMN(1,"sum_errors : %ld\n",mib->fddiPORTLem_Ct,0) ; DB_PCMN(1,"current BER : 10E-%d\n",ber/100,0) ; DB_PCMN(1,"float BER : 10E-(%d/100)\n",lem->lem_float_ber,0) ; DB_PCMN(1,"avg. BER : 10E-%d\n", mib->fddiPORTLer_Estimate,0) ; } lem->lem_errors = 0L ; #ifndef SLIM_SMT cond = (mib->fddiPORTLer_Estimate <= mib->fddiPORTLer_Alarm) ? TRUE : FALSE ; #ifdef SMT_EXT_CUTOFF smt_ler_alarm_check(smc,phy,cond) ; #endif /* nSMT_EXT_CUTOFF */ if (cond != mib->fddiPORTLerFlag) { smt_srf_event(smc,SMT_COND_PORT_LER, (int) (INDEX_PORT+ phy->np) ,cond) ; } #endif if ( mib->fddiPORTLer_Estimate <= mib->fddiPORTLer_Cutoff) { phy->pc_lem_fail = TRUE ; /* flag */ mib->fddiPORTLem_Reject_Ct++ ; /* * "forgive 10e-2" if we cutoff so we can come * up again .. 
			 */
			lem->lem_float_ber += 2*100 ;

			/*PC81b*/
#ifdef	CONCENTRATOR
			DB_PCMN(1,"PCM: LER cutoff on port %d cutoff %d\n",
				phy->np, mib->fddiPORTLer_Cutoff) ;
#endif
#ifdef	SMT_EXT_CUTOFF
			smt_port_off_event(smc,phy->np);
#else	/* nSMT_EXT_CUTOFF */
			queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ;
#endif	/* nSMT_EXT_CUTOFF */
	}
}

/*
 * called by SMT to calculate LEM bit error rate
 * (runs lem_evaluate() on every PHY)
 */
void sm_lem_evaluate(struct s_smc *smc)
{
	int np ;

	for (np = 0 ; np < NUMPHYS ; np++)
		lem_evaluate(smc,&smc->y[np]) ;
}

/*
 * evaluate the link errors accumulated during the Link Confidence Test:
 * sets phy->pc_lem_fail when the error count reached the configured
 * limit for the LCT class that was run (short/medium/long/extended),
 * and maintains fddiPORTLCTFail_Ct / fddiPORTLem_Reject_Ct accordingly
 */
static void lem_check_lct(struct s_smc *smc, struct s_phy *phy)
{
	struct lem_counter	*lem = &phy->lem ;
	struct fddi_mib_p	*mib ;
	int errors ;

	mib = phy->mib ;

	phy->pc_lem_fail = FALSE ;		/* flag */

	errors = inpw(PLC(((int)phy->np),PL_LINK_ERR_CTR)) ;
	lem->lem_errors += errors ;
	mib->fddiPORTLem_Ct += errors ;
	if (lem->lem_errors) {
		switch(phy->lc_test) {
		case LC_SHORT:
			if (lem->lem_errors >= smc->s.lct_short)
				phy->pc_lem_fail = TRUE ;
			break ;
		case LC_MEDIUM:
			if (lem->lem_errors >= smc->s.lct_medium)
				phy->pc_lem_fail = TRUE ;
			break ;
		case LC_LONG:
			if (lem->lem_errors >= smc->s.lct_long)
				phy->pc_lem_fail = TRUE ;
			break ;
		case LC_EXTENDED:
			if (lem->lem_errors >= smc->s.lct_extended)
				phy->pc_lem_fail = TRUE ;
			break ;
		}
		DB_PCMN(1," >>errors : %d\n",lem->lem_errors,0) ;
	}
	if (phy->pc_lem_fail) {
		mib->fddiPORTLCTFail_Ct++ ;
		mib->fddiPORTLem_Reject_Ct++ ;
	}
	else
		mib->fddiPORTLCTFail_Ct = 0 ;
}

/*
 * LEM functions
 */

/*
 * start the link error monitor on PHY 'np': reset the error count,
 * program 'threshold' into the PLC LE threshold register, clear the
 * hardware error counter and enable the link error counter interrupt
 */
static void sm_ph_lem_start(struct s_smc *smc, int np, int threshold)
{
	struct lem_counter *lem = &smc->y[np].lem ;

	lem->lem_on = 1 ;
	lem->lem_errors = 0L ;

	/* Do NOT reset mib->fddiPORTLer_Estimate here. It is called too
	 * often.
*/ outpw(PLC(np,PL_LE_THRESHOLD),threshold) ; (void)inpw(PLC(np,PL_LINK_ERR_CTR)) ; /* clear error counter */ /* enable LE INT */ SETMASK(PLC(np,PL_INTR_MASK),PL_LE_CTR,PL_LE_CTR) ; } static void sm_ph_lem_stop(struct s_smc *smc, int np) { struct lem_counter *lem = &smc->y[np].lem ; lem->lem_on = 0 ; CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ; } /* ARGSUSED */ void sm_pm_ls_latch(struct s_smc *smc, int phy, int on_off) /* int on_off; en- or disable ident. ls */ { SK_UNUSED(smc) ; phy = phy ; on_off = on_off ; } /* * PCM pseudo code * receive actions are called AFTER the bit n is received, * i.e. if pc_rcode_actions(5) is called, bit 6 is the next bit to be received */ /* * PCM pseudo code 5.1 .. 6.1 */ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) { struct fddi_mib_p *mib ; mib = phy->mib ; DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ; bit++ ; switch(bit) { case 0: case 1: case 2: break ; case 3 : if (phy->r_val[1] == 0 && phy->r_val[2] == 0) mib->fddiPORTNeighborType = TA ; else if (phy->r_val[1] == 0 && phy->r_val[2] == 1) mib->fddiPORTNeighborType = TB ; else if (phy->r_val[1] == 1 && phy->r_val[2] == 0) mib->fddiPORTNeighborType = TS ; else if (phy->r_val[1] == 1 && phy->r_val[2] == 1) mib->fddiPORTNeighborType = TM ; break ; case 4: if (mib->fddiPORTMy_Type == TM && mib->fddiPORTNeighborType == TM) { DB_PCMN(1,"PCM %c : E100 withhold M-M\n", phy->phy_name,0) ; mib->fddiPORTPC_Withhold = PC_WH_M_M ; RS_SET(smc,RS_EVENT) ; } else if (phy->t_val[3] || phy->r_val[3]) { mib->fddiPORTPC_Withhold = PC_WH_NONE ; if (mib->fddiPORTMy_Type == TM || mib->fddiPORTNeighborType == TM) phy->pc_mode = PM_TREE ; else phy->pc_mode = PM_PEER ; /* reevaluate the selection criteria (wc_flag) */ all_selection_criteria (smc); if (phy->wc_flag) { mib->fddiPORTPC_Withhold = PC_WH_PATH ; } } else { mib->fddiPORTPC_Withhold = PC_WH_OTHER ; RS_SET(smc,RS_EVENT) ; DB_PCMN(1,"PCM %c : E101 withhold other\n", phy->phy_name,0) ; } phy->twisted = 
((mib->fddiPORTMy_Type != TS) && (mib->fddiPORTMy_Type != TM) && (mib->fddiPORTNeighborType == mib->fddiPORTMy_Type)) ; if (phy->twisted) { DB_PCMN(1,"PCM %c : E102 !!! TWISTED !!!\n", phy->phy_name,0) ; } break ; case 5 : break ; case 6: if (phy->t_val[4] || phy->r_val[4]) { if ((phy->t_val[4] && phy->t_val[5]) || (phy->r_val[4] && phy->r_val[5]) ) phy->lc_test = LC_EXTENDED ; else phy->lc_test = LC_LONG ; } else if (phy->t_val[5] || phy->r_val[5]) phy->lc_test = LC_MEDIUM ; else phy->lc_test = LC_SHORT ; switch (phy->lc_test) { case LC_SHORT : /* 50ms */ outpw(PLC((int)phy->np,PL_LC_LENGTH), TP_LC_LENGTH ) ; phy->t_next[7] = smc->s.pcm_lc_short ; break ; case LC_MEDIUM : /* 500ms */ outpw(PLC((int)phy->np,PL_LC_LENGTH), TP_LC_LONGLN ) ; phy->t_next[7] = smc->s.pcm_lc_medium ; break ; case LC_LONG : SETMASK(PLC((int)phy->np,PL_CNTRL_B),PL_LONG,PL_LONG) ; phy->t_next[7] = smc->s.pcm_lc_long ; break ; case LC_EXTENDED : SETMASK(PLC((int)phy->np,PL_CNTRL_B),PL_LONG,PL_LONG) ; phy->t_next[7] = smc->s.pcm_lc_extended ; break ; } if (phy->t_next[7] > smc->s.pcm_lc_medium) { start_pcm_timer0(smc,phy->t_next[7],PC_TIMEOUT_LCT,phy); } DB_PCMN(1,"LCT timer = %ld us\n", phy->t_next[7], 0) ; phy->t_next[9] = smc->s.pcm_t_next_9 ; break ; case 7: if (phy->t_val[6]) { phy->cf_loop = TRUE ; } phy->td_flag = TRUE ; break ; case 8: if (phy->t_val[7] || phy->r_val[7]) { DB_PCMN(1,"PCM %c : E103 LCT fail %s\n", phy->phy_name,phy->t_val[7]? "local":"remote") ; queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ; } break ; case 9: if (phy->t_val[8] || phy->r_val[8]) { if (phy->t_val[8]) phy->cf_loop = TRUE ; phy->td_flag = TRUE ; } break ; case 10: if (phy->r_val[9]) { /* neighbor intends to have MAC on output */ ; mib->fddiPORTMacIndicated.R_val = TRUE ; } else { /* neighbor does not intend to have MAC on output */ ; mib->fddiPORTMacIndicated.R_val = FALSE ; } break ; } } /* * PCM pseudo code 5.1 .. 
6.1 */ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy) { int np = phy->np ; struct fddi_mib_p *mib ; mib = phy->mib ; switch(bit) { case 0: phy->t_val[0] = 0 ; /* no escape used */ break ; case 1: if (mib->fddiPORTMy_Type == TS || mib->fddiPORTMy_Type == TM) phy->t_val[1] = 1 ; else phy->t_val[1] = 0 ; break ; case 2 : if (mib->fddiPORTMy_Type == TB || mib->fddiPORTMy_Type == TM) phy->t_val[2] = 1 ; else phy->t_val[2] = 0 ; break ; case 3: { int type,ne ; int policy ; type = mib->fddiPORTMy_Type ; ne = mib->fddiPORTNeighborType ; policy = smc->mib.fddiSMTConnectionPolicy ; phy->t_val[3] = 1 ; /* Accept connection */ switch (type) { case TA : if ( ((policy & POLICY_AA) && ne == TA) || ((policy & POLICY_AB) && ne == TB) || ((policy & POLICY_AS) && ne == TS) || ((policy & POLICY_AM) && ne == TM) ) phy->t_val[3] = 0 ; /* Reject */ break ; case TB : if ( ((policy & POLICY_BA) && ne == TA) || ((policy & POLICY_BB) && ne == TB) || ((policy & POLICY_BS) && ne == TS) || ((policy & POLICY_BM) && ne == TM) ) phy->t_val[3] = 0 ; /* Reject */ break ; case TS : if ( ((policy & POLICY_SA) && ne == TA) || ((policy & POLICY_SB) && ne == TB) || ((policy & POLICY_SS) && ne == TS) || ((policy & POLICY_SM) && ne == TM) ) phy->t_val[3] = 0 ; /* Reject */ break ; case TM : if ( ne == TM || ((policy & POLICY_MA) && ne == TA) || ((policy & POLICY_MB) && ne == TB) || ((policy & POLICY_MS) && ne == TS) || ((policy & POLICY_MM) && ne == TM) ) phy->t_val[3] = 0 ; /* Reject */ break ; } #ifndef SLIM_SMT /* * detect undesirable connection attempt event */ if ( (type == TA && ne == TA ) || (type == TA && ne == TS ) || (type == TB && ne == TB ) || (type == TB && ne == TS ) || (type == TS && ne == TA ) || (type == TS && ne == TB ) ) { smt_srf_event(smc,SMT_EVENT_PORT_CONNECTION, (int) (INDEX_PORT+ phy->np) ,0) ; } #endif } break ; case 4: if (mib->fddiPORTPC_Withhold == PC_WH_NONE) { if (phy->pc_lem_fail) { phy->t_val[4] = 1 ; /* long */ phy->t_val[5] = 0 ; } else 
{ phy->t_val[4] = 0 ; if (mib->fddiPORTLCTFail_Ct > 0) phy->t_val[5] = 1 ; /* medium */ else phy->t_val[5] = 0 ; /* short */ /* * Implementers choice: use medium * instead of short when undesired * connection attempt is made. */ if (phy->wc_flag) phy->t_val[5] = 1 ; /* medium */ } mib->fddiPORTConnectState = PCM_CONNECTING ; } else { mib->fddiPORTConnectState = PCM_STANDBY ; phy->t_val[4] = 1 ; /* extended */ phy->t_val[5] = 1 ; } break ; case 5: break ; case 6: /* we do NOT have a MAC for LCT */ phy->t_val[6] = 0 ; break ; case 7: phy->cf_loop = FALSE ; lem_check_lct(smc,phy) ; if (phy->pc_lem_fail) { DB_PCMN(1,"PCM %c : E104 LCT failed\n", phy->phy_name,0) ; phy->t_val[7] = 1 ; } else phy->t_val[7] = 0 ; break ; case 8: phy->t_val[8] = 0 ; /* Don't request MAC loopback */ break ; case 9: phy->cf_loop = 0 ; if ((mib->fddiPORTPC_Withhold != PC_WH_NONE) || ((smc->s.sas == SMT_DAS) && (phy->wc_flag))) { queue_event(smc,EVENT_PCM+np,PC_START) ; break ; } phy->t_val[9] = FALSE ; switch (smc->s.sas) { case SMT_DAS : /* * MAC intended on output */ if (phy->pc_mode == PM_TREE) { if ((np == PB) || ((np == PA) && (smc->y[PB].mib->fddiPORTConnectState != PCM_ACTIVE))) phy->t_val[9] = TRUE ; } else { if (np == PB) phy->t_val[9] = TRUE ; } break ; case SMT_SAS : if (np == PS) phy->t_val[9] = TRUE ; break ; #ifdef CONCENTRATOR case SMT_NAC : /* * MAC intended on output */ if (np == PB) phy->t_val[9] = TRUE ; break ; #endif } mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ; break ; } DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ; } /* * return status twisted (called by SMT) */ int pcm_status_twisted(struct s_smc *smc) { int twist = 0 ; if (smc->s.sas != SMT_DAS) return 0; if (smc->y[PA].twisted && (smc->y[PA].mib->fddiPORTPCMState == PC8_ACTIVE)) twist |= 1 ; if (smc->y[PB].twisted && (smc->y[PB].mib->fddiPORTPCMState == PC8_ACTIVE)) twist |= 2 ; return twist; } /* * return status (called by SMT) * type * state * remote phy type * remote mac yes/no */ void 
pcm_status_state(struct s_smc *smc, int np, int *type, int *state, int *remote, int *mac) { struct s_phy *phy = &smc->y[np] ; struct fddi_mib_p *mib ; mib = phy->mib ; /* remote PHY type and MAC - set only if active */ *mac = 0 ; *type = mib->fddiPORTMy_Type ; /* our PHY type */ *state = mib->fddiPORTConnectState ; *remote = mib->fddiPORTNeighborType ; switch(mib->fddiPORTPCMState) { case PC8_ACTIVE : *mac = mib->fddiPORTMacIndicated.R_val ; break ; } } /* * return rooted station status (called by SMT) */ int pcm_rooted_station(struct s_smc *smc) { int n ; for (n = 0 ; n < NUMPHYS ; n++) { if (smc->y[n].mib->fddiPORTPCMState == PC8_ACTIVE && smc->y[n].mib->fddiPORTNeighborType == TM) return 0; } return 1; } /* * Interrupt actions for PLC & PCM events */ void plc_irq(struct s_smc *smc, int np, unsigned int cmd) /* int np; PHY index */ { struct s_phy *phy = &smc->y[np] ; struct s_plc *plc = &phy->plc ; int n ; #ifdef SUPERNET_3 int corr_mask ; #endif /* SUPERNET_3 */ int i ; if (np >= smc->s.numphys) { plc->soft_err++ ; return ; } if (cmd & PL_EBUF_ERR) { /* elastic buff. det. over-|underflow*/ /* * Check whether the SRF Condition occurred. */ if (!plc->ebuf_cont && phy->mib->fddiPORTPCMState == PC8_ACTIVE){ /* * This is the real Elasticity Error. * More than one in a row are treated as a * single one. * Only count this in the active state. */ phy->mib->fddiPORTEBError_Ct ++ ; } plc->ebuf_err++ ; if (plc->ebuf_cont <= 1000) { /* * Prevent counter from being wrapped after * hanging years in that interrupt. */ plc->ebuf_cont++ ; /* Ebuf continuous error */ } #ifdef SUPERNET_3 if (plc->ebuf_cont == 1000 && ((inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK) == PLC_REV_SN3)) { /* * This interrupt remeained high for at least * 1000 consecutive interrupt calls. * * This is caused by a hardware error of the * ORION part of the Supernet III chipset. * * Disable this bit from the mask. 
*/ corr_mask = (plc_imsk_na & ~PL_EBUF_ERR) ; outpw(PLC(np,PL_INTR_MASK),corr_mask); /* * Disconnect from the ring. * Call the driver with the reset indication. */ queue_event(smc,EVENT_ECM,EC_DISCONNECT) ; /* * Make an error log entry. */ SMT_ERR_LOG(smc,SMT_E0136, SMT_E0136_MSG) ; /* * Indicate the Reset. */ drv_reset_indication(smc) ; } #endif /* SUPERNET_3 */ } else { /* Reset the continuous error variable */ plc->ebuf_cont = 0 ; /* reset Ebuf continuous error */ } if (cmd & PL_PHYINV) { /* physical layer invalid signal */ plc->phyinv++ ; } if (cmd & PL_VSYM_CTR) { /* violation symbol counter has incr.*/ plc->vsym_ctr++ ; } if (cmd & PL_MINI_CTR) { /* dep. on PLC_CNTRL_A's MINI_CTR_INT*/ plc->mini_ctr++ ; } if (cmd & PL_LE_CTR) { /* link error event counter */ int j ; /* * note: PL_LINK_ERR_CTR MUST be read to clear it */ j = inpw(PLC(np,PL_LE_THRESHOLD)) ; i = inpw(PLC(np,PL_LINK_ERR_CTR)) ; if (i < j) { /* wrapped around */ i += 256 ; } if (phy->lem.lem_on) { /* Note: Lem errors shall only be counted when * link is ACTIVE or LCT is active. 
*/ phy->lem.lem_errors += i ; phy->mib->fddiPORTLem_Ct += i ; } } if (cmd & PL_TPC_EXPIRED) { /* TPC timer reached zero */ if (plc->p_state == PS_LCT) { /* * end of LCT */ ; } plc->tpc_exp++ ; } if (cmd & PL_LS_MATCH) { /* LS == LS in PLC_CNTRL_B's MATCH_LS*/ switch (inpw(PLC(np,PL_CNTRL_B)) & PL_MATCH_LS) { case PL_I_IDLE : phy->curr_ls = PC_ILS ; break ; case PL_I_HALT : phy->curr_ls = PC_HLS ; break ; case PL_I_MASTR : phy->curr_ls = PC_MLS ; break ; case PL_I_QUIET : phy->curr_ls = PC_QLS ; break ; } } if (cmd & PL_PCM_BREAK) { /* PCM has entered the BREAK state */ int reason; reason = inpw(PLC(np,PL_STATUS_B)) & PL_BREAK_REASON ; switch (reason) { case PL_B_PCS : plc->b_pcs++ ; break ; case PL_B_TPC : plc->b_tpc++ ; break ; case PL_B_TNE : plc->b_tne++ ; break ; case PL_B_QLS : plc->b_qls++ ; break ; case PL_B_ILS : plc->b_ils++ ; break ; case PL_B_HLS : plc->b_hls++ ; break ; } /*jd 05-Aug-1999 changed: Bug #10419 */ DB_PCMN(1,"PLC %d: MDcF = %x\n", np, smc->e.DisconnectFlag); if (smc->e.DisconnectFlag == FALSE) { DB_PCMN(1,"PLC %d: restart (reason %x)\n", np, reason); queue_event(smc,EVENT_PCM+np,PC_START) ; } else { DB_PCMN(1,"PLC %d: NO!! restart (reason %x)\n", np, reason); } return ; } /* * If both CODE & ENABLE are set ignore enable */ if (cmd & PL_PCM_CODE) { /* receive last sign.-bit | LCT complete */ queue_event(smc,EVENT_PCM+np,PC_SIGNAL) ; n = inpw(PLC(np,PL_RCV_VECTOR)) ; for (i = 0 ; i < plc->p_bits ; i++) { phy->r_val[plc->p_start+i] = n & 1 ; n >>= 1 ; } } else if (cmd & PL_PCM_ENABLED) { /* asserted SC_JOIN, scrub.completed*/ queue_event(smc,EVENT_PCM+np,PC_JOIN) ; } if (cmd & PL_TRACE_PROP) { /* MLS while PC8_ACTIV || PC2_TRACE */ /*PC22b*/ if (!phy->tr_flag) { DB_PCMN(1,"PCM : irq TRACE_PROP %d %d\n", np,smc->mib.fddiSMTECMState) ; phy->tr_flag = TRUE ; smc->e.trace_prop |= ENTITY_BIT(ENTITY_PHY(np)) ; queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ; } } /* * filter PLC glitch ??? 
* QLS || HLS only while in PC2_TRACE state */ if ((cmd & PL_SELF_TEST) && (phy->mib->fddiPORTPCMState == PC2_TRACE)) { /*PC22a*/ if (smc->e.path_test == PT_PASSED) { DB_PCMN(1,"PCM : state = %s %d\n", get_pcmstate(smc,np), phy->mib->fddiPORTPCMState) ; smc->e.path_test = PT_PENDING ; queue_event(smc,EVENT_ECM,EC_PATH_TEST) ; } } if (cmd & PL_TNE_EXPIRED) { /* TNE: length of noise events */ /* break_required (TNE > NS_Max) */ if (phy->mib->fddiPORTPCMState == PC8_ACTIVE) { if (!phy->tr_flag) { DB_PCMN(1,"PCM %c : PC81 %s\n",phy->phy_name,"NSE"); queue_event(smc,EVENT_PCM+np,PC_START) ; return ; } } } #if 0 if (cmd & PL_NP_ERR) { /* NP has requested to r/w an inv reg*/ /* * It's a bug by AMD */ plc->np_err++ ; } /* pin inactiv (GND) */ if (cmd & PL_PARITY_ERR) { /* p. error dedected on TX9-0 inp */ plc->parity_err++ ; } if (cmd & PL_LSDO) { /* carrier detected */ ; } #endif } #ifdef DEBUG /* * fill state struct */ void pcm_get_state(struct s_smc *smc, struct smt_state *state) { struct s_phy *phy ; struct pcm_state *pcs ; int i ; int ii ; short rbits ; short tbits ; struct fddi_mib_p *mib ; for (i = 0, phy = smc->y, pcs = state->pcm_state ; i < NUMPHYS ; i++ , phy++, pcs++ ) { mib = phy->mib ; pcs->pcm_type = (u_char) mib->fddiPORTMy_Type ; pcs->pcm_state = (u_char) mib->fddiPORTPCMState ; pcs->pcm_mode = phy->pc_mode ; pcs->pcm_neighbor = (u_char) mib->fddiPORTNeighborType ; pcs->pcm_bsf = mib->fddiPORTBS_Flag ; pcs->pcm_lsf = phy->ls_flag ; pcs->pcm_lct_fail = (u_char) mib->fddiPORTLCTFail_Ct ; pcs->pcm_ls_rx = LS2MIB(sm_pm_get_ls(smc,i)) ; for (ii = 0, rbits = tbits = 0 ; ii < NUMBITS ; ii++) { rbits <<= 1 ; tbits <<= 1 ; if (phy->r_val[NUMBITS-1-ii]) rbits |= 1 ; if (phy->t_val[NUMBITS-1-ii]) tbits |= 1 ; } pcs->pcm_r_val = rbits ; pcs->pcm_t_val = tbits ; } } int get_pcm_state(struct s_smc *smc, int np) { int pcs ; SK_UNUSED(smc) ; switch (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_STATE) { case PL_PC0 : pcs = PC_STOP ; break ; case PL_PC1 : pcs = PC_START ; break ; case 
PL_PC2 : pcs = PC_TRACE ; break ; case PL_PC3 : pcs = PC_SIGNAL ; break ; case PL_PC4 : pcs = PC_SIGNAL ; break ; case PL_PC5 : pcs = PC_SIGNAL ; break ; case PL_PC6 : pcs = PC_JOIN ; break ; case PL_PC7 : pcs = PC_JOIN ; break ; case PL_PC8 : pcs = PC_ENABLE ; break ; case PL_PC9 : pcs = PC_MAINT ; break ; default : pcs = PC_DISABLE ; break ; } return pcs; } char *get_linestate(struct s_smc *smc, int np) { char *ls = "" ; SK_UNUSED(smc) ; switch (inpw(PLC(np,PL_STATUS_A)) & PL_LINE_ST) { case PL_L_NLS : ls = "NOISE" ; break ; case PL_L_ALS : ls = "ACTIV" ; break ; case PL_L_UND : ls = "UNDEF" ; break ; case PL_L_ILS4: ls = "ILS 4" ; break ; case PL_L_QLS : ls = "QLS" ; break ; case PL_L_MLS : ls = "MLS" ; break ; case PL_L_HLS : ls = "HLS" ; break ; case PL_L_ILS16:ls = "ILS16" ; break ; #ifdef lint default: ls = "unknown" ; break ; #endif } return ls; } char *get_pcmstate(struct s_smc *smc, int np) { char *pcs ; SK_UNUSED(smc) ; switch (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_STATE) { case PL_PC0 : pcs = "OFF" ; break ; case PL_PC1 : pcs = "BREAK" ; break ; case PL_PC2 : pcs = "TRACE" ; break ; case PL_PC3 : pcs = "CONNECT"; break ; case PL_PC4 : pcs = "NEXT" ; break ; case PL_PC5 : pcs = "SIGNAL" ; break ; case PL_PC6 : pcs = "JOIN" ; break ; case PL_PC7 : pcs = "VERIFY" ; break ; case PL_PC8 : pcs = "ACTIV" ; break ; case PL_PC9 : pcs = "MAINT" ; break ; default : pcs = "UNKNOWN" ; break ; } return pcs; } void list_phy(struct s_smc *smc) { struct s_plc *plc ; int np ; for (np = 0 ; np < NUMPHYS ; np++) { plc = &smc->y[np].plc ; printf("PHY %d:\tERRORS\t\t\tBREAK_REASONS\t\tSTATES:\n",np) ; printf("\tsoft_error: %ld \t\tPC_Start : %ld\n", plc->soft_err,plc->b_pcs); printf("\tparity_err: %ld \t\tTPC exp. : %ld\t\tLine: %s\n", plc->parity_err,plc->b_tpc,get_linestate(smc,np)) ; printf("\tebuf_error: %ld \t\tTNE exp. : %ld\n", plc->ebuf_err,plc->b_tne) ; printf("\tphyinvalid: %ld \t\tQLS det. 
: %ld\t\tPCM : %s\n", plc->phyinv,plc->b_qls,get_pcmstate(smc,np)) ; printf("\tviosym_ctr: %ld \t\tILS det. : %ld\n", plc->vsym_ctr,plc->b_ils) ; printf("\tmingap_ctr: %ld \t\tHLS det. : %ld\n", plc->mini_ctr,plc->b_hls) ; printf("\tnodepr_err: %ld\n",plc->np_err) ; printf("\tTPC_exp : %ld\n",plc->tpc_exp) ; printf("\tLEM_err : %ld\n",smc->y[np].lem.lem_errors) ; } } #ifdef CONCENTRATOR void pcm_lem_dump(struct s_smc *smc) { int i ; struct s_phy *phy ; struct fddi_mib_p *mib ; char *entostring() ; printf("PHY errors BER\n") ; printf("----------------------\n") ; for (i = 0,phy = smc->y ; i < NUMPHYS ; i++,phy++) { if (!plc_is_installed(smc,i)) continue ; mib = phy->mib ; printf("%s\t%ld\t10E-%d\n", entostring(smc,ENTITY_PHY(i)), mib->fddiPORTLem_Ct, mib->fddiPORTLer_Estimate) ; } } #endif #endif
gpl-2.0
cjdoucette/XIA-for-Linux
kernel/watchdog.c
138
19391
/* * Detect hard and soft lockups on a system * * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. * * Note: Most of this code is borrowed heavily from the original softlockup * detector, so thanks to Ingo for the initial implementation. * Some chunks also taken from the old x86-specific nmi watchdog code, thanks * to those contributors as well. */ #define pr_fmt(fmt) "NMI watchdog: " fmt #include <linux/mm.h> #include <linux/cpu.h> #include <linux/nmi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/sysctl.h> #include <linux/smpboot.h> #include <linux/sched/rt.h> #include <asm/irq_regs.h> #include <linux/kvm_para.h> #include <linux/perf_event.h> int watchdog_user_enabled = 1; int __read_mostly watchdog_thresh = 10; #ifdef CONFIG_SMP int __read_mostly sysctl_softlockup_all_cpu_backtrace; #else #define sysctl_softlockup_all_cpu_backtrace 0 #endif static int __read_mostly watchdog_running; static u64 __read_mostly sample_period; static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer); static DEFINE_PER_CPU(bool, softlockup_touch_sync); static DEFINE_PER_CPU(bool, soft_watchdog_warn); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt); static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved); #ifdef CONFIG_HARDLOCKUP_DETECTOR static DEFINE_PER_CPU(bool, hard_watchdog_warn); static DEFINE_PER_CPU(bool, watchdog_nmi_touch); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); #endif static unsigned long soft_lockup_nmi_warn; /* boot commands */ /* * Should we panic when a soft-lockup or hard-lockup occurs: */ #ifdef CONFIG_HARDLOCKUP_DETECTOR static int hardlockup_panic = CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; static bool hardlockup_detector_enabled = true; /* * We may not 
want to enable hard lockup detection by default in all cases, * for example when running the kernel as a guest on a hypervisor. In these * cases this function can be called to disable hard lockup detection. This * function should only be executed once by the boot processor before the * kernel command line parameters are parsed, because otherwise it is not * possible to override this in hardlockup_panic_setup(). */ void watchdog_enable_hardlockup_detector(bool val) { hardlockup_detector_enabled = val; } bool watchdog_hardlockup_detector_is_enabled(void) { return hardlockup_detector_enabled; } static int __init hardlockup_panic_setup(char *str) { if (!strncmp(str, "panic", 5)) hardlockup_panic = 1; else if (!strncmp(str, "nopanic", 7)) hardlockup_panic = 0; else if (!strncmp(str, "0", 1)) watchdog_user_enabled = 0; else if (!strncmp(str, "1", 1) || !strncmp(str, "2", 1)) { /* * Setting 'nmi_watchdog=1' or 'nmi_watchdog=2' (legacy option) * has the same effect. */ watchdog_user_enabled = 1; watchdog_enable_hardlockup_detector(true); } return 1; } __setup("nmi_watchdog=", hardlockup_panic_setup); #endif unsigned int __read_mostly softlockup_panic = CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; static int __init softlockup_panic_setup(char *str) { softlockup_panic = simple_strtoul(str, NULL, 0); return 1; } __setup("softlockup_panic=", softlockup_panic_setup); static int __init nowatchdog_setup(char *str) { watchdog_user_enabled = 0; return 1; } __setup("nowatchdog", nowatchdog_setup); /* deprecated */ static int __init nosoftlockup_setup(char *str) { watchdog_user_enabled = 0; return 1; } __setup("nosoftlockup", nosoftlockup_setup); /* */ #ifdef CONFIG_SMP static int __init softlockup_all_cpu_backtrace_setup(char *str) { sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0); return 1; } __setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup); #endif /* * Hard-lockup warnings should be triggered after just a few seconds. 
Soft- * lockups can have false positives under extreme conditions. So we generally * want a higher threshold for soft lockups than for hard lockups. So we couple * the thresholds with a factor: we make the soft threshold twice the amount of * time the hard threshold is. */ static int get_softlockup_thresh(void) { return watchdog_thresh * 2; } /* * Returns seconds, approximately. We don't need nanosecond * resolution, and we don't need to waste time with a big divide when * 2^30ns == 1.074s. */ static unsigned long get_timestamp(void) { return running_clock() >> 30LL; /* 2^30 ~= 10^9 */ } static void set_sample_period(void) { /* * convert watchdog_thresh from seconds to ns * the divide by 5 is to give hrtimer several chances (two * or three with the current relation between the soft * and hard thresholds) to increment before the * hardlockup detector generates a warning */ sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5); } /* Commands for resetting the watchdog */ static void __touch_watchdog(void) { __this_cpu_write(watchdog_touch_ts, get_timestamp()); } void touch_softlockup_watchdog(void) { /* * Preemption can be enabled. It doesn't matter which CPU's timestamp * gets zeroed here, so use the raw_ operation. */ raw_cpu_write(watchdog_touch_ts, 0); } EXPORT_SYMBOL(touch_softlockup_watchdog); void touch_all_softlockup_watchdogs(void) { int cpu; /* * this is done lockless * do we care if a 0 races with a timestamp? * all it means is the softlock check starts one cycle later */ for_each_online_cpu(cpu) per_cpu(watchdog_touch_ts, cpu) = 0; } #ifdef CONFIG_HARDLOCKUP_DETECTOR void touch_nmi_watchdog(void) { /* * Using __raw here because some code paths have * preemption enabled. If preemption is enabled * then interrupts should be enabled too, in which * case we shouldn't have to worry about the watchdog * going off. 
*/ raw_cpu_write(watchdog_nmi_touch, true); touch_softlockup_watchdog(); } EXPORT_SYMBOL(touch_nmi_watchdog); #endif void touch_softlockup_watchdog_sync(void) { __this_cpu_write(softlockup_touch_sync, true); __this_cpu_write(watchdog_touch_ts, 0); } #ifdef CONFIG_HARDLOCKUP_DETECTOR /* watchdog detector functions */ static int is_hardlockup(void) { unsigned long hrint = __this_cpu_read(hrtimer_interrupts); if (__this_cpu_read(hrtimer_interrupts_saved) == hrint) return 1; __this_cpu_write(hrtimer_interrupts_saved, hrint); return 0; } #endif static int is_softlockup(unsigned long touch_ts) { unsigned long now = get_timestamp(); /* Warn about unreasonable delays: */ if (time_after(now, touch_ts + get_softlockup_thresh())) return now - touch_ts; return 0; } #ifdef CONFIG_HARDLOCKUP_DETECTOR static struct perf_event_attr wd_hw_attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, .size = sizeof(struct perf_event_attr), .pinned = 1, .disabled = 1, }; /* Callback function for perf event subsystem */ static void watchdog_overflow_callback(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { /* Ensure the watchdog never gets throttled */ event->hw.interrupts = 0; if (__this_cpu_read(watchdog_nmi_touch) == true) { __this_cpu_write(watchdog_nmi_touch, false); return; } /* check for a hardlockup * This is done by making sure our timer interrupt * is incrementing. The timer interrupt should have * fired multiple times before we overflow'd. 
If it hasn't * then this is a good indication the cpu is stuck */ if (is_hardlockup()) { int this_cpu = smp_processor_id(); /* only print hardlockups once */ if (__this_cpu_read(hard_watchdog_warn) == true) return; if (hardlockup_panic) panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); else WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); __this_cpu_write(hard_watchdog_warn, true); return; } __this_cpu_write(hard_watchdog_warn, false); return; } #endif /* CONFIG_HARDLOCKUP_DETECTOR */ static void watchdog_interrupt_count(void) { __this_cpu_inc(hrtimer_interrupts); } static int watchdog_nmi_enable(unsigned int cpu); static void watchdog_nmi_disable(unsigned int cpu); /* watchdog kicker functions */ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) { unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts); struct pt_regs *regs = get_irq_regs(); int duration; int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; /* kick the hardlockup detector */ watchdog_interrupt_count(); /* kick the softlockup detector */ wake_up_process(__this_cpu_read(softlockup_watchdog)); /* .. and repeat */ hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period)); if (touch_ts == 0) { if (unlikely(__this_cpu_read(softlockup_touch_sync))) { /* * If the time stamp was touched atomically * make sure the scheduler tick is up to date. */ __this_cpu_write(softlockup_touch_sync, false); sched_clock_tick(); } /* Clear the guest paused flag on watchdog reset */ kvm_check_and_clear_guest_paused(); __touch_watchdog(); return HRTIMER_RESTART; } /* check for a softlockup * This is done by making sure a high priority task is * being scheduled. The task touches the watchdog to * indicate it is getting cpu time. 
If it hasn't then * this is a good indication some task is hogging the cpu */ duration = is_softlockup(touch_ts); if (unlikely(duration)) { /* * If a virtual machine is stopped by the host it can look to * the watchdog like a soft lockup, check to see if the host * stopped the vm before we issue the warning */ if (kvm_check_and_clear_guest_paused()) return HRTIMER_RESTART; /* only warn once */ if (__this_cpu_read(soft_watchdog_warn) == true) { /* * When multiple processes are causing softlockups the * softlockup detector only warns on the first one * because the code relies on a full quiet cycle to * re-arm. The second process prevents the quiet cycle * and never gets reported. Use task pointers to detect * this. */ if (__this_cpu_read(softlockup_task_ptr_saved) != current) { __this_cpu_write(soft_watchdog_warn, false); __touch_watchdog(); } return HRTIMER_RESTART; } if (softlockup_all_cpu_backtrace) { /* Prevent multiple soft-lockup reports if one cpu is already * engaged in dumping cpu back traces */ if (test_and_set_bit(0, &soft_lockup_nmi_warn)) { /* Someone else will report us. Let's give up */ __this_cpu_write(soft_watchdog_warn, true); return HRTIMER_RESTART; } } pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! 
[%s:%d]\n", smp_processor_id(), duration, current->comm, task_pid_nr(current)); __this_cpu_write(softlockup_task_ptr_saved, current); print_modules(); print_irqtrace_events(current); if (regs) show_regs(regs); else dump_stack(); if (softlockup_all_cpu_backtrace) { /* Avoid generating two back traces for current * given that one is already made above */ trigger_allbutself_cpu_backtrace(); clear_bit(0, &soft_lockup_nmi_warn); /* Barrier to sync with other cpus */ smp_mb__after_atomic(); } add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); if (softlockup_panic) panic("softlockup: hung tasks"); __this_cpu_write(soft_watchdog_warn, true); } else __this_cpu_write(soft_watchdog_warn, false); return HRTIMER_RESTART; } static void watchdog_set_prio(unsigned int policy, unsigned int prio) { struct sched_param param = { .sched_priority = prio }; sched_setscheduler(current, policy, &param); } static void watchdog_enable(unsigned int cpu) { struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); /* kick off the timer for the hardlockup detector */ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer->function = watchdog_timer_fn; /* Enable the perf event */ watchdog_nmi_enable(cpu); /* done here because hrtimer_start can only pin to smp_processor_id() */ hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL_PINNED); /* initialize timestamp */ watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1); __touch_watchdog(); } static void watchdog_disable(unsigned int cpu) { struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); watchdog_set_prio(SCHED_NORMAL, 0); hrtimer_cancel(hrtimer); /* disable the perf event */ watchdog_nmi_disable(cpu); } static void watchdog_cleanup(unsigned int cpu, bool online) { watchdog_disable(cpu); } static int watchdog_should_run(unsigned int cpu) { return __this_cpu_read(hrtimer_interrupts) != __this_cpu_read(soft_lockup_hrtimer_cnt); } /* * The watchdog thread function - touches the timestamp. 
* * It only runs once every sample_period seconds (4 seconds by * default) to reset the softlockup timestamp. If this gets delayed * for more than 2*watchdog_thresh seconds then the debug-printout * triggers in watchdog_timer_fn(). */ static void watchdog(unsigned int cpu) { __this_cpu_write(soft_lockup_hrtimer_cnt, __this_cpu_read(hrtimer_interrupts)); __touch_watchdog(); } #ifdef CONFIG_HARDLOCKUP_DETECTOR /* * People like the simple clean cpu node info on boot. * Reduce the watchdog noise by only printing messages * that are different from what cpu0 displayed. */ static unsigned long cpu0_err; static int watchdog_nmi_enable(unsigned int cpu) { struct perf_event_attr *wd_attr; struct perf_event *event = per_cpu(watchdog_ev, cpu); /* * Some kernels need to default hard lockup detection to * 'disabled', for example a guest on a hypervisor. */ if (!watchdog_hardlockup_detector_is_enabled()) { event = ERR_PTR(-ENOENT); goto handle_err; } /* is it already setup and enabled? */ if (event && event->state > PERF_EVENT_STATE_OFF) goto out; /* it is setup but not enabled */ if (event != NULL) goto out_enable; wd_attr = &wd_hw_attr; wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); /* Try to register using hardware perf events */ event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); handle_err: /* save cpu0 error for future comparision */ if (cpu == 0 && IS_ERR(event)) cpu0_err = PTR_ERR(event); if (!IS_ERR(event)) { /* only print for cpu0 or different than cpu0 */ if (cpu == 0 || cpu0_err) pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); goto out_save; } /* skip displaying the same error again */ if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) return PTR_ERR(event); /* vary the KERN level based on the returned errno */ if (PTR_ERR(event) == -EOPNOTSUPP) pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu); else if (PTR_ERR(event) == -ENOENT) pr_warn("disabled (cpu%i): hardware 
events not enabled\n", cpu); else pr_err("disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event)); return PTR_ERR(event); /* success path */ out_save: per_cpu(watchdog_ev, cpu) = event; out_enable: perf_event_enable(per_cpu(watchdog_ev, cpu)); out: return 0; } static void watchdog_nmi_disable(unsigned int cpu) { struct perf_event *event = per_cpu(watchdog_ev, cpu); if (event) { perf_event_disable(event); per_cpu(watchdog_ev, cpu) = NULL; /* should be in cleanup, but blocks oprofile */ perf_event_release_kernel(event); } if (cpu == 0) { /* watchdog_nmi_enable() expects this to be zero initially. */ cpu0_err = 0; } } #else static int watchdog_nmi_enable(unsigned int cpu) { return 0; } static void watchdog_nmi_disable(unsigned int cpu) { return; } #endif /* CONFIG_HARDLOCKUP_DETECTOR */ static struct smp_hotplug_thread watchdog_threads = { .store = &softlockup_watchdog, .thread_should_run = watchdog_should_run, .thread_fn = watchdog, .thread_comm = "watchdog/%u", .setup = watchdog_enable, .cleanup = watchdog_cleanup, .park = watchdog_disable, .unpark = watchdog_enable, }; static void restart_watchdog_hrtimer(void *info) { struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); int ret; /* * No need to cancel and restart hrtimer if it is currently executing * because it will reprogram itself with the new period now. * We should never see it unqueued here because we are running per-cpu * with interrupts disabled. */ ret = hrtimer_try_to_cancel(hrtimer); if (ret == 1) hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL_PINNED); } static void update_timers(int cpu) { /* * Make sure that perf event counter will adopt to a new * sampling period. Updating the sampling period directly would * be much nicer but we do not have an API for that now so * let's use a big hammer. * Hrtimer will adopt the new period on the next tick but this * might be late already so we have to restart the timer as well. 
*/ watchdog_nmi_disable(cpu); smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1); watchdog_nmi_enable(cpu); } static void update_timers_all_cpus(void) { int cpu; get_online_cpus(); for_each_online_cpu(cpu) update_timers(cpu); put_online_cpus(); } static int watchdog_enable_all_cpus(bool sample_period_changed) { int err = 0; if (!watchdog_running) { err = smpboot_register_percpu_thread(&watchdog_threads); if (err) pr_err("Failed to create watchdog threads, disabled\n"); else watchdog_running = 1; } else if (sample_period_changed) { update_timers_all_cpus(); } return err; } /* prepare/enable/disable routines */ /* sysctl functions */ #ifdef CONFIG_SYSCTL static void watchdog_disable_all_cpus(void) { if (watchdog_running) { watchdog_running = 0; smpboot_unregister_percpu_thread(&watchdog_threads); } } /* * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh */ int proc_dowatchdog(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int err, old_thresh, old_enabled; bool old_hardlockup; static DEFINE_MUTEX(watchdog_proc_mutex); mutex_lock(&watchdog_proc_mutex); old_thresh = ACCESS_ONCE(watchdog_thresh); old_enabled = ACCESS_ONCE(watchdog_user_enabled); old_hardlockup = watchdog_hardlockup_detector_is_enabled(); err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (err || !write) goto out; set_sample_period(); /* * Watchdog threads shouldn't be enabled if they are * disabled. The 'watchdog_running' variable check in * watchdog_*_all_cpus() function takes care of this. */ if (watchdog_user_enabled && watchdog_thresh) { /* * Prevent a change in watchdog_thresh accidentally overriding * the enablement of the hardlockup detector. 
*/ if (watchdog_user_enabled != old_enabled) watchdog_enable_hardlockup_detector(true); err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh); } else watchdog_disable_all_cpus(); /* Restore old values on failure */ if (err) { watchdog_thresh = old_thresh; watchdog_user_enabled = old_enabled; watchdog_enable_hardlockup_detector(old_hardlockup); } out: mutex_unlock(&watchdog_proc_mutex); return err; } #endif /* CONFIG_SYSCTL */ void __init lockup_detector_init(void) { set_sample_period(); if (watchdog_user_enabled) watchdog_enable_all_cpus(false); }
gpl-2.0
Lloir/android_kernel_htc_enrc2b-bladev2
drivers/media/video/ivtv/ivtv-ioctl.c
394
53604
/* ioctl system call Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "ivtv-driver.h" #include "ivtv-version.h" #include "ivtv-mailbox.h" #include "ivtv-i2c.h" #include "ivtv-queue.h" #include "ivtv-fileops.h" #include "ivtv-vbi.h" #include "ivtv-routing.h" #include "ivtv-streams.h" #include "ivtv-yuv.h" #include "ivtv-ioctl.h" #include "ivtv-gpio.h" #include "ivtv-controls.h" #include "ivtv-cards.h" #include <media/saa7127.h> #include <media/tveeprom.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-event.h> #include <linux/dvb/audio.h> u16 ivtv_service2vbi(int type) { switch (type) { case V4L2_SLICED_TELETEXT_B: return IVTV_SLICED_TYPE_TELETEXT_B; case V4L2_SLICED_CAPTION_525: return IVTV_SLICED_TYPE_CAPTION_525; case V4L2_SLICED_WSS_625: return IVTV_SLICED_TYPE_WSS_625; case V4L2_SLICED_VPS: return IVTV_SLICED_TYPE_VPS; default: return 0; } } static int valid_service_line(int field, int line, int is_pal) { return (is_pal && line >= 6 && (line != 23 || field == 0)) || (!is_pal && line >= 10 && line < 22); } static u16 select_service_from_set(int field, int line, u16 set, int is_pal) { u16 valid_set = (is_pal ? 
V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525);
        int i;

        /* Restrict the requested services to those valid for this standard. */
        set = set & valid_set;
        if (set == 0 || !valid_service_line(field, line, is_pal)) {
                return 0;
        }
        if (!is_pal) {
                /* NTSC: line 21 carries closed captions. */
                if (line == 21 && (set & V4L2_SLICED_CAPTION_525))
                        return V4L2_SLICED_CAPTION_525;
        } else {
                /* PAL: VPS on line 16 and WSS on line 23, first field only. */
                if (line == 16 && field == 0 && (set & V4L2_SLICED_VPS))
                        return V4L2_SLICED_VPS;
                if (line == 23 && field == 0 && (set & V4L2_SLICED_WSS_625))
                        return V4L2_SLICED_WSS_625;
                if (line == 23)
                        return 0;
        }
        /* Otherwise pick the lowest-numbered service bit still in 'set'. */
        for (i = 0; i < 32; i++) {
                if ((1 << i) & set)
                        return 1 << i;
        }
        return 0;
}

/* Expand fmt->service_set into the per-field, per-line service_lines table. */
void ivtv_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
{
        u16 set = fmt->service_set;
        int f, l;

        fmt->service_set = 0;
        for (f = 0; f < 2; f++) {
                for (l = 0; l < 24; l++) {
                        fmt->service_lines[f][l] = select_service_from_set(f, l, set, is_pal);
                }
        }
}

/* Drop any per-line services that are invalid for this standard. */
static void check_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
{
        int f, l;

        for (f = 0; f < 2; f++) {
                for (l = 0; l < 24; l++) {
                        fmt->service_lines[f][l] = select_service_from_set(f, l, fmt->service_lines[f][l], is_pal);
                }
        }
}

/* OR together all per-line services into a single service-set mask. */
u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt)
{
        int f, l;
        u16 set = 0;

        for (f = 0; f < 2; f++) {
                for (l = 0; l < 24; l++) {
                        set |= fmt->service_lines[f][l];
                }
        }
        return set;
}

/* Push the current OSD alpha and chroma-key state to the firmware. */
void ivtv_set_osd_alpha(struct ivtv *itv)
{
        ivtv_vapi(itv, CX2341X_OSD_SET_GLOBAL_ALPHA, 3,
                itv->osd_global_alpha_state, itv->osd_global_alpha,
                !itv->osd_local_alpha_state);
        ivtv_vapi(itv, CX2341X_OSD_SET_CHROMA_KEY, 2,
                itv->osd_chroma_key_state, itv->osd_chroma_key);
}

/*
 * Set the decoder playback speed. 1000 is normal speed; speed == 1 or -1
 * requests a single frame step in the given direction; 0 means "normal".
 */
int ivtv_set_speed(struct ivtv *itv, int speed)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;
        int single_step = (speed == 1 || speed == -1);
        DEFINE_WAIT(wait);

        if (speed == 0)
                speed = 1000;

        /* No change? */
        if (speed == itv->speed && !single_step)
                return 0;

        s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];

        if (single_step && (speed < 0) == (itv->speed < 0)) {
                /* Single step video and no need to change direction */
                ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0);
                itv->speed = speed;
                return 0;
        }
        if (single_step)
                /* Need to change direction */
                speed = speed < 0 ? -1000 : 1000;

        /* Encode the speed into the firmware playback-speed arguments. */
        data[0] = (speed > 1000 || speed < -1000) ? 0x80000000 : 0;
        data[0] |= (speed > 1000 || speed < -1500) ? 0x40000000 : 0;
        data[1] = (speed < 0);
        data[2] = speed < 0 ? 3 : 7;
        data[3] = v4l2_ctrl_g_ctrl(itv->cxhdl.video_b_frames);
        data[4] = (speed == 1500 || speed == 500) ? itv->speed_mute_audio : 0;
        data[5] = 0;
        data[6] = 0;

        if (speed == 1500 || speed == -1500)
                data[0] |= 1;
        else if (speed == 2000 || speed == -2000)
                data[0] |= 2;
        else if (speed > -1000 && speed < 0)
                data[0] |= (-1000 / speed);
        else if (speed < 1000 && speed > 0)
                data[0] |= (1000 / speed);

        /* If not decoding, just change speed setting */
        if (atomic_read(&itv->decoding) > 0) {
                int got_sig = 0;

                /* Stop all DMA and decoding activity */
                ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);

                /* Wait for any DMA to finish */
                prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
                while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                        got_sig = signal_pending(current);
                        if (got_sig)
                                break;
                        got_sig = 0;
                        schedule();
                }
                finish_wait(&itv->dma_waitq, &wait);
                if (got_sig)
                        return -EINTR;

                /* Change Speed safely */
                ivtv_api(itv, CX2341X_DEC_SET_PLAYBACK_SPEED, 7, data);
                IVTV_DEBUG_INFO("Setting Speed to 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                data[0], data[1], data[2], data[3], data[4], data[5], data[6]);
        }
        if (single_step) {
                speed = (speed < 0) ? -1 : 1;
                ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0);
        }
        itv->speed = speed;
        return 0;
}

/* Snap a requested playback speed to one the decoder supports, keeping sign. */
static int ivtv_validate_speed(int cur_speed, int new_speed)
{
        int fact = new_speed < 0 ? -1 : 1;
        int s;

        if (cur_speed == 0)
                cur_speed = 1000;
        if (new_speed < 0)
                new_speed = -new_speed;
        if (cur_speed < 0)
                cur_speed = -cur_speed;

        if (cur_speed <= new_speed) {
                /* Speeding up: round up to the next fast-forward step. */
                if (new_speed > 1500)
                        return fact * 2000;
                if (new_speed > 1000)
                        return fact * 1500;
        } else {
                /* Slowing down: round down to the next step. */
                if (new_speed >= 2000)
                        return fact * 2000;
                if (new_speed >= 1500)
                        return fact * 1500;
                if (new_speed >= 1000)
                        return fact * 1000;
        }
        if (new_speed == 0)
                return 1000;
        if (new_speed == 1 || new_speed == 1000)
                return fact * new_speed;

        s = new_speed;
        new_speed = 1000 / new_speed;
        /* Avoid landing on the current slow-motion divisor; nudge by one. */
        if (1000 / cur_speed == new_speed)
                new_speed += (cur_speed < s) ? -1 : 1;
        if (new_speed > 60)
                return 1000 / (fact * 60);
        return 1000 / (fact * new_speed);
}

/* Handle VIDEO_CMD_* decoder commands (play/stop/freeze/continue). */
static int ivtv_video_command(struct ivtv *itv, struct ivtv_open_id *id,
                struct video_command *vc, int try)
{
        struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];

        if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
                return -EINVAL;

        switch (vc->cmd) {
        case VIDEO_CMD_PLAY: {
                vc->flags = 0;
                vc->play.speed = ivtv_validate_speed(itv->speed, vc->play.speed);
                if (vc->play.speed < 0)
                        vc->play.format = VIDEO_PLAY_FMT_GOP;
                if (try)
                        break;

                if (ivtv_set_output_mode(itv, OUT_MPG) != OUT_MPG)
                        return -EBUSY;
                if (test_and_clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags)) {
                        /* forces ivtv_set_speed to be called */
                        itv->speed = 0;
                }
                return ivtv_start_decoding(id, vc->play.speed);
        }

        case VIDEO_CMD_STOP:
                vc->flags &= VIDEO_CMD_STOP_IMMEDIATELY|VIDEO_CMD_STOP_TO_BLACK;
                if (vc->flags & VIDEO_CMD_STOP_IMMEDIATELY)
                        vc->stop.pts = 0;
                if (try)
                        break;

                if (atomic_read(&itv->decoding) == 0)
                        return 0;
                if (itv->output_mode != OUT_MPG)
                        return -EBUSY;

                itv->output_mode = OUT_NONE;
                return ivtv_stop_v4l2_decode_stream(s, vc->flags, vc->stop.pts);

        case VIDEO_CMD_FREEZE:
                vc->flags &= VIDEO_CMD_FREEZE_TO_BLACK;
                if (try)
                        break;

                if (itv->output_mode != OUT_MPG)
                        return -EBUSY;
                if (atomic_read(&itv->decoding) > 0) {
                        ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1,
                                (vc->flags & VIDEO_CMD_FREEZE_TO_BLACK) ?
1 : 0);
                        set_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags);
                }
                break;

        case VIDEO_CMD_CONTINUE:
                vc->flags = 0;
                if (try)
                        break;

                if (itv->output_mode != OUT_MPG)
                        return -EBUSY;
                if (test_and_clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags)) {
                        int speed = itv->speed;

                        /* Restart decoding at the speed active when paused. */
                        itv->speed = 0;
                        return ivtv_start_decoding(id, speed);
                }
                break;

        default:
                return -EINVAL;
        }
        return 0;
}

/* VIDIOC_G_FMT for sliced VBI output: report the supported output services. */
static int ivtv_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv *itv = fh2id(fh)->itv;
        struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;

        vbifmt->reserved[0] = 0;
        vbifmt->reserved[1] = 0;
        if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT))
                return -EINVAL;
        vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;

        if (itv->is_60hz) {
                /* 60 Hz: closed captions on line 21 of both fields. */
                vbifmt->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
                vbifmt->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
        } else {
                /* 50 Hz: WSS on line 23, VPS on line 16. */
                vbifmt->service_lines[0][23] = V4L2_SLICED_WSS_625;
                vbifmt->service_lines[0][16] = V4L2_SLICED_VPS;
        }
        vbifmt->service_set = ivtv_get_service_set(vbifmt);
        return 0;
}

/* VIDIOC_G_FMT for video capture (HM12 YUV or MPEG depending on stream). */
static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;
        struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;

        pixfmt->width = itv->cxhdl.width;
        pixfmt->height = itv->cxhdl.height;
        pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
        pixfmt->field = V4L2_FIELD_INTERLACED;
        pixfmt->priv = 0;
        if (id->type == IVTV_ENC_STREAM_TYPE_YUV) {
                pixfmt->pixelformat = V4L2_PIX_FMT_HM12;
                /* YUV size is (Y=(h*720) + UV=(h*(720/2))) */
                pixfmt->sizeimage = pixfmt->height * 720 * 3 / 2;
                pixfmt->bytesperline = 720;
        } else {
                pixfmt->pixelformat = V4L2_PIX_FMT_MPEG;
                pixfmt->sizeimage = 128 * 1024;
                pixfmt->bytesperline = 0;
        }
        return 0;
}

/* VIDIOC_G_FMT for raw VBI capture. */
static int ivtv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv *itv = fh2id(fh)->itv;
        struct v4l2_vbi_format *vbifmt = &fmt->fmt.vbi;

        vbifmt->sampling_rate = 27000000;
        vbifmt->offset = 248;
        vbifmt->samples_per_line = itv->vbi.raw_decoder_line_size - 4;
        vbifmt->sample_format = V4L2_PIX_FMT_GREY;
        vbifmt->start[0] = itv->vbi.start[0];
        vbifmt->start[1] = itv->vbi.start[1];
        vbifmt->count[0] = vbifmt->count[1] = itv->vbi.count;
        vbifmt->flags = 0;
        vbifmt->reserved[0] = 0;
        vbifmt->reserved[1] = 0;
        return 0;
}

/* VIDIOC_G_FMT for sliced VBI capture. */
static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;

        vbifmt->reserved[0] = 0;
        vbifmt->reserved[1] = 0;
        vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;

        if (id->type == IVTV_DEC_STREAM_TYPE_VBI) {
                /* Decoder VBI stream: report the full standard service set. */
                vbifmt->service_set = itv->is_50hz ? V4L2_SLICED_VBI_625 :
                        V4L2_SLICED_VBI_525;
                ivtv_expand_service_set(vbifmt, itv->is_50hz);
                return 0;
        }

        /* Otherwise query the video subdevice for the current sliced format. */
        v4l2_subdev_call(itv->sd_video, vbi, g_sliced_fmt, vbifmt);
        vbifmt->service_set = ivtv_get_service_set(vbifmt);
        return 0;
}

/* VIDIOC_G_FMT for video output (YUV playback or MPEG). */
static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;
        struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;

        if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
                return -EINVAL;
        pixfmt->width = itv->main_rect.width;
        pixfmt->height = itv->main_rect.height;
        pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
        pixfmt->field = V4L2_FIELD_INTERLACED;
        pixfmt->priv = 0;
        if (id->type == IVTV_DEC_STREAM_TYPE_YUV) {
                /* Map the driver's lace mode back onto a V4L2 field value. */
                switch (itv->yuv_info.lace_mode & IVTV_YUV_MODE_MASK) {
                case IVTV_YUV_MODE_INTERLACED:
                        pixfmt->field = (itv->yuv_info.lace_mode & IVTV_YUV_SYNC_MASK) ?
                                V4L2_FIELD_INTERLACED_BT : V4L2_FIELD_INTERLACED_TB;
                        break;
                case IVTV_YUV_MODE_PROGRESSIVE:
                        pixfmt->field = V4L2_FIELD_NONE;
                        break;
                default:
                        pixfmt->field = V4L2_FIELD_ANY;
                        break;
                }
                pixfmt->pixelformat = V4L2_PIX_FMT_HM12;
                pixfmt->bytesperline = 720;
                pixfmt->width = itv->yuv_info.v4l2_src_w;
                pixfmt->height = itv->yuv_info.v4l2_src_h;
                /* YUV size is (Y=(h*w) + UV=(h*(w/2))) */
                pixfmt->sizeimage = 1080 * ((pixfmt->height + 31) & ~31);
        } else {
                pixfmt->pixelformat = V4L2_PIX_FMT_MPEG;
                pixfmt->sizeimage = 128 * 1024;
                pixfmt->bytesperline = 0;
        }
        return 0;
}

/* VIDIOC_G_FMT for the video output overlay (OSD window). */
static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv *itv = fh2id(fh)->itv;
        struct v4l2_window *winfmt = &fmt->fmt.win;

        if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
                return -EINVAL;
        winfmt->chromakey = itv->osd_chroma_key;
        winfmt->global_alpha = itv->osd_global_alpha;
        winfmt->field = V4L2_FIELD_INTERLACED;
        winfmt->clips = NULL;
        winfmt->clipcount = 0;
        winfmt->bitmap = NULL;
        winfmt->w.top = winfmt->w.left = 0;
        winfmt->w.width = itv->osd_rect.width;
        winfmt->w.height = itv->osd_rect.height;
        return 0;
}

/* TRY_FMT for sliced VBI out is identical to G_FMT: nothing is adjustable. */
static int ivtv_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
        return ivtv_g_fmt_sliced_vbi_out(file, fh, fmt);
}

/* VIDIOC_TRY_FMT for video capture: clamp the requested size. */
static int ivtv_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;
        int w = fmt->fmt.pix.width;
        int h = fmt->fmt.pix.height;
        int min_h = 2;

        w = min(w, 720);
        w = max(w, 2);
        if (id->type == IVTV_ENC_STREAM_TYPE_YUV) {
                /* YUV height must be a multiple of 32 */
                h &= ~0x1f;
                min_h = 32;
        }
        h = min(h, itv->is_50hz ?
576 : 480);
        h = max(h, min_h);
        ivtv_g_fmt_vid_cap(file, fh, fmt);
        /* Keep the clamped size; ivtv_g_fmt_vid_cap filled in the rest. */
        fmt->fmt.pix.width = w;
        fmt->fmt.pix.height = h;
        return 0;
}

/* TRY_FMT for raw VBI capture is identical to G_FMT: nothing is adjustable. */
static int ivtv_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
        return ivtv_g_fmt_vbi_cap(file, fh, fmt);
}

/* VIDIOC_TRY_FMT for sliced VBI capture: normalize the requested services. */
static int ivtv_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;

        if (id->type == IVTV_DEC_STREAM_TYPE_VBI)
                return ivtv_g_fmt_sliced_vbi_cap(file, fh, fmt);

        /* set sliced VBI capture format */
        vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
        vbifmt->reserved[0] = 0;
        vbifmt->reserved[1] = 0;

        if (vbifmt->service_set)
                ivtv_expand_service_set(vbifmt, itv->is_50hz);
        check_service_set(vbifmt, itv->is_50hz);
        vbifmt->service_set = ivtv_get_service_set(vbifmt);
        return 0;
}

/* VIDIOC_TRY_FMT for video output: clamp size, preserve requested field. */
static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv_open_id *id = fh2id(fh);
        s32 w = fmt->fmt.pix.width;
        s32 h = fmt->fmt.pix.height;
        int field = fmt->fmt.pix.field;
        int ret = ivtv_g_fmt_vid_out(file, fh, fmt);

        w = min(w, 720);
        w = max(w, 2);
        /* Why can the height be 576 even when the output is NTSC?

           Internally the buffers of the PVR350 are always set to 720x576. The
           decoded video frame will always be placed in the top left corner of
           this buffer. For any video which is not 720x576, the buffer will
           then be cropped to remove the unused right and lower areas, with
           the remaining image being scaled by the hardware to fit the display
           area. The video can be scaled both up and down, so a 720x480 video
           can be displayed full-screen on PAL and a 720x576 video can be
           displayed without cropping on NTSC.

           Note that the scaling only occurs on the video stream, the osd
           resolution is locked to the broadcast standard and not scaled.

           Thanks to Ian Armstrong for this explanation. */
        h = min(h, 576);
        h = max(h, 2);
        if (id->type == IVTV_DEC_STREAM_TYPE_YUV)
                fmt->fmt.pix.field = field;
        fmt->fmt.pix.width = w;
        fmt->fmt.pix.height = h;
        return ret;
}

/* VIDIOC_TRY_FMT for the output overlay: only chromakey/alpha are settable. */
static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv *itv = fh2id(fh)->itv;
        u32 chromakey = fmt->fmt.win.chromakey;
        u8 global_alpha = fmt->fmt.win.global_alpha;

        if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
                return -EINVAL;
        ivtv_g_fmt_vid_out_overlay(file, fh, fmt);
        /* Restore the caller-requested values clobbered by the g_fmt call. */
        fmt->fmt.win.chromakey = chromakey;
        fmt->fmt.win.global_alpha = global_alpha;
        return 0;
}

/* S_FMT for sliced VBI out is identical to G_FMT: nothing is adjustable. */
static int ivtv_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
        return ivtv_g_fmt_sliced_vbi_out(file, fh, fmt);
}

/* VIDIOC_S_FMT for video capture: program the new capture resolution. */
static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;
        struct v4l2_mbus_framefmt mbus_fmt;
        int ret = ivtv_try_fmt_vid_cap(file, fh, fmt);
        int w = fmt->fmt.pix.width;
        int h = fmt->fmt.pix.height;

        if (ret)
                return ret;

        if (itv->cxhdl.width == w && itv->cxhdl.height == h)
                return 0;

        if (atomic_read(&itv->capturing) > 0)
                return -EBUSY;

        itv->cxhdl.width = w;
        itv->cxhdl.height = h;
        /* MPEG-1 halves the horizontal resolution at the sensor. */
        if (v4l2_ctrl_g_ctrl(itv->cxhdl.video_encoding) == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
                fmt->fmt.pix.width /= 2;
        mbus_fmt.width = fmt->fmt.pix.width;
        mbus_fmt.height = h;
        mbus_fmt.code = V4L2_MBUS_FMT_FIXED;
        v4l2_subdev_call(itv->sd_video, video, s_mbus_fmt, &mbus_fmt);
        return ivtv_g_fmt_vid_cap(file, fh, fmt);
}

/* VIDIOC_S_FMT for raw VBI capture: switch the VBI input to raw mode. */
static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv *itv = fh2id(fh)->itv;

        if (!ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
                return -EBUSY;
        itv->vbi.sliced_in->service_set = 0;
        itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE;
        v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &fmt->fmt.vbi);
        return ivtv_g_fmt_vbi_cap(file, fh, fmt);
}

/* VIDIOC_S_FMT for sliced VBI capture: switch the VBI input to sliced mode. */
static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;
        int ret = ivtv_try_fmt_sliced_vbi_cap(file, fh, fmt);

        if (ret || id->type == IVTV_DEC_STREAM_TYPE_VBI)
                return ret;

        check_service_set(vbifmt, itv->is_50hz);
        if (ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
                return -EBUSY;
        itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
        v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, vbifmt);
        memcpy(itv->vbi.sliced_in, vbifmt, sizeof(*itv->vbi.sliced_in));
        return 0;
}

/* VIDIOC_S_FMT for video output: update the YUV playback source format. */
static int ivtv_s_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;
        struct yuv_playback_info *yi = &itv->yuv_info;
        int ret = ivtv_try_fmt_vid_out(file, fh, fmt);

        if (ret)
                return ret;

        if (id->type != IVTV_DEC_STREAM_TYPE_YUV)
                return 0;

        /* Return now if we already have some frame data */
        if (yi->stream_size)
                return -EBUSY;

        yi->v4l2_src_w = fmt->fmt.pix.width;
        yi->v4l2_src_h = fmt->fmt.pix.height;

        switch (fmt->fmt.pix.field) {
        case V4L2_FIELD_NONE:
                yi->lace_mode = IVTV_YUV_MODE_PROGRESSIVE;
                break;
        case V4L2_FIELD_ANY:
                yi->lace_mode = IVTV_YUV_MODE_AUTO;
                break;
        case V4L2_FIELD_INTERLACED_BT:
                yi->lace_mode = IVTV_YUV_MODE_INTERLACED|IVTV_YUV_SYNC_ODD;
                break;
        case V4L2_FIELD_INTERLACED_TB:
        default:
                yi->lace_mode = IVTV_YUV_MODE_INTERLACED;
                break;
        }
        yi->lace_sync_field = (yi->lace_mode & IVTV_YUV_SYNC_MASK) == IVTV_YUV_SYNC_EVEN ?
0 : 1;

        if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
                itv->dma_data_req_size = 1080 * ((yi->v4l2_src_h + 31) & ~31);
        return 0;
}

/* VIDIOC_S_FMT for the output overlay: apply chromakey and global alpha. */
static int ivtv_s_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
        struct ivtv *itv = fh2id(fh)->itv;
        int ret = ivtv_try_fmt_vid_out_overlay(file, fh, fmt);

        if (ret == 0) {
                itv->osd_chroma_key = fmt->fmt.win.chromakey;
                itv->osd_global_alpha = fmt->fmt.win.global_alpha;
                ivtv_set_osd_alpha(itv);
        }
        return ret;
}

/* VIDIOC_DBG_G_CHIP_IDENT: identify the host chip or forward to subdevices. */
static int ivtv_g_chip_ident(struct file *file, void *fh, struct v4l2_dbg_chip_ident *chip)
{
        struct ivtv *itv = fh2id(fh)->itv;

        chip->ident = V4L2_IDENT_NONE;
        chip->revision = 0;
        if (chip->match.type == V4L2_CHIP_MATCH_HOST) {
                if (v4l2_chip_match_host(&chip->match))
                        chip->ident = itv->has_cx23415 ? V4L2_IDENT_CX23415 : V4L2_IDENT_CX23416;
                return 0;
        }
        if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
            chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
                return -EINVAL;
        /* TODO: is this correct? */
        return ivtv_call_all_err(itv, core, g_chip_ident, chip);
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
 * Debug register access: map the requested register offset into the
 * register, decoder or encoder window and read or write one 32-bit word.
 */
static int ivtv_itvc(struct ivtv *itv, unsigned int cmd, void *arg)
{
        struct v4l2_dbg_register *regs = arg;
        volatile u8 __iomem *reg_start;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (regs->reg >= IVTV_REG_OFFSET && regs->reg < IVTV_REG_OFFSET + IVTV_REG_SIZE)
                reg_start = itv->reg_mem - IVTV_REG_OFFSET;
        else if (itv->has_cx23415 && regs->reg >= IVTV_DECODER_OFFSET &&
                        regs->reg < IVTV_DECODER_OFFSET + IVTV_DECODER_SIZE)
                reg_start = itv->dec_mem - IVTV_DECODER_OFFSET;
        else if (regs->reg < IVTV_ENCODER_SIZE)
                reg_start = itv->enc_mem;
        else
                return -EINVAL;

        regs->size = 4;
        if (cmd == VIDIOC_DBG_G_REGISTER)
                regs->val = readl(regs->reg + reg_start);
        else
                writel(regs->val, regs->reg + reg_start);
        return 0;
}

static int ivtv_g_register(struct file *file, void *fh, struct v4l2_dbg_register *reg)
{
        struct ivtv *itv = fh2id(fh)->itv;

        if (v4l2_chip_match_host(&reg->match))
                return ivtv_itvc(itv, VIDIOC_DBG_G_REGISTER, reg);
        /* TODO: subdev errors should not be ignored, this should become a
           subdev helper function. */
        ivtv_call_all(itv, core, g_register, reg);
        return 0;
}

static int ivtv_s_register(struct file *file, void *fh, struct v4l2_dbg_register *reg)
{
        struct ivtv *itv = fh2id(fh)->itv;

        if (v4l2_chip_match_host(&reg->match))
                return ivtv_itvc(itv, VIDIOC_DBG_S_REGISTER, reg);
        /* TODO: subdev errors should not be ignored, this should become a
           subdev helper function. */
        ivtv_call_all(itv, core, s_register, reg);
        return 0;
}
#endif

/* VIDIOC_QUERYCAP: driver name, card name, bus info and capabilities. */
static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vcap)
{
        struct ivtv *itv = fh2id(fh)->itv;

        strlcpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver));
        strlcpy(vcap->card, itv->card_name, sizeof(vcap->card));
        snprintf(vcap->bus_info, sizeof(vcap->bus_info), "PCI:%s", pci_name(itv->pdev));
        vcap->capabilities = itv->v4l2_cap; /* capabilities */
        return 0;
}

static int ivtv_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
        struct ivtv *itv = fh2id(fh)->itv;

        return ivtv_get_audio_input(itv, vin->index, vin);
}

static int ivtv_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
        struct ivtv *itv = fh2id(fh)->itv;

        vin->index = itv->audio_input;
        return ivtv_get_audio_input(itv, vin->index, vin);
}

static int ivtv_s_audio(struct file *file, void *fh, struct v4l2_audio *vout)
{
        struct ivtv *itv = fh2id(fh)->itv;

        if (vout->index >= itv->nof_audio_inputs)
                return -EINVAL;

        itv->audio_input = vout->index;
        ivtv_audio_set_io(itv);

        return 0;
}

static int ivtv_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vin)
{
        struct ivtv *itv = fh2id(fh)->itv;

        /* set it to defaults from our table */
        return ivtv_get_audio_output(itv, vin->index, vin);
}

static int ivtv_g_audout(struct file *file, void *fh, struct v4l2_audioout *vin)
{
        struct ivtv *itv = fh2id(fh)->itv;

        vin->index = 0;
        return ivtv_get_audio_output(itv, vin->index, vin);
}

static int ivtv_s_audout(struct file *file, void *fh, struct v4l2_audioout *vout)
{
        struct ivtv *itv = fh2id(fh)->itv;

        return ivtv_get_audio_output(itv, vout->index, vout);
}

static int ivtv_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
        struct ivtv *itv = fh2id(fh)->itv;

        /* set it to defaults from our table */
        return ivtv_get_input(itv, vin->index, vin);
}

static int ivtv_enum_output(struct file *file, void *fh, struct v4l2_output *vout)
{
        struct ivtv *itv = fh2id(fh)->itv;

        return ivtv_get_output(itv, vout->index, vout);
}

/* VIDIOC_CROPCAP: report cropping bounds and pixel aspect for the stream. */
static int ivtv_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;
        struct yuv_playback_info *yi = &itv->yuv_info;
        int streamtype;

        streamtype = id->type;

        if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
                return -EINVAL;
        cropcap->bounds.top = cropcap->bounds.left = 0;
        cropcap->bounds.width = 720;
        /* NOTE(review): this VIDEO_CAPTURE branch is unreachable — type was
           already required to be VIDEO_OUTPUT above. Confirm intent. */
        if (cropcap->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
                cropcap->bounds.height = itv->is_50hz ? 576 : 480;
                cropcap->pixelaspect.numerator = itv->is_50hz ? 59 : 10;
                cropcap->pixelaspect.denominator = itv->is_50hz ? 54 : 11;
        } else if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) {
                if (yi->track_osd) {
                        cropcap->bounds.width = yi->osd_full_w;
                        cropcap->bounds.height = yi->osd_full_h;
                } else {
                        cropcap->bounds.width = 720;
                        cropcap->bounds.height = itv->is_out_50hz ? 576 : 480;
                }
                cropcap->pixelaspect.numerator = itv->is_out_50hz ? 59 : 10;
                cropcap->pixelaspect.denominator = itv->is_out_50hz ? 54 : 11;
        } else {
                cropcap->bounds.height = itv->is_out_50hz ? 576 : 480;
                cropcap->pixelaspect.numerator = itv->is_out_50hz ? 59 : 10;
                cropcap->pixelaspect.denominator = itv->is_out_50hz ?
54 : 11;
        }
        cropcap->defrect = cropcap->bounds;
        return 0;
}

/* VIDIOC_S_CROP: set the output window (YUV path or OSD framebuffer). */
static int ivtv_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;
        struct yuv_playback_info *yi = &itv->yuv_info;
        int streamtype;

        streamtype = id->type;

        if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) {
                if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) {
                        yi->main_rect = crop->c;
                        return 0;
                } else {
                        /* Firmware call returning 0 means success. */
                        if (!ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
                                crop->c.width, crop->c.height, crop->c.left, crop->c.top)) {
                                itv->main_rect = crop->c;
                                return 0;
                        }
                }
                return -EINVAL;
        }
        return -EINVAL;
}

/* VIDIOC_G_CROP: return the current output window. */
static int ivtv_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;
        struct yuv_playback_info *yi = &itv->yuv_info;
        int streamtype;

        streamtype = id->type;

        if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) {
                if (streamtype == IVTV_DEC_STREAM_TYPE_YUV)
                        crop->c = yi->main_rect;
                else
                        crop->c = itv->main_rect;
                return 0;
        }
        return -EINVAL;
}

/* VIDIOC_ENUM_FMT for capture: index 0 is HM12 YUV, index 1 is MPEG. */
static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
        static struct v4l2_fmtdesc formats[] = {
                { 0, 0, 0,
                  "HM12 (YUV 4:2:0)", V4L2_PIX_FMT_HM12,
                  { 0, 0, 0, 0 }
                },
                { 1, 0, V4L2_FMT_FLAG_COMPRESSED,
                  "MPEG", V4L2_PIX_FMT_MPEG,
                  { 0, 0, 0, 0 }
                }
        };
        enum v4l2_buf_type type = fmt->type;

        if (fmt->index > 1)
                return -EINVAL;

        *fmt = formats[fmt->index];
        fmt->type = type;
        return 0;
}

/* VIDIOC_ENUM_FMT for output: same two formats, output capability required. */
static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
        struct ivtv *itv = fh2id(fh)->itv;
        static struct v4l2_fmtdesc formats[] = {
                { 0, 0, 0,
                  "HM12 (YUV 4:2:0)", V4L2_PIX_FMT_HM12,
                  { 0, 0, 0, 0 }
                },
                { 1, 0, V4L2_FMT_FLAG_COMPRESSED,
                  "MPEG", V4L2_PIX_FMT_MPEG,
                  { 0, 0, 0, 0 }
                }
        };
        enum v4l2_buf_type type = fmt->type;

        if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
                return -EINVAL;

        if (fmt->index > 1)
                return -EINVAL;

        *fmt = formats[fmt->index];
        fmt->type = type;
        return 0;
}

static int ivtv_g_input(struct file *file, void *fh, unsigned int *i)
{
        struct ivtv *itv = fh2id(fh)->itv;

        *i = itv->active_input;
        return 0;
}

/* VIDIOC_S_INPUT: switch the active video input, muting during the change. */
int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
{
        struct ivtv *itv = fh2id(fh)->itv;

        /* NOTE(review): inp is unsigned, so the < 0 test is always false. */
        if (inp < 0 || inp >= itv->nof_inputs)
                return -EINVAL;

        if (inp == itv->active_input) {
                IVTV_DEBUG_INFO("Input unchanged\n");
                return 0;
        }

        if (atomic_read(&itv->capturing) > 0) {
                return -EBUSY;
        }

        IVTV_DEBUG_INFO("Changing input from %d to %d\n",
                        itv->active_input, inp);

        itv->active_input = inp;
        /* Set the audio input to whatever is appropriate for the input type. */
        itv->audio_input = itv->card->video_inputs[inp].audio_index;

        /* prevent others from messing with the streams until we're finished changing inputs. */
        ivtv_mute(itv);
        ivtv_video_set_io(itv);
        ivtv_audio_set_io(itv);
        ivtv_unmute(itv);

        return 0;
}

static int ivtv_g_output(struct file *file, void *fh, unsigned int *i)
{
        struct ivtv *itv = fh2id(fh)->itv;

        if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
                return -EINVAL;

        *i = itv->active_output;
        return 0;
}

/* VIDIOC_S_OUTPUT: switch the active video output via the SAA7127 encoder. */
static int ivtv_s_output(struct file *file, void *fh, unsigned int outp)
{
        struct ivtv *itv = fh2id(fh)->itv;

        if (outp >= itv->card->nof_outputs)
                return -EINVAL;

        if (outp == itv->active_output) {
                IVTV_DEBUG_INFO("Output unchanged\n");
                return 0;
        }
        IVTV_DEBUG_INFO("Changing output from %d to %d\n",
                        itv->active_output, outp);

        itv->active_output = outp;
        ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_routing,
                    SAA7127_INPUT_TYPE_NORMAL,
                    itv->card->video_outputs[outp].video_output, 0);

        return 0;
}

static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
        struct ivtv *itv = fh2id(fh)->itv;

        if (vf->tuner != 0)
                return -EINVAL;

        ivtv_call_all(itv, tuner, g_frequency, vf);
        return 0;
}

/* VIDIOC_S_FREQUENCY: retune, muted to avoid audible artifacts. */
int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
        struct ivtv *itv = fh2id(fh)->itv;

        if (vf->tuner != 0)
                return -EINVAL;

        ivtv_mute(itv);
        IVTV_DEBUG_INFO("v4l2 ioctl: set frequency %d\n", vf->frequency);
        ivtv_call_all(itv, tuner, s_frequency, vf);
        ivtv_unmute(itv);
        return 0;
}

static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
        struct ivtv *itv = fh2id(fh)->itv;

        *std = itv->std;
        return 0;
}

/* Apply a new video standard to the encoder side (resolution, VBI layout). */
void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std)
{
        itv->std = *std;
        itv->is_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
        itv->is_50hz = !itv->is_60hz;
        cx2341x_handler_set_50hz(&itv->cxhdl, itv->is_50hz);
        itv->cxhdl.width = 720;
        itv->cxhdl.height = itv->is_50hz ? 576 : 480;
        itv->vbi.count = itv->is_50hz ? 18 : 12;
        itv->vbi.start[0] = itv->is_50hz ? 6 : 10;
        itv->vbi.start[1] = itv->is_50hz ? 318 : 273;

        if (itv->hw_flags & IVTV_HW_CX25840)
                itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284;

        /* Tuner */
        ivtv_call_all(itv, core, s_std, itv->std);
}

/* Apply a new video standard to the decoder/display side. */
void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
{
        struct yuv_playback_info *yi = &itv->yuv_info;
        DEFINE_WAIT(wait);
        int f;

        /* set display standard */
        itv->std_out = *std;
        itv->is_out_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
        itv->is_out_50hz = !itv->is_out_60hz;
        ivtv_call_all(itv, video, s_std_output, itv->std_out);

        /*
         * The next firmware call is time sensitive. Time it to
         * avoid risk of a hard lock, by trying to ensure the call
         * happens within the first 100 lines of the top field.
         * Make 4 attempts to sync to the decoder before giving up.
         */
        for (f = 0; f < 4; f++) {
                prepare_to_wait(&itv->vsync_waitq, &wait,
                                TASK_UNINTERRUPTIBLE);
                if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
                        break;
                schedule_timeout(msecs_to_jiffies(25));
        }
        finish_wait(&itv->vsync_waitq, &wait);

        if (f == 4)
                IVTV_WARN("Mode change failed to sync to decoder\n");

        ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
        itv->main_rect.left = 0;
        itv->main_rect.top = 0;
        itv->main_rect.width = 720;
        itv->main_rect.height = itv->is_out_50hz ?
576 : 480;
        ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
                720, itv->main_rect.height, 0, 0);
        yi->main_rect = itv->main_rect;
        if (!itv->osd_info) {
                yi->osd_full_w = 720;
                yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
        }
}

/* VIDIOC_S_STD: switch video standard on both encoder and decoder sides. */
int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
{
        struct ivtv *itv = fh2id(fh)->itv;

        if ((*std & V4L2_STD_ALL) == 0)
                return -EINVAL;

        if (*std == itv->std)
                return 0;

        if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
            atomic_read(&itv->capturing) > 0 ||
            atomic_read(&itv->decoding) > 0) {
                /* Switching standard would mess with already running
                   streams, prevent that by returning EBUSY. */
                return -EBUSY;
        }

        IVTV_DEBUG_INFO("Switching standard to %llx.\n",
                (unsigned long long)itv->std);

        ivtv_s_std_enc(itv, std);

        if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
                ivtv_s_std_dec(itv, std);
        return 0;
}

static int ivtv_s_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;

        if (vt->index != 0)
                return -EINVAL;

        ivtv_call_all(itv, tuner, s_tuner, vt);

        return 0;
}

/* VIDIOC_G_TUNER: query the tuner and fill in a driver-specific name. */
static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
        struct ivtv *itv = fh2id(fh)->itv;

        if (vt->index != 0)
                return -EINVAL;

        ivtv_call_all(itv, tuner, g_tuner, vt);

        if (vt->type == V4L2_TUNER_RADIO)
                strlcpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name));
        else
                strlcpy(vt->name, "ivtv TV Tuner", sizeof(vt->name));
        return 0;
}

/* VIDIOC_G_SLICED_VBI_CAP: report per-line sliced VBI service capabilities. */
static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap)
{
        struct ivtv *itv = fh2id(fh)->itv;
        int set = itv->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525;
        int f, l;

        if (cap->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) {
                /* Capture: every valid line can carry any standard service. */
                for (f = 0; f < 2; f++) {
                        for (l = 0; l < 24; l++) {
                                if (valid_service_line(f, l, itv->is_50hz))
                                        cap->service_lines[f][l] = set;
                        }
                }
                return 0;
        }
        if (cap->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
                if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT))
                        return -EINVAL;
                if (itv->is_60hz) {
                        cap->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
                        cap->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
                } else {
                        cap->service_lines[0][23] = V4L2_SLICED_WSS_625;
                        cap->service_lines[0][16] = V4L2_SLICED_VPS;
                }
                return 0;
        }
        return -EINVAL;
}

/* VIDIOC_G_ENC_INDEX: copy queued program index entries to the caller. */
static int ivtv_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *idx)
{
        struct ivtv *itv = fh2id(fh)->itv;
        struct v4l2_enc_idx_entry *e = idx->entry;
        int entries;
        int i;

        /* Number of pending entries in the circular pgm_info buffer. */
        entries = (itv->pgm_info_write_idx + IVTV_MAX_PGM_INDEX -
                        itv->pgm_info_read_idx) % IVTV_MAX_PGM_INDEX;
        if (entries > V4L2_ENC_IDX_ENTRIES)
                entries = V4L2_ENC_IDX_ENTRIES;
        idx->entries = 0;
        for (i = 0; i < entries; i++) {
                *e = itv->pgm_info[(itv->pgm_info_read_idx + i) % IVTV_MAX_PGM_INDEX];
                /* Only keep I/P/B frame entries. */
                if ((e->flags & V4L2_ENC_IDX_FRAME_MASK) <= V4L2_ENC_IDX_FRAME_B) {
                        idx->entries++;
                        e++;
                }
        }
        itv->pgm_info_read_idx =
                (itv->pgm_info_read_idx + idx->entries) % IVTV_MAX_PGM_INDEX;
        return 0;
}

/* VIDIOC_ENCODER_CMD: start/stop/pause/resume the encoder. */
static int ivtv_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;

        switch (enc->cmd) {
        case V4L2_ENC_CMD_START:
                IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_START\n");
                enc->flags = 0;
                return ivtv_start_capture(id);

        case V4L2_ENC_CMD_STOP:
                IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_STOP\n");
                enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END;
                ivtv_stop_capture(id, enc->flags & V4L2_ENC_CMD_STOP_AT_GOP_END);
                return 0;

        case V4L2_ENC_CMD_PAUSE:
                IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_PAUSE\n");
                enc->flags = 0;

                if (!atomic_read(&itv->capturing))
                        return -EPERM;
                if (test_and_set_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags))
                        return 0;

                ivtv_mute(itv);
                ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 0);
                break;

        case V4L2_ENC_CMD_RESUME:
                IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_RESUME\n");
                enc->flags = 0;

                if (!atomic_read(&itv->capturing))
                        return -EPERM;
                if (!test_and_clear_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags))
                        return 0;

                ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 1);
                ivtv_unmute(itv);
                break;
        default:
                IVTV_DEBUG_IOCTL("Unknown cmd %d\n", enc->cmd);
                return -EINVAL;
        }
        return 0;
}

/* VIDIOC_TRY_ENCODER_CMD: validate/normalize the command without acting. */
static int ivtv_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc)
{
        struct ivtv *itv = fh2id(fh)->itv;

        switch (enc->cmd) {
        case V4L2_ENC_CMD_START:
                IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_START\n");
                enc->flags = 0;
                return 0;

        case V4L2_ENC_CMD_STOP:
                IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_STOP\n");
                enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END;
                return 0;

        case V4L2_ENC_CMD_PAUSE:
                IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_PAUSE\n");
                enc->flags = 0;
                return 0;

        case V4L2_ENC_CMD_RESUME:
                IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_RESUME\n");
                enc->flags = 0;
                return 0;
        default:
                IVTV_DEBUG_IOCTL("Unknown cmd %d\n", enc->cmd);
                return -EINVAL;
        }
}

/* VIDIOC_G_FBUF: report the OSD framebuffer format and overlay flags. */
static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
{
        struct ivtv *itv = fh2id(fh)->itv;
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct yuv_playback_info *yi = &itv->yuv_info;
        int pixfmt;
        /* Indexed by the 4-bit firmware pixel-format code. */
        static u32 pixel_format[16] = {
                V4L2_PIX_FMT_PAL8, /* Uses a 256-entry RGB colormap */
                V4L2_PIX_FMT_RGB565,
                V4L2_PIX_FMT_RGB555,
                V4L2_PIX_FMT_RGB444,
                V4L2_PIX_FMT_RGB32,
                0,
                0,
                0,
                V4L2_PIX_FMT_PAL8, /* Uses a 256-entry YUV colormap */
                V4L2_PIX_FMT_YUV565,
                V4L2_PIX_FMT_YUV555,
                V4L2_PIX_FMT_YUV444,
                V4L2_PIX_FMT_YUV32,
                0,
                0,
                0,
        };

        if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
                return -EINVAL;
        if (!itv->osd_video_pbase)
                return -EINVAL;

        fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY | V4L2_FBUF_CAP_CHROMAKEY |
                V4L2_FBUF_CAP_GLOBAL_ALPHA;

        ivtv_vapi_result(itv, data, CX2341X_OSD_GET_STATE, 0);
        data[0] |= (read_reg(0x2a00) >> 7) & 0x40;
        pixfmt = (data[0] >> 3) & 0xf;

        fb->fmt.pixelformat = pixel_format[pixfmt];
        fb->fmt.width =
itv->osd_rect.width;
        fb->fmt.height = itv->osd_rect.height;
        fb->fmt.field = V4L2_FIELD_INTERLACED;
        fb->fmt.bytesperline = fb->fmt.width;
        fb->fmt.colorspace = V4L2_COLORSPACE_SMPTE170M;
        fb->fmt.field = V4L2_FIELD_INTERLACED;
        fb->fmt.priv = 0;
        /* 16-bit formats are 2 bytes/pixel, 32-bit formats 4 bytes/pixel. */
        if (fb->fmt.pixelformat != V4L2_PIX_FMT_PAL8)
                fb->fmt.bytesperline *= 2;
        if (fb->fmt.pixelformat == V4L2_PIX_FMT_RGB32 ||
            fb->fmt.pixelformat == V4L2_PIX_FMT_YUV32)
                fb->fmt.bytesperline *= 2;
        fb->fmt.sizeimage = fb->fmt.bytesperline * fb->fmt.height;
        fb->base = (void *)itv->osd_video_pbase;
        fb->flags = 0;

        if (itv->osd_chroma_key_state)
                fb->flags |= V4L2_FBUF_FLAG_CHROMAKEY;

        if (itv->osd_global_alpha_state)
                fb->flags |= V4L2_FBUF_FLAG_GLOBAL_ALPHA;

        if (yi->track_osd)
                fb->flags |= V4L2_FBUF_FLAG_OVERLAY;

        pixfmt &= 7;

        /* no local alpha for RGB565 or unknown formats */
        if (pixfmt == 1 || pixfmt > 4)
                return 0;

        /* 16-bit formats have inverted local alpha */
        if (pixfmt == 2 || pixfmt == 3)
                fb->capability |= V4L2_FBUF_CAP_LOCAL_INV_ALPHA;
        else
                fb->capability |= V4L2_FBUF_CAP_LOCAL_ALPHA;

        if (itv->osd_local_alpha_state) {
                /* 16-bit formats have inverted local alpha */
                if (pixfmt == 2 || pixfmt == 3)
                        fb->flags |= V4L2_FBUF_FLAG_LOCAL_INV_ALPHA;
                else
                        fb->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
        }

        return 0;
}

/* VIDIOC_S_FBUF: update OSD alpha/chroma-key/tracking state from the flags. */
static int ivtv_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;
        struct yuv_playback_info *yi = &itv->yuv_info;

        if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
                return -EINVAL;
        if (!itv->osd_video_pbase)
                return -EINVAL;

        itv->osd_global_alpha_state = (fb->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) != 0;
        itv->osd_local_alpha_state =
                (fb->flags & (V4L2_FBUF_FLAG_LOCAL_ALPHA|V4L2_FBUF_FLAG_LOCAL_INV_ALPHA)) != 0;
        itv->osd_chroma_key_state = (fb->flags & V4L2_FBUF_FLAG_CHROMAKEY) != 0;
        ivtv_set_osd_alpha(itv);
        yi->track_osd = (fb->flags & V4L2_FBUF_FLAG_OVERLAY) != 0;
        /* Report back the resulting framebuffer state. */
        return ivtv_g_fbuf(file, fh, fb);
}

/* VIDIOC_OVERLAY: switch the OSD on or off. */
static int ivtv_overlay(struct file *file, void *fh, unsigned int on)
{
        struct ivtv_open_id *id = fh2id(fh);
        struct ivtv *itv = id->itv;

        if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
                return -EINVAL;

        ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, on != 0);

        return 0;
}

/* VIDIOC_SUBSCRIBE_EVENT: only vsync, EOS and control events are supported. */
static int ivtv_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
        switch (sub->type) {
        case V4L2_EVENT_VSYNC:
        case V4L2_EVENT_EOS:
        case V4L2_EVENT_CTRL:
                return v4l2_event_subscribe(fh, sub, 0);
        default:
                return -EINVAL;
        }
}

/* VIDIOC_LOG_STATUS: dump driver and subdevice status to the kernel log. */
static int ivtv_log_status(struct file *file, void *fh)
{
        struct ivtv *itv = fh2id(fh)->itv;
        u32 data[CX2341X_MBOX_MAX_DATA];

        int has_output = itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT;
        struct v4l2_input vidin;
        struct v4l2_audio audin;
        int i;

        IVTV_INFO("================= START STATUS CARD #%d =================\n",
                itv->instance);
        IVTV_INFO("Version: %s Card: %s\n", IVTV_VERSION, itv->card_name);
        if (itv->hw_flags & IVTV_HW_TVEEPROM) {
                struct tveeprom tv;

                ivtv_read_eeprom(itv, &tv);
        }
        ivtv_call_all(itv, core, log_status);
        ivtv_get_input(itv, itv->active_input, &vidin);
        ivtv_get_audio_input(itv, itv->audio_input, &audin);
        IVTV_INFO("Video Input: %s\n", vidin.name);
        IVTV_INFO("Audio Input: %s%s\n", audin.name,
                (itv->dualwatch_stereo_mode & ~0x300) == 0x200 ? " (Bilingual)" : "");
        if (has_output) {
                struct v4l2_output vidout;
                struct v4l2_audioout audout;
                int mode = itv->output_mode;
                static const char * const output_modes[5] = {
                        "None",
                        "MPEG Streaming",
                        "YUV Streaming",
                        "YUV Frames",
                        "Passthrough",
                };
                static const char * const audio_modes[5] = {
                        "Stereo",
                        "Left",
                        "Right",
                        "Mono",
                        "Swapped"
                };
                static const char * const alpha_mode[4] = {
                        "None",
                        "Global",
                        "Local",
                        "Global and Local"
                };
                static const char * const pixel_format[16] = {
                        "ARGB Indexed",
                        "RGB 5:6:5",
                        "ARGB 1:5:5:5",
                        "ARGB 1:4:4:4",
                        "ARGB 8:8:8:8",
                        "5",
                        "6",
                        "7",
                        "AYUV Indexed",
                        "YUV 5:6:5",
                        "AYUV 1:5:5:5",
                        "AYUV 1:4:4:4",
                        "AYUV 8:8:8:8",
                        "13",
                        "14",
                        "15",
                };

                ivtv_get_output(itv, itv->active_output, &vidout);
                ivtv_get_audio_output(itv, 0, &audout);
                IVTV_INFO("Video Output: %s\n", vidout.name);
                IVTV_INFO("Audio Output: %s (Stereo/Bilingual: %s/%s)\n", audout.name,
                        audio_modes[itv->audio_stereo_mode],
                        audio_modes[itv->audio_bilingual_mode]);
                if (mode < 0 || mode > OUT_PASSTHROUGH)
                        mode = OUT_NONE;
                IVTV_INFO("Output Mode: %s\n", output_modes[mode]);
                ivtv_vapi_result(itv, data, CX2341X_OSD_GET_STATE, 0);
                data[0] |= (read_reg(0x2a00) >> 7) & 0x40;
                IVTV_INFO("Overlay: %s, Alpha: %s, Pixel Format: %s\n",
                        data[0] & 1 ? "On" : "Off",
                        alpha_mode[(data[0] >> 1) & 0x3],
                        pixel_format[(data[0] >> 3) & 0xf]);
        }
        IVTV_INFO("Tuner: %s\n",
                test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ?
"Radio" : "TV"); v4l2_ctrl_handler_log_status(&itv->cxhdl.hdl, itv->v4l2_dev.name); IVTV_INFO("Status flags: 0x%08lx\n", itv->i_flags); for (i = 0; i < IVTV_MAX_STREAMS; i++) { struct ivtv_stream *s = &itv->streams[i]; if (s->vdev == NULL || s->buffers == 0) continue; IVTV_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n", s->name, s->s_flags, (s->buffers - s->q_free.buffers) * 100 / s->buffers, (s->buffers * s->buf_size) / 1024, s->buffers); } IVTV_INFO("Read MPG/VBI: %lld/%lld bytes\n", (long long)itv->mpg_data_received, (long long)itv->vbi_data_inserted); IVTV_INFO("================== END STATUS CARD #%d ==================\n", itv->instance); return 0; } static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg) { struct ivtv_open_id *id = fh2id(filp->private_data); struct ivtv *itv = id->itv; int nonblocking = filp->f_flags & O_NONBLOCK; struct ivtv_stream *s = &itv->streams[id->type]; unsigned long iarg = (unsigned long)arg; switch (cmd) { case IVTV_IOC_DMA_FRAME: { struct ivtv_dma_frame *args = arg; IVTV_DEBUG_IOCTL("IVTV_IOC_DMA_FRAME\n"); if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; if (args->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; if (itv->output_mode == OUT_UDMA_YUV && args->y_source == NULL) return 0; if (ivtv_start_decoding(id, id->type)) { return -EBUSY; } if (ivtv_set_output_mode(itv, OUT_UDMA_YUV) != OUT_UDMA_YUV) { ivtv_release_stream(s); return -EBUSY; } /* Mark that this file handle started the UDMA_YUV mode */ id->yuv_frames = 1; if (args->y_source == NULL) return 0; return ivtv_yuv_prep_frame(itv, args); } case VIDEO_GET_PTS: { u32 data[CX2341X_MBOX_MAX_DATA]; u64 *pts = arg; IVTV_DEBUG_IOCTL("VIDEO_GET_PTS\n"); if (s->type < IVTV_DEC_STREAM_TYPE_MPG) { *pts = s->dma_pts; break; } if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; if (test_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags)) { *pts = (u64) ((u64)itv->last_dec_timing[2] << 32) | 
(u64)itv->last_dec_timing[1]; break; } *pts = 0; if (atomic_read(&itv->decoding)) { if (ivtv_api(itv, CX2341X_DEC_GET_TIMING_INFO, 5, data)) { IVTV_DEBUG_WARN("GET_TIMING: couldn't read clock\n"); return -EIO; } memcpy(itv->last_dec_timing, data, sizeof(itv->last_dec_timing)); set_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags); *pts = (u64) ((u64) data[2] << 32) | (u64) data[1]; /*timing->scr = (u64) (((u64) data[4] << 32) | (u64) (data[3]));*/ } break; } case VIDEO_GET_FRAME_COUNT: { u32 data[CX2341X_MBOX_MAX_DATA]; u64 *frame = arg; IVTV_DEBUG_IOCTL("VIDEO_GET_FRAME_COUNT\n"); if (s->type < IVTV_DEC_STREAM_TYPE_MPG) { *frame = 0; break; } if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; if (test_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags)) { *frame = itv->last_dec_timing[0]; break; } *frame = 0; if (atomic_read(&itv->decoding)) { if (ivtv_api(itv, CX2341X_DEC_GET_TIMING_INFO, 5, data)) { IVTV_DEBUG_WARN("GET_TIMING: couldn't read clock\n"); return -EIO; } memcpy(itv->last_dec_timing, data, sizeof(itv->last_dec_timing)); set_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags); *frame = data[0]; } break; } case VIDEO_PLAY: { struct video_command vc; IVTV_DEBUG_IOCTL("VIDEO_PLAY\n"); memset(&vc, 0, sizeof(vc)); vc.cmd = VIDEO_CMD_PLAY; return ivtv_video_command(itv, id, &vc, 0); } case VIDEO_STOP: { struct video_command vc; IVTV_DEBUG_IOCTL("VIDEO_STOP\n"); memset(&vc, 0, sizeof(vc)); vc.cmd = VIDEO_CMD_STOP; vc.flags = VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY; return ivtv_video_command(itv, id, &vc, 0); } case VIDEO_FREEZE: { struct video_command vc; IVTV_DEBUG_IOCTL("VIDEO_FREEZE\n"); memset(&vc, 0, sizeof(vc)); vc.cmd = VIDEO_CMD_FREEZE; return ivtv_video_command(itv, id, &vc, 0); } case VIDEO_CONTINUE: { struct video_command vc; IVTV_DEBUG_IOCTL("VIDEO_CONTINUE\n"); memset(&vc, 0, sizeof(vc)); vc.cmd = VIDEO_CMD_CONTINUE; return ivtv_video_command(itv, id, &vc, 0); } case VIDEO_COMMAND: case VIDEO_TRY_COMMAND: { struct video_command *vc = 
arg; int try = (cmd == VIDEO_TRY_COMMAND); if (try) IVTV_DEBUG_IOCTL("VIDEO_TRY_COMMAND %d\n", vc->cmd); else IVTV_DEBUG_IOCTL("VIDEO_COMMAND %d\n", vc->cmd); return ivtv_video_command(itv, id, vc, try); } case VIDEO_GET_EVENT: { struct video_event *ev = arg; DEFINE_WAIT(wait); IVTV_DEBUG_IOCTL("VIDEO_GET_EVENT\n"); if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; memset(ev, 0, sizeof(*ev)); set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags); while (1) { if (test_and_clear_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags)) ev->type = VIDEO_EVENT_DECODER_STOPPED; else if (test_and_clear_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags)) { ev->type = VIDEO_EVENT_VSYNC; ev->u.vsync_field = test_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags) ? VIDEO_VSYNC_FIELD_ODD : VIDEO_VSYNC_FIELD_EVEN; if (itv->output_mode == OUT_UDMA_YUV && (itv->yuv_info.lace_mode & IVTV_YUV_MODE_MASK) == IVTV_YUV_MODE_PROGRESSIVE) { ev->u.vsync_field = VIDEO_VSYNC_FIELD_PROGRESSIVE; } } if (ev->type) return 0; if (nonblocking) return -EAGAIN; /* Wait for event. Note that serialize_lock is locked, so to allow other processes to access the driver while we are waiting unlock first and later lock again. 
*/ mutex_unlock(&itv->serialize_lock); prepare_to_wait(&itv->event_waitq, &wait, TASK_INTERRUPTIBLE); if (!test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags) && !test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags)) schedule(); finish_wait(&itv->event_waitq, &wait); mutex_lock(&itv->serialize_lock); if (signal_pending(current)) { /* return if a signal was received */ IVTV_DEBUG_INFO("User stopped wait for event\n"); return -EINTR; } } break; } case VIDEO_SELECT_SOURCE: IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n"); if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; return ivtv_passthrough_mode(itv, iarg == VIDEO_SOURCE_DEMUX); case AUDIO_SET_MUTE: IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n"); itv->speed_mute_audio = iarg; return 0; case AUDIO_CHANNEL_SELECT: IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n"); if (iarg > AUDIO_STEREO_SWAPPED) return -EINVAL; itv->audio_stereo_mode = iarg; ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode); return 0; case AUDIO_BILINGUAL_CHANNEL_SELECT: IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n"); if (iarg > AUDIO_STEREO_SWAPPED) return -EINVAL; itv->audio_bilingual_mode = iarg; ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode); return 0; default: return -EINVAL; } return 0; } static long ivtv_default(struct file *file, void *fh, bool valid_prio, int cmd, void *arg) { struct ivtv *itv = fh2id(fh)->itv; if (!valid_prio) { switch (cmd) { case VIDEO_PLAY: case VIDEO_STOP: case VIDEO_FREEZE: case VIDEO_CONTINUE: case VIDEO_COMMAND: case VIDEO_SELECT_SOURCE: case AUDIO_SET_MUTE: case AUDIO_CHANNEL_SELECT: case AUDIO_BILINGUAL_CHANNEL_SELECT: return -EBUSY; } } switch (cmd) { case VIDIOC_INT_RESET: { u32 val = *(u32 *)arg; if ((val == 0 && itv->options.newi2c) || (val & 0x01)) ivtv_reset_ir_gpio(itv); if (val & 0x02) v4l2_subdev_call(itv->sd_video, core, reset, 0); break; } case IVTV_IOC_DMA_FRAME: case VIDEO_GET_PTS: case VIDEO_GET_FRAME_COUNT: case 
VIDEO_GET_EVENT: case VIDEO_PLAY: case VIDEO_STOP: case VIDEO_FREEZE: case VIDEO_CONTINUE: case VIDEO_COMMAND: case VIDEO_TRY_COMMAND: case VIDEO_SELECT_SOURCE: case AUDIO_SET_MUTE: case AUDIO_CHANNEL_SELECT: case AUDIO_BILINGUAL_CHANNEL_SELECT: return ivtv_decoder_ioctls(file, cmd, (void *)arg); default: return -EINVAL; } return 0; } static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp, unsigned int cmd, unsigned long arg) { struct video_device *vfd = video_devdata(filp); long ret; if (ivtv_debug & IVTV_DBGFLG_IOCTL) vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG; ret = video_ioctl2(filp, cmd, arg); vfd->debug = 0; return ret; } long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ivtv_open_id *id = fh2id(filp->private_data); struct ivtv *itv = id->itv; long res; /* DQEVENT can block, so this should not run with the serialize lock */ if (cmd == VIDIOC_DQEVENT) return ivtv_serialized_ioctl(itv, filp, cmd, arg); mutex_lock(&itv->serialize_lock); res = ivtv_serialized_ioctl(itv, filp, cmd, arg); mutex_unlock(&itv->serialize_lock); return res; } static const struct v4l2_ioctl_ops ivtv_ioctl_ops = { .vidioc_querycap = ivtv_querycap, .vidioc_s_audio = ivtv_s_audio, .vidioc_g_audio = ivtv_g_audio, .vidioc_enumaudio = ivtv_enumaudio, .vidioc_s_audout = ivtv_s_audout, .vidioc_g_audout = ivtv_g_audout, .vidioc_enum_input = ivtv_enum_input, .vidioc_enum_output = ivtv_enum_output, .vidioc_enumaudout = ivtv_enumaudout, .vidioc_cropcap = ivtv_cropcap, .vidioc_s_crop = ivtv_s_crop, .vidioc_g_crop = ivtv_g_crop, .vidioc_g_input = ivtv_g_input, .vidioc_s_input = ivtv_s_input, .vidioc_g_output = ivtv_g_output, .vidioc_s_output = ivtv_s_output, .vidioc_g_frequency = ivtv_g_frequency, .vidioc_s_frequency = ivtv_s_frequency, .vidioc_s_tuner = ivtv_s_tuner, .vidioc_g_tuner = ivtv_g_tuner, .vidioc_g_enc_index = ivtv_g_enc_index, .vidioc_g_fbuf = ivtv_g_fbuf, .vidioc_s_fbuf = ivtv_s_fbuf, .vidioc_g_std = ivtv_g_std, .vidioc_s_std = 
ivtv_s_std, .vidioc_overlay = ivtv_overlay, .vidioc_log_status = ivtv_log_status, .vidioc_enum_fmt_vid_cap = ivtv_enum_fmt_vid_cap, .vidioc_encoder_cmd = ivtv_encoder_cmd, .vidioc_try_encoder_cmd = ivtv_try_encoder_cmd, .vidioc_enum_fmt_vid_out = ivtv_enum_fmt_vid_out, .vidioc_g_fmt_vid_cap = ivtv_g_fmt_vid_cap, .vidioc_g_fmt_vbi_cap = ivtv_g_fmt_vbi_cap, .vidioc_g_fmt_sliced_vbi_cap = ivtv_g_fmt_sliced_vbi_cap, .vidioc_g_fmt_vid_out = ivtv_g_fmt_vid_out, .vidioc_g_fmt_vid_out_overlay = ivtv_g_fmt_vid_out_overlay, .vidioc_g_fmt_sliced_vbi_out = ivtv_g_fmt_sliced_vbi_out, .vidioc_s_fmt_vid_cap = ivtv_s_fmt_vid_cap, .vidioc_s_fmt_vbi_cap = ivtv_s_fmt_vbi_cap, .vidioc_s_fmt_sliced_vbi_cap = ivtv_s_fmt_sliced_vbi_cap, .vidioc_s_fmt_vid_out = ivtv_s_fmt_vid_out, .vidioc_s_fmt_vid_out_overlay = ivtv_s_fmt_vid_out_overlay, .vidioc_s_fmt_sliced_vbi_out = ivtv_s_fmt_sliced_vbi_out, .vidioc_try_fmt_vid_cap = ivtv_try_fmt_vid_cap, .vidioc_try_fmt_vbi_cap = ivtv_try_fmt_vbi_cap, .vidioc_try_fmt_sliced_vbi_cap = ivtv_try_fmt_sliced_vbi_cap, .vidioc_try_fmt_vid_out = ivtv_try_fmt_vid_out, .vidioc_try_fmt_vid_out_overlay = ivtv_try_fmt_vid_out_overlay, .vidioc_try_fmt_sliced_vbi_out = ivtv_try_fmt_sliced_vbi_out, .vidioc_g_sliced_vbi_cap = ivtv_g_sliced_vbi_cap, .vidioc_g_chip_ident = ivtv_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = ivtv_g_register, .vidioc_s_register = ivtv_s_register, #endif .vidioc_default = ivtv_default, .vidioc_subscribe_event = ivtv_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; void ivtv_set_funcs(struct video_device *vdev) { vdev->ioctl_ops = &ivtv_ioctl_ops; }
gpl-2.0
RenderKernels/android_kernel_asus_grouper
arch/x86/platform/olpc/olpc.c
650
9261
/* * Support for the OLPC DCON and OLPC EC access * * Copyright © 2006 Advanced Micro Devices, Inc. * Copyright © 2007-2008 Andres Salomon <dilinger@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/string.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/syscore_ops.h> #include <asm/geode.h> #include <asm/setup.h> #include <asm/olpc.h> #include <asm/olpc_ofw.h> struct olpc_platform_t olpc_platform_info; EXPORT_SYMBOL_GPL(olpc_platform_info); static DEFINE_SPINLOCK(ec_lock); /* EC event mask to be applied during suspend (defining wakeup sources). */ static u16 ec_wakeup_mask; /* what the timeout *should* be (in ms) */ #define EC_BASE_TIMEOUT 20 /* the timeout that bugs in the EC might force us to actually use */ static int ec_timeout = EC_BASE_TIMEOUT; static int __init olpc_ec_timeout_set(char *str) { if (get_option(&str, &ec_timeout) != 1) { ec_timeout = EC_BASE_TIMEOUT; printk(KERN_ERR "olpc-ec: invalid argument to " "'olpc_ec_timeout=', ignoring!\n"); } printk(KERN_DEBUG "olpc-ec: using %d ms delay for EC commands.\n", ec_timeout); return 1; } __setup("olpc_ec_timeout=", olpc_ec_timeout_set); /* * These {i,o}bf_status functions return whether the buffers are full or not. 
*/ static inline unsigned int ibf_status(unsigned int port) { return !!(inb(port) & 0x02); } static inline unsigned int obf_status(unsigned int port) { return inb(port) & 0x01; } #define wait_on_ibf(p, d) __wait_on_ibf(__LINE__, (p), (d)) static int __wait_on_ibf(unsigned int line, unsigned int port, int desired) { unsigned int timeo; int state = ibf_status(port); for (timeo = ec_timeout; state != desired && timeo; timeo--) { mdelay(1); state = ibf_status(port); } if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) && timeo < (ec_timeout - EC_BASE_TIMEOUT)) { printk(KERN_WARNING "olpc-ec: %d: waited %u ms for IBF!\n", line, ec_timeout - timeo); } return !(state == desired); } #define wait_on_obf(p, d) __wait_on_obf(__LINE__, (p), (d)) static int __wait_on_obf(unsigned int line, unsigned int port, int desired) { unsigned int timeo; int state = obf_status(port); for (timeo = ec_timeout; state != desired && timeo; timeo--) { mdelay(1); state = obf_status(port); } if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) && timeo < (ec_timeout - EC_BASE_TIMEOUT)) { printk(KERN_WARNING "olpc-ec: %d: waited %u ms for OBF!\n", line, ec_timeout - timeo); } return !(state == desired); } /* * This allows the kernel to run Embedded Controller commands. The EC is * documented at <http://wiki.laptop.org/go/Embedded_controller>, and the * available EC commands are here: * <http://wiki.laptop.org/go/Ec_specification>. Unfortunately, while * OpenFirmware's source is available, the EC's is not. 
*/ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, unsigned char *outbuf, size_t outlen) { unsigned long flags; int ret = -EIO; int i; int restarts = 0; spin_lock_irqsave(&ec_lock, flags); /* Clear OBF */ for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++) inb(0x68); if (i == 10) { printk(KERN_ERR "olpc-ec: timeout while attempting to " "clear OBF flag!\n"); goto err; } if (wait_on_ibf(0x6c, 0)) { printk(KERN_ERR "olpc-ec: timeout waiting for EC to " "quiesce!\n"); goto err; } restart: /* * Note that if we time out during any IBF checks, that's a failure; * we have to return. There's no way for the kernel to clear that. * * If we time out during an OBF check, we can restart the command; * reissuing it will clear the OBF flag, and we should be alright. * The OBF flag will sometimes misbehave due to what we believe * is a hardware quirk.. */ pr_devel("olpc-ec: running cmd 0x%x\n", cmd); outb(cmd, 0x6c); if (wait_on_ibf(0x6c, 0)) { printk(KERN_ERR "olpc-ec: timeout waiting for EC to read " "command!\n"); goto err; } if (inbuf && inlen) { /* write data to EC */ for (i = 0; i < inlen; i++) { pr_devel("olpc-ec: sending cmd arg 0x%x\n", inbuf[i]); outb(inbuf[i], 0x68); if (wait_on_ibf(0x6c, 0)) { printk(KERN_ERR "olpc-ec: timeout waiting for" " EC accept data!\n"); goto err; } } } if (outbuf && outlen) { /* read data from EC */ for (i = 0; i < outlen; i++) { if (wait_on_obf(0x6c, 1)) { printk(KERN_ERR "olpc-ec: timeout waiting for" " EC to provide data!\n"); if (restarts++ < 10) goto restart; goto err; } outbuf[i] = inb(0x68); pr_devel("olpc-ec: received 0x%x\n", outbuf[i]); } } ret = 0; err: spin_unlock_irqrestore(&ec_lock, flags); return ret; } EXPORT_SYMBOL_GPL(olpc_ec_cmd); void olpc_ec_wakeup_set(u16 value) { ec_wakeup_mask |= value; } EXPORT_SYMBOL_GPL(olpc_ec_wakeup_set); void olpc_ec_wakeup_clear(u16 value) { ec_wakeup_mask &= ~value; } EXPORT_SYMBOL_GPL(olpc_ec_wakeup_clear); /* * Returns true if the compile and runtime configurations allow 
for EC events * to wake the system. */ bool olpc_ec_wakeup_available(void) { if (!machine_is_olpc()) return false; /* * XO-1 EC wakeups are available when olpc-xo1-sci driver is * compiled in */ #ifdef CONFIG_OLPC_XO1_SCI if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) /* XO-1 */ return true; #endif /* * XO-1.5 EC wakeups are available when olpc-xo15-sci driver is * compiled in */ #ifdef CONFIG_OLPC_XO15_SCI if (olpc_platform_info.boardrev >= olpc_board_pre(0xd0)) /* XO-1.5 */ return true; #endif return false; } EXPORT_SYMBOL_GPL(olpc_ec_wakeup_available); int olpc_ec_mask_write(u16 bits) { if (olpc_platform_info.flags & OLPC_F_EC_WIDE_SCI) { __be16 ec_word = cpu_to_be16(bits); return olpc_ec_cmd(EC_WRITE_EXT_SCI_MASK, (void *) &ec_word, 2, NULL, 0); } else { unsigned char ec_byte = bits & 0xff; return olpc_ec_cmd(EC_WRITE_SCI_MASK, &ec_byte, 1, NULL, 0); } } EXPORT_SYMBOL_GPL(olpc_ec_mask_write); int olpc_ec_sci_query(u16 *sci_value) { int ret; if (olpc_platform_info.flags & OLPC_F_EC_WIDE_SCI) { __be16 ec_word; ret = olpc_ec_cmd(EC_EXT_SCI_QUERY, NULL, 0, (void *) &ec_word, 2); if (ret == 0) *sci_value = be16_to_cpu(ec_word); } else { unsigned char ec_byte; ret = olpc_ec_cmd(EC_SCI_QUERY, NULL, 0, &ec_byte, 1); if (ret == 0) *sci_value = ec_byte; } return ret; } EXPORT_SYMBOL_GPL(olpc_ec_sci_query); static int olpc_ec_suspend(void) { return olpc_ec_mask_write(ec_wakeup_mask); } static bool __init check_ofw_architecture(struct device_node *root) { const char *olpc_arch; int propsize; olpc_arch = of_get_property(root, "architecture", &propsize); return propsize == 5 && strncmp("OLPC", olpc_arch, 5) == 0; } static u32 __init get_board_revision(struct device_node *root) { int propsize; const __be32 *rev; rev = of_get_property(root, "board-revision-int", &propsize); if (propsize != 4) return 0; return be32_to_cpu(*rev); } static bool __init platform_detect(void) { struct device_node *root = of_find_node_by_path("/"); bool success; if (!root) return false; 
success = check_ofw_architecture(root); if (success) { olpc_platform_info.boardrev = get_board_revision(root); olpc_platform_info.flags |= OLPC_F_PRESENT; } of_node_put(root); return success; } static int __init add_xo1_platform_devices(void) { struct platform_device *pdev; pdev = platform_device_register_simple("xo1-rfkill", -1, NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); pdev = platform_device_register_simple("olpc-xo1", -1, NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); return 0; } static struct syscore_ops olpc_syscore_ops = { .suspend = olpc_ec_suspend, }; static int __init olpc_init(void) { int r = 0; if (!olpc_ofw_present() || !platform_detect()) return 0; spin_lock_init(&ec_lock); /* assume B1 and above models always have a DCON */ if (olpc_board_at_least(olpc_board(0xb1))) olpc_platform_info.flags |= OLPC_F_DCON; /* get the EC revision */ olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, (unsigned char *) &olpc_platform_info.ecver, 1); #ifdef CONFIG_PCI_OLPC /* If the VSA exists let it emulate PCI, if not emulate in kernel. * XO-1 only. */ if (olpc_platform_info.boardrev < olpc_board_pre(0xd0) && !cs5535_has_vsa2()) x86_init.pci.arch_init = pci_olpc_init; #endif /* EC version 0x5f adds support for wide SCI mask */ if (olpc_platform_info.ecver >= 0x5f) olpc_platform_info.flags |= OLPC_F_EC_WIDE_SCI; printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n", ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "", olpc_platform_info.boardrev >> 4, olpc_platform_info.ecver); if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */ r = add_xo1_platform_devices(); if (r) return r; } register_syscore_ops(&olpc_syscore_ops); return 0; } postcore_initcall(olpc_init);
gpl-2.0
Fevax/android_kernel_samsung_universal8890-N
arch/score/mm/fault.c
1162
6021
/* * arch/score/mm/fault.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Lennox Wu <lennox.wu@sunplusct.com> * Chen Liqin <liqin.chen@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/module.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. */ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, unsigned long address) { struct vm_area_struct *vma = NULL; struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; const int field = sizeof(unsigned long) * 2; unsigned long flags = 0; siginfo_t info; int fault; info.si_code = SEGV_MAPERR; /* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. * * NOTE! We MUST NOT take any locks for this case. We may * be in an interrupt or a critical region, and should * only copy the information from the master page table, * nothing more. 
*/ if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) goto vmalloc_fault; #ifdef MODULE_START if (unlikely(address >= MODULE_START && address < MODULE_END)) goto vmalloc_fault; #endif /* * If we're in an interrupt or have no user * context, we must not take the fault.. */ if (in_atomic() || !mm) goto bad_area_nosemaphore; if (user_mode(regs)) flags |= FAULT_FLAG_USER; down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ good_area: info.si_code = SEGV_ACCERR; if (write) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; flags |= FAULT_FLAG_WRITE; } else { if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) goto bad_area; } /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(mm, vma, address, flags); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGSEGV) goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; up_read(&mm->mmap_sem); return; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ bad_area: up_read(&mm->mmap_sem); bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { tsk->thread.cp0_badvaddr = address; tsk->thread.error_code = write; info.si_signo = SIGSEGV; info.si_errno = 0; /* info.si_code has been set above */ info.si_addr = (void __user *) address; force_sig_info(SIGSEGV, &info, tsk); return; } no_context: /* Are we prepared to handle this kernel fault? 
*/ if (fixup_exception(regs)) { current->thread.cp0_baduaddr = address; return; } /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ bust_spinlocks(1); printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at " "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n", 0, field, address, field, regs->cp0_epc, field, regs->regs[3]); die("Oops", regs); /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: up_read(&mm->mmap_sem); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: up_read(&mm->mmap_sem); /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) goto no_context; else /* * Send a sigbus, regardless of whether we were in kernel * or user mode. */ tsk->thread.cp0_badvaddr = address; info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; info.si_addr = (void __user *) address; force_sig_info(SIGBUS, &info, tsk); return; vmalloc_fault: { /* * Synchronize this task's top level page-table * with the 'reference' page table. * * Do _not_ use "tsk" here. We might be inside * an interrupt in the middle of a task switch.. */ int offset = __pgd_offset(address); pgd_t *pgd, *pgd_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; pte_t *pte_k; pgd = (pgd_t *) pgd_current + offset; pgd_k = init_mm.pgd + offset; if (!pgd_present(*pgd_k)) goto no_context; set_pgd(pgd, *pgd_k); pud = pud_offset(pgd, address); pud_k = pud_offset(pgd_k, address); if (!pud_present(*pud_k)) goto no_context; pmd = pmd_offset(pud, address); pmd_k = pmd_offset(pud_k, address); if (!pmd_present(*pmd_k)) goto no_context; set_pmd(pmd, *pmd_k); pte_k = pte_offset_kernel(pmd_k, address); if (!pte_present(*pte_k)) goto no_context; return; } }
gpl-2.0
jawad6233/MT6795.kernel
alps/kernel-3.10/arch/mips/kernel/asm-offsets.c
1930
13458
/* * offset.c: Calculate pt_regs and task_struct offsets. * * Copyright (C) 1996 David S. Miller * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000 MIPS Technologies, Inc. */ #include <linux/compat.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/kbuild.h> #include <linux/suspend.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <linux/kvm_host.h> void output_ptreg_defines(void) { COMMENT("MIPS pt_regs offsets."); OFFSET(PT_R0, pt_regs, regs[0]); OFFSET(PT_R1, pt_regs, regs[1]); OFFSET(PT_R2, pt_regs, regs[2]); OFFSET(PT_R3, pt_regs, regs[3]); OFFSET(PT_R4, pt_regs, regs[4]); OFFSET(PT_R5, pt_regs, regs[5]); OFFSET(PT_R6, pt_regs, regs[6]); OFFSET(PT_R7, pt_regs, regs[7]); OFFSET(PT_R8, pt_regs, regs[8]); OFFSET(PT_R9, pt_regs, regs[9]); OFFSET(PT_R10, pt_regs, regs[10]); OFFSET(PT_R11, pt_regs, regs[11]); OFFSET(PT_R12, pt_regs, regs[12]); OFFSET(PT_R13, pt_regs, regs[13]); OFFSET(PT_R14, pt_regs, regs[14]); OFFSET(PT_R15, pt_regs, regs[15]); OFFSET(PT_R16, pt_regs, regs[16]); OFFSET(PT_R17, pt_regs, regs[17]); OFFSET(PT_R18, pt_regs, regs[18]); OFFSET(PT_R19, pt_regs, regs[19]); OFFSET(PT_R20, pt_regs, regs[20]); OFFSET(PT_R21, pt_regs, regs[21]); OFFSET(PT_R22, pt_regs, regs[22]); OFFSET(PT_R23, pt_regs, regs[23]); OFFSET(PT_R24, pt_regs, regs[24]); OFFSET(PT_R25, pt_regs, regs[25]); OFFSET(PT_R26, pt_regs, regs[26]); OFFSET(PT_R27, pt_regs, regs[27]); OFFSET(PT_R28, pt_regs, regs[28]); OFFSET(PT_R29, pt_regs, regs[29]); OFFSET(PT_R30, pt_regs, regs[30]); OFFSET(PT_R31, pt_regs, regs[31]); OFFSET(PT_LO, pt_regs, lo); OFFSET(PT_HI, pt_regs, hi); #ifdef CONFIG_CPU_HAS_SMARTMIPS OFFSET(PT_ACX, pt_regs, acx); #endif OFFSET(PT_EPC, pt_regs, cp0_epc); OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); OFFSET(PT_STATUS, pt_regs, cp0_status); OFFSET(PT_CAUSE, 
pt_regs, cp0_cause); #ifdef CONFIG_MIPS_MT_SMTC OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus); #endif /* CONFIG_MIPS_MT_SMTC */ #ifdef CONFIG_CPU_CAVIUM_OCTEON OFFSET(PT_MPL, pt_regs, mpl); OFFSET(PT_MTP, pt_regs, mtp); #endif /* CONFIG_CPU_CAVIUM_OCTEON */ DEFINE(PT_SIZE, sizeof(struct pt_regs)); BLANK(); } void output_task_defines(void) { COMMENT("MIPS task_struct offsets."); OFFSET(TASK_STATE, task_struct, state); OFFSET(TASK_THREAD_INFO, task_struct, stack); OFFSET(TASK_FLAGS, task_struct, flags); OFFSET(TASK_MM, task_struct, mm); OFFSET(TASK_PID, task_struct, pid); DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); BLANK(); } void output_thread_info_defines(void) { COMMENT("MIPS thread_info offsets."); OFFSET(TI_TASK, thread_info, task); OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain); OFFSET(TI_FLAGS, thread_info, flags); OFFSET(TI_TP_VALUE, thread_info, tp_value); OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); OFFSET(TI_REGS, thread_info, regs); DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); BLANK(); } void output_thread_defines(void) { COMMENT("MIPS specific thread_struct offsets."); OFFSET(THREAD_REG16, task_struct, thread.reg16); OFFSET(THREAD_REG17, task_struct, thread.reg17); OFFSET(THREAD_REG18, task_struct, thread.reg18); OFFSET(THREAD_REG19, task_struct, thread.reg19); OFFSET(THREAD_REG20, task_struct, thread.reg20); OFFSET(THREAD_REG21, task_struct, thread.reg21); OFFSET(THREAD_REG22, task_struct, thread.reg22); OFFSET(THREAD_REG23, task_struct, thread.reg23); OFFSET(THREAD_REG29, task_struct, thread.reg29); OFFSET(THREAD_REG30, task_struct, thread.reg30); OFFSET(THREAD_REG31, task_struct, thread.reg31); OFFSET(THREAD_STATUS, task_struct, thread.cp0_status); OFFSET(THREAD_FPU, task_struct, thread.fpu); OFFSET(THREAD_BVADDR, task_struct, \ thread.cp0_badvaddr); OFFSET(THREAD_BUADDR, 
task_struct, \ thread.cp0_baduaddr); OFFSET(THREAD_ECODE, task_struct, \ thread.error_code); BLANK(); } void output_thread_fpu_defines(void) { OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]); OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]); OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]); OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]); OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]); OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]); OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]); OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]); OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]); OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]); OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]); OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]); OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]); OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]); OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]); OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]); OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]); OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]); OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]); OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]); OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]); OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]); OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]); OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]); OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]); OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]); OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]); OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]); OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]); OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]); OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); BLANK(); } void output_mm_defines(void) { COMMENT("Size of 
struct page"); DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page)); BLANK(); COMMENT("Linux mm_struct offsets."); OFFSET(MM_USERS, mm_struct, mm_users); OFFSET(MM_PGD, mm_struct, pgd); OFFSET(MM_CONTEXT, mm_struct, context); BLANK(); DEFINE(_PGD_T_SIZE, sizeof(pgd_t)); DEFINE(_PMD_T_SIZE, sizeof(pmd_t)); DEFINE(_PTE_T_SIZE, sizeof(pte_t)); BLANK(); DEFINE(_PGD_T_LOG2, PGD_T_LOG2); #ifndef __PAGETABLE_PMD_FOLDED DEFINE(_PMD_T_LOG2, PMD_T_LOG2); #endif DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); DEFINE(_PGD_ORDER, PGD_ORDER); #ifndef __PAGETABLE_PMD_FOLDED DEFINE(_PMD_ORDER, PMD_ORDER); #endif DEFINE(_PTE_ORDER, PTE_ORDER); BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); BLANK(); DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD); DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD); DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE); BLANK(); DEFINE(_PAGE_SHIFT, PAGE_SHIFT); DEFINE(_PAGE_SIZE, PAGE_SIZE); BLANK(); } #ifdef CONFIG_32BIT void output_sc_defines(void) { COMMENT("Linux sigcontext offsets."); OFFSET(SC_REGS, sigcontext, sc_regs); OFFSET(SC_FPREGS, sigcontext, sc_fpregs); OFFSET(SC_ACX, sigcontext, sc_acx); OFFSET(SC_MDHI, sigcontext, sc_mdhi); OFFSET(SC_MDLO, sigcontext, sc_mdlo); OFFSET(SC_PC, sigcontext, sc_pc); OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir); OFFSET(SC_HI1, sigcontext, sc_hi1); OFFSET(SC_LO1, sigcontext, sc_lo1); OFFSET(SC_HI2, sigcontext, sc_hi2); OFFSET(SC_LO2, sigcontext, sc_lo2); OFFSET(SC_HI3, sigcontext, sc_hi3); OFFSET(SC_LO3, sigcontext, sc_lo3); BLANK(); } #endif #ifdef CONFIG_64BIT void output_sc_defines(void) { COMMENT("Linux sigcontext offsets."); OFFSET(SC_REGS, sigcontext, sc_regs); OFFSET(SC_FPREGS, sigcontext, sc_fpregs); OFFSET(SC_MDHI, sigcontext, sc_mdhi); OFFSET(SC_MDLO, sigcontext, sc_mdlo); OFFSET(SC_PC, sigcontext, sc_pc); OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); BLANK(); } #endif #ifdef CONFIG_MIPS32_COMPAT void output_sc32_defines(void) { COMMENT("Linux 32-bit sigcontext offsets."); 
OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs); OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr); OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir); BLANK(); } #endif void output_signal_defined(void) { COMMENT("Linux signal numbers."); DEFINE(_SIGHUP, SIGHUP); DEFINE(_SIGINT, SIGINT); DEFINE(_SIGQUIT, SIGQUIT); DEFINE(_SIGILL, SIGILL); DEFINE(_SIGTRAP, SIGTRAP); DEFINE(_SIGIOT, SIGIOT); DEFINE(_SIGABRT, SIGABRT); DEFINE(_SIGEMT, SIGEMT); DEFINE(_SIGFPE, SIGFPE); DEFINE(_SIGKILL, SIGKILL); DEFINE(_SIGBUS, SIGBUS); DEFINE(_SIGSEGV, SIGSEGV); DEFINE(_SIGSYS, SIGSYS); DEFINE(_SIGPIPE, SIGPIPE); DEFINE(_SIGALRM, SIGALRM); DEFINE(_SIGTERM, SIGTERM); DEFINE(_SIGUSR1, SIGUSR1); DEFINE(_SIGUSR2, SIGUSR2); DEFINE(_SIGCHLD, SIGCHLD); DEFINE(_SIGPWR, SIGPWR); DEFINE(_SIGWINCH, SIGWINCH); DEFINE(_SIGURG, SIGURG); DEFINE(_SIGIO, SIGIO); DEFINE(_SIGSTOP, SIGSTOP); DEFINE(_SIGTSTP, SIGTSTP); DEFINE(_SIGCONT, SIGCONT); DEFINE(_SIGTTIN, SIGTTIN); DEFINE(_SIGTTOU, SIGTTOU); DEFINE(_SIGVTALRM, SIGVTALRM); DEFINE(_SIGPROF, SIGPROF); DEFINE(_SIGXCPU, SIGXCPU); DEFINE(_SIGXFSZ, SIGXFSZ); BLANK(); } #ifdef CONFIG_CPU_CAVIUM_OCTEON void output_octeon_cop2_state_defines(void) { COMMENT("Octeon specific octeon_cop2_state offsets."); OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv); OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length); OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly); OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat); OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv); OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key); OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result); OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0); OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv); OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key); OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen); OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result); 
OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult); OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly); OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); OFFSET(THREAD_CP2, task_struct, thread.cp2); OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); BLANK(); } #endif #ifdef CONFIG_HIBERNATION void output_pbe_defines(void) { COMMENT(" Linux struct pbe offsets. "); OFFSET(PBE_ADDRESS, pbe, address); OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address); OFFSET(PBE_NEXT, pbe, next); DEFINE(PBE_SIZE, sizeof(struct pbe)); BLANK(); } #endif void output_kvm_defines(void) { COMMENT(" KVM/MIPS Specfic offsets. "); DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch)); OFFSET(VCPU_RUN, kvm_vcpu, run); OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch); OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase); OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase); OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack); OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp); OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr); OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause); OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc); OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi); OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst); OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]); OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]); OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]); OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]); OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]); OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]); OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]); OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]); OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]); OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]); OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]); OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]); OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]); OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]); OFFSET(VCPU_R14, 
kvm_vcpu_arch, gprs[14]); OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]); OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]); OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]); OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]); OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]); OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]); OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]); OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]); OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]); OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]); OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]); OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]); OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]); OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]); OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]); OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]); OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]); OFFSET(VCPU_LO, kvm_vcpu_arch, lo); OFFSET(VCPU_HI, kvm_vcpu_arch, hi); OFFSET(VCPU_PC, kvm_vcpu_arch, pc); OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0); OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid); OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid); OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]); OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]); BLANK(); }
gpl-2.0
sai9615/MY-kernel-for-grand-I9082
dragon/drivers/net/wireless/ath/ath5k/ahb.c
2442
6515
/* * Copyright (c) 2008-2009 Atheros Communications Inc. * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/nl80211.h> #include <linux/platform_device.h> #include <linux/etherdevice.h> #include <ar231x_platform.h> #include "ath5k.h" #include "debug.h" #include "base.h" #include "reg.h" #include "debug.h" /* return bus cachesize in 4B word units */ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz) { *csz = L1_CACHE_BYTES >> 2; } static bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) { struct ath5k_softc *sc = common->priv; struct platform_device *pdev = to_platform_device(sc->dev); struct ar231x_board_config *bcfg = pdev->dev.platform_data; u16 *eeprom, *eeprom_end; bcfg = pdev->dev.platform_data; eeprom = (u16 *) bcfg->radio; eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ; eeprom += off; if (eeprom > eeprom_end) return false; *data = *eeprom; return true; } int ath5k_hw_read_srev(struct ath5k_hw *ah) { struct ath5k_softc *sc = ah->ah_sc; struct platform_device *pdev = to_platform_device(sc->dev); struct ar231x_board_config *bcfg = pdev->dev.platform_data; ah->ah_mac_srev = bcfg->devid; return 0; } static int 
ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac) { struct ath5k_softc *sc = ah->ah_sc; struct platform_device *pdev = to_platform_device(sc->dev); struct ar231x_board_config *bcfg = pdev->dev.platform_data; u8 *cfg_mac; if (to_platform_device(sc->dev)->id == 0) cfg_mac = bcfg->config->wlan0_mac; else cfg_mac = bcfg->config->wlan1_mac; memcpy(mac, cfg_mac, ETH_ALEN); return 0; } static const struct ath_bus_ops ath_ahb_bus_ops = { .ath_bus_type = ATH_AHB, .read_cachesize = ath5k_ahb_read_cachesize, .eeprom_read = ath5k_ahb_eeprom_read, .eeprom_read_mac = ath5k_ahb_eeprom_read_mac, }; /*Initialization*/ static int ath_ahb_probe(struct platform_device *pdev) { struct ar231x_board_config *bcfg = pdev->dev.platform_data; struct ath5k_softc *sc; struct ieee80211_hw *hw; struct resource *res; void __iomem *mem; int irq; int ret = 0; u32 reg; if (!pdev->dev.platform_data) { dev_err(&pdev->dev, "no platform data specified\n"); ret = -EINVAL; goto err_out; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no memory resource found\n"); ret = -ENXIO; goto err_out; } mem = ioremap_nocache(res->start, resource_size(res)); if (mem == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_out; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) { dev_err(&pdev->dev, "no IRQ resource found\n"); ret = -ENXIO; goto err_out; } irq = res->start; hw = ieee80211_alloc_hw(sizeof(struct ath5k_softc), &ath5k_hw_ops); if (hw == NULL) { dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); ret = -ENOMEM; goto err_out; } sc = hw->priv; sc->hw = hw; sc->dev = &pdev->dev; sc->iobase = mem; sc->irq = irq; sc->devid = bcfg->devid; if (bcfg->devid >= AR5K_SREV_AR2315_R6) { /* Enable WMAC AHB arbitration */ reg = __raw_readl((void __iomem *) AR5K_AR2315_AHB_ARB_CTL); reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN; __raw_writel(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL); /* Enable global WMAC swapping */ reg = 
__raw_readl((void __iomem *) AR5K_AR2315_BYTESWAP); reg |= AR5K_AR2315_BYTESWAP_WMAC; __raw_writel(reg, (void __iomem *) AR5K_AR2315_BYTESWAP); } else { /* Enable WMAC DMA access (assuming 5312 or 231x*/ /* TODO: check other platforms */ reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE); if (to_platform_device(sc->dev)->id == 0) reg |= AR5K_AR5312_ENABLE_WLAN0; else reg |= AR5K_AR5312_ENABLE_WLAN1; __raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE); /* * On a dual-band AR5312, the multiband radio is only * used as pass-through. Disable 2 GHz support in the * driver for it */ if (to_platform_device(sc->dev)->id == 0 && (bcfg->config->flags & (BD_WLAN0|BD_WLAN1)) == (BD_WLAN1|BD_WLAN0)) __set_bit(ATH_STAT_2G_DISABLED, sc->status); } ret = ath5k_init_softc(sc, &ath_ahb_bus_ops); if (ret != 0) { dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret); ret = -ENODEV; goto err_free_hw; } platform_set_drvdata(pdev, hw); return 0; err_free_hw: ieee80211_free_hw(hw); platform_set_drvdata(pdev, NULL); err_out: return ret; } static int ath_ahb_remove(struct platform_device *pdev) { struct ar231x_board_config *bcfg = pdev->dev.platform_data; struct ieee80211_hw *hw = platform_get_drvdata(pdev); struct ath5k_softc *sc; u32 reg; if (!hw) return 0; sc = hw->priv; if (bcfg->devid >= AR5K_SREV_AR2315_R6) { /* Disable WMAC AHB arbitration */ reg = __raw_readl((void __iomem *) AR5K_AR2315_AHB_ARB_CTL); reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN; __raw_writel(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL); } else { /*Stop DMA access */ reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE); if (to_platform_device(sc->dev)->id == 0) reg &= ~AR5K_AR5312_ENABLE_WLAN0; else reg &= ~AR5K_AR5312_ENABLE_WLAN1; __raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE); } ath5k_deinit_softc(sc); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver ath_ahb_driver = { .probe = ath_ahb_probe, .remove = ath_ahb_remove, .driver = { .name = "ar231x-wmac", .owner = 
THIS_MODULE, }, }; static int __init ath5k_ahb_init(void) { return platform_driver_register(&ath_ahb_driver); } static void __exit ath5k_ahb_exit(void) { platform_driver_unregister(&ath_ahb_driver); } module_init(ath5k_ahb_init); module_exit(ath5k_ahb_exit);
gpl-2.0
robocoreio/edison-kernel
arch/ia64/kernel/time.c
2442
12640
/* * linux/arch/ia64/kernel/time.c * * Copyright (C) 1998-2003 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * David Mosberger <davidm@hpl.hp.com> * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> * Copyright (C) 1999-2000 VA Linux Systems * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com> */ #include <linux/cpu.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/profile.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/interrupt.h> #include <linux/efi.h> #include <linux/timex.h> #include <linux/timekeeper_internal.h> #include <linux/platform_device.h> #include <asm/machvec.h> #include <asm/delay.h> #include <asm/hw_irq.h> #include <asm/paravirt.h> #include <asm/ptrace.h> #include <asm/sal.h> #include <asm/sections.h> #include "fsyscall_gtod_data.h" static cycle_t itc_get_cycles(struct clocksource *cs); struct fsyscall_gtod_data_t fsyscall_gtod_data; struct itc_jitter_data_t itc_jitter_data; volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */ #ifdef CONFIG_IA64_DEBUG_IRQ unsigned long last_cli_ip; EXPORT_SYMBOL(last_cli_ip); #endif #ifdef CONFIG_PARAVIRT /* We need to define a real function for sched_clock, to override the weak default version */ unsigned long long sched_clock(void) { return paravirt_sched_clock(); } #endif #ifdef CONFIG_PARAVIRT static void paravirt_clocksource_resume(struct clocksource *cs) { if (pv_time_ops.clocksource_resume) pv_time_ops.clocksource_resume(); } #endif static struct clocksource clocksource_itc = { .name = "itc", .rating = 350, .read = itc_get_cycles, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, #ifdef CONFIG_PARAVIRT .resume = paravirt_clocksource_resume, #endif }; static struct clocksource *itc_clocksource; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #include <linux/kernel_stat.h> extern cputime_t cycle_to_cputime(u64 cyc); void vtime_account_user(struct task_struct *tsk) { cputime_t delta_utime; 
struct thread_info *ti = task_thread_info(tsk); if (ti->ac_utime) { delta_utime = cycle_to_cputime(ti->ac_utime); account_user_time(tsk, delta_utime, delta_utime); ti->ac_utime = 0; } } /* * Called from the context switch with interrupts disabled, to charge all * accumulated times to the current process, and to prepare accounting on * the next process. */ void arch_vtime_task_switch(struct task_struct *prev) { struct thread_info *pi = task_thread_info(prev); struct thread_info *ni = task_thread_info(current); pi->ac_stamp = ni->ac_stamp; ni->ac_stime = ni->ac_utime = 0; } /* * Account time for a transition between system, hard irq or soft irq state. * Note that this function is called with interrupts enabled. */ static cputime_t vtime_delta(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); cputime_t delta_stime; __u64 now; WARN_ON_ONCE(!irqs_disabled()); now = ia64_get_itc(); delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp)); ti->ac_stime = 0; ti->ac_stamp = now; return delta_stime; } void vtime_account_system(struct task_struct *tsk) { cputime_t delta = vtime_delta(tsk); account_system_time(tsk, 0, delta, delta); } EXPORT_SYMBOL_GPL(vtime_account_system); void vtime_account_idle(struct task_struct *tsk) { account_idle_time(vtime_delta(tsk)); } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ static irqreturn_t timer_interrupt (int irq, void *dev_id) { unsigned long new_itm; if (cpu_is_offline(smp_processor_id())) { return IRQ_HANDLED; } platform_timer_interrupt(irq, dev_id); new_itm = local_cpu_data->itm_next; if (!time_after(ia64_get_itc(), new_itm)) printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n", ia64_get_itc(), new_itm); profile_tick(CPU_PROFILING); if (paravirt_do_steal_accounting(&new_itm)) goto skip_process_time_accounting; while (1) { update_process_times(user_mode(get_irq_regs())); new_itm += local_cpu_data->itm_delta; if (smp_processor_id() == time_keeper_id) xtime_update(1); 
local_cpu_data->itm_next = new_itm; if (time_after(new_itm, ia64_get_itc())) break; /* * Allow IPIs to interrupt the timer loop. */ local_irq_enable(); local_irq_disable(); } skip_process_time_accounting: do { /* * If we're too close to the next clock tick for * comfort, we increase the safety margin by * intentionally dropping the next tick(s). We do NOT * update itm.next because that would force us to call * xtime_update() which in turn would let our clock run * too fast (with the potentially devastating effect * of losing monotony of time). */ while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2)) new_itm += local_cpu_data->itm_delta; ia64_set_itm(new_itm); /* double check, in case we got hit by a (slow) PMI: */ } while (time_after_eq(ia64_get_itc(), new_itm)); return IRQ_HANDLED; } /* * Encapsulate access to the itm structure for SMP. */ void ia64_cpu_local_tick (void) { int cpu = smp_processor_id(); unsigned long shift = 0, delta; /* arrange for the cycle counter to generate a timer interrupt: */ ia64_set_itv(IA64_TIMER_VECTOR); delta = local_cpu_data->itm_delta; /* * Stagger the timer tick for each CPU so they don't occur all at (almost) the * same time: */ if (cpu) { unsigned long hi = 1UL << ia64_fls(cpu); shift = (2*(cpu - hi) + 1) * delta/hi/2; } local_cpu_data->itm_next = ia64_get_itc() + delta + shift; ia64_set_itm(local_cpu_data->itm_next); } static int nojitter; static int __init nojitter_setup(char *str) { nojitter = 1; printk("Jitter checking for ITC timers disabled\n"); return 1; } __setup("nojitter", nojitter_setup); void ia64_init_itm(void) { unsigned long platform_base_freq, itc_freq; struct pal_freq_ratio itc_ratio, proc_ratio; long status, platform_base_drift, itc_drift; /* * According to SAL v2.6, we need to use a SAL call to determine the platform base * frequency and then a PAL call to determine the frequency ratio between the ITC * and the base frequency. 
*/ status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, &platform_base_freq, &platform_base_drift); if (status != 0) { printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status)); } else { status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio); if (status != 0) printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status); } if (status != 0) { /* invent "random" values */ printk(KERN_ERR "SAL/PAL failed to obtain frequency info---inventing reasonable values\n"); platform_base_freq = 100000000; platform_base_drift = -1; /* no drift info */ itc_ratio.num = 3; itc_ratio.den = 1; } if (platform_base_freq < 40000000) { printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n", platform_base_freq); platform_base_freq = 75000000; platform_base_drift = -1; } if (!proc_ratio.den) proc_ratio.den = 1; /* avoid division by zero */ if (!itc_ratio.den) itc_ratio.den = 1; /* avoid division by zero */ itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den; local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ; printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, " "ITC freq=%lu.%03luMHz", smp_processor_id(), platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000, itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000); if (platform_base_drift != -1) { itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den; printk("+/-%ldppm\n", itc_drift); } else { itc_drift = -1; printk("\n"); } local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den; local_cpu_data->itc_freq = itc_freq; local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC; local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT) + itc_freq/2)/itc_freq; if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) { #ifdef CONFIG_SMP /* On IA64 in an SMP configuration ITCs are never accurately synchronized. 
* Jitter compensation requires a cmpxchg which may limit * the scalability of the syscalls for retrieving time. * The ITC synchronization is usually successful to within a few * ITC ticks but this is not a sure thing. If you need to improve * timer performance in SMP situations then boot the kernel with the * "nojitter" option. However, doing so may result in time fluctuating (maybe * even going backward) if the ITC offsets between the individual CPUs * are too large. */ if (!nojitter) itc_jitter_data.itc_jitter = 1; #endif } else /* * ITC is drifty and we have not synchronized the ITCs in smpboot.c. * ITC values may fluctuate significantly between processors. * Clock should not be used for hrtimers. Mark itc as only * useful for boot and testing. * * Note that jitter compensation is off! There is no point of * synchronizing ITCs since they may be large differentials * that change over time. * * The only way to fix this would be to repeatedly sync the * ITCs. Until that time we have to avoid ITC. */ clocksource_itc.rating = 50; paravirt_init_missing_ticks_accounting(smp_processor_id()); /* avoid softlock up message when cpu is unplug and plugged again. */ touch_softlockup_watchdog(); /* Setup the CPU local timer tick */ ia64_cpu_local_tick(); if (!itc_clocksource) { clocksource_register_hz(&clocksource_itc, local_cpu_data->itc_freq); itc_clocksource = &clocksource_itc; } } static cycle_t itc_get_cycles(struct clocksource *cs) { unsigned long lcycle, now, ret; if (!itc_jitter_data.itc_jitter) return get_cycles(); lcycle = itc_jitter_data.itc_lastcycle; now = get_cycles(); if (lcycle && time_after(lcycle, now)) return lcycle; /* * Keep track of the last timer value returned. * In an SMP environment, you could lose out in contention of * cmpxchg. If so, your cmpxchg returns new value which the * winner of contention updated to. Use the new value instead. 
*/ ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now); if (unlikely(ret != lcycle)) return ret; return now; } static struct irqaction timer_irqaction = { .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_IRQPOLL, .name = "timer" }; static struct platform_device rtc_efi_dev = { .name = "rtc-efi", .id = -1, }; static int __init rtc_init(void) { if (platform_device_register(&rtc_efi_dev) < 0) printk(KERN_ERR "unable to register rtc device...\n"); /* not necessarily an error */ return 0; } module_init(rtc_init); void read_persistent_clock(struct timespec *ts) { efi_gettimeofday(ts); } void __init time_init (void) { register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction); ia64_init_itm(); } /* * Generic udelay assumes that if preemption is allowed and the thread * migrates to another CPU, that the ITC values are synchronized across * all CPUs. */ static void ia64_itc_udelay (unsigned long usecs) { unsigned long start = ia64_get_itc(); unsigned long end = start + usecs*local_cpu_data->cyc_per_usec; while (time_before(ia64_get_itc(), end)) cpu_relax(); } void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay; void udelay (unsigned long usecs) { (*ia64_udelay)(usecs); } EXPORT_SYMBOL(udelay); /* IA64 doesn't cache the timezone */ void update_vsyscall_tz(void) { } void update_vsyscall_old(struct timespec *wall, struct timespec *wtm, struct clocksource *c, u32 mult) { write_seqcount_begin(&fsyscall_gtod_data.seq); /* copy fsyscall clock data */ fsyscall_gtod_data.clk_mask = c->mask; fsyscall_gtod_data.clk_mult = mult; fsyscall_gtod_data.clk_shift = c->shift; fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio; fsyscall_gtod_data.clk_cycle_last = c->cycle_last; /* copy kernel time structures */ fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec; fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec; fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec + wall->tv_sec; fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec + wall->tv_nsec; /* 
normalize */ while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) { fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC; fsyscall_gtod_data.monotonic_time.tv_sec++; } write_seqcount_end(&fsyscall_gtod_data.seq); }
gpl-2.0
nerull7/android_kernel_grouper
fs/gfs2/util.c
4234
8166
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/crc32.h> #include <linux/gfs2_ondisk.h> #include <asm/uaccess.h> #include "gfs2.h" #include "incore.h" #include "glock.h" #include "util.h" struct kmem_cache *gfs2_glock_cachep __read_mostly; struct kmem_cache *gfs2_glock_aspace_cachep __read_mostly; struct kmem_cache *gfs2_inode_cachep __read_mostly; struct kmem_cache *gfs2_bufdata_cachep __read_mostly; struct kmem_cache *gfs2_rgrpd_cachep __read_mostly; struct kmem_cache *gfs2_quotad_cachep __read_mostly; void gfs2_assert_i(struct gfs2_sbd *sdp) { printk(KERN_EMERG "GFS2: fsid=%s: fatal assertion failed\n", sdp->sd_fsname); } int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...) 
{ struct lm_lockstruct *ls = &sdp->sd_lockstruct; const struct lm_lockops *lm = ls->ls_ops; va_list args; if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW && test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags)) return 0; va_start(args, fmt); vprintk(fmt, args); va_end(args); if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) { fs_err(sdp, "about to withdraw this file system\n"); BUG_ON(sdp->sd_args.ar_debug); kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE); if (lm->lm_unmount) { fs_err(sdp, "telling LM to unmount\n"); lm->lm_unmount(sdp); } fs_err(sdp, "withdrawn\n"); dump_stack(); } if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC) panic("GFS2: fsid=%s: panic requested.\n", sdp->sd_fsname); return -1; } /** * gfs2_assert_withdraw_i - Cause the machine to withdraw if @assertion is false * Returns: -1 if this call withdrew the machine, * -2 if it was already withdrawn */ int gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion, const char *function, char *file, unsigned int line) { int me; me = gfs2_lm_withdraw(sdp, "GFS2: fsid=%s: fatal: assertion \"%s\" failed\n" "GFS2: fsid=%s: function = %s, file = %s, line = %u\n", sdp->sd_fsname, assertion, sdp->sd_fsname, function, file, line); dump_stack(); return (me) ? 
-1 : -2; } /** * gfs2_assert_warn_i - Print a message to the console if @assertion is false * Returns: -1 if we printed something * -2 if we didn't */ int gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion, const char *function, char *file, unsigned int line) { if (time_before(jiffies, sdp->sd_last_warning + gfs2_tune_get(sdp, gt_complain_secs) * HZ)) return -2; if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) printk(KERN_WARNING "GFS2: fsid=%s: warning: assertion \"%s\" failed\n" "GFS2: fsid=%s: function = %s, file = %s, line = %u\n", sdp->sd_fsname, assertion, sdp->sd_fsname, function, file, line); if (sdp->sd_args.ar_debug) BUG(); else dump_stack(); if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC) panic("GFS2: fsid=%s: warning: assertion \"%s\" failed\n" "GFS2: fsid=%s: function = %s, file = %s, line = %u\n", sdp->sd_fsname, assertion, sdp->sd_fsname, function, file, line); sdp->sd_last_warning = jiffies; return -1; } /** * gfs2_consist_i - Flag a filesystem consistency error and withdraw * Returns: -1 if this call withdrew the machine, * 0 if it was already withdrawn */ int gfs2_consist_i(struct gfs2_sbd *sdp, int cluster_wide, const char *function, char *file, unsigned int line) { int rv; rv = gfs2_lm_withdraw(sdp, "GFS2: fsid=%s: fatal: filesystem consistency error\n" "GFS2: fsid=%s: function = %s, file = %s, line = %u\n", sdp->sd_fsname, sdp->sd_fsname, function, file, line); return rv; } /** * gfs2_consist_inode_i - Flag an inode consistency error and withdraw * Returns: -1 if this call withdrew the machine, * 0 if it was already withdrawn */ int gfs2_consist_inode_i(struct gfs2_inode *ip, int cluster_wide, const char *function, char *file, unsigned int line) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); int rv; rv = gfs2_lm_withdraw(sdp, "GFS2: fsid=%s: fatal: filesystem consistency error\n" "GFS2: fsid=%s: inode = %llu %llu\n" "GFS2: fsid=%s: function = %s, file = %s, line = %u\n", sdp->sd_fsname, sdp->sd_fsname, (unsigned long 
long)ip->i_no_formal_ino, (unsigned long long)ip->i_no_addr, sdp->sd_fsname, function, file, line); return rv; } /** * gfs2_consist_rgrpd_i - Flag a RG consistency error and withdraw * Returns: -1 if this call withdrew the machine, * 0 if it was already withdrawn */ int gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, int cluster_wide, const char *function, char *file, unsigned int line) { struct gfs2_sbd *sdp = rgd->rd_sbd; int rv; rv = gfs2_lm_withdraw(sdp, "GFS2: fsid=%s: fatal: filesystem consistency error\n" "GFS2: fsid=%s: RG = %llu\n" "GFS2: fsid=%s: function = %s, file = %s, line = %u\n", sdp->sd_fsname, sdp->sd_fsname, (unsigned long long)rgd->rd_addr, sdp->sd_fsname, function, file, line); return rv; } /** * gfs2_meta_check_ii - Flag a magic number consistency error and withdraw * Returns: -1 if this call withdrew the machine, * -2 if it was already withdrawn */ int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh, const char *type, const char *function, char *file, unsigned int line) { int me; me = gfs2_lm_withdraw(sdp, "GFS2: fsid=%s: fatal: invalid metadata block\n" "GFS2: fsid=%s: bh = %llu (%s)\n" "GFS2: fsid=%s: function = %s, file = %s, line = %u\n", sdp->sd_fsname, sdp->sd_fsname, (unsigned long long)bh->b_blocknr, type, sdp->sd_fsname, function, file, line); return (me) ? -1 : -2; } /** * gfs2_metatype_check_ii - Flag a metadata type consistency error and withdraw * Returns: -1 if this call withdrew the machine, * -2 if it was already withdrawn */ int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh, u16 type, u16 t, const char *function, char *file, unsigned int line) { int me; me = gfs2_lm_withdraw(sdp, "GFS2: fsid=%s: fatal: invalid metadata block\n" "GFS2: fsid=%s: bh = %llu (type: exp=%u, found=%u)\n" "GFS2: fsid=%s: function = %s, file = %s, line = %u\n", sdp->sd_fsname, sdp->sd_fsname, (unsigned long long)bh->b_blocknr, type, t, sdp->sd_fsname, function, file, line); return (me) ? 
-1 : -2; } /** * gfs2_io_error_i - Flag an I/O error and withdraw * Returns: -1 if this call withdrew the machine, * 0 if it was already withdrawn */ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file, unsigned int line) { int rv; rv = gfs2_lm_withdraw(sdp, "GFS2: fsid=%s: fatal: I/O error\n" "GFS2: fsid=%s: function = %s, file = %s, line = %u\n", sdp->sd_fsname, sdp->sd_fsname, function, file, line); return rv; } /** * gfs2_io_error_bh_i - Flag a buffer I/O error and withdraw * Returns: -1 if this call withdrew the machine, * 0 if it was already withdrawn */ int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh, const char *function, char *file, unsigned int line) { int rv; rv = gfs2_lm_withdraw(sdp, "GFS2: fsid=%s: fatal: I/O error\n" "GFS2: fsid=%s: block = %llu\n" "GFS2: fsid=%s: function = %s, file = %s, line = %u\n", sdp->sd_fsname, sdp->sd_fsname, (unsigned long long)bh->b_blocknr, sdp->sd_fsname, function, file, line); return rv; } void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap, unsigned int bit, int new_value) { unsigned int c, o, b = bit; int old_value; c = b / (8 * PAGE_SIZE); b %= 8 * PAGE_SIZE; o = b / 8; b %= 8; old_value = (bitmap[c][o] & (1 << b)); gfs2_assert_withdraw(sdp, !old_value != !new_value); if (new_value) bitmap[c][o] |= 1 << b; else bitmap[c][o] &= ~(1 << b); }
gpl-2.0
houst0nn/android_kernel_msm
drivers/usb/class/usblp.c
4746
39646
/*
 * usblp.c
 *
 * Copyright (c) 1999 Michael Gee	<michael@linuxspecific.com>
 * Copyright (c) 1999 Pavel Machek	<pavel@ucw.cz>
 * Copyright (c) 2000 Randy Dunlap	<rdunlap@xenotime.net>
 * Copyright (c) 2000 Vojtech Pavlik	<vojtech@suse.cz>
 * Copyright (c) 2001 Pete Zaitcev	<zaitcev@redhat.com>
 * Copyright (c) 2001 David Paschal	<paschal@rcsis.com>
 * Copyright (c) 2006 Oliver Neukum	<oliver@neukum.name>
 *
 * USB Printer Device Class driver for USB printers and printer cables
 *
 * Sponsored by SuSE
 *
 * ChangeLog:
 *	v0.1 - thorough cleaning, URBification, almost a rewrite
 *	v0.2 - some more cleanups
 *	v0.3 - cleaner again, waitqueue fixes
 *	v0.4 - fixes in unidirectional mode
 *	v0.5 - add DEVICE_ID string support
 *	v0.6 - never time out
 *	v0.7 - fixed bulk-IN read and poll (David Paschal)
 *	v0.8 - add devfs support
 *	v0.9 - fix unplug-while-open paths
 *	v0.10 - remove sleep_on, fix error on oom (oliver@neukum.org)
 *	v0.11 - add proto_bias option (Pete Zaitcev)
 *	v0.12 - add hpoj.sourceforge.net ioctls (David Paschal)
 *	v0.13 - alloc space for statusbuf (<status> not on stack);
 *		use usb_alloc_coherent() for read buf & write buf;
 *	none - Maintained in Linux kernel after v0.13
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/lp.h> #include <linux/mutex.h> #undef DEBUG #include <linux/usb.h> #include <linux/ratelimit.h> /* * Version Information */ #define DRIVER_AUTHOR "Michael Gee, Pavel Machek, Vojtech Pavlik, Randy Dunlap, Pete Zaitcev, David Paschal" #define DRIVER_DESC "USB Printer Device Class driver" #define USBLP_BUF_SIZE 8192 #define USBLP_BUF_SIZE_IN 1024 #define USBLP_DEVICE_ID_SIZE 1024 /* ioctls: */ #define IOCNR_GET_DEVICE_ID 1 #define IOCNR_GET_PROTOCOLS 2 #define IOCNR_SET_PROTOCOL 3 #define IOCNR_HP_SET_CHANNEL 4 #define IOCNR_GET_BUS_ADDRESS 5 #define IOCNR_GET_VID_PID 6 #define IOCNR_SOFT_RESET 7 /* Get device_id string: */ #define LPIOC_GET_DEVICE_ID(len) _IOC(_IOC_READ, 'P', IOCNR_GET_DEVICE_ID, len) /* The following ioctls were added for http://hpoj.sourceforge.net: */ /* Get two-int array: * [0]=current protocol (1=7/1/1, 2=7/1/2, 3=7/1/3), * [1]=supported protocol mask (mask&(1<<n)!=0 means 7/1/n supported): */ #define LPIOC_GET_PROTOCOLS(len) _IOC(_IOC_READ, 'P', IOCNR_GET_PROTOCOLS, len) /* Set protocol (arg: 1=7/1/1, 2=7/1/2, 3=7/1/3): */ #define LPIOC_SET_PROTOCOL _IOC(_IOC_WRITE, 'P', IOCNR_SET_PROTOCOL, 0) /* Set channel number (HP Vendor-specific command): */ #define LPIOC_HP_SET_CHANNEL _IOC(_IOC_WRITE, 'P', IOCNR_HP_SET_CHANNEL, 0) /* Get two-int array: [0]=bus number, [1]=device address: */ #define LPIOC_GET_BUS_ADDRESS(len) _IOC(_IOC_READ, 'P', IOCNR_GET_BUS_ADDRESS, len) /* Get two-int array: [0]=vendor ID, [1]=product ID: */ #define LPIOC_GET_VID_PID(len) _IOC(_IOC_READ, 'P', IOCNR_GET_VID_PID, len) /* Perform class specific soft reset */ #define 
LPIOC_SOFT_RESET _IOC(_IOC_NONE, 'P', IOCNR_SOFT_RESET, 0); /* * A DEVICE_ID string may include the printer's serial number. * It should end with a semi-colon (';'). * An example from an HP 970C DeskJet printer is (this is one long string, * with the serial number changed): MFG:HEWLETT-PACKARD;MDL:DESKJET 970C;CMD:MLC,PCL,PML;CLASS:PRINTER;DESCRIPTION:Hewlett-Packard DeskJet 970C;SERN:US970CSEPROF;VSTATUS:$HB0$NC0,ff,DN,IDLE,CUT,K1,C0,DP,NR,KP000,CP027;VP:0800,FL,B0;VJ: ; */ /* * USB Printer Requests */ #define USBLP_REQ_GET_ID 0x00 #define USBLP_REQ_GET_STATUS 0x01 #define USBLP_REQ_RESET 0x02 #define USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST 0x00 /* HP Vendor-specific */ #define USBLP_MINORS 16 #define USBLP_MINOR_BASE 0 #define USBLP_CTL_TIMEOUT 5000 /* 5 seconds */ #define USBLP_FIRST_PROTOCOL 1 #define USBLP_LAST_PROTOCOL 3 #define USBLP_MAX_PROTOCOLS (USBLP_LAST_PROTOCOL+1) /* * some arbitrary status buffer size; * need a status buffer that is allocated via kmalloc(), not on stack */ #define STATUS_BUF_SIZE 8 /* * Locks down the locking order: * ->wmut locks wstatus. * ->mut locks the whole usblp, except [rw]complete, and thus, by indirection, * [rw]status. We only touch status when we know the side idle. * ->lock locks what interrupt accesses. 
*/ struct usblp { struct usb_device *dev; /* USB device */ struct mutex wmut; struct mutex mut; spinlock_t lock; /* locks rcomplete, wcomplete */ char *readbuf; /* read transfer_buffer */ char *statusbuf; /* status transfer_buffer */ struct usb_anchor urbs; wait_queue_head_t rwait, wwait; int readcount; /* Counter for reads */ int ifnum; /* Interface number */ struct usb_interface *intf; /* The interface */ /* Alternate-setting numbers and endpoints for each protocol * (7/1/{index=1,2,3}) that the device supports: */ struct { int alt_setting; struct usb_endpoint_descriptor *epwrite; struct usb_endpoint_descriptor *epread; } protocol[USBLP_MAX_PROTOCOLS]; int current_protocol; int minor; /* minor number of device */ int wcomplete, rcomplete; int wstatus; /* bytes written or error */ int rstatus; /* bytes ready or error */ unsigned int quirks; /* quirks flags */ unsigned int flags; /* mode flags */ unsigned char used; /* True if open */ unsigned char present; /* True if not disconnected */ unsigned char bidir; /* interface is bidirectional */ unsigned char no_paper; /* Paper Out happened */ unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */ /* first 2 bytes are (big-endian) length */ }; #ifdef DEBUG static void usblp_dump(struct usblp *usblp) { int p; dbg("usblp=0x%p", usblp); dbg("dev=0x%p", usblp->dev); dbg("present=%d", usblp->present); dbg("readbuf=0x%p", usblp->readbuf); dbg("readcount=%d", usblp->readcount); dbg("ifnum=%d", usblp->ifnum); for (p = USBLP_FIRST_PROTOCOL; p <= USBLP_LAST_PROTOCOL; p++) { dbg("protocol[%d].alt_setting=%d", p, usblp->protocol[p].alt_setting); dbg("protocol[%d].epwrite=%p", p, usblp->protocol[p].epwrite); dbg("protocol[%d].epread=%p", p, usblp->protocol[p].epread); } dbg("current_protocol=%d", usblp->current_protocol); dbg("minor=%d", usblp->minor); dbg("wstatus=%d", usblp->wstatus); dbg("rstatus=%d", usblp->rstatus); dbg("quirks=%d", usblp->quirks); dbg("used=%d", usblp->used); dbg("bidir=%d", usblp->bidir); 
	dbg("device_id_string=\"%s\"",
		usblp->device_id_string ?
			usblp->device_id_string + 2 :
			(unsigned char *)"(null)");
}
#endif

/* Quirks: various printer quirks are handled by this table & its flags. */

struct quirk_printer_struct {
	__u16 vendorId;
	__u16 productId;
	unsigned int quirks;
};

#define USBLP_QUIRK_BIDIR	0x1	/* reports bidir but requires unidirectional mode (no INs/reads) */
#define USBLP_QUIRK_USB_INIT	0x2	/* needs vendor USB init string */
#define USBLP_QUIRK_BAD_CLASS	0x4	/* descriptor uses vendor-specific Class or SubClass */

/* Table of known-quirky printers, matched by VID/PID in usblp_quirks().
 * Terminated by the all-zero sentinel entry. */
static const struct quirk_printer_struct quirk_printers[] = {
	{ 0x03f0, 0x0004, USBLP_QUIRK_BIDIR }, /* HP DeskJet 895C */
	{ 0x03f0, 0x0104, USBLP_QUIRK_BIDIR }, /* HP DeskJet 880C */
	{ 0x03f0, 0x0204, USBLP_QUIRK_BIDIR }, /* HP DeskJet 815C */
	{ 0x03f0, 0x0304, USBLP_QUIRK_BIDIR }, /* HP DeskJet 810C/812C */
	{ 0x03f0, 0x0404, USBLP_QUIRK_BIDIR }, /* HP DeskJet 830C */
	{ 0x03f0, 0x0504, USBLP_QUIRK_BIDIR }, /* HP DeskJet 885C */
	{ 0x03f0, 0x0604, USBLP_QUIRK_BIDIR }, /* HP DeskJet 840C */
	{ 0x03f0, 0x0804, USBLP_QUIRK_BIDIR }, /* HP DeskJet 816C */
	{ 0x03f0, 0x1104, USBLP_QUIRK_BIDIR }, /* HP Deskjet 959C */
	{ 0x0409, 0xefbe, USBLP_QUIRK_BIDIR }, /* NEC Picty900 (HP OEM) */
	{ 0x0409, 0xbef4, USBLP_QUIRK_BIDIR }, /* NEC Picty760 (HP OEM) */
	{ 0x0409, 0xf0be, USBLP_QUIRK_BIDIR }, /* NEC Picty920 (HP OEM) */
	{ 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */
	{ 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */
	{ 0x04f9, 0x000d, USBLP_QUIRK_BIDIR }, /* Brother Industries, Ltd HL-1440 Laser Printer */
	{ 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */
	{ 0, 0 }
};

/* Forward declarations for the wait/test helpers and the read-URB
 * submission routine used before their definitions below. */
static int usblp_wwait(struct usblp *usblp, int nonblock);
static int usblp_wtest(struct usblp *usblp, int nonblock);
static int usblp_rwait_and_lock(struct usblp *usblp, int nonblock);
static int usblp_rtest(struct usblp *usblp, int nonblock);
static int usblp_submit_read(struct usblp *usblp);
static int usblp_select_alts(struct usblp *usblp); static int usblp_set_protocol(struct usblp *usblp, int protocol); static int usblp_cache_device_id_string(struct usblp *usblp); /* forward reference to make our lives easier */ static struct usb_driver usblp_driver; static DEFINE_MUTEX(usblp_mutex); /* locks the existence of usblp's */ /* * Functions for usblp control messages. */ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, int recip, int value, void *buf, int len) { int retval; int index = usblp->ifnum; /* High byte has the interface index. Low byte has the alternate setting. */ if ((request == USBLP_REQ_GET_ID) && (type == USB_TYPE_CLASS)) index = (usblp->ifnum<<8)|usblp->protocol[usblp->current_protocol].alt_setting; retval = usb_control_msg(usblp->dev, dir ? usb_rcvctrlpipe(usblp->dev, 0) : usb_sndctrlpipe(usblp->dev, 0), request, type | dir | recip, value, index, buf, len, USBLP_CTL_TIMEOUT); dbg("usblp_control_msg: rq: 0x%02x dir: %d recip: %d value: %d idx: %d len: %#x result: %d", request, !!dir, recip, value, index, len, retval); return retval < 0 ? retval : 0; } #define usblp_read_status(usblp, status)\ usblp_ctrl_msg(usblp, USBLP_REQ_GET_STATUS, USB_TYPE_CLASS, USB_DIR_IN, USB_RECIP_INTERFACE, 0, status, 1) #define usblp_get_id(usblp, config, id, maxlen)\ usblp_ctrl_msg(usblp, USBLP_REQ_GET_ID, USB_TYPE_CLASS, USB_DIR_IN, USB_RECIP_INTERFACE, config, id, maxlen) #define usblp_reset(usblp)\ usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0) #define usblp_hp_channel_change_request(usblp, channel, buffer) \ usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1) /* * See the description for usblp_select_alts() below for the usage * explanation. Look into your /proc/bus/usb/devices and dmesg in * case of any trouble. */ static int proto_bias = -1; /* * URB callback. 
 */

/* Completion callback for a bulk-IN (read) URB.  Runs in interrupt
 * context: records the outcome in ->rstatus (byte count on success,
 * negative error otherwise), marks the read side complete and wakes
 * any sleeping reader.  Frees the URB, whose reference we own. */
static void usblp_bulk_read(struct urb *urb)
{
	struct usblp *usblp = urb->context;
	int status = urb->status;

	if (usblp->present && usblp->used) {
		if (status)
			printk(KERN_WARNING "usblp%d: "
			    "nonzero read bulk status received: %d\n",
			    usblp->minor, status);
	}
	spin_lock(&usblp->lock);
	if (status < 0)
		usblp->rstatus = status;
	else
		usblp->rstatus = urb->actual_length;
	usblp->rcomplete = 1;
	wake_up(&usblp->rwait);
	spin_unlock(&usblp->lock);

	usb_free_urb(urb);
}

/* Completion callback for a bulk-OUT (write) URB.  Mirrors
 * usblp_bulk_read(): outcome goes into ->wstatus and writers are
 * woken.  Also clears ->no_paper so poll(2) reports POLLOUT again. */
static void usblp_bulk_write(struct urb *urb)
{
	struct usblp *usblp = urb->context;
	int status = urb->status;

	if (usblp->present && usblp->used) {
		if (status)
			printk(KERN_WARNING "usblp%d: "
			    "nonzero write bulk status received: %d\n",
			    usblp->minor, status);
	}
	spin_lock(&usblp->lock);
	if (status < 0)
		usblp->wstatus = status;
	else
		usblp->wstatus = urb->actual_length;
	usblp->no_paper = 0;
	usblp->wcomplete = 1;
	wake_up(&usblp->wwait);
	spin_unlock(&usblp->lock);

	usb_free_urb(urb);
}

/*
 * Get and print printer errors.
 */

/* Indexed by the code that usblp_check_status() returns. */
static const char *usblp_messages[] = { "ok", "out of paper", "off-line", "on fire" };

/* Read the printer's class status byte and translate it into an error
 * code indexing usblp_messages[].  @err is the previously reported
 * code; a message is logged only when the state changes.  Returns the
 * new code, or 0 if the status could not be read. */
static int usblp_check_status(struct usblp *usblp, int err)
{
	unsigned char status, newerr = 0;
	int error;

	mutex_lock(&usblp->mut);
	if ((error = usblp_read_status(usblp, usblp->statusbuf)) < 0) {
		mutex_unlock(&usblp->mut);
		printk_ratelimited(KERN_ERR
				"usblp%d: error %d reading printer status\n",
				usblp->minor, error);
		return 0;
	}
	status = *usblp->statusbuf;
	mutex_unlock(&usblp->mut);

	/* LP_PERRORP and LP_PSELECD are tested inverted (~status);
	 * the later checks take precedence over the earlier ones. */
	if (~status & LP_PERRORP)
		newerr = 3;
	if (status & LP_POUTPA)
		newerr = 1;
	if (~status & LP_PSELECD)
		newerr = 2;

	if (newerr != err) {
		printk(KERN_INFO "usblp%d: %s\n",
		   usblp->minor, usblp_messages[newerr]);
	}

	return newerr;
}

/* Start the standing read on a bidirectional, opened printer.
 * Returns 0 on success or when no read is needed, -EIO on failure. */
static int handle_bidir(struct usblp *usblp)
{
	if (usblp->bidir && usblp->used) {
		if (usblp_submit_read(usblp) < 0)
			return -EIO;
	}
	return 0;
}

/*
 * File op functions.
*/ static int usblp_open(struct inode *inode, struct file *file) { int minor = iminor(inode); struct usblp *usblp; struct usb_interface *intf; int retval; if (minor < 0) return -ENODEV; mutex_lock(&usblp_mutex); retval = -ENODEV; intf = usb_find_interface(&usblp_driver, minor); if (!intf) goto out; usblp = usb_get_intfdata(intf); if (!usblp || !usblp->dev || !usblp->present) goto out; retval = -EBUSY; if (usblp->used) goto out; /* * We do not implement LP_ABORTOPEN/LPABORTOPEN for two reasons: * - We do not want persistent state which close(2) does not clear * - It is not used anyway, according to CUPS people */ retval = usb_autopm_get_interface(intf); if (retval < 0) goto out; usblp->used = 1; file->private_data = usblp; usblp->wcomplete = 1; /* we begin writeable */ usblp->wstatus = 0; usblp->rcomplete = 0; if (handle_bidir(usblp) < 0) { usb_autopm_put_interface(intf); usblp->used = 0; file->private_data = NULL; retval = -EIO; } out: mutex_unlock(&usblp_mutex); return retval; } static void usblp_cleanup(struct usblp *usblp) { printk(KERN_INFO "usblp%d: removed\n", usblp->minor); kfree(usblp->readbuf); kfree(usblp->device_id_string); kfree(usblp->statusbuf); kfree(usblp); } static void usblp_unlink_urbs(struct usblp *usblp) { usb_kill_anchored_urbs(&usblp->urbs); } static int usblp_release(struct inode *inode, struct file *file) { struct usblp *usblp = file->private_data; usblp->flags &= ~LP_ABORT; mutex_lock(&usblp_mutex); usblp->used = 0; if (usblp->present) { usblp_unlink_urbs(usblp); usb_autopm_put_interface(usblp->intf); } else /* finish cleanup from disconnect */ usblp_cleanup(usblp); mutex_unlock(&usblp_mutex); return 0; } /* No kernel lock - fine */ static unsigned int usblp_poll(struct file *file, struct poll_table_struct *wait) { int ret; unsigned long flags; struct usblp *usblp = file->private_data; /* Should we check file->f_mode & FMODE_WRITE before poll_wait()? 
*/ poll_wait(file, &usblp->rwait, wait); poll_wait(file, &usblp->wwait, wait); spin_lock_irqsave(&usblp->lock, flags); ret = ((usblp->bidir && usblp->rcomplete) ? POLLIN | POLLRDNORM : 0) | ((usblp->no_paper || usblp->wcomplete) ? POLLOUT | POLLWRNORM : 0); spin_unlock_irqrestore(&usblp->lock, flags); return ret; } static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct usblp *usblp = file->private_data; int length, err, i; unsigned char newChannel; int status; int twoints[2]; int retval = 0; mutex_lock(&usblp->mut); if (!usblp->present) { retval = -ENODEV; goto done; } dbg("usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)", cmd, _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd)); if (_IOC_TYPE(cmd) == 'P') /* new-style ioctl number */ switch (_IOC_NR(cmd)) { case IOCNR_GET_DEVICE_ID: /* get the DEVICE_ID string */ if (_IOC_DIR(cmd) != _IOC_READ) { retval = -EINVAL; goto done; } length = usblp_cache_device_id_string(usblp); if (length < 0) { retval = length; goto done; } if (length > _IOC_SIZE(cmd)) length = _IOC_SIZE(cmd); /* truncate */ if (copy_to_user((void __user *) arg, usblp->device_id_string, (unsigned long) length)) { retval = -EFAULT; goto done; } break; case IOCNR_GET_PROTOCOLS: if (_IOC_DIR(cmd) != _IOC_READ || _IOC_SIZE(cmd) < sizeof(twoints)) { retval = -EINVAL; goto done; } twoints[0] = usblp->current_protocol; twoints[1] = 0; for (i = USBLP_FIRST_PROTOCOL; i <= USBLP_LAST_PROTOCOL; i++) { if (usblp->protocol[i].alt_setting >= 0) twoints[1] |= (1<<i); } if (copy_to_user((void __user *)arg, (unsigned char *)twoints, sizeof(twoints))) { retval = -EFAULT; goto done; } break; case IOCNR_SET_PROTOCOL: if (_IOC_DIR(cmd) != _IOC_WRITE) { retval = -EINVAL; goto done; } #ifdef DEBUG if (arg == -10) { usblp_dump(usblp); break; } #endif usblp_unlink_urbs(usblp); retval = usblp_set_protocol(usblp, arg); if (retval < 0) { usblp_set_protocol(usblp, usblp->current_protocol); } break; case IOCNR_HP_SET_CHANNEL: if 
(_IOC_DIR(cmd) != _IOC_WRITE || le16_to_cpu(usblp->dev->descriptor.idVendor) != 0x03F0 || usblp->quirks & USBLP_QUIRK_BIDIR) { retval = -EINVAL; goto done; } err = usblp_hp_channel_change_request(usblp, arg, &newChannel); if (err < 0) { dev_err(&usblp->dev->dev, "usblp%d: error = %d setting " "HP channel\n", usblp->minor, err); retval = -EIO; goto done; } dbg("usblp%d requested/got HP channel %ld/%d", usblp->minor, arg, newChannel); break; case IOCNR_GET_BUS_ADDRESS: if (_IOC_DIR(cmd) != _IOC_READ || _IOC_SIZE(cmd) < sizeof(twoints)) { retval = -EINVAL; goto done; } twoints[0] = usblp->dev->bus->busnum; twoints[1] = usblp->dev->devnum; if (copy_to_user((void __user *)arg, (unsigned char *)twoints, sizeof(twoints))) { retval = -EFAULT; goto done; } dbg("usblp%d is bus=%d, device=%d", usblp->minor, twoints[0], twoints[1]); break; case IOCNR_GET_VID_PID: if (_IOC_DIR(cmd) != _IOC_READ || _IOC_SIZE(cmd) < sizeof(twoints)) { retval = -EINVAL; goto done; } twoints[0] = le16_to_cpu(usblp->dev->descriptor.idVendor); twoints[1] = le16_to_cpu(usblp->dev->descriptor.idProduct); if (copy_to_user((void __user *)arg, (unsigned char *)twoints, sizeof(twoints))) { retval = -EFAULT; goto done; } dbg("usblp%d is VID=0x%4.4X, PID=0x%4.4X", usblp->minor, twoints[0], twoints[1]); break; case IOCNR_SOFT_RESET: if (_IOC_DIR(cmd) != _IOC_NONE) { retval = -EINVAL; goto done; } retval = usblp_reset(usblp); break; default: retval = -ENOTTY; } else /* old-style ioctl value */ switch (cmd) { case LPGETSTATUS: if ((retval = usblp_read_status(usblp, usblp->statusbuf))) { printk_ratelimited(KERN_ERR "usblp%d:" "failed reading printer status (%d)\n", usblp->minor, retval); retval = -EIO; goto done; } status = *usblp->statusbuf; if (copy_to_user((void __user *)arg, &status, sizeof(int))) retval = -EFAULT; break; case LPABORT: if (arg) usblp->flags |= LP_ABORT; else usblp->flags &= ~LP_ABORT; break; default: retval = -ENOTTY; } done: mutex_unlock(&usblp->mut); return retval; } static struct urb 
*usblp_new_writeurb(struct usblp *usblp, int transfer_length) { struct urb *urb; char *writebuf; if ((writebuf = kmalloc(transfer_length, GFP_KERNEL)) == NULL) return NULL; if ((urb = usb_alloc_urb(0, GFP_KERNEL)) == NULL) { kfree(writebuf); return NULL; } usb_fill_bulk_urb(urb, usblp->dev, usb_sndbulkpipe(usblp->dev, usblp->protocol[usblp->current_protocol].epwrite->bEndpointAddress), writebuf, transfer_length, usblp_bulk_write, usblp); urb->transfer_flags |= URB_FREE_BUFFER; return urb; } static ssize_t usblp_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct usblp *usblp = file->private_data; struct urb *writeurb; int rv; int transfer_length; ssize_t writecount = 0; if (mutex_lock_interruptible(&usblp->wmut)) { rv = -EINTR; goto raise_biglock; } if ((rv = usblp_wwait(usblp, !!(file->f_flags & O_NONBLOCK))) < 0) goto raise_wait; while (writecount < count) { /* * Step 1: Submit next block. */ if ((transfer_length = count - writecount) > USBLP_BUF_SIZE) transfer_length = USBLP_BUF_SIZE; rv = -ENOMEM; if ((writeurb = usblp_new_writeurb(usblp, transfer_length)) == NULL) goto raise_urb; usb_anchor_urb(writeurb, &usblp->urbs); if (copy_from_user(writeurb->transfer_buffer, buffer + writecount, transfer_length)) { rv = -EFAULT; goto raise_badaddr; } spin_lock_irq(&usblp->lock); usblp->wcomplete = 0; spin_unlock_irq(&usblp->lock); if ((rv = usb_submit_urb(writeurb, GFP_KERNEL)) < 0) { usblp->wstatus = 0; spin_lock_irq(&usblp->lock); usblp->no_paper = 0; usblp->wcomplete = 1; wake_up(&usblp->wwait); spin_unlock_irq(&usblp->lock); if (rv != -ENOMEM) rv = -EIO; goto raise_submit; } /* * Step 2: Wait for transfer to end, collect results. */ rv = usblp_wwait(usblp, !!(file->f_flags&O_NONBLOCK)); if (rv < 0) { if (rv == -EAGAIN) { /* Presume that it's going to complete well. 
*/ writecount += transfer_length; } if (rv == -ENOSPC) { spin_lock_irq(&usblp->lock); usblp->no_paper = 1; /* Mark for poll(2) */ spin_unlock_irq(&usblp->lock); writecount += transfer_length; } /* Leave URB dangling, to be cleaned on close. */ goto collect_error; } if (usblp->wstatus < 0) { rv = -EIO; goto collect_error; } /* * This is critical: it must be our URB, not other writer's. * The wmut exists mainly to cover us here. */ writecount += usblp->wstatus; } mutex_unlock(&usblp->wmut); return writecount; raise_submit: raise_badaddr: usb_unanchor_urb(writeurb); usb_free_urb(writeurb); raise_urb: raise_wait: collect_error: /* Out of raise sequence */ mutex_unlock(&usblp->wmut); raise_biglock: return writecount ? writecount : rv; } /* * Notice that we fail to restart in a few cases: on EFAULT, on restart * error, etc. This is the historical behaviour. In all such cases we return * EIO, and applications loop in order to get the new read going. */ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, loff_t *ppos) { struct usblp *usblp = file->private_data; ssize_t count; ssize_t avail; int rv; if (!usblp->bidir) return -EINVAL; rv = usblp_rwait_and_lock(usblp, !!(file->f_flags & O_NONBLOCK)); if (rv < 0) return rv; if ((avail = usblp->rstatus) < 0) { printk(KERN_ERR "usblp%d: error %d reading from printer\n", usblp->minor, (int)avail); usblp_submit_read(usblp); count = -EIO; goto done; } count = len < avail - usblp->readcount ? len : avail - usblp->readcount; if (count != 0 && copy_to_user(buffer, usblp->readbuf + usblp->readcount, count)) { count = -EFAULT; goto done; } if ((usblp->readcount += count) == avail) { if (usblp_submit_read(usblp) < 0) { /* We don't want to leak USB return codes into errno. */ if (count == 0) count = -EIO; goto done; } } done: mutex_unlock(&usblp->mut); return count; } /* * Wait for the write path to come idle. * This is called under the ->wmut, so the idle path stays idle. 
 *
 * Our write path has a peculiar property: it does not buffer like a tty,
 * but waits for the write to succeed. This allows our ->release to bug out
 * without waiting for writes to drain. But it obviously does not work
 * when O_NONBLOCK is set. So, applications setting O_NONBLOCK must use
 * select(2) or poll(2) to wait for the buffer to drain before closing.
 * Alternatively, set blocking mode with fcntl and issue a zero-size write.
 */
/* Returns usblp_wtest()'s verdict: 0 when the write side is idle,
 * negative errno (-EINTR/-ENODEV/-EAGAIN/-ENOSPC) on failure. */
static int usblp_wwait(struct usblp *usblp, int nonblock)
{
	DECLARE_WAITQUEUE(waita, current);
	int rc;
	int err = 0;

	add_wait_queue(&usblp->wwait, &waita);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (mutex_lock_interruptible(&usblp->mut)) {
			rc = -EINTR;
			break;
		}
		rc = usblp_wtest(usblp, nonblock);
		mutex_unlock(&usblp->mut);
		if (rc <= 0)	/* idle (0) or error (<0): done waiting */
			break;

		if (schedule_timeout(msecs_to_jiffies(1500)) == 0) {
			/* Timed out: either report paper-out in abort mode,
			 * or nudge the device with a status read. */
			if (usblp->flags & LP_ABORT) {
				err = usblp_check_status(usblp, err);
				if (err == 1) {	/* Paper out */
					rc = -ENOSPC;
					break;
				}
			} else {
				/* Prod the printer, Gentoo#251237. */
				mutex_lock(&usblp->mut);
				usblp_read_status(usblp, usblp->statusbuf);
				mutex_unlock(&usblp->mut);
			}
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&usblp->wwait, &waita);
	return rc;
}

/* Non-blocking probe of the write side: 0 when idle (->wcomplete set),
 * 1 when the caller should keep waiting, negative errno on device gone,
 * pending signal, or O_NONBLOCK. */
static int usblp_wtest(struct usblp *usblp, int nonblock)
{
	unsigned long flags;

	if (!usblp->present)
		return -ENODEV;
	if (signal_pending(current))
		return -EINTR;
	spin_lock_irqsave(&usblp->lock, flags);
	if (usblp->wcomplete) {
		spin_unlock_irqrestore(&usblp->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&usblp->lock, flags);
	if (nonblock)
		return -EAGAIN;
	return 1;
}

/*
 * Wait for read bytes to become available. This probably should have been
 * called usblp_r_lock_and_wait(), because we lock first. But it's a traditional
 * name for functions which lock and return.
 *
 * We do not use wait_event_interruptible because it makes locking iffy.
 */
/* Sleeps until a read has completed (or fails).  On a 0 return ->mut is
 * HELD and the caller must release it; on a negative return the mutex
 * is not held. */
static int usblp_rwait_and_lock(struct usblp *usblp, int nonblock)
{
	DECLARE_WAITQUEUE(waita, current);
	int rc;

	add_wait_queue(&usblp->rwait, &waita);
	for (;;) {
		if (mutex_lock_interruptible(&usblp->mut)) {
			rc = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		if ((rc = usblp_rtest(usblp, nonblock)) < 0) {
			mutex_unlock(&usblp->mut);
			break;
		}
		if (rc == 0)	/* Keep it locked */
			break;
		mutex_unlock(&usblp->mut);
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&usblp->rwait, &waita);
	return rc;
}

/* Non-blocking probe of the read side: 0 when data/status is ready
 * (->rcomplete set), 1 when the caller should keep waiting, negative
 * errno on device gone, pending signal, or O_NONBLOCK. */
static int usblp_rtest(struct usblp *usblp, int nonblock)
{
	unsigned long flags;

	if (!usblp->present)
		return -ENODEV;
	if (signal_pending(current))
		return -EINTR;
	spin_lock_irqsave(&usblp->lock, flags);
	if (usblp->rcomplete) {
		spin_unlock_irqrestore(&usblp->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&usblp->lock, flags);
	if (nonblock)
		return -EAGAIN;
	return 1;
}

/*
 * Please check ->bidir and other such things outside for now.
 */
/* Allocate, anchor and submit a bulk-IN URB into ->readbuf.  Resets
 * ->readcount and ->rcomplete; on submit failure the error is published
 * in ->rstatus with ->rcomplete set so readers do not hang.  Returns 0
 * or a negative errno. */
static int usblp_submit_read(struct usblp *usblp)
{
	struct urb *urb;
	unsigned long flags;
	int rc;

	rc = -ENOMEM;
	if ((urb = usb_alloc_urb(0, GFP_KERNEL)) == NULL)
		goto raise_urb;

	usb_fill_bulk_urb(urb, usblp->dev,
		usb_rcvbulkpipe(usblp->dev,
		  usblp->protocol[usblp->current_protocol].epread->bEndpointAddress),
		usblp->readbuf, USBLP_BUF_SIZE_IN,
		usblp_bulk_read, usblp);
	usb_anchor_urb(urb, &usblp->urbs);

	spin_lock_irqsave(&usblp->lock, flags);
	usblp->readcount = 0; /* XXX Why here? */
	usblp->rcomplete = 0;
	spin_unlock_irqrestore(&usblp->lock, flags);
	if ((rc = usb_submit_urb(urb, GFP_KERNEL)) < 0) {
		dbg("error submitting urb (%d)", rc);
		spin_lock_irqsave(&usblp->lock, flags);
		usblp->rstatus = rc;
		usblp->rcomplete = 1;
		spin_unlock_irqrestore(&usblp->lock, flags);
		goto raise_submit;
	}

	return 0;

raise_submit:
	usb_unanchor_urb(urb);
	usb_free_urb(urb);
raise_urb:
	return rc;
}

/*
 * Checks for printers that have quirks, such as requiring unidirectional
 * communication but reporting bidirectional; currently some HP printers
 * have this flaw (HP 810, 880, 895, etc.), or needing an init string
 * sent at each open (like some Epsons).
 * Returns 1 if found, 0 if not found.
 *
 * HP recommended that we use the bidirectional interface but
 * don't attempt any bulk IN transfers from the IN endpoint.
 * Here's some more detail on the problem:
 * The problem is not that it isn't bidirectional though. The problem
 * is that if you request a device ID, or status information, while
 * the buffers are full, the return data will end up in the print data
 * buffer. For example if you make sure you never request the device ID
 * while you are sending print data, and you don't try to query the
 * printer status every couple of milliseconds, you will probably be OK.
*/ static unsigned int usblp_quirks(__u16 vendor, __u16 product) { int i; for (i = 0; quirk_printers[i].vendorId; i++) { if (vendor == quirk_printers[i].vendorId && product == quirk_printers[i].productId) return quirk_printers[i].quirks; } return 0; } static const struct file_operations usblp_fops = { .owner = THIS_MODULE, .read = usblp_read, .write = usblp_write, .poll = usblp_poll, .unlocked_ioctl = usblp_ioctl, .compat_ioctl = usblp_ioctl, .open = usblp_open, .release = usblp_release, .llseek = noop_llseek, }; static char *usblp_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } static struct usb_class_driver usblp_class = { .name = "lp%d", .devnode = usblp_devnode, .fops = &usblp_fops, .minor_base = USBLP_MINOR_BASE, }; static ssize_t usblp_show_ieee1284_id(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct usblp *usblp = usb_get_intfdata(intf); if (usblp->device_id_string[0] == 0 && usblp->device_id_string[1] == 0) return 0; return sprintf(buf, "%s", usblp->device_id_string+2); } static DEVICE_ATTR(ieee1284_id, S_IRUGO, usblp_show_ieee1284_id, NULL); static int usblp_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usblp *usblp; int protocol; int retval; /* Malloc and start initializing usblp structure so we can use it * directly. */ usblp = kzalloc(sizeof(struct usblp), GFP_KERNEL); if (!usblp) { retval = -ENOMEM; goto abort_ret; } usblp->dev = dev; mutex_init(&usblp->wmut); mutex_init(&usblp->mut); spin_lock_init(&usblp->lock); init_waitqueue_head(&usblp->rwait); init_waitqueue_head(&usblp->wwait); init_usb_anchor(&usblp->urbs); usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; usblp->intf = intf; /* Malloc device ID string buffer to the largest expected length, * since we can re-query it on an ioctl and a dynamic string * could change in length. 
*/ if (!(usblp->device_id_string = kmalloc(USBLP_DEVICE_ID_SIZE, GFP_KERNEL))) { retval = -ENOMEM; goto abort; } /* * Allocate read buffer. We somewhat wastefully * malloc both regardless of bidirectionality, because the * alternate setting can be changed later via an ioctl. */ if (!(usblp->readbuf = kmalloc(USBLP_BUF_SIZE_IN, GFP_KERNEL))) { retval = -ENOMEM; goto abort; } /* Allocate buffer for printer status */ usblp->statusbuf = kmalloc(STATUS_BUF_SIZE, GFP_KERNEL); if (!usblp->statusbuf) { retval = -ENOMEM; goto abort; } /* Lookup quirks for this printer. */ usblp->quirks = usblp_quirks( le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); /* Analyze and pick initial alternate settings and endpoints. */ protocol = usblp_select_alts(usblp); if (protocol < 0) { dbg("incompatible printer-class device 0x%4.4X/0x%4.4X", le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); retval = -ENODEV; goto abort; } /* Setup the selected alternate setting and endpoints. */ if (usblp_set_protocol(usblp, protocol) < 0) { retval = -ENODEV; /* ->probe isn't ->ioctl */ goto abort; } /* Retrieve and store the device ID string. */ usblp_cache_device_id_string(usblp); retval = device_create_file(&intf->dev, &dev_attr_ieee1284_id); if (retval) goto abort_intfdata; #ifdef DEBUG usblp_check_status(usblp, 0); #endif usb_set_intfdata(intf, usblp); usblp->present = 1; retval = usb_register_dev(intf, &usblp_class); if (retval) { printk(KERN_ERR "usblp: Not able to get a minor" " (base %u, slice default): %d\n", USBLP_MINOR_BASE, retval); goto abort_intfdata; } usblp->minor = intf->minor; printk(KERN_INFO "usblp%d: USB %sdirectional printer dev %d " "if %d alt %d proto %d vid 0x%4.4X pid 0x%4.4X\n", usblp->minor, usblp->bidir ? 
"Bi" : "Uni", dev->devnum, usblp->ifnum, usblp->protocol[usblp->current_protocol].alt_setting, usblp->current_protocol, le16_to_cpu(usblp->dev->descriptor.idVendor), le16_to_cpu(usblp->dev->descriptor.idProduct)); return 0; abort_intfdata: usb_set_intfdata(intf, NULL); device_remove_file(&intf->dev, &dev_attr_ieee1284_id); abort: kfree(usblp->readbuf); kfree(usblp->statusbuf); kfree(usblp->device_id_string); kfree(usblp); abort_ret: return retval; } /* * We are a "new" style driver with usb_device_id table, * but our requirements are too intricate for simple match to handle. * * The "proto_bias" option may be used to specify the preferred protocol * for all USB printers (1=7/1/1, 2=7/1/2, 3=7/1/3). If the device * supports the preferred protocol, then we bind to it. * * The best interface for us is 7/1/2, because it is compatible * with a stream of characters. If we find it, we bind to it. * * Note that the people from hpoj.sourceforge.net need to be able to * bind to 7/1/3 (MLC/1284.4), so we provide them ioctls for this purpose. * * Failing 7/1/2, we look for 7/1/3, even though it's probably not * stream-compatible, because this matches the behaviour of the old code. * * If nothing else, we bind to 7/1/1 - the unidirectional interface. */ static int usblp_select_alts(struct usblp *usblp) { struct usb_interface *if_alt; struct usb_host_interface *ifd; struct usb_endpoint_descriptor *epd, *epwrite, *epread; int p, i, e; if_alt = usblp->intf; for (p = 0; p < USBLP_MAX_PROTOCOLS; p++) usblp->protocol[p].alt_setting = -1; /* Find out what we have. */ for (i = 0; i < if_alt->num_altsetting; i++) { ifd = &if_alt->altsetting[i]; if (ifd->desc.bInterfaceClass != 7 || ifd->desc.bInterfaceSubClass != 1) if (!(usblp->quirks & USBLP_QUIRK_BAD_CLASS)) continue; if (ifd->desc.bInterfaceProtocol < USBLP_FIRST_PROTOCOL || ifd->desc.bInterfaceProtocol > USBLP_LAST_PROTOCOL) continue; /* Look for bulk OUT and IN endpoints. 
*/ epwrite = epread = NULL; for (e = 0; e < ifd->desc.bNumEndpoints; e++) { epd = &ifd->endpoint[e].desc; if (usb_endpoint_is_bulk_out(epd)) if (!epwrite) epwrite = epd; if (usb_endpoint_is_bulk_in(epd)) if (!epread) epread = epd; } /* Ignore buggy hardware without the right endpoints. */ if (!epwrite || (ifd->desc.bInterfaceProtocol > 1 && !epread)) continue; /* Turn off reads for 7/1/1 (unidirectional) interfaces * and buggy bidirectional printers. */ if (ifd->desc.bInterfaceProtocol == 1) { epread = NULL; } else if (usblp->quirks & USBLP_QUIRK_BIDIR) { printk(KERN_INFO "usblp%d: Disabling reads from " "problematic bidirectional printer\n", usblp->minor); epread = NULL; } usblp->protocol[ifd->desc.bInterfaceProtocol].alt_setting = ifd->desc.bAlternateSetting; usblp->protocol[ifd->desc.bInterfaceProtocol].epwrite = epwrite; usblp->protocol[ifd->desc.bInterfaceProtocol].epread = epread; } /* If our requested protocol is supported, then use it. */ if (proto_bias >= USBLP_FIRST_PROTOCOL && proto_bias <= USBLP_LAST_PROTOCOL && usblp->protocol[proto_bias].alt_setting != -1) return proto_bias; /* Ordering is important here. */ if (usblp->protocol[2].alt_setting != -1) return 2; if (usblp->protocol[1].alt_setting != -1) return 1; if (usblp->protocol[3].alt_setting != -1) return 3; /* If nothing is available, then don't bind to this device. */ return -1; } static int usblp_set_protocol(struct usblp *usblp, int protocol) { int r, alts; if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) return -EINVAL; alts = usblp->protocol[protocol].alt_setting; if (alts < 0) return -EINVAL; r = usb_set_interface(usblp->dev, usblp->ifnum, alts); if (r < 0) { printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", alts, usblp->ifnum); return r; } usblp->bidir = (usblp->protocol[protocol].epread != NULL); usblp->current_protocol = protocol; dbg("usblp%d set protocol %d", usblp->minor, protocol); return 0; } /* Retrieves and caches device ID string. 
* Returns length, including length bytes but not null terminator. * On error, returns a negative errno value. */ static int usblp_cache_device_id_string(struct usblp *usblp) { int err, length; err = usblp_get_id(usblp, 0, usblp->device_id_string, USBLP_DEVICE_ID_SIZE - 1); if (err < 0) { dbg("usblp%d: error = %d reading IEEE-1284 Device ID string", usblp->minor, err); usblp->device_id_string[0] = usblp->device_id_string[1] = '\0'; return -EIO; } /* First two bytes are length in big-endian. * They count themselves, and we copy them into * the user's buffer. */ length = be16_to_cpu(*((__be16 *)usblp->device_id_string)); if (length < 2) length = 2; else if (length >= USBLP_DEVICE_ID_SIZE) length = USBLP_DEVICE_ID_SIZE - 1; usblp->device_id_string[length] = '\0'; dbg("usblp%d Device ID string [len=%d]=\"%s\"", usblp->minor, length, &usblp->device_id_string[2]); return length; } static void usblp_disconnect(struct usb_interface *intf) { struct usblp *usblp = usb_get_intfdata(intf); usb_deregister_dev(intf, &usblp_class); if (!usblp || !usblp->dev) { dev_err(&intf->dev, "bogus disconnect\n"); BUG(); } device_remove_file(&intf->dev, &dev_attr_ieee1284_id); mutex_lock(&usblp_mutex); mutex_lock(&usblp->mut); usblp->present = 0; wake_up(&usblp->wwait); wake_up(&usblp->rwait); usb_set_intfdata(intf, NULL); usblp_unlink_urbs(usblp); mutex_unlock(&usblp->mut); if (!usblp->used) usblp_cleanup(usblp); mutex_unlock(&usblp_mutex); } static int usblp_suspend(struct usb_interface *intf, pm_message_t message) { struct usblp *usblp = usb_get_intfdata(intf); usblp_unlink_urbs(usblp); #if 0 /* XXX Do we want this? What if someone is reading, should we fail? 
*/ /* not strictly necessary, but just in case */ wake_up(&usblp->wwait); wake_up(&usblp->rwait); #endif return 0; } static int usblp_resume(struct usb_interface *intf) { struct usblp *usblp = usb_get_intfdata(intf); int r; r = handle_bidir(usblp); return r; } static const struct usb_device_id usblp_ids[] = { { USB_DEVICE_INFO(7, 1, 1) }, { USB_DEVICE_INFO(7, 1, 2) }, { USB_DEVICE_INFO(7, 1, 3) }, { USB_INTERFACE_INFO(7, 1, 1) }, { USB_INTERFACE_INFO(7, 1, 2) }, { USB_INTERFACE_INFO(7, 1, 3) }, { USB_DEVICE(0x04b8, 0x0202) }, /* Seiko Epson Receipt Printer M129C */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usblp_ids); static struct usb_driver usblp_driver = { .name = "usblp", .probe = usblp_probe, .disconnect = usblp_disconnect, .suspend = usblp_suspend, .resume = usblp_resume, .id_table = usblp_ids, .supports_autosuspend = 1, }; module_usb_driver(usblp_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); module_param(proto_bias, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(proto_bias, "Favourite protocol number"); MODULE_LICENSE("GPL");
gpl-2.0
faux123/Galaxy_Note_3
arch/arm/mach-footbridge/dc21285-timer.c
5002
2579
/* * linux/arch/arm/mach-footbridge/dc21285-timer.c * * Copyright (C) 1998 Russell King. * Copyright (C) 1998 Phil Blundell */ #include <linux/clockchips.h> #include <linux/clocksource.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/irq.h> #include <asm/hardware/dec21285.h> #include <asm/mach/time.h> #include <asm/system_info.h> #include "common.h" static cycle_t cksrc_dc21285_read(struct clocksource *cs) { return cs->mask - *CSR_TIMER2_VALUE; } static int cksrc_dc21285_enable(struct clocksource *cs) { *CSR_TIMER2_LOAD = cs->mask; *CSR_TIMER2_CLR = 0; *CSR_TIMER2_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16; return 0; } static void cksrc_dc21285_disable(struct clocksource *cs) { *CSR_TIMER2_CNTL = 0; } static struct clocksource cksrc_dc21285 = { .name = "dc21285_timer2", .rating = 200, .read = cksrc_dc21285_read, .enable = cksrc_dc21285_enable, .disable = cksrc_dc21285_disable, .mask = CLOCKSOURCE_MASK(24), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static void ckevt_dc21285_set_mode(enum clock_event_mode mode, struct clock_event_device *c) { switch (mode) { case CLOCK_EVT_MODE_RESUME: case CLOCK_EVT_MODE_PERIODIC: *CSR_TIMER1_CLR = 0; *CSR_TIMER1_LOAD = (mem_fclk_21285 + 8 * HZ) / (16 * HZ); *CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD | TIMER_CNTL_DIV16; break; default: *CSR_TIMER1_CNTL = 0; break; } } static struct clock_event_device ckevt_dc21285 = { .name = "dc21285_timer1", .features = CLOCK_EVT_FEAT_PERIODIC, .rating = 200, .irq = IRQ_TIMER1, .set_mode = ckevt_dc21285_set_mode, }; static irqreturn_t timer1_interrupt(int irq, void *dev_id) { struct clock_event_device *ce = dev_id; *CSR_TIMER1_CLR = 0; ce->event_handler(ce); return IRQ_HANDLED; } static struct irqaction footbridge_timer_irq = { .name = "dc21285_timer1", .handler = timer1_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .dev_id = &ckevt_dc21285, }; /* * Set up timer interrupt. 
*/ static void __init footbridge_timer_init(void) { struct clock_event_device *ce = &ckevt_dc21285; clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16); setup_irq(ce->irq, &footbridge_timer_irq); clockevents_calc_mult_shift(ce, mem_fclk_21285, 5); ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce); ce->min_delta_ns = clockevent_delta2ns(0x000004, ce); ce->cpumask = cpumask_of(smp_processor_id()); clockevents_register_device(ce); } struct sys_timer footbridge_timer = { .init = footbridge_timer_init, };
gpl-2.0
rocky-luo/linux-kernel
fs/fat/fatent.c
6538
16686
/* * Copyright (C) 2004, OGAWA Hirofumi * Released under GPL v2. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/msdos_fs.h> #include <linux/blkdev.h> #include "fat.h" struct fatent_operations { void (*ent_blocknr)(struct super_block *, int, int *, sector_t *); void (*ent_set_ptr)(struct fat_entry *, int); int (*ent_bread)(struct super_block *, struct fat_entry *, int, sector_t); int (*ent_get)(struct fat_entry *); void (*ent_put)(struct fat_entry *, int); int (*ent_next)(struct fat_entry *); }; static DEFINE_SPINLOCK(fat12_entry_lock); static void fat12_ent_blocknr(struct super_block *sb, int entry, int *offset, sector_t *blocknr) { struct msdos_sb_info *sbi = MSDOS_SB(sb); int bytes = entry + (entry >> 1); WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); *offset = bytes & (sb->s_blocksize - 1); *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); } static void fat_ent_blocknr(struct super_block *sb, int entry, int *offset, sector_t *blocknr) { struct msdos_sb_info *sbi = MSDOS_SB(sb); int bytes = (entry << sbi->fatent_shift); WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); *offset = bytes & (sb->s_blocksize - 1); *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); } static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset) { struct buffer_head **bhs = fatent->bhs; if (fatent->nr_bhs == 1) { WARN_ON(offset >= (bhs[0]->b_size - 1)); fatent->u.ent12_p[0] = bhs[0]->b_data + offset; fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1); } else { WARN_ON(offset != (bhs[0]->b_size - 1)); fatent->u.ent12_p[0] = bhs[0]->b_data + offset; fatent->u.ent12_p[1] = bhs[1]->b_data; } } static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset) { WARN_ON(offset & (2 - 1)); fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset); } static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset) { WARN_ON(offset & (4 - 1)); fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset); } 
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent, int offset, sector_t blocknr) { struct buffer_head **bhs = fatent->bhs; WARN_ON(blocknr < MSDOS_SB(sb)->fat_start); fatent->fat_inode = MSDOS_SB(sb)->fat_inode; bhs[0] = sb_bread(sb, blocknr); if (!bhs[0]) goto err; if ((offset + 1) < sb->s_blocksize) fatent->nr_bhs = 1; else { /* This entry is block boundary, it needs the next block */ blocknr++; bhs[1] = sb_bread(sb, blocknr); if (!bhs[1]) goto err_brelse; fatent->nr_bhs = 2; } fat12_ent_set_ptr(fatent, offset); return 0; err_brelse: brelse(bhs[0]); err: fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr); return -EIO; } static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent, int offset, sector_t blocknr) { struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; WARN_ON(blocknr < MSDOS_SB(sb)->fat_start); fatent->fat_inode = MSDOS_SB(sb)->fat_inode; fatent->bhs[0] = sb_bread(sb, blocknr); if (!fatent->bhs[0]) { fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr); return -EIO; } fatent->nr_bhs = 1; ops->ent_set_ptr(fatent, offset); return 0; } static int fat12_ent_get(struct fat_entry *fatent) { u8 **ent12_p = fatent->u.ent12_p; int next; spin_lock(&fat12_entry_lock); if (fatent->entry & 1) next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4); else next = (*ent12_p[1] << 8) | *ent12_p[0]; spin_unlock(&fat12_entry_lock); next &= 0x0fff; if (next >= BAD_FAT12) next = FAT_ENT_EOF; return next; } static int fat16_ent_get(struct fat_entry *fatent) { int next = le16_to_cpu(*fatent->u.ent16_p); WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1)); if (next >= BAD_FAT16) next = FAT_ENT_EOF; return next; } static int fat32_ent_get(struct fat_entry *fatent) { int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff; WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1)); if (next >= BAD_FAT32) next = FAT_ENT_EOF; return next; } static void fat12_ent_put(struct fat_entry *fatent, int new) { u8 **ent12_p 
= fatent->u.ent12_p; if (new == FAT_ENT_EOF) new = EOF_FAT12; spin_lock(&fat12_entry_lock); if (fatent->entry & 1) { *ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f); *ent12_p[1] = new >> 4; } else { *ent12_p[0] = new & 0xff; *ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8); } spin_unlock(&fat12_entry_lock); mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode); if (fatent->nr_bhs == 2) mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode); } static void fat16_ent_put(struct fat_entry *fatent, int new) { if (new == FAT_ENT_EOF) new = EOF_FAT16; *fatent->u.ent16_p = cpu_to_le16(new); mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode); } static void fat32_ent_put(struct fat_entry *fatent, int new) { if (new == FAT_ENT_EOF) new = EOF_FAT32; WARN_ON(new & 0xf0000000); new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff; *fatent->u.ent32_p = cpu_to_le32(new); mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode); } static int fat12_ent_next(struct fat_entry *fatent) { u8 **ent12_p = fatent->u.ent12_p; struct buffer_head **bhs = fatent->bhs; u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1); fatent->entry++; if (fatent->nr_bhs == 1) { WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 2))); WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))); if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) { ent12_p[0] = nextp - 1; ent12_p[1] = nextp; return 1; } } else { WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))); WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data); ent12_p[0] = nextp - 1; ent12_p[1] = nextp; brelse(bhs[0]); bhs[0] = bhs[1]; fatent->nr_bhs = 1; return 1; } ent12_p[0] = NULL; ent12_p[1] = NULL; return 0; } static int fat16_ent_next(struct fat_entry *fatent) { const struct buffer_head *bh = fatent->bhs[0]; fatent->entry++; if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) { fatent->u.ent16_p++; return 1; } fatent->u.ent16_p = NULL; return 0; } static int fat32_ent_next(struct 
fat_entry *fatent) { const struct buffer_head *bh = fatent->bhs[0]; fatent->entry++; if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) { fatent->u.ent32_p++; return 1; } fatent->u.ent32_p = NULL; return 0; } static struct fatent_operations fat12_ops = { .ent_blocknr = fat12_ent_blocknr, .ent_set_ptr = fat12_ent_set_ptr, .ent_bread = fat12_ent_bread, .ent_get = fat12_ent_get, .ent_put = fat12_ent_put, .ent_next = fat12_ent_next, }; static struct fatent_operations fat16_ops = { .ent_blocknr = fat_ent_blocknr, .ent_set_ptr = fat16_ent_set_ptr, .ent_bread = fat_ent_bread, .ent_get = fat16_ent_get, .ent_put = fat16_ent_put, .ent_next = fat16_ent_next, }; static struct fatent_operations fat32_ops = { .ent_blocknr = fat_ent_blocknr, .ent_set_ptr = fat32_ent_set_ptr, .ent_bread = fat_ent_bread, .ent_get = fat32_ent_get, .ent_put = fat32_ent_put, .ent_next = fat32_ent_next, }; static inline void lock_fat(struct msdos_sb_info *sbi) { mutex_lock(&sbi->fat_lock); } static inline void unlock_fat(struct msdos_sb_info *sbi) { mutex_unlock(&sbi->fat_lock); } void fat_ent_access_init(struct super_block *sb) { struct msdos_sb_info *sbi = MSDOS_SB(sb); mutex_init(&sbi->fat_lock); switch (sbi->fat_bits) { case 32: sbi->fatent_shift = 2; sbi->fatent_ops = &fat32_ops; break; case 16: sbi->fatent_shift = 1; sbi->fatent_ops = &fat16_ops; break; case 12: sbi->fatent_shift = -1; sbi->fatent_ops = &fat12_ops; break; } } static inline int fat_ent_update_ptr(struct super_block *sb, struct fat_entry *fatent, int offset, sector_t blocknr) { struct msdos_sb_info *sbi = MSDOS_SB(sb); struct fatent_operations *ops = sbi->fatent_ops; struct buffer_head **bhs = fatent->bhs; /* Is this fatent's blocks including this entry? */ if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr) return 0; if (sbi->fat_bits == 12) { if ((offset + 1) < sb->s_blocksize) { /* This entry is on bhs[0]. 
*/ if (fatent->nr_bhs == 2) { brelse(bhs[1]); fatent->nr_bhs = 1; } } else { /* This entry needs the next block. */ if (fatent->nr_bhs != 2) return 0; if (bhs[1]->b_blocknr != (blocknr + 1)) return 0; } } ops->ent_set_ptr(fatent, offset); return 1; } int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry) { struct super_block *sb = inode->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); struct fatent_operations *ops = sbi->fatent_ops; int err, offset; sector_t blocknr; if (entry < FAT_START_ENT || sbi->max_cluster <= entry) { fatent_brelse(fatent); fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry); return -EIO; } fatent_set_entry(fatent, entry); ops->ent_blocknr(sb, entry, &offset, &blocknr); if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) { fatent_brelse(fatent); err = ops->ent_bread(sb, fatent, offset, blocknr); if (err) return err; } return ops->ent_get(fatent); } /* FIXME: We can write the blocks as more big chunk. */ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs, int nr_bhs) { struct msdos_sb_info *sbi = MSDOS_SB(sb); struct buffer_head *c_bh; int err, n, copy; err = 0; for (copy = 1; copy < sbi->fats; copy++) { sector_t backup_fat = sbi->fat_length * copy; for (n = 0; n < nr_bhs; n++) { c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr); if (!c_bh) { err = -ENOMEM; goto error; } memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize); set_buffer_uptodate(c_bh); mark_buffer_dirty_inode(c_bh, sbi->fat_inode); if (sb->s_flags & MS_SYNCHRONOUS) err = sync_dirty_buffer(c_bh); brelse(c_bh); if (err) goto error; } } error: return err; } int fat_ent_write(struct inode *inode, struct fat_entry *fatent, int new, int wait) { struct super_block *sb = inode->i_sb; struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; int err; ops->ent_put(fatent, new); if (wait) { err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs); if (err) return err; } return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs); } 
static inline int fat_ent_next(struct msdos_sb_info *sbi, struct fat_entry *fatent) { if (sbi->fatent_ops->ent_next(fatent)) { if (fatent->entry < sbi->max_cluster) return 1; } return 0; } static inline int fat_ent_read_block(struct super_block *sb, struct fat_entry *fatent) { struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; sector_t blocknr; int offset; fatent_brelse(fatent); ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr); return ops->ent_bread(sb, fatent, offset, blocknr); } static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs, struct fat_entry *fatent) { int n, i; for (n = 0; n < fatent->nr_bhs; n++) { for (i = 0; i < *nr_bhs; i++) { if (fatent->bhs[n] == bhs[i]) break; } if (i == *nr_bhs) { get_bh(fatent->bhs[n]); bhs[i] = fatent->bhs[n]; (*nr_bhs)++; } } } int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster) { struct super_block *sb = inode->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); struct fatent_operations *ops = sbi->fatent_ops; struct fat_entry fatent, prev_ent; struct buffer_head *bhs[MAX_BUF_PER_PAGE]; int i, count, err, nr_bhs, idx_clus; BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2)); /* fixed limit */ lock_fat(sbi); if (sbi->free_clusters != -1 && sbi->free_clus_valid && sbi->free_clusters < nr_cluster) { unlock_fat(sbi); return -ENOSPC; } err = nr_bhs = idx_clus = 0; count = FAT_START_ENT; fatent_init(&prev_ent); fatent_init(&fatent); fatent_set_entry(&fatent, sbi->prev_free + 1); while (count < sbi->max_cluster) { if (fatent.entry >= sbi->max_cluster) fatent.entry = FAT_START_ENT; fatent_set_entry(&fatent, fatent.entry); err = fat_ent_read_block(sb, &fatent); if (err) goto out; /* Find the free entries in a block */ do { if (ops->ent_get(&fatent) == FAT_ENT_FREE) { int entry = fatent.entry; /* make the cluster chain */ ops->ent_put(&fatent, FAT_ENT_EOF); if (prev_ent.nr_bhs) ops->ent_put(&prev_ent, entry); fat_collect_bhs(bhs, &nr_bhs, &fatent); sbi->prev_free = entry; if (sbi->free_clusters != -1) 
sbi->free_clusters--; sb->s_dirt = 1; cluster[idx_clus] = entry; idx_clus++; if (idx_clus == nr_cluster) goto out; /* * fat_collect_bhs() gets ref-count of bhs, * so we can still use the prev_ent. */ prev_ent = fatent; } count++; if (count == sbi->max_cluster) break; } while (fat_ent_next(sbi, &fatent)); } /* Couldn't allocate the free entries */ sbi->free_clusters = 0; sbi->free_clus_valid = 1; sb->s_dirt = 1; err = -ENOSPC; out: unlock_fat(sbi); fatent_brelse(&fatent); if (!err) { if (inode_needs_sync(inode)) err = fat_sync_bhs(bhs, nr_bhs); if (!err) err = fat_mirror_bhs(sb, bhs, nr_bhs); } for (i = 0; i < nr_bhs; i++) brelse(bhs[i]); if (err && idx_clus) fat_free_clusters(inode, cluster[0]); return err; } int fat_free_clusters(struct inode *inode, int cluster) { struct super_block *sb = inode->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); struct fatent_operations *ops = sbi->fatent_ops; struct fat_entry fatent; struct buffer_head *bhs[MAX_BUF_PER_PAGE]; int i, err, nr_bhs; int first_cl = cluster; nr_bhs = 0; fatent_init(&fatent); lock_fat(sbi); do { cluster = fat_ent_read(inode, &fatent, cluster); if (cluster < 0) { err = cluster; goto error; } else if (cluster == FAT_ENT_FREE) { fat_fs_error(sb, "%s: deleting FAT entry beyond EOF", __func__); err = -EIO; goto error; } if (sbi->options.discard) { /* * Issue discard for the sectors we no longer * care about, batching contiguous clusters * into one request */ if (cluster != fatent.entry + 1) { int nr_clus = fatent.entry - first_cl + 1; sb_issue_discard(sb, fat_clus_to_blknr(sbi, first_cl), nr_clus * sbi->sec_per_clus, GFP_NOFS, 0); first_cl = cluster; } } ops->ent_put(&fatent, FAT_ENT_FREE); if (sbi->free_clusters != -1) { sbi->free_clusters++; sb->s_dirt = 1; } if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) { if (sb->s_flags & MS_SYNCHRONOUS) { err = fat_sync_bhs(bhs, nr_bhs); if (err) goto error; } err = fat_mirror_bhs(sb, bhs, nr_bhs); if (err) goto error; for (i = 0; i < nr_bhs; i++) brelse(bhs[i]); nr_bhs 
= 0; } fat_collect_bhs(bhs, &nr_bhs, &fatent); } while (cluster != FAT_ENT_EOF); if (sb->s_flags & MS_SYNCHRONOUS) { err = fat_sync_bhs(bhs, nr_bhs); if (err) goto error; } err = fat_mirror_bhs(sb, bhs, nr_bhs); error: fatent_brelse(&fatent); for (i = 0; i < nr_bhs; i++) brelse(bhs[i]); unlock_fat(sbi); return err; } EXPORT_SYMBOL_GPL(fat_free_clusters); /* 128kb is the whole sectors for FAT12 and FAT16 */ #define FAT_READA_SIZE (128 * 1024) static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent, unsigned long reada_blocks) { struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; sector_t blocknr; int i, offset; ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr); for (i = 0; i < reada_blocks; i++) sb_breadahead(sb, blocknr + i); } int fat_count_free_clusters(struct super_block *sb) { struct msdos_sb_info *sbi = MSDOS_SB(sb); struct fatent_operations *ops = sbi->fatent_ops; struct fat_entry fatent; unsigned long reada_blocks, reada_mask, cur_block; int err = 0, free; lock_fat(sbi); if (sbi->free_clusters != -1 && sbi->free_clus_valid) goto out; reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits; reada_mask = reada_blocks - 1; cur_block = 0; free = 0; fatent_init(&fatent); fatent_set_entry(&fatent, FAT_START_ENT); while (fatent.entry < sbi->max_cluster) { /* readahead of fat blocks */ if ((cur_block & reada_mask) == 0) { unsigned long rest = sbi->fat_length - cur_block; fat_ent_reada(sb, &fatent, min(reada_blocks, rest)); } cur_block++; err = fat_ent_read_block(sb, &fatent); if (err) goto out; do { if (ops->ent_get(&fatent) == FAT_ENT_FREE) free++; } while (fat_ent_next(sbi, &fatent)); } sbi->free_clusters = free; sbi->free_clus_valid = 1; sb->s_dirt = 1; fatent_brelse(&fatent); out: unlock_fat(sbi); return err; }
gpl-2.0
assusdan/cyanogenmod_kernel_prestigio_muzed3
net/netlabel/netlabel_kapi.c
7050
28793
/* * NetLabel Kernel API * * This file defines the kernel API for the NetLabel system. The NetLabel * system manages static and dynamic label mappings for network protocols such * as CIPSO and RIPSO. * * Author: Paul Moore <paul@paul-moore.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/audit.h> #include <linux/in.h> #include <linux/in6.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/bug.h> #include <linux/atomic.h> #include "netlabel_domainhash.h" #include "netlabel_unlabeled.h" #include "netlabel_cipso_v4.h" #include "netlabel_user.h" #include "netlabel_mgmt.h" #include "netlabel_addrlist.h" /* * Configuration Functions */ /** * netlbl_cfg_map_del - Remove a NetLabel/LSM domain mapping * @domain: the domain mapping to remove * @family: address family * @addr: IP address * @mask: IP address mask * @audit_info: NetLabel audit information * * Description: * Removes a NetLabel/LSM domain mapping. A @domain value of NULL causes the * default domain mapping to be removed. Returns zero on success, negative * values on failure. 
* */ int netlbl_cfg_map_del(const char *domain, u16 family, const void *addr, const void *mask, struct netlbl_audit *audit_info) { if (addr == NULL && mask == NULL) { return netlbl_domhsh_remove(domain, audit_info); } else if (addr != NULL && mask != NULL) { switch (family) { case AF_INET: return netlbl_domhsh_remove_af4(domain, addr, mask, audit_info); default: return -EPFNOSUPPORT; } } else return -EINVAL; } /** * netlbl_cfg_unlbl_map_add - Add a new unlabeled mapping * @domain: the domain mapping to add * @family: address family * @addr: IP address * @mask: IP address mask * @audit_info: NetLabel audit information * * Description: * Adds a new unlabeled NetLabel/LSM domain mapping. A @domain value of NULL * causes a new default domain mapping to be added. Returns zero on success, * negative values on failure. * */ int netlbl_cfg_unlbl_map_add(const char *domain, u16 family, const void *addr, const void *mask, struct netlbl_audit *audit_info) { int ret_val = -ENOMEM; struct netlbl_dom_map *entry; struct netlbl_domaddr_map *addrmap = NULL; struct netlbl_domaddr4_map *map4 = NULL; struct netlbl_domaddr6_map *map6 = NULL; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; if (domain != NULL) { entry->domain = kstrdup(domain, GFP_ATOMIC); if (entry->domain == NULL) goto cfg_unlbl_map_add_failure; } if (addr == NULL && mask == NULL) entry->type = NETLBL_NLTYPE_UNLABELED; else if (addr != NULL && mask != NULL) { addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); if (addrmap == NULL) goto cfg_unlbl_map_add_failure; INIT_LIST_HEAD(&addrmap->list4); INIT_LIST_HEAD(&addrmap->list6); switch (family) { case AF_INET: { const struct in_addr *addr4 = addr; const struct in_addr *mask4 = mask; map4 = kzalloc(sizeof(*map4), GFP_ATOMIC); if (map4 == NULL) goto cfg_unlbl_map_add_failure; map4->type = NETLBL_NLTYPE_UNLABELED; map4->list.addr = addr4->s_addr & mask4->s_addr; map4->list.mask = mask4->s_addr; map4->list.valid = 1; ret_val = 
netlbl_af4list_add(&map4->list, &addrmap->list4); if (ret_val != 0) goto cfg_unlbl_map_add_failure; break; } #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { const struct in6_addr *addr6 = addr; const struct in6_addr *mask6 = mask; map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); if (map6 == NULL) goto cfg_unlbl_map_add_failure; map6->type = NETLBL_NLTYPE_UNLABELED; map6->list.addr = *addr6; map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0]; map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1]; map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2]; map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3]; map6->list.mask = *mask6; map6->list.valid = 1; ret_val = netlbl_af6list_add(&map6->list, &addrmap->list6); if (ret_val != 0) goto cfg_unlbl_map_add_failure; break; } #endif /* IPv6 */ default: goto cfg_unlbl_map_add_failure; break; } entry->type_def.addrsel = addrmap; entry->type = NETLBL_NLTYPE_ADDRSELECT; } else { ret_val = -EINVAL; goto cfg_unlbl_map_add_failure; } ret_val = netlbl_domhsh_add(entry, audit_info); if (ret_val != 0) goto cfg_unlbl_map_add_failure; return 0; cfg_unlbl_map_add_failure: kfree(entry->domain); kfree(entry); kfree(addrmap); kfree(map4); kfree(map6); return ret_val; } /** * netlbl_cfg_unlbl_static_add - Adds a new static label * @net: network namespace * @dev_name: interface name * @addr: IP address in network byte order (struct in[6]_addr) * @mask: address mask in network byte order (struct in[6]_addr) * @family: address family * @secid: LSM secid value for the entry * @audit_info: NetLabel audit information * * Description: * Adds a new NetLabel static label to be used when protocol provided labels * are not present on incoming traffic. If @dev_name is NULL then the default * interface will be used. Returns zero on success, negative values on failure. 
* */ int netlbl_cfg_unlbl_static_add(struct net *net, const char *dev_name, const void *addr, const void *mask, u16 family, u32 secid, struct netlbl_audit *audit_info) { u32 addr_len; switch (family) { case AF_INET: addr_len = sizeof(struct in_addr); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: addr_len = sizeof(struct in6_addr); break; #endif /* IPv6 */ default: return -EPFNOSUPPORT; } return netlbl_unlhsh_add(net, dev_name, addr, mask, addr_len, secid, audit_info); } /** * netlbl_cfg_unlbl_static_del - Removes an existing static label * @net: network namespace * @dev_name: interface name * @addr: IP address in network byte order (struct in[6]_addr) * @mask: address mask in network byte order (struct in[6]_addr) * @family: address family * @secid: LSM secid value for the entry * @audit_info: NetLabel audit information * * Description: * Removes an existing NetLabel static label used when protocol provided labels * are not present on incoming traffic. If @dev_name is NULL then the default * interface will be used. Returns zero on success, negative values on failure. * */ int netlbl_cfg_unlbl_static_del(struct net *net, const char *dev_name, const void *addr, const void *mask, u16 family, struct netlbl_audit *audit_info) { u32 addr_len; switch (family) { case AF_INET: addr_len = sizeof(struct in_addr); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: addr_len = sizeof(struct in6_addr); break; #endif /* IPv6 */ default: return -EPFNOSUPPORT; } return netlbl_unlhsh_remove(net, dev_name, addr, mask, addr_len, audit_info); } /** * netlbl_cfg_cipsov4_add - Add a new CIPSOv4 DOI definition * @doi_def: CIPSO DOI definition * @audit_info: NetLabel audit information * * Description: * Add a new CIPSO DOI definition as defined by @doi_def. Returns zero on * success and negative values on failure. 
* */ int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { return cipso_v4_doi_add(doi_def, audit_info); } /** * netlbl_cfg_cipsov4_del - Remove an existing CIPSOv4 DOI definition * @doi: CIPSO DOI * @audit_info: NetLabel audit information * * Description: * Remove an existing CIPSO DOI definition matching @doi. Returns zero on * success and negative values on failure. * */ void netlbl_cfg_cipsov4_del(u32 doi, struct netlbl_audit *audit_info) { cipso_v4_doi_remove(doi, audit_info); } /** * netlbl_cfg_cipsov4_map_add - Add a new CIPSOv4 DOI mapping * @doi: the CIPSO DOI * @domain: the domain mapping to add * @addr: IP address * @mask: IP address mask * @audit_info: NetLabel audit information * * Description: * Add a new NetLabel/LSM domain mapping for the given CIPSO DOI to the NetLabel * subsystem. A @domain value of NULL adds a new default domain mapping. * Returns zero on success, negative values on failure. * */ int netlbl_cfg_cipsov4_map_add(u32 doi, const char *domain, const struct in_addr *addr, const struct in_addr *mask, struct netlbl_audit *audit_info) { int ret_val = -ENOMEM; struct cipso_v4_doi *doi_def; struct netlbl_dom_map *entry; struct netlbl_domaddr_map *addrmap = NULL; struct netlbl_domaddr4_map *addrinfo = NULL; doi_def = cipso_v4_doi_getdef(doi); if (doi_def == NULL) return -ENOENT; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) goto out_entry; if (domain != NULL) { entry->domain = kstrdup(domain, GFP_ATOMIC); if (entry->domain == NULL) goto out_domain; } if (addr == NULL && mask == NULL) { entry->type_def.cipsov4 = doi_def; entry->type = NETLBL_NLTYPE_CIPSOV4; } else if (addr != NULL && mask != NULL) { addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); if (addrmap == NULL) goto out_addrmap; INIT_LIST_HEAD(&addrmap->list4); INIT_LIST_HEAD(&addrmap->list6); addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC); if (addrinfo == NULL) goto out_addrinfo; addrinfo->type_def.cipsov4 = doi_def; 
addrinfo->type = NETLBL_NLTYPE_CIPSOV4; addrinfo->list.addr = addr->s_addr & mask->s_addr; addrinfo->list.mask = mask->s_addr; addrinfo->list.valid = 1; ret_val = netlbl_af4list_add(&addrinfo->list, &addrmap->list4); if (ret_val != 0) goto cfg_cipsov4_map_add_failure; entry->type_def.addrsel = addrmap; entry->type = NETLBL_NLTYPE_ADDRSELECT; } else { ret_val = -EINVAL; goto out_addrmap; } ret_val = netlbl_domhsh_add(entry, audit_info); if (ret_val != 0) goto cfg_cipsov4_map_add_failure; return 0; cfg_cipsov4_map_add_failure: kfree(addrinfo); out_addrinfo: kfree(addrmap); out_addrmap: kfree(entry->domain); out_domain: kfree(entry); out_entry: cipso_v4_doi_putdef(doi_def); return ret_val; } /* * Security Attribute Functions */ /** * netlbl_secattr_catmap_walk - Walk a LSM secattr catmap looking for a bit * @catmap: the category bitmap * @offset: the offset to start searching at, in bits * * Description: * This function walks a LSM secattr category bitmap starting at @offset and * returns the spot of the first set bit or -ENOENT if no bits are set. 
* */ int netlbl_secattr_catmap_walk(struct netlbl_lsm_secattr_catmap *catmap, u32 offset) { struct netlbl_lsm_secattr_catmap *iter = catmap; u32 node_idx; u32 node_bit; NETLBL_CATMAP_MAPTYPE bitmap; if (offset > iter->startbit) { while (offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) { iter = iter->next; if (iter == NULL) return -ENOENT; } node_idx = (offset - iter->startbit) / NETLBL_CATMAP_MAPSIZE; node_bit = offset - iter->startbit - (NETLBL_CATMAP_MAPSIZE * node_idx); } else { node_idx = 0; node_bit = 0; } bitmap = iter->bitmap[node_idx] >> node_bit; for (;;) { if (bitmap != 0) { while ((bitmap & NETLBL_CATMAP_BIT) == 0) { bitmap >>= 1; node_bit++; } return iter->startbit + (NETLBL_CATMAP_MAPSIZE * node_idx) + node_bit; } if (++node_idx >= NETLBL_CATMAP_MAPCNT) { if (iter->next != NULL) { iter = iter->next; node_idx = 0; } else return -ENOENT; } bitmap = iter->bitmap[node_idx]; node_bit = 0; } return -ENOENT; } /** * netlbl_secattr_catmap_walk_rng - Find the end of a string of set bits * @catmap: the category bitmap * @offset: the offset to start searching at, in bits * * Description: * This function walks a LSM secattr category bitmap starting at @offset and * returns the spot of the first cleared bit or -ENOENT if the offset is past * the end of the bitmap. 
* */ int netlbl_secattr_catmap_walk_rng(struct netlbl_lsm_secattr_catmap *catmap, u32 offset) { struct netlbl_lsm_secattr_catmap *iter = catmap; u32 node_idx; u32 node_bit; NETLBL_CATMAP_MAPTYPE bitmask; NETLBL_CATMAP_MAPTYPE bitmap; if (offset > iter->startbit) { while (offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) { iter = iter->next; if (iter == NULL) return -ENOENT; } node_idx = (offset - iter->startbit) / NETLBL_CATMAP_MAPSIZE; node_bit = offset - iter->startbit - (NETLBL_CATMAP_MAPSIZE * node_idx); } else { node_idx = 0; node_bit = 0; } bitmask = NETLBL_CATMAP_BIT << node_bit; for (;;) { bitmap = iter->bitmap[node_idx]; while (bitmask != 0 && (bitmap & bitmask) != 0) { bitmask <<= 1; node_bit++; } if (bitmask != 0) return iter->startbit + (NETLBL_CATMAP_MAPSIZE * node_idx) + node_bit - 1; else if (++node_idx >= NETLBL_CATMAP_MAPCNT) { if (iter->next == NULL) return iter->startbit + NETLBL_CATMAP_SIZE - 1; iter = iter->next; node_idx = 0; } bitmask = NETLBL_CATMAP_BIT; node_bit = 0; } return -ENOENT; } /** * netlbl_secattr_catmap_setbit - Set a bit in a LSM secattr catmap * @catmap: the category bitmap * @bit: the bit to set * @flags: memory allocation flags * * Description: * Set the bit specified by @bit in @catmap. Returns zero on success, * negative values on failure. 
* */ int netlbl_secattr_catmap_setbit(struct netlbl_lsm_secattr_catmap *catmap, u32 bit, gfp_t flags) { struct netlbl_lsm_secattr_catmap *iter = catmap; u32 node_bit; u32 node_idx; while (iter->next != NULL && bit >= (iter->startbit + NETLBL_CATMAP_SIZE)) iter = iter->next; if (bit >= (iter->startbit + NETLBL_CATMAP_SIZE)) { iter->next = netlbl_secattr_catmap_alloc(flags); if (iter->next == NULL) return -ENOMEM; iter = iter->next; iter->startbit = bit & ~(NETLBL_CATMAP_SIZE - 1); } /* gcc always rounds to zero when doing integer division */ node_idx = (bit - iter->startbit) / NETLBL_CATMAP_MAPSIZE; node_bit = bit - iter->startbit - (NETLBL_CATMAP_MAPSIZE * node_idx); iter->bitmap[node_idx] |= NETLBL_CATMAP_BIT << node_bit; return 0; } /** * netlbl_secattr_catmap_setrng - Set a range of bits in a LSM secattr catmap * @catmap: the category bitmap * @start: the starting bit * @end: the last bit in the string * @flags: memory allocation flags * * Description: * Set a range of bits, starting at @start and ending with @end. Returns zero * on success, negative values on failure. * */ int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap, u32 start, u32 end, gfp_t flags) { int ret_val = 0; struct netlbl_lsm_secattr_catmap *iter = catmap; u32 iter_max_spot; u32 spot; /* XXX - This could probably be made a bit faster by combining writes * to the catmap instead of setting a single bit each time, but for * right now skipping to the start of the range in the catmap should * be a nice improvement over calling the individual setbit function * repeatedly from a loop. 
*/ while (iter->next != NULL && start >= (iter->startbit + NETLBL_CATMAP_SIZE)) iter = iter->next; iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE; for (spot = start; spot <= end && ret_val == 0; spot++) { if (spot >= iter_max_spot && iter->next != NULL) { iter = iter->next; iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE; } ret_val = netlbl_secattr_catmap_setbit(iter, spot, flags); } return ret_val; } /* * LSM Functions */ /** * netlbl_enabled - Determine if the NetLabel subsystem is enabled * * Description: * The LSM can use this function to determine if it should use NetLabel * security attributes in it's enforcement mechanism. Currently, NetLabel is * considered to be enabled when it's configuration contains a valid setup for * at least one labeled protocol (i.e. NetLabel can understand incoming * labeled packets of at least one type); otherwise NetLabel is considered to * be disabled. * */ int netlbl_enabled(void) { /* At some point we probably want to expose this mechanism to the user * as well so that admins can toggle NetLabel regardless of the * configuration */ return (atomic_read(&netlabel_mgmt_protocount) > 0); } /** * netlbl_sock_setattr - Label a socket using the correct protocol * @sk: the socket to label * @family: protocol family * @secattr: the security attributes * * Description: * Attach the correct label to the given socket using the security attributes * specified in @secattr. This function requires exclusive access to @sk, * which means it either needs to be in the process of being created or locked. * Returns zero on success, -EDESTADDRREQ if the domain is configured to use * network address selectors (can't blindly label the socket), and negative * values on all other failures. 
* */ int netlbl_sock_setattr(struct sock *sk, u16 family, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct netlbl_dom_map *dom_entry; rcu_read_lock(); dom_entry = netlbl_domhsh_getentry(secattr->domain); if (dom_entry == NULL) { ret_val = -ENOENT; goto socket_setattr_return; } switch (family) { case AF_INET: switch (dom_entry->type) { case NETLBL_NLTYPE_ADDRSELECT: ret_val = -EDESTADDRREQ; break; case NETLBL_NLTYPE_CIPSOV4: ret_val = cipso_v4_sock_setattr(sk, dom_entry->type_def.cipsov4, secattr); break; case NETLBL_NLTYPE_UNLABELED: ret_val = 0; break; default: ret_val = -ENOENT; } break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: /* since we don't support any IPv6 labeling protocols right * now we can optimize everything away until we do */ ret_val = 0; break; #endif /* IPv6 */ default: ret_val = -EPROTONOSUPPORT; } socket_setattr_return: rcu_read_unlock(); return ret_val; } /** * netlbl_sock_delattr - Delete all the NetLabel labels on a socket * @sk: the socket * * Description: * Remove all the NetLabel labeling from @sk. The caller is responsible for * ensuring that @sk is locked. * */ void netlbl_sock_delattr(struct sock *sk) { cipso_v4_sock_delattr(sk); } /** * netlbl_sock_getattr - Determine the security attributes of a sock * @sk: the sock * @secattr: the security attributes * * Description: * Examines the given sock to see if any NetLabel style labeling has been * applied to the sock, if so it parses the socket label and returns the * security attributes in @secattr. Returns zero on success, negative values * on failure. 
* */ int netlbl_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { int ret_val; switch (sk->sk_family) { case AF_INET: ret_val = cipso_v4_sock_getattr(sk, secattr); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: ret_val = -ENOMSG; break; #endif /* IPv6 */ default: ret_val = -EPROTONOSUPPORT; } return ret_val; } /** * netlbl_conn_setattr - Label a connected socket using the correct protocol * @sk: the socket to label * @addr: the destination address * @secattr: the security attributes * * Description: * Attach the correct label to the given connected socket using the security * attributes specified in @secattr. The caller is responsible for ensuring * that @sk is locked. Returns zero on success, negative values on failure. * */ int netlbl_conn_setattr(struct sock *sk, struct sockaddr *addr, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct sockaddr_in *addr4; struct netlbl_domaddr4_map *af4_entry; rcu_read_lock(); switch (addr->sa_family) { case AF_INET: addr4 = (struct sockaddr_in *)addr; af4_entry = netlbl_domhsh_getentry_af4(secattr->domain, addr4->sin_addr.s_addr); if (af4_entry == NULL) { ret_val = -ENOENT; goto conn_setattr_return; } switch (af4_entry->type) { case NETLBL_NLTYPE_CIPSOV4: ret_val = cipso_v4_sock_setattr(sk, af4_entry->type_def.cipsov4, secattr); break; case NETLBL_NLTYPE_UNLABELED: /* just delete the protocols we support for right now * but we could remove other protocols if needed */ cipso_v4_sock_delattr(sk); ret_val = 0; break; default: ret_val = -ENOENT; } break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: /* since we don't support any IPv6 labeling protocols right * now we can optimize everything away until we do */ ret_val = 0; break; #endif /* IPv6 */ default: ret_val = -EPROTONOSUPPORT; } conn_setattr_return: rcu_read_unlock(); return ret_val; } /** * netlbl_req_setattr - Label a request socket using the correct protocol * @req: the request socket to label * @secattr: the security attributes * * 
Description: * Attach the correct label to the given socket using the security attributes * specified in @secattr. Returns zero on success, negative values on failure. * */ int netlbl_req_setattr(struct request_sock *req, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct netlbl_dom_map *dom_entry; struct netlbl_domaddr4_map *af4_entry; u32 proto_type; struct cipso_v4_doi *proto_cv4; rcu_read_lock(); dom_entry = netlbl_domhsh_getentry(secattr->domain); if (dom_entry == NULL) { ret_val = -ENOENT; goto req_setattr_return; } switch (req->rsk_ops->family) { case AF_INET: if (dom_entry->type == NETLBL_NLTYPE_ADDRSELECT) { struct inet_request_sock *req_inet = inet_rsk(req); af4_entry = netlbl_domhsh_getentry_af4(secattr->domain, req_inet->rmt_addr); if (af4_entry == NULL) { ret_val = -ENOENT; goto req_setattr_return; } proto_type = af4_entry->type; proto_cv4 = af4_entry->type_def.cipsov4; } else { proto_type = dom_entry->type; proto_cv4 = dom_entry->type_def.cipsov4; } switch (proto_type) { case NETLBL_NLTYPE_CIPSOV4: ret_val = cipso_v4_req_setattr(req, proto_cv4, secattr); break; case NETLBL_NLTYPE_UNLABELED: /* just delete the protocols we support for right now * but we could remove other protocols if needed */ cipso_v4_req_delattr(req); ret_val = 0; break; default: ret_val = -ENOENT; } break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: /* since we don't support any IPv6 labeling protocols right * now we can optimize everything away until we do */ ret_val = 0; break; #endif /* IPv6 */ default: ret_val = -EPROTONOSUPPORT; } req_setattr_return: rcu_read_unlock(); return ret_val; } /** * netlbl_req_delattr - Delete all the NetLabel labels on a socket * @req: the socket * * Description: * Remove all the NetLabel labeling from @req. 
* */ void netlbl_req_delattr(struct request_sock *req) { cipso_v4_req_delattr(req); } /** * netlbl_skbuff_setattr - Label a packet using the correct protocol * @skb: the packet * @family: protocol family * @secattr: the security attributes * * Description: * Attach the correct label to the given packet using the security attributes * specified in @secattr. Returns zero on success, negative values on failure. * */ int netlbl_skbuff_setattr(struct sk_buff *skb, u16 family, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *hdr4; struct netlbl_domaddr4_map *af4_entry; rcu_read_lock(); switch (family) { case AF_INET: hdr4 = ip_hdr(skb); af4_entry = netlbl_domhsh_getentry_af4(secattr->domain, hdr4->daddr); if (af4_entry == NULL) { ret_val = -ENOENT; goto skbuff_setattr_return; } switch (af4_entry->type) { case NETLBL_NLTYPE_CIPSOV4: ret_val = cipso_v4_skbuff_setattr(skb, af4_entry->type_def.cipsov4, secattr); break; case NETLBL_NLTYPE_UNLABELED: /* just delete the protocols we support for right now * but we could remove other protocols if needed */ ret_val = cipso_v4_skbuff_delattr(skb); break; default: ret_val = -ENOENT; } break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: /* since we don't support any IPv6 labeling protocols right * now we can optimize everything away until we do */ ret_val = 0; break; #endif /* IPv6 */ default: ret_val = -EPROTONOSUPPORT; } skbuff_setattr_return: rcu_read_unlock(); return ret_val; } /** * netlbl_skbuff_getattr - Determine the security attributes of a packet * @skb: the packet * @family: protocol family * @secattr: the security attributes * * Description: * Examines the given packet to see if a recognized form of packet labeling * is present, if so it parses the packet label and returns the security * attributes in @secattr. Returns zero on success, negative values on * failure. 
* */ int netlbl_skbuff_getattr(const struct sk_buff *skb, u16 family, struct netlbl_lsm_secattr *secattr) { switch (family) { case AF_INET: if (CIPSO_V4_OPTEXIST(skb) && cipso_v4_skbuff_getattr(skb, secattr) == 0) return 0; break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: break; #endif /* IPv6 */ } return netlbl_unlabel_getattr(skb, family, secattr); } /** * netlbl_skbuff_err - Handle a LSM error on a sk_buff * @skb: the packet * @error: the error code * @gateway: true if host is acting as a gateway, false otherwise * * Description: * Deal with a LSM problem when handling the packet in @skb, typically this is * a permission denied problem (-EACCES). The correct action is determined * according to the packet's labeling protocol. * */ void netlbl_skbuff_err(struct sk_buff *skb, int error, int gateway) { if (CIPSO_V4_OPTEXIST(skb)) cipso_v4_error(skb, error, gateway); } /** * netlbl_cache_invalidate - Invalidate all of the NetLabel protocol caches * * Description: * For all of the NetLabel protocols that support some form of label mapping * cache, invalidate the cache. Returns zero on success, negative values on * error. * */ void netlbl_cache_invalidate(void) { cipso_v4_cache_invalidate(); } /** * netlbl_cache_add - Add an entry to a NetLabel protocol cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add the LSM security attributes for the given packet to the underlying * NetLabel protocol's label mapping cache. Returns zero on success, negative * values on error. 
* */ int netlbl_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { if ((secattr->flags & NETLBL_SECATTR_CACHE) == 0) return -ENOMSG; if (CIPSO_V4_OPTEXIST(skb)) return cipso_v4_cache_add(skb, secattr); return -ENOMSG; } /* * Protocol Engine Functions */ /** * netlbl_audit_start - Start an audit message * @type: audit message type * @audit_info: NetLabel audit information * * Description: * Start an audit message using the type specified in @type and fill the audit * message with some fields common to all NetLabel audit messages. This * function should only be used by protocol engines, not LSMs. Returns a * pointer to the audit buffer on success, NULL on failure. * */ struct audit_buffer *netlbl_audit_start(int type, struct netlbl_audit *audit_info) { return netlbl_audit_start_common(type, audit_info); } /* * Setup Functions */ /** * netlbl_init - Initialize NetLabel * * Description: * Perform the required NetLabel initialization before first use. * */ static int __init netlbl_init(void) { int ret_val; printk(KERN_INFO "NetLabel: Initializing\n"); printk(KERN_INFO "NetLabel: domain hash size = %u\n", (1 << NETLBL_DOMHSH_BITSIZE)); printk(KERN_INFO "NetLabel: protocols =" " UNLABELED" " CIPSOv4" "\n"); ret_val = netlbl_domhsh_init(NETLBL_DOMHSH_BITSIZE); if (ret_val != 0) goto init_failure; ret_val = netlbl_unlabel_init(NETLBL_UNLHSH_BITSIZE); if (ret_val != 0) goto init_failure; ret_val = netlbl_netlink_init(); if (ret_val != 0) goto init_failure; ret_val = netlbl_unlabel_defconf(); if (ret_val != 0) goto init_failure; printk(KERN_INFO "NetLabel: unlabeled traffic allowed by default\n"); return 0; init_failure: panic("NetLabel: failed to initialize properly (%d)\n", ret_val); } subsys_initcall(netlbl_init);
gpl-2.0
TeamTwisted/hells-Core-N5
drivers/usb/misc/trancevibrator.c
7562
3891
/* * PlayStation 2 Trance Vibrator driver * * Copyright (C) 2006 Sam Hocevar <sam@zoy.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Standard include files */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> /* Version Information */ #define DRIVER_VERSION "v1.1" #define DRIVER_AUTHOR "Sam Hocevar, sam@zoy.org" #define DRIVER_DESC "PlayStation 2 Trance Vibrator driver" #define TRANCEVIBRATOR_VENDOR_ID 0x0b49 /* ASCII Corporation */ #define TRANCEVIBRATOR_PRODUCT_ID 0x064f /* Trance Vibrator */ static const struct usb_device_id id_table[] = { { USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) }, { }, }; MODULE_DEVICE_TABLE (usb, id_table); /* Driver-local specific stuff */ struct trancevibrator { struct usb_device *udev; unsigned int speed; }; static ssize_t show_speed(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct trancevibrator *tv = usb_get_intfdata(intf); return sprintf(buf, "%d\n", tv->speed); } static ssize_t set_speed(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct trancevibrator *tv = usb_get_intfdata(intf); int temp, retval, old; temp = 
simple_strtoul(buf, NULL, 10); if (temp > 255) temp = 255; else if (temp < 0) temp = 0; old = tv->speed; tv->speed = temp; dev_dbg(&tv->udev->dev, "speed = %d\n", tv->speed); /* Set speed */ retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0), 0x01, /* vendor request: set speed */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER, tv->speed, /* speed value */ 0, NULL, 0, USB_CTRL_GET_TIMEOUT); if (retval) { tv->speed = old; dev_dbg(&tv->udev->dev, "retval = %d\n", retval); return retval; } return count; } static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed); static int tv_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct trancevibrator *dev; int retval; dev = kzalloc(sizeof(struct trancevibrator), GFP_KERNEL); if (dev == NULL) { dev_err(&interface->dev, "Out of memory\n"); retval = -ENOMEM; goto error; } dev->udev = usb_get_dev(udev); usb_set_intfdata(interface, dev); retval = device_create_file(&interface->dev, &dev_attr_speed); if (retval) goto error_create_file; return 0; error_create_file: usb_put_dev(udev); usb_set_intfdata(interface, NULL); error: kfree(dev); return retval; } static void tv_disconnect(struct usb_interface *interface) { struct trancevibrator *dev; dev = usb_get_intfdata (interface); device_remove_file(&interface->dev, &dev_attr_speed); usb_set_intfdata(interface, NULL); usb_put_dev(dev->udev); kfree(dev); } /* USB subsystem object */ static struct usb_driver tv_driver = { .name = "trancevibrator", .probe = tv_probe, .disconnect = tv_disconnect, .id_table = id_table, }; module_usb_driver(tv_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
TheStrix/android_kernel_xiaomi_armani_OLD
net/sunrpc/auth_gss/gss_krb5_seal.c
7818
6606
/* * linux/net/sunrpc/gss_krb5_seal.c * * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5seal.c * * Copyright (c) 2000-2008 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> * J. Bruce Fields <bfields@umich.edu> */ /* * Copyright 1993 by OpenVision Technologies, Inc. * * Permission to use, copy, modify, distribute, and sell this software * and its documentation for any purpose is hereby granted without fee, * provided that the above copyright notice appears in all copies and * that both that copyright notice and this permission notice appear in * supporting documentation, and that the name of OpenVision not be used * in advertising or publicity pertaining to distribution of the software * without specific, written prior permission. OpenVision makes no * representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied warranty. * * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (C) 1998 by the FundsXpress, INC. * * All rights reserved. * * Export of this software from the United States of America may require * a specific license from the United States Government. It is the * responsibility of any person or organization contemplating export to * obtain such a license before exporting. 
* * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of FundsXpress. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. FundsXpress makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> #include <linux/jiffies.h> #include <linux/sunrpc/gss_krb5.h> #include <linux/random.h> #include <linux/crypto.h> #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH #endif DEFINE_SPINLOCK(krb5_seq_lock); static char * setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token) { __be16 *ptr, *krb5_hdr; int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength; token->len = g_token_size(&ctx->mech_used, body_size); ptr = (__be16 *)token->data; g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr); /* ptr now at start of header described in rfc 1964, section 1.2.1: */ krb5_hdr = ptr; *ptr++ = KG_TOK_MIC_MSG; *ptr++ = cpu_to_le16(ctx->gk5e->signalg); *ptr++ = SEAL_ALG_NONE; *ptr++ = 0xffff; return (char *)krb5_hdr; } static void * setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token) { __be16 *ptr, *krb5_hdr; u8 *p, flags = 0x00; if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0) flags |= 0x01; if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) flags |= 0x04; /* Per rfc 4121, sec 4.2.6.1, there is no header, * just start the token */ krb5_hdr = ptr = (__be16 
*)token->data; *ptr++ = KG2_TOK_MIC; p = (u8 *)ptr; *p++ = flags; *p++ = 0xff; ptr = (__be16 *)p; *ptr++ = 0xffff; *ptr++ = 0xffff; token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength; return krb5_hdr; } static u32 gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, struct xdr_netobj *token) { char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), .data = cksumdata}; void *ptr; s32 now; u32 seq_send; u8 *cksumkey; dprintk("RPC: %s\n", __func__); BUG_ON(ctx == NULL); now = get_seconds(); ptr = setup_token(ctx, token); if (ctx->gk5e->keyed_cksum) cksumkey = ctx->cksum; else cksumkey = NULL; if (make_checksum(ctx, ptr, 8, text, 0, cksumkey, KG_USAGE_SIGN, &md5cksum)) return GSS_S_FAILURE; memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); spin_lock(&krb5_seq_lock); seq_send = ctx->seq_send++; spin_unlock(&krb5_seq_lock); if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff, seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)) return GSS_S_FAILURE; return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; } static u32 gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, struct xdr_netobj *token) { char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; struct xdr_netobj cksumobj = { .len = sizeof(cksumdata), .data = cksumdata}; void *krb5_hdr; s32 now; u64 seq_send; u8 *cksumkey; unsigned int cksum_usage; dprintk("RPC: %s\n", __func__); krb5_hdr = setup_token_v2(ctx, token); /* Set up the sequence number. 
Now 64-bits in clear * text and w/o direction indicator */ spin_lock(&krb5_seq_lock); seq_send = ctx->seq_send64++; spin_unlock(&krb5_seq_lock); *((u64 *)(krb5_hdr + 8)) = cpu_to_be64(seq_send); if (ctx->initiate) { cksumkey = ctx->initiator_sign; cksum_usage = KG_USAGE_INITIATOR_SIGN; } else { cksumkey = ctx->acceptor_sign; cksum_usage = KG_USAGE_ACCEPTOR_SIGN; } if (make_checksum_v2(ctx, krb5_hdr, GSS_KRB5_TOK_HDR_LEN, text, 0, cksumkey, cksum_usage, &cksumobj)) return GSS_S_FAILURE; memcpy(krb5_hdr + GSS_KRB5_TOK_HDR_LEN, cksumobj.data, cksumobj.len); now = get_seconds(); return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; } u32 gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, struct xdr_netobj *token) { struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; switch (ctx->enctype) { default: BUG(); case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: case ENCTYPE_ARCFOUR_HMAC: return gss_get_mic_v1(ctx, text, token); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: return gss_get_mic_v2(ctx, text, token); } }
gpl-2.0
Silverblade-nz/Alpha15Copy
drivers/mfd/mcp-core.c
7818
5750
/* * linux/drivers/mfd/mcp-core.c * * Copyright (C) 2001 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. * * Generic MCP (Multimedia Communications Port) layer. All MCP locking * is solely held within this file. */ #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/smp.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/mfd/mcp.h> #define to_mcp(d) container_of(d, struct mcp, attached_device) #define to_mcp_driver(d) container_of(d, struct mcp_driver, drv) static int mcp_bus_match(struct device *dev, struct device_driver *drv) { return 1; } static int mcp_bus_probe(struct device *dev) { struct mcp *mcp = to_mcp(dev); struct mcp_driver *drv = to_mcp_driver(dev->driver); return drv->probe(mcp); } static int mcp_bus_remove(struct device *dev) { struct mcp *mcp = to_mcp(dev); struct mcp_driver *drv = to_mcp_driver(dev->driver); drv->remove(mcp); return 0; } static struct bus_type mcp_bus_type = { .name = "mcp", .match = mcp_bus_match, .probe = mcp_bus_probe, .remove = mcp_bus_remove, }; /** * mcp_set_telecom_divisor - set the telecom divisor * @mcp: MCP interface structure * @div: SIB clock divisor * * Set the telecom divisor on the MCP interface. The resulting * sample rate is SIBCLOCK/div. */ void mcp_set_telecom_divisor(struct mcp *mcp, unsigned int div) { unsigned long flags; spin_lock_irqsave(&mcp->lock, flags); mcp->ops->set_telecom_divisor(mcp, div); spin_unlock_irqrestore(&mcp->lock, flags); } EXPORT_SYMBOL(mcp_set_telecom_divisor); /** * mcp_set_audio_divisor - set the audio divisor * @mcp: MCP interface structure * @div: SIB clock divisor * * Set the audio divisor on the MCP interface. 
*/ void mcp_set_audio_divisor(struct mcp *mcp, unsigned int div) { unsigned long flags; spin_lock_irqsave(&mcp->lock, flags); mcp->ops->set_audio_divisor(mcp, div); spin_unlock_irqrestore(&mcp->lock, flags); } EXPORT_SYMBOL(mcp_set_audio_divisor); /** * mcp_reg_write - write a device register * @mcp: MCP interface structure * @reg: 4-bit register index * @val: 16-bit data value * * Write a device register. The MCP interface must be enabled * to prevent this function hanging. */ void mcp_reg_write(struct mcp *mcp, unsigned int reg, unsigned int val) { unsigned long flags; spin_lock_irqsave(&mcp->lock, flags); mcp->ops->reg_write(mcp, reg, val); spin_unlock_irqrestore(&mcp->lock, flags); } EXPORT_SYMBOL(mcp_reg_write); /** * mcp_reg_read - read a device register * @mcp: MCP interface structure * @reg: 4-bit register index * * Read a device register and return its value. The MCP interface * must be enabled to prevent this function hanging. */ unsigned int mcp_reg_read(struct mcp *mcp, unsigned int reg) { unsigned long flags; unsigned int val; spin_lock_irqsave(&mcp->lock, flags); val = mcp->ops->reg_read(mcp, reg); spin_unlock_irqrestore(&mcp->lock, flags); return val; } EXPORT_SYMBOL(mcp_reg_read); /** * mcp_enable - enable the MCP interface * @mcp: MCP interface to enable * * Enable the MCP interface. Each call to mcp_enable will need * a corresponding call to mcp_disable to disable the interface. */ void mcp_enable(struct mcp *mcp) { unsigned long flags; spin_lock_irqsave(&mcp->lock, flags); if (mcp->use_count++ == 0) mcp->ops->enable(mcp); spin_unlock_irqrestore(&mcp->lock, flags); } EXPORT_SYMBOL(mcp_enable); /** * mcp_disable - disable the MCP interface * @mcp: MCP interface to disable * * Disable the MCP interface. The MCP interface will only be * disabled once the number of calls to mcp_enable matches the * number of calls to mcp_disable. 
*/ void mcp_disable(struct mcp *mcp) { unsigned long flags; spin_lock_irqsave(&mcp->lock, flags); if (--mcp->use_count == 0) mcp->ops->disable(mcp); spin_unlock_irqrestore(&mcp->lock, flags); } EXPORT_SYMBOL(mcp_disable); static void mcp_release(struct device *dev) { struct mcp *mcp = container_of(dev, struct mcp, attached_device); kfree(mcp); } struct mcp *mcp_host_alloc(struct device *parent, size_t size) { struct mcp *mcp; mcp = kzalloc(sizeof(struct mcp) + size, GFP_KERNEL); if (mcp) { spin_lock_init(&mcp->lock); device_initialize(&mcp->attached_device); mcp->attached_device.parent = parent; mcp->attached_device.bus = &mcp_bus_type; mcp->attached_device.dma_mask = parent->dma_mask; mcp->attached_device.release = mcp_release; } return mcp; } EXPORT_SYMBOL(mcp_host_alloc); int mcp_host_add(struct mcp *mcp, void *pdata) { mcp->attached_device.platform_data = pdata; dev_set_name(&mcp->attached_device, "mcp0"); return device_add(&mcp->attached_device); } EXPORT_SYMBOL(mcp_host_add); void mcp_host_del(struct mcp *mcp) { device_del(&mcp->attached_device); } EXPORT_SYMBOL(mcp_host_del); void mcp_host_free(struct mcp *mcp) { put_device(&mcp->attached_device); } EXPORT_SYMBOL(mcp_host_free); int mcp_driver_register(struct mcp_driver *mcpdrv) { mcpdrv->drv.bus = &mcp_bus_type; return driver_register(&mcpdrv->drv); } EXPORT_SYMBOL(mcp_driver_register); void mcp_driver_unregister(struct mcp_driver *mcpdrv) { driver_unregister(&mcpdrv->drv); } EXPORT_SYMBOL(mcp_driver_unregister); static int __init mcp_init(void) { return bus_register(&mcp_bus_type); } static void __exit mcp_exit(void) { bus_unregister(&mcp_bus_type); } module_init(mcp_init); module_exit(mcp_exit); MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); MODULE_DESCRIPTION("Core multimedia communications port driver"); MODULE_LICENSE("GPL");
gpl-2.0
coolshou/htc_dlxub1_kernel-3.4.10
sound/pci/echoaudio/indigoio.c
8074
2949
/* * ALSA driver for Echoaudio soundcards. * Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define INDIGO_FAMILY #define ECHOCARD_INDIGO_IO #define ECHOCARD_NAME "Indigo IO" #define ECHOCARD_HAS_MONITOR #define ECHOCARD_HAS_SUPER_INTERLEAVE #define ECHOCARD_HAS_VMIXER #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32 /* Pipe indexes */ #define PX_ANALOG_OUT 0 /* 8 */ #define PX_DIGITAL_OUT 8 /* 0 */ #define PX_ANALOG_IN 8 /* 2 */ #define PX_DIGITAL_IN 10 /* 0 */ #define PX_NUM 10 /* Bus indexes */ #define BX_ANALOG_OUT 0 /* 2 */ #define BX_DIGITAL_OUT 2 /* 0 */ #define BX_ANALOG_IN 2 /* 2 */ #define BX_DIGITAL_IN 4 /* 0 */ #define BX_NUM 4 #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <asm/io.h> #include <linux/atomic.h> #include "echoaudio.h" MODULE_FIRMWARE("ea/loader_dsp.fw"); MODULE_FIRMWARE("ea/indigo_io_dsp.fw"); #define FW_361_LOADER 0 #define FW_INDIGO_IO_DSP 1 static const struct firmware card_fw[] = { {0, "loader_dsp.fw"}, {0, "indigo_io_dsp.fw"} }; static 
DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = { {0x1057, 0x3410, 0xECC0, 0x00A0, 0, 0, 0}, /* Indigo IO*/ {0,} }; static struct snd_pcm_hardware pcm_hardware_skel = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START, .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000, .rate_min = 32000, .rate_max = 96000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = 262144, .period_bytes_min = 32, .period_bytes_max = 131072, .periods_min = 2, .periods_max = 220, }; #include "indigoio_dsp.c" #include "echoaudio_dsp.c" #include "echoaudio.c"
gpl-2.0
htc-mirror/runnymede-ics-crc-3.0.16-c297c99
drivers/scsi/stex.c
8330
44285
/* * SuperTrak EX Series Storage Controller driver for Linux * * Copyright (C) 2005-2009 Promise Technology Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Written By: * Ed Lin <promise_linux@promise.com> * */ #include <linux/init.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/module.h> #include <linux/spinlock.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/byteorder.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_eh.h> #define DRV_NAME "stex" #define ST_DRIVER_VERSION "4.6.0000.4" #define ST_VER_MAJOR 4 #define ST_VER_MINOR 6 #define ST_OEM 0 #define ST_BUILD_VER 4 enum { /* MU register offset */ IMR0 = 0x10, /* MU_INBOUND_MESSAGE_REG0 */ IMR1 = 0x14, /* MU_INBOUND_MESSAGE_REG1 */ OMR0 = 0x18, /* MU_OUTBOUND_MESSAGE_REG0 */ OMR1 = 0x1c, /* MU_OUTBOUND_MESSAGE_REG1 */ IDBL = 0x20, /* MU_INBOUND_DOORBELL */ IIS = 0x24, /* MU_INBOUND_INTERRUPT_STATUS */ IIM = 0x28, /* MU_INBOUND_INTERRUPT_MASK */ ODBL = 0x2c, /* MU_OUTBOUND_DOORBELL */ OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */ OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */ YIOA_STATUS = 0x00, YH2I_INT = 0x20, YINT_EN = 0x34, YI2H_INT = 0x9c, YI2H_INT_C = 0xa0, YH2I_REQ = 0xc0, YH2I_REQ_HI = 0xc4, /* MU register value */ MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0), MU_INBOUND_DOORBELL_REQHEADCHANGED = (1 << 1), MU_INBOUND_DOORBELL_STATUSTAILCHANGED = (1 << 2), MU_INBOUND_DOORBELL_HMUSTOPPED = (1 << 3), MU_INBOUND_DOORBELL_RESET = (1 << 4), MU_OUTBOUND_DOORBELL_HANDSHAKE 
= (1 << 0), MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = (1 << 1), MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = (1 << 2), MU_OUTBOUND_DOORBELL_BUSCHANGE = (1 << 3), MU_OUTBOUND_DOORBELL_HASEVENT = (1 << 4), MU_OUTBOUND_DOORBELL_REQUEST_RESET = (1 << 27), /* MU status code */ MU_STATE_STARTING = 1, MU_STATE_STARTED = 2, MU_STATE_RESETTING = 3, MU_STATE_FAILED = 4, MU_MAX_DELAY = 120, MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000, MU_HARD_RESET_WAIT = 30000, HMU_PARTNER_TYPE = 2, /* firmware returned values */ SRB_STATUS_SUCCESS = 0x01, SRB_STATUS_ERROR = 0x04, SRB_STATUS_BUSY = 0x05, SRB_STATUS_INVALID_REQUEST = 0x06, SRB_STATUS_SELECTION_TIMEOUT = 0x0A, SRB_SEE_SENSE = 0x80, /* task attribute */ TASK_ATTRIBUTE_SIMPLE = 0x0, TASK_ATTRIBUTE_HEADOFQUEUE = 0x1, TASK_ATTRIBUTE_ORDERED = 0x2, TASK_ATTRIBUTE_ACA = 0x4, SS_STS_NORMAL = 0x80000000, SS_STS_DONE = 0x40000000, SS_STS_HANDSHAKE = 0x20000000, SS_HEAD_HANDSHAKE = 0x80, SS_H2I_INT_RESET = 0x100, SS_I2H_REQUEST_RESET = 0x2000, SS_MU_OPERATIONAL = 0x80000000, STEX_CDB_LENGTH = 16, STATUS_VAR_LEN = 128, /* sg flags */ SG_CF_EOT = 0x80, /* end of table */ SG_CF_64B = 0x40, /* 64 bit item */ SG_CF_HOST = 0x20, /* sg in host memory */ MSG_DATA_DIR_ND = 0, MSG_DATA_DIR_IN = 1, MSG_DATA_DIR_OUT = 2, st_shasta = 0, st_vsc = 1, st_yosemite = 2, st_seq = 3, st_yel = 4, PASSTHRU_REQ_TYPE = 0x00000001, PASSTHRU_REQ_NO_WAKEUP = 0x00000100, ST_INTERNAL_TIMEOUT = 180, ST_TO_CMD = 0, ST_FROM_CMD = 1, /* vendor specific commands of Promise */ MGT_CMD = 0xd8, SINBAND_MGT_CMD = 0xd9, ARRAY_CMD = 0xe0, CONTROLLER_CMD = 0xe1, DEBUGGING_CMD = 0xe2, PASSTHRU_CMD = 0xe3, PASSTHRU_GET_ADAPTER = 0x05, PASSTHRU_GET_DRVVER = 0x10, CTLR_CONFIG_CMD = 0x03, CTLR_SHUTDOWN = 0x0d, CTLR_POWER_STATE_CHANGE = 0x0e, CTLR_POWER_SAVING = 0x01, PASSTHRU_SIGNATURE = 0x4e415041, MGT_CMD_SIGNATURE = 0xba, INQUIRY_EVPD = 0x01, ST_ADDITIONAL_MEM = 0x200000, ST_ADDITIONAL_MEM_MIN = 0x80000, }; struct st_sgitem { u8 ctrl; /* SG_CF_xxx 
*/ u8 reserved[3]; __le32 count; __le64 addr; }; struct st_ss_sgitem { __le32 addr; __le32 addr_hi; __le32 count; }; struct st_sgtable { __le16 sg_count; __le16 max_sg_count; __le32 sz_in_byte; }; struct st_msg_header { __le64 handle; u8 flag; u8 channel; __le16 timeout; u32 reserved; }; struct handshake_frame { __le64 rb_phy; /* request payload queue physical address */ __le16 req_sz; /* size of each request payload */ __le16 req_cnt; /* count of reqs the buffer can hold */ __le16 status_sz; /* size of each status payload */ __le16 status_cnt; /* count of status the buffer can hold */ __le64 hosttime; /* seconds from Jan 1, 1970 (GMT) */ u8 partner_type; /* who sends this frame */ u8 reserved0[7]; __le32 partner_ver_major; __le32 partner_ver_minor; __le32 partner_ver_oem; __le32 partner_ver_build; __le32 extra_offset; /* NEW */ __le32 extra_size; /* NEW */ __le32 scratch_size; u32 reserved1; }; struct req_msg { __le16 tag; u8 lun; u8 target; u8 task_attr; u8 task_manage; u8 data_dir; u8 payload_sz; /* payload size in 4-byte, not used */ u8 cdb[STEX_CDB_LENGTH]; u32 variable[0]; }; struct status_msg { __le16 tag; u8 lun; u8 target; u8 srb_status; u8 scsi_status; u8 reserved; u8 payload_sz; /* payload size in 4-byte */ u8 variable[STATUS_VAR_LEN]; }; struct ver_info { u32 major; u32 minor; u32 oem; u32 build; u32 reserved[2]; }; struct st_frame { u32 base[6]; u32 rom_addr; struct ver_info drv_ver; struct ver_info bios_ver; u32 bus; u32 slot; u32 irq_level; u32 irq_vec; u32 id; u32 subid; u32 dimm_size; u8 dimm_type; u8 reserved[3]; u32 channel; u32 reserved1; }; struct st_drvver { u32 major; u32 minor; u32 oem; u32 build; u32 signature[2]; u8 console_id; u8 host_no; u8 reserved0[2]; u32 reserved[3]; }; struct st_ccb { struct req_msg *req; struct scsi_cmnd *cmd; void *sense_buffer; unsigned int sense_bufflen; int sg_count; u32 req_type; u8 srb_status; u8 scsi_status; u8 reserved[2]; }; struct st_hba { void __iomem *mmio_base; /* iomapped PCI memory space */ void 
*dma_mem; dma_addr_t dma_handle; size_t dma_size; struct Scsi_Host *host; struct pci_dev *pdev; struct req_msg * (*alloc_rq) (struct st_hba *); int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *); void (*send) (struct st_hba *, struct req_msg *, u16); u32 req_head; u32 req_tail; u32 status_head; u32 status_tail; struct status_msg *status_buffer; void *copy_buffer; /* temp buffer for driver-handled commands */ struct st_ccb *ccb; struct st_ccb *wait_ccb; __le32 *scratch; char work_q_name[20]; struct workqueue_struct *work_q; struct work_struct reset_work; wait_queue_head_t reset_waitq; unsigned int mu_status; unsigned int cardtype; int msi_enabled; int out_req_cnt; u32 extra_offset; u16 rq_count; u16 rq_size; u16 sts_count; }; struct st_card_info { struct req_msg * (*alloc_rq) (struct st_hba *); int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *); void (*send) (struct st_hba *, struct req_msg *, u16); unsigned int max_id; unsigned int max_lun; unsigned int max_channel; u16 rq_count; u16 rq_size; u16 sts_count; }; static int msi; module_param(msi, int, 0); MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)"); static const char console_inq_page[] = { 0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30, 0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20, /* "Promise " */ 0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E, /* "RAID Con" */ 0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20, /* "sole " */ 0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20, /* "1.00 " */ 0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D, /* "SX/RSAF-" */ 0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20, /* "TE1.00 " */ 0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20 }; MODULE_AUTHOR("Ed Lin"); MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers"); MODULE_LICENSE("GPL"); MODULE_VERSION(ST_DRIVER_VERSION); static void stex_gettime(__le64 *time) { struct timeval tv; do_gettimeofday(&tv); *time = cpu_to_le64(tv.tv_sec); } static struct status_msg *stex_get_status(struct st_hba *hba) { struct status_msg *status = 
hba->status_buffer + hba->status_tail; ++hba->status_tail; hba->status_tail %= hba->sts_count+1; return status; } static void stex_invalid_field(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; /* "Invalid field in cdb" */ scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24, 0x0); done(cmd); } static struct req_msg *stex_alloc_req(struct st_hba *hba) { struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size; ++hba->req_head; hba->req_head %= hba->rq_count+1; return req; } static struct req_msg *stex_ss_alloc_req(struct st_hba *hba) { return (struct req_msg *)(hba->dma_mem + hba->req_head * hba->rq_size + sizeof(struct st_msg_header)); } static int stex_map_sg(struct st_hba *hba, struct req_msg *req, struct st_ccb *ccb) { struct scsi_cmnd *cmd; struct scatterlist *sg; struct st_sgtable *dst; struct st_sgitem *table; int i, nseg; cmd = ccb->cmd; nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (nseg) { dst = (struct st_sgtable *)req->variable; ccb->sg_count = nseg; dst->sg_count = cpu_to_le16((u16)nseg); dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd)); table = (struct st_sgitem *)(dst + 1); scsi_for_each_sg(cmd, sg, nseg, i) { table[i].count = cpu_to_le32((u32)sg_dma_len(sg)); table[i].addr = cpu_to_le64(sg_dma_address(sg)); table[i].ctrl = SG_CF_64B | SG_CF_HOST; } table[--i].ctrl |= SG_CF_EOT; } return nseg; } static int stex_ss_map_sg(struct st_hba *hba, struct req_msg *req, struct st_ccb *ccb) { struct scsi_cmnd *cmd; struct scatterlist *sg; struct st_sgtable *dst; struct st_ss_sgitem *table; int i, nseg; cmd = ccb->cmd; nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (nseg) { dst = (struct st_sgtable *)req->variable; ccb->sg_count = nseg; dst->sg_count = cpu_to_le16((u16)nseg); dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd)); table = (struct 
st_ss_sgitem *)(dst + 1); scsi_for_each_sg(cmd, sg, nseg, i) { table[i].count = cpu_to_le32((u32)sg_dma_len(sg)); table[i].addr = cpu_to_le32(sg_dma_address(sg) & 0xffffffff); table[i].addr_hi = cpu_to_le32((sg_dma_address(sg) >> 16) >> 16); } } return nseg; } static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) { struct st_frame *p; size_t count = sizeof(struct st_frame); p = hba->copy_buffer; scsi_sg_copy_to_buffer(ccb->cmd, p, count); memset(p->base, 0, sizeof(u32)*6); *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); p->rom_addr = 0; p->drv_ver.major = ST_VER_MAJOR; p->drv_ver.minor = ST_VER_MINOR; p->drv_ver.oem = ST_OEM; p->drv_ver.build = ST_BUILD_VER; p->bus = hba->pdev->bus->number; p->slot = hba->pdev->devfn; p->irq_level = 0; p->irq_vec = hba->pdev->irq; p->id = hba->pdev->vendor << 16 | hba->pdev->device; p->subid = hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; scsi_sg_copy_from_buffer(ccb->cmd, p, count); } static void stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) { req->tag = cpu_to_le16(tag); hba->ccb[tag].req = req; hba->out_req_cnt++; writel(hba->req_head, hba->mmio_base + IMR0); writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL); readl(hba->mmio_base + IDBL); /* flush */ } static void stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) { struct scsi_cmnd *cmd; struct st_msg_header *msg_h; dma_addr_t addr; req->tag = cpu_to_le16(tag); hba->ccb[tag].req = req; hba->out_req_cnt++; cmd = hba->ccb[tag].cmd; msg_h = (struct st_msg_header *)req - 1; if (likely(cmd)) { msg_h->channel = (u8)cmd->device->channel; msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ); } addr = hba->dma_handle + hba->req_head * hba->rq_size; addr += (hba->ccb[tag].sg_count+4)/11; msg_h->handle = cpu_to_le64(addr); ++hba->req_head; hba->req_head %= hba->rq_count+1; writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); readl(hba->mmio_base + YH2I_REQ_HI); /* flush */ 
writel(addr, hba->mmio_base + YH2I_REQ); readl(hba->mmio_base + YH2I_REQ); /* flush */ } static int stex_slave_alloc(struct scsi_device *sdev) { /* Cheat: usually extracted from Inquiry data */ sdev->tagged_supported = 1; scsi_activate_tcq(sdev, sdev->host->can_queue); return 0; } static int stex_slave_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); sdev->tagged_supported = 1; return 0; } static void stex_slave_destroy(struct scsi_device *sdev) { scsi_deactivate_tcq(sdev, 1); } static int stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct st_hba *hba; struct Scsi_Host *host; unsigned int id, lun; struct req_msg *req; u16 tag; host = cmd->device->host; id = cmd->device->id; lun = cmd->device->lun; hba = (struct st_hba *) &host->hostdata[0]; if (unlikely(hba->mu_status == MU_STATE_RESETTING)) return SCSI_MLQUEUE_HOST_BUSY; switch (cmd->cmnd[0]) { case MODE_SENSE_10: { static char ms10_caching_page[12] = { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 }; unsigned char page; page = cmd->cmnd[2] & 0x3f; if (page == 0x8 || page == 0x3f) { scsi_sg_copy_from_buffer(cmd, ms10_caching_page, sizeof(ms10_caching_page)); cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; done(cmd); } else stex_invalid_field(cmd, done); return 0; } case REPORT_LUNS: /* * The shasta firmware does not report actual luns in the * target, so fail the command to force sequential lun scan. * Also, the console device does not support this command. 
*/ if (hba->cardtype == st_shasta || id == host->max_id - 1) { stex_invalid_field(cmd, done); return 0; } break; case TEST_UNIT_READY: if (id == host->max_id - 1) { cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; done(cmd); return 0; } break; case INQUIRY: if (lun >= host->max_lun) { cmd->result = DID_NO_CONNECT << 16; done(cmd); return 0; } if (id != host->max_id - 1) break; if (!lun && !cmd->device->channel && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page, sizeof(console_inq_page)); cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; done(cmd); } else stex_invalid_field(cmd, done); return 0; case PASSTHRU_CMD: if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { struct st_drvver ver; size_t cp_len = sizeof(ver); ver.major = ST_VER_MAJOR; ver.minor = ST_VER_MINOR; ver.oem = ST_OEM; ver.build = ST_BUILD_VER; ver.signature[0] = PASSTHRU_SIGNATURE; ver.console_id = host->max_id - 1; ver.host_no = hba->host->host_no; cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len); cmd->result = sizeof(ver) == cp_len ? 
DID_OK << 16 | COMMAND_COMPLETE << 8 : DID_ERROR << 16 | COMMAND_COMPLETE << 8; done(cmd); return 0; } default: break; } cmd->scsi_done = done; tag = cmd->request->tag; if (unlikely(tag >= host->can_queue)) return SCSI_MLQUEUE_HOST_BUSY; req = hba->alloc_rq(hba); req->lun = lun; req->target = id; /* cdb */ memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH); if (cmd->sc_data_direction == DMA_FROM_DEVICE) req->data_dir = MSG_DATA_DIR_IN; else if (cmd->sc_data_direction == DMA_TO_DEVICE) req->data_dir = MSG_DATA_DIR_OUT; else req->data_dir = MSG_DATA_DIR_ND; hba->ccb[tag].cmd = cmd; hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE; hba->ccb[tag].sense_buffer = cmd->sense_buffer; if (!hba->map_sg(hba, req, &hba->ccb[tag])) { hba->ccb[tag].sg_count = 0; memset(&req->variable[0], 0, 8); } hba->send(hba, req, tag); return 0; } static DEF_SCSI_QCMD(stex_queuecommand) static void stex_scsi_done(struct st_ccb *ccb) { struct scsi_cmnd *cmd = ccb->cmd; int result; if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) { result = ccb->scsi_status; switch (ccb->scsi_status) { case SAM_STAT_GOOD: result |= DID_OK << 16 | COMMAND_COMPLETE << 8; break; case SAM_STAT_CHECK_CONDITION: result |= DRIVER_SENSE << 24; break; case SAM_STAT_BUSY: result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8; break; default: result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8; break; } } else if (ccb->srb_status & SRB_SEE_SENSE) result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION; else switch (ccb->srb_status) { case SRB_STATUS_SELECTION_TIMEOUT: result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; break; case SRB_STATUS_BUSY: result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8; break; case SRB_STATUS_INVALID_REQUEST: case SRB_STATUS_ERROR: default: result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; break; } cmd->result = result; cmd->scsi_done(cmd); } static void stex_copy_data(struct st_ccb *ccb, struct status_msg *resp, unsigned int variable) { if (resp->scsi_status != SAM_STAT_GOOD) { 
if (ccb->sense_buffer != NULL) memcpy(ccb->sense_buffer, resp->variable, min(variable, ccb->sense_bufflen)); return; } if (ccb->cmd == NULL) return; scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable); } static void stex_check_cmd(struct st_hba *hba, struct st_ccb *ccb, struct status_msg *resp) { if (ccb->cmd->cmnd[0] == MGT_CMD && resp->scsi_status != SAM_STAT_CHECK_CONDITION) scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) - le32_to_cpu(*(__le32 *)&resp->variable[0])); } static void stex_mu_intr(struct st_hba *hba, u32 doorbell) { void __iomem *base = hba->mmio_base; struct status_msg *resp; struct st_ccb *ccb; unsigned int size; u16 tag; if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED))) return; /* status payloads */ hba->status_head = readl(base + OMR1); if (unlikely(hba->status_head > hba->sts_count)) { printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n", pci_name(hba->pdev)); return; } /* * it's not a valid status payload if: * 1. there are no pending requests(e.g. during init stage) * 2. 
there are some pending requests, but the controller is in * reset status, and its type is not st_yosemite * firmware of st_yosemite in reset status will return pending requests * to driver, so we allow it to pass */ if (unlikely(hba->out_req_cnt <= 0 || (hba->mu_status == MU_STATE_RESETTING && hba->cardtype != st_yosemite))) { hba->status_tail = hba->status_head; goto update_status; } while (hba->status_tail != hba->status_head) { resp = stex_get_status(hba); tag = le16_to_cpu(resp->tag); if (unlikely(tag >= hba->host->can_queue)) { printk(KERN_WARNING DRV_NAME "(%s): invalid tag\n", pci_name(hba->pdev)); continue; } hba->out_req_cnt--; ccb = &hba->ccb[tag]; if (unlikely(hba->wait_ccb == ccb)) hba->wait_ccb = NULL; if (unlikely(ccb->req == NULL)) { printk(KERN_WARNING DRV_NAME "(%s): lagging req\n", pci_name(hba->pdev)); continue; } size = resp->payload_sz * sizeof(u32); /* payload size */ if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN || size > sizeof(*resp))) { printk(KERN_WARNING DRV_NAME "(%s): bad status size\n", pci_name(hba->pdev)); } else { size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */ if (size) stex_copy_data(ccb, resp, size); } ccb->req = NULL; ccb->srb_status = resp->srb_status; ccb->scsi_status = resp->scsi_status; if (likely(ccb->cmd != NULL)) { if (hba->cardtype == st_yosemite) stex_check_cmd(hba, ccb, resp); if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD && ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER)) stex_controller_info(hba, ccb); scsi_dma_unmap(ccb->cmd); stex_scsi_done(ccb); } else ccb->req_type = 0; } update_status: writel(hba->status_head, base + IMR1); readl(base + IMR1); /* flush */ } static irqreturn_t stex_intr(int irq, void *__hba) { struct st_hba *hba = __hba; void __iomem *base = hba->mmio_base; u32 data; unsigned long flags; spin_lock_irqsave(hba->host->host_lock, flags); data = readl(base + ODBL); if (data && data != 0xffffffff) { /* clear the interrupt */ writel(data, base + ODBL); readl(base + ODBL); /* flush */ 
stex_mu_intr(hba, data); spin_unlock_irqrestore(hba->host->host_lock, flags); if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET && hba->cardtype == st_shasta)) queue_work(hba->work_q, &hba->reset_work); return IRQ_HANDLED; } spin_unlock_irqrestore(hba->host->host_lock, flags); return IRQ_NONE; } static void stex_ss_mu_intr(struct st_hba *hba) { struct status_msg *resp; struct st_ccb *ccb; __le32 *scratch; unsigned int size; int count = 0; u32 value; u16 tag; if (unlikely(hba->out_req_cnt <= 0 || hba->mu_status == MU_STATE_RESETTING)) return; while (count < hba->sts_count) { scratch = hba->scratch + hba->status_tail; value = le32_to_cpu(*scratch); if (unlikely(!(value & SS_STS_NORMAL))) return; resp = hba->status_buffer + hba->status_tail; *scratch = 0; ++count; ++hba->status_tail; hba->status_tail %= hba->sts_count+1; tag = (u16)value; if (unlikely(tag >= hba->host->can_queue)) { printk(KERN_WARNING DRV_NAME "(%s): invalid tag\n", pci_name(hba->pdev)); continue; } hba->out_req_cnt--; ccb = &hba->ccb[tag]; if (unlikely(hba->wait_ccb == ccb)) hba->wait_ccb = NULL; if (unlikely(ccb->req == NULL)) { printk(KERN_WARNING DRV_NAME "(%s): lagging req\n", pci_name(hba->pdev)); continue; } ccb->req = NULL; if (likely(value & SS_STS_DONE)) { /* normal case */ ccb->srb_status = SRB_STATUS_SUCCESS; ccb->scsi_status = SAM_STAT_GOOD; } else { ccb->srb_status = resp->srb_status; ccb->scsi_status = resp->scsi_status; size = resp->payload_sz * sizeof(u32); if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN || size > sizeof(*resp))) { printk(KERN_WARNING DRV_NAME "(%s): bad status size\n", pci_name(hba->pdev)); } else { size -= sizeof(*resp) - STATUS_VAR_LEN; if (size) stex_copy_data(ccb, resp, size); } if (likely(ccb->cmd != NULL)) stex_check_cmd(hba, ccb, resp); } if (likely(ccb->cmd != NULL)) { scsi_dma_unmap(ccb->cmd); stex_scsi_done(ccb); } else ccb->req_type = 0; } } static irqreturn_t stex_ss_intr(int irq, void *__hba) { struct st_hba *hba = __hba; void __iomem *base = 
hba->mmio_base; u32 data; unsigned long flags; spin_lock_irqsave(hba->host->host_lock, flags); data = readl(base + YI2H_INT); if (data && data != 0xffffffff) { /* clear the interrupt */ writel(data, base + YI2H_INT_C); stex_ss_mu_intr(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); if (unlikely(data & SS_I2H_REQUEST_RESET)) queue_work(hba->work_q, &hba->reset_work); return IRQ_HANDLED; } spin_unlock_irqrestore(hba->host->host_lock, flags); return IRQ_NONE; } static int stex_common_handshake(struct st_hba *hba) { void __iomem *base = hba->mmio_base; struct handshake_frame *h; dma_addr_t status_phys; u32 data; unsigned long before; if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); readl(base + IDBL); before = jiffies; while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): no handshake signature\n", pci_name(hba->pdev)); return -1; } rmb(); msleep(1); } } udelay(10); data = readl(base + OMR1); if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) { data &= 0x0000ffff; if (hba->host->can_queue > data) { hba->host->can_queue = data; hba->host->cmd_per_lun = data; } } h = (struct handshake_frame *)hba->status_buffer; h->rb_phy = cpu_to_le64(hba->dma_handle); h->req_sz = cpu_to_le16(hba->rq_size); h->req_cnt = cpu_to_le16(hba->rq_count+1); h->status_sz = cpu_to_le16(sizeof(struct status_msg)); h->status_cnt = cpu_to_le16(hba->sts_count+1); stex_gettime(&h->hosttime); h->partner_type = HMU_PARTNER_TYPE; if (hba->extra_offset) { h->extra_offset = cpu_to_le32(hba->extra_offset); h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset); } else h->extra_offset = h->extra_size = 0; status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size; writel(status_phys, base + IMR0); readl(base + IMR0); writel((status_phys >> 16) >> 16, base + IMR1); readl(base + IMR1); writel((status_phys >> 16) >> 16, base + OMR0); /* old 
fw compatible */ readl(base + OMR0); writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); readl(base + IDBL); /* flush */ udelay(10); before = jiffies; while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): no signature after handshake frame\n", pci_name(hba->pdev)); return -1; } rmb(); msleep(1); } writel(0, base + IMR0); readl(base + IMR0); writel(0, base + OMR0); readl(base + OMR0); writel(0, base + IMR1); readl(base + IMR1); writel(0, base + OMR1); readl(base + OMR1); /* flush */ return 0; } static int stex_ss_handshake(struct st_hba *hba) { void __iomem *base = hba->mmio_base; struct st_msg_header *msg_h; struct handshake_frame *h; __le32 *scratch; u32 data, scratch_size; unsigned long before; int ret = 0; before = jiffies; while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): firmware not operational\n", pci_name(hba->pdev)); return -1; } msleep(1); } msg_h = (struct st_msg_header *)hba->dma_mem; msg_h->handle = cpu_to_le64(hba->dma_handle); msg_h->flag = SS_HEAD_HANDSHAKE; h = (struct handshake_frame *)(msg_h + 1); h->rb_phy = cpu_to_le64(hba->dma_handle); h->req_sz = cpu_to_le16(hba->rq_size); h->req_cnt = cpu_to_le16(hba->rq_count+1); h->status_sz = cpu_to_le16(sizeof(struct status_msg)); h->status_cnt = cpu_to_le16(hba->sts_count+1); stex_gettime(&h->hosttime); h->partner_type = HMU_PARTNER_TYPE; h->extra_offset = h->extra_size = 0; scratch_size = (hba->sts_count+1)*sizeof(u32); h->scratch_size = cpu_to_le32(scratch_size); data = readl(base + YINT_EN); data &= ~4; writel(data, base + YINT_EN); writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); readl(base + YH2I_REQ_HI); writel(hba->dma_handle, base + YH2I_REQ); readl(base + YH2I_REQ); /* flush */ scratch = hba->scratch; before = jiffies; while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) { if (time_after(jiffies, 
before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): no signature after handshake frame\n", pci_name(hba->pdev)); ret = -1; break; } rmb(); msleep(1); } memset(scratch, 0, scratch_size); msg_h->flag = 0; return ret; } static int stex_handshake(struct st_hba *hba) { int err; unsigned long flags; unsigned int mu_status; err = (hba->cardtype == st_yel) ? stex_ss_handshake(hba) : stex_common_handshake(hba); spin_lock_irqsave(hba->host->host_lock, flags); mu_status = hba->mu_status; if (err == 0) { hba->req_head = 0; hba->req_tail = 0; hba->status_head = 0; hba->status_tail = 0; hba->out_req_cnt = 0; hba->mu_status = MU_STATE_STARTED; } else hba->mu_status = MU_STATE_FAILED; if (mu_status == MU_STATE_RESETTING) wake_up_all(&hba->reset_waitq); spin_unlock_irqrestore(hba->host->host_lock, flags); return err; } static int stex_abort(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct st_hba *hba = (struct st_hba *)host->hostdata; u16 tag = cmd->request->tag; void __iomem *base; u32 data; int result = SUCCESS; unsigned long flags; printk(KERN_INFO DRV_NAME "(%s): aborting command\n", pci_name(hba->pdev)); scsi_print_command(cmd); base = hba->mmio_base; spin_lock_irqsave(host->host_lock, flags); if (tag < host->can_queue && hba->ccb[tag].req && hba->ccb[tag].cmd == cmd) hba->wait_ccb = &hba->ccb[tag]; else goto out; if (hba->cardtype == st_yel) { data = readl(base + YI2H_INT); if (data == 0 || data == 0xffffffff) goto fail_out; writel(data, base + YI2H_INT_C); stex_ss_mu_intr(hba); } else { data = readl(base + ODBL); if (data == 0 || data == 0xffffffff) goto fail_out; writel(data, base + ODBL); readl(base + ODBL); /* flush */ stex_mu_intr(hba, data); } if (hba->wait_ccb == NULL) { printk(KERN_WARNING DRV_NAME "(%s): lost interrupt\n", pci_name(hba->pdev)); goto out; } fail_out: scsi_dma_unmap(cmd); hba->wait_ccb->req = NULL; /* nullify the req's future return */ hba->wait_ccb = NULL; result = FAILED; out: 
spin_unlock_irqrestore(host->host_lock, flags); return result; } static void stex_hard_reset(struct st_hba *hba) { struct pci_bus *bus; int i; u16 pci_cmd; u8 pci_bctl; for (i = 0; i < 16; i++) pci_read_config_dword(hba->pdev, i * 4, &hba->pdev->saved_config_space[i]); /* Reset secondary bus. Our controller(MU/ATU) is the only device on secondary bus. Consult Intel 80331/3 developer's manual for detail */ bus = hba->pdev->bus; pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl); pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); /* * 1 ms may be enough for 8-port controllers. But 16-port controllers * require more time to finish bus reset. Use 100 ms here for safety */ msleep(100); pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); for (i = 0; i < MU_HARD_RESET_WAIT; i++) { pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER)) break; msleep(1); } ssleep(5); for (i = 0; i < 16; i++) pci_write_config_dword(hba->pdev, i * 4, hba->pdev->saved_config_space[i]); } static int stex_yos_reset(struct st_hba *hba) { void __iomem *base; unsigned long flags, before; int ret = 0; base = hba->mmio_base; writel(MU_INBOUND_DOORBELL_RESET, base + IDBL); readl(base + IDBL); /* flush */ before = jiffies; while (hba->out_req_cnt > 0) { if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { printk(KERN_WARNING DRV_NAME "(%s): reset timeout\n", pci_name(hba->pdev)); ret = -1; break; } msleep(1); } spin_lock_irqsave(hba->host->host_lock, flags); if (ret == -1) hba->mu_status = MU_STATE_FAILED; else hba->mu_status = MU_STATE_STARTED; wake_up_all(&hba->reset_waitq); spin_unlock_irqrestore(hba->host->host_lock, flags); return ret; } static void stex_ss_reset(struct st_hba *hba) { writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); readl(hba->mmio_base + YH2I_INT); ssleep(5); } static int stex_do_reset(struct 
st_hba *hba) { struct st_ccb *ccb; unsigned long flags; unsigned int mu_status = MU_STATE_RESETTING; u16 tag; spin_lock_irqsave(hba->host->host_lock, flags); if (hba->mu_status == MU_STATE_STARTING) { spin_unlock_irqrestore(hba->host->host_lock, flags); printk(KERN_INFO DRV_NAME "(%s): request reset during init\n", pci_name(hba->pdev)); return 0; } while (hba->mu_status == MU_STATE_RESETTING) { spin_unlock_irqrestore(hba->host->host_lock, flags); wait_event_timeout(hba->reset_waitq, hba->mu_status != MU_STATE_RESETTING, MU_MAX_DELAY * HZ); spin_lock_irqsave(hba->host->host_lock, flags); mu_status = hba->mu_status; } if (mu_status != MU_STATE_RESETTING) { spin_unlock_irqrestore(hba->host->host_lock, flags); return (mu_status == MU_STATE_STARTED) ? 0 : -1; } hba->mu_status = MU_STATE_RESETTING; spin_unlock_irqrestore(hba->host->host_lock, flags); if (hba->cardtype == st_yosemite) return stex_yos_reset(hba); if (hba->cardtype == st_shasta) stex_hard_reset(hba); else if (hba->cardtype == st_yel) stex_ss_reset(hba); spin_lock_irqsave(hba->host->host_lock, flags); for (tag = 0; tag < hba->host->can_queue; tag++) { ccb = &hba->ccb[tag]; if (ccb->req == NULL) continue; ccb->req = NULL; if (ccb->cmd) { scsi_dma_unmap(ccb->cmd); ccb->cmd->result = DID_RESET << 16; ccb->cmd->scsi_done(ccb->cmd); ccb->cmd = NULL; } } spin_unlock_irqrestore(hba->host->host_lock, flags); if (stex_handshake(hba) == 0) return 0; printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n", pci_name(hba->pdev)); return -1; } static int stex_reset(struct scsi_cmnd *cmd) { struct st_hba *hba; hba = (struct st_hba *) &cmd->device->host->hostdata[0]; printk(KERN_INFO DRV_NAME "(%s): resetting host\n", pci_name(hba->pdev)); scsi_print_command(cmd); return stex_do_reset(hba) ? 
FAILED : SUCCESS; } static void stex_reset_work(struct work_struct *work) { struct st_hba *hba = container_of(work, struct st_hba, reset_work); stex_do_reset(hba); } static int stex_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { int heads = 255, sectors = 63; if (capacity < 0x200000) { heads = 64; sectors = 32; } sector_div(capacity, heads * sectors); geom[0] = heads; geom[1] = sectors; geom[2] = capacity; return 0; } static struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = DRV_NAME, .proc_name = DRV_NAME, .bios_param = stex_biosparam, .queuecommand = stex_queuecommand, .slave_alloc = stex_slave_alloc, .slave_configure = stex_slave_config, .slave_destroy = stex_slave_destroy, .eh_abort_handler = stex_abort, .eh_host_reset_handler = stex_reset, .this_id = -1, }; static struct pci_device_id stex_pci_tbl[] = { /* st_shasta */ { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */ { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX12350 */ { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX4350 */ { 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX24350 */ /* st_vsc */ { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc }, /* st_yosemite */ { 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite }, /* st_seq */ { 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq }, /* st_yel */ { 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel }, { 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel }, { } /* terminate list */ }; static struct st_card_info stex_card_info[] = { /* st_shasta */ { .max_id = 17, .max_lun = 8, .max_channel = 0, .rq_count = 32, .rq_size = 1048, .sts_count = 32, .alloc_rq = stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_vsc */ { .max_id = 129, .max_lun = 1, .max_channel = 0, .rq_count = 32, .rq_size = 1048, .sts_count = 32, .alloc_rq 
= stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_yosemite */ { .max_id = 2, .max_lun = 256, .max_channel = 0, .rq_count = 256, .rq_size = 1048, .sts_count = 256, .alloc_rq = stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_seq */ { .max_id = 129, .max_lun = 1, .max_channel = 0, .rq_count = 32, .rq_size = 1048, .sts_count = 32, .alloc_rq = stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_yel */ { .max_id = 129, .max_lun = 256, .max_channel = 3, .rq_count = 801, .rq_size = 512, .sts_count = 801, .alloc_rq = stex_ss_alloc_req, .map_sg = stex_ss_map_sg, .send = stex_ss_send_cmd, }, }; static int stex_set_dma_mask(struct pci_dev * pdev) { int ret; if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) return 0; ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!ret) ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); return ret; } static int stex_request_irq(struct st_hba *hba) { struct pci_dev *pdev = hba->pdev; int status; if (msi) { status = pci_enable_msi(pdev); if (status != 0) printk(KERN_ERR DRV_NAME "(%s): error %d setting up MSI\n", pci_name(pdev), status); else hba->msi_enabled = 1; } else hba->msi_enabled = 0; status = request_irq(pdev->irq, hba->cardtype == st_yel ? 
stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba); if (status != 0) { if (hba->msi_enabled) pci_disable_msi(pdev); } return status; } static void stex_free_irq(struct st_hba *hba) { struct pci_dev *pdev = hba->pdev; free_irq(pdev->irq, hba); if (hba->msi_enabled) pci_disable_msi(pdev); } static int __devinit stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct st_hba *hba; struct Scsi_Host *host; const struct st_card_info *ci = NULL; u32 sts_offset, cp_offset, scratch_offset; int err; err = pci_enable_device(pdev); if (err) return err; pci_set_master(pdev); host = scsi_host_alloc(&driver_template, sizeof(struct st_hba)); if (!host) { printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n", pci_name(pdev)); err = -ENOMEM; goto out_disable; } hba = (struct st_hba *)host->hostdata; memset(hba, 0, sizeof(struct st_hba)); err = pci_request_regions(pdev, DRV_NAME); if (err < 0) { printk(KERN_ERR DRV_NAME "(%s): request regions failed\n", pci_name(pdev)); goto out_scsi_host_put; } hba->mmio_base = pci_ioremap_bar(pdev, 0); if ( !hba->mmio_base) { printk(KERN_ERR DRV_NAME "(%s): memory map failed\n", pci_name(pdev)); err = -ENOMEM; goto out_release_regions; } err = stex_set_dma_mask(pdev); if (err) { printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n", pci_name(pdev)); goto out_iounmap; } hba->cardtype = (unsigned int) id->driver_data; ci = &stex_card_info[hba->cardtype]; sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size; if (hba->cardtype == st_yel) sts_offset += (ci->sts_count+1) * sizeof(u32); cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg); hba->dma_size = cp_offset + sizeof(struct st_frame); if (hba->cardtype == st_seq || (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { hba->extra_offset = hba->dma_size; hba->dma_size += ST_ADDITIONAL_MEM; } hba->dma_mem = dma_alloc_coherent(&pdev->dev, hba->dma_size, &hba->dma_handle, GFP_KERNEL); if (!hba->dma_mem) { /* Retry minimum coherent mapping for 
st_seq and st_vsc */ if (hba->cardtype == st_seq || (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { printk(KERN_WARNING DRV_NAME "(%s): allocating min buffer for controller\n", pci_name(pdev)); hba->dma_size = hba->extra_offset + ST_ADDITIONAL_MEM_MIN; hba->dma_mem = dma_alloc_coherent(&pdev->dev, hba->dma_size, &hba->dma_handle, GFP_KERNEL); } if (!hba->dma_mem) { err = -ENOMEM; printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", pci_name(pdev)); goto out_iounmap; } } hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL); if (!hba->ccb) { err = -ENOMEM; printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n", pci_name(pdev)); goto out_pci_free; } if (hba->cardtype == st_yel) hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset); hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset); hba->copy_buffer = hba->dma_mem + cp_offset; hba->rq_count = ci->rq_count; hba->rq_size = ci->rq_size; hba->sts_count = ci->sts_count; hba->alloc_rq = ci->alloc_rq; hba->map_sg = ci->map_sg; hba->send = ci->send; hba->mu_status = MU_STATE_STARTING; if (hba->cardtype == st_yel) host->sg_tablesize = 38; else host->sg_tablesize = 32; host->can_queue = ci->rq_count; host->cmd_per_lun = ci->rq_count; host->max_id = ci->max_id; host->max_lun = ci->max_lun; host->max_channel = ci->max_channel; host->unique_id = host->host_no; host->max_cmd_len = STEX_CDB_LENGTH; hba->host = host; hba->pdev = pdev; init_waitqueue_head(&hba->reset_waitq); snprintf(hba->work_q_name, sizeof(hba->work_q_name), "stex_wq_%d", host->host_no); hba->work_q = create_singlethread_workqueue(hba->work_q_name); if (!hba->work_q) { printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n", pci_name(pdev)); err = -ENOMEM; goto out_ccb_free; } INIT_WORK(&hba->reset_work, stex_reset_work); err = stex_request_irq(hba); if (err) { printk(KERN_ERR DRV_NAME "(%s): request irq failed\n", pci_name(pdev)); goto out_free_wq; } err = stex_handshake(hba); if (err) goto out_free_irq; err = 
scsi_init_shared_tag_map(host, host->can_queue); if (err) { printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n", pci_name(pdev)); goto out_free_irq; } pci_set_drvdata(pdev, hba); err = scsi_add_host(host, &pdev->dev); if (err) { printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n", pci_name(pdev)); goto out_free_irq; } scsi_scan_host(host); return 0; out_free_irq: stex_free_irq(hba); out_free_wq: destroy_workqueue(hba->work_q); out_ccb_free: kfree(hba->ccb); out_pci_free: dma_free_coherent(&pdev->dev, hba->dma_size, hba->dma_mem, hba->dma_handle); out_iounmap: iounmap(hba->mmio_base); out_release_regions: pci_release_regions(pdev); out_scsi_host_put: scsi_host_put(host); out_disable: pci_disable_device(pdev); return err; } static void stex_hba_stop(struct st_hba *hba) { struct req_msg *req; struct st_msg_header *msg_h; unsigned long flags; unsigned long before; u16 tag = 0; spin_lock_irqsave(hba->host->host_lock, flags); req = hba->alloc_rq(hba); if (hba->cardtype == st_yel) { msg_h = (struct st_msg_header *)req - 1; memset(msg_h, 0, hba->rq_size); } else memset(req, 0, hba->rq_size); if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) { req->cdb[0] = MGT_CMD; req->cdb[1] = MGT_CMD_SIGNATURE; req->cdb[2] = CTLR_CONFIG_CMD; req->cdb[3] = CTLR_SHUTDOWN; } else { req->cdb[0] = CONTROLLER_CMD; req->cdb[1] = CTLR_POWER_STATE_CHANGE; req->cdb[2] = CTLR_POWER_SAVING; } hba->ccb[tag].cmd = NULL; hba->ccb[tag].sg_count = 0; hba->ccb[tag].sense_bufflen = 0; hba->ccb[tag].sense_buffer = NULL; hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE; hba->send(hba, req, tag); spin_unlock_irqrestore(hba->host->host_lock, flags); before = jiffies; while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) { if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { hba->ccb[tag].req_type = 0; return; } msleep(1); } } static void stex_hba_free(struct st_hba *hba) { stex_free_irq(hba); destroy_workqueue(hba->work_q); iounmap(hba->mmio_base); pci_release_regions(hba->pdev); 
kfree(hba->ccb); dma_free_coherent(&hba->pdev->dev, hba->dma_size, hba->dma_mem, hba->dma_handle); } static void stex_remove(struct pci_dev *pdev) { struct st_hba *hba = pci_get_drvdata(pdev); scsi_remove_host(hba->host); pci_set_drvdata(pdev, NULL); stex_hba_stop(hba); stex_hba_free(hba); scsi_host_put(hba->host); pci_disable_device(pdev); } static void stex_shutdown(struct pci_dev *pdev) { struct st_hba *hba = pci_get_drvdata(pdev); stex_hba_stop(hba); } MODULE_DEVICE_TABLE(pci, stex_pci_tbl); static struct pci_driver stex_pci_driver = { .name = DRV_NAME, .id_table = stex_pci_tbl, .probe = stex_probe, .remove = __devexit_p(stex_remove), .shutdown = stex_shutdown, }; static int __init stex_init(void) { printk(KERN_INFO DRV_NAME ": Promise SuperTrak EX Driver version: %s\n", ST_DRIVER_VERSION); return pci_register_driver(&stex_pci_driver); } static void __exit stex_exit(void) { pci_unregister_driver(&stex_pci_driver); } module_init(stex_init); module_exit(stex_exit);
gpl-2.0
v-superuser/android_kernel_sony_taoshan
arch/sh/kernel/crash_dump.c
11914
1310
/* * crash_dump.c - Memory preserving reboot related code. * * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) * Copyright (C) IBM Corporation, 2004. All rights reserved */ #include <linux/errno.h> #include <linux/crash_dump.h> #include <linux/io.h> #include <asm/uaccess.h> /** * copy_oldmem_page - copy one page from "oldmem" * @pfn: page frame number to be copied * @buf: target memory address for the copy; this can be in kernel address * space or user address space (see @userbuf) * @csize: number of bytes to copy * @offset: offset in bytes into the page (based on pfn) to begin the copy * @userbuf: if set, @buf is in user address space, use copy_to_user(), * otherwise @buf is in kernel address space, use memcpy(). * * Copy a page from "oldmem". For this page, there is no pte mapped * in the current kernel. We stitch up a pte, similar to kmap_atomic. */ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, unsigned long offset, int userbuf) { void *vaddr; if (!csize) return 0; vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); if (userbuf) { if (copy_to_user(buf, (vaddr + offset), csize)) { iounmap(vaddr); return -EFAULT; } } else memcpy(buf, (vaddr + offset), csize); iounmap(vaddr); return csize; }
gpl-2.0
MoKee/android_kernel_asus_grouper
drivers/video/geode/video_gx.c
13962
10122
/* * Geode GX video processor device. * * Copyright (C) 2006 Arcom Control Systems Ltd. * * Portions from AMD's original 2.4 driver: * Copyright (C) 2004 Advanced Micro Devices, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/fb.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/delay.h> #include <asm/msr.h> #include <linux/cs5535.h> #include "gxfb.h" /* * Tables of register settings for various DOTCLKs. */ struct gx_pll_entry { long pixclock; /* ps */ u32 sys_rstpll_bits; u32 dotpll_value; }; #define POSTDIV3 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3) #define PREMULT2 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPREMULT2) #define PREDIV2 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3) static const struct gx_pll_entry gx_pll_table_48MHz[] = { { 40123, POSTDIV3, 0x00000BF2 }, /* 24.9230 */ { 39721, 0, 0x00000037 }, /* 25.1750 */ { 35308, POSTDIV3|PREMULT2, 0x00000B1A }, /* 28.3220 */ { 31746, POSTDIV3, 0x000002D2 }, /* 31.5000 */ { 27777, POSTDIV3|PREMULT2, 0x00000FE2 }, /* 36.0000 */ { 26666, POSTDIV3, 0x0000057A }, /* 37.5000 */ { 25000, POSTDIV3, 0x0000030A }, /* 40.0000 */ { 22271, 0, 0x00000063 }, /* 44.9000 */ { 20202, 0, 0x0000054B }, /* 49.5000 */ { 20000, 0, 0x0000026E }, /* 50.0000 */ { 19860, PREMULT2, 0x00000037 }, /* 50.3500 */ { 18518, POSTDIV3|PREMULT2, 0x00000B0D }, /* 54.0000 */ { 17777, 0, 0x00000577 }, /* 56.2500 */ { 17733, 0, 0x000007F7 }, /* 56.3916 */ { 17653, 0, 0x0000057B }, /* 56.6444 */ { 16949, PREMULT2, 0x00000707 }, /* 59.0000 */ { 15873, POSTDIV3|PREMULT2, 0x00000B39 }, /* 63.0000 */ { 15384, POSTDIV3|PREMULT2, 0x00000B45 }, /* 65.0000 */ { 14814, POSTDIV3|PREMULT2, 0x00000FC1 }, /* 67.5000 */ { 14124, POSTDIV3, 0x00000561 }, /* 70.8000 */ { 13888, POSTDIV3, 0x000007E1 }, /* 72.0000 */ { 13426, PREMULT2, 0x00000F4A }, /* 74.4810 */ 
{ 13333, 0, 0x00000052 }, /* 75.0000 */ { 12698, 0, 0x00000056 }, /* 78.7500 */ { 12500, POSTDIV3|PREMULT2, 0x00000709 }, /* 80.0000 */ { 11135, PREMULT2, 0x00000262 }, /* 89.8000 */ { 10582, 0, 0x000002D2 }, /* 94.5000 */ { 10101, PREMULT2, 0x00000B4A }, /* 99.0000 */ { 10000, PREMULT2, 0x00000036 }, /* 100.0000 */ { 9259, 0, 0x000007E2 }, /* 108.0000 */ { 8888, 0, 0x000007F6 }, /* 112.5000 */ { 7692, POSTDIV3|PREMULT2, 0x00000FB0 }, /* 130.0000 */ { 7407, POSTDIV3|PREMULT2, 0x00000B50 }, /* 135.0000 */ { 6349, 0, 0x00000055 }, /* 157.5000 */ { 6172, 0, 0x000009C1 }, /* 162.0000 */ { 5787, PREMULT2, 0x0000002D }, /* 172.798 */ { 5698, 0, 0x000002C1 }, /* 175.5000 */ { 5291, 0, 0x000002D1 }, /* 189.0000 */ { 4938, 0, 0x00000551 }, /* 202.5000 */ { 4357, 0, 0x0000057D }, /* 229.5000 */ }; static const struct gx_pll_entry gx_pll_table_14MHz[] = { { 39721, 0, 0x00000037 }, /* 25.1750 */ { 35308, 0, 0x00000B7B }, /* 28.3220 */ { 31746, 0, 0x000004D3 }, /* 31.5000 */ { 27777, 0, 0x00000BE3 }, /* 36.0000 */ { 26666, 0, 0x0000074F }, /* 37.5000 */ { 25000, 0, 0x0000050B }, /* 40.0000 */ { 22271, 0, 0x00000063 }, /* 44.9000 */ { 20202, 0, 0x0000054B }, /* 49.5000 */ { 20000, 0, 0x0000026E }, /* 50.0000 */ { 19860, 0, 0x000007C3 }, /* 50.3500 */ { 18518, 0, 0x000007E3 }, /* 54.0000 */ { 17777, 0, 0x00000577 }, /* 56.2500 */ { 17733, 0, 0x000002FB }, /* 56.3916 */ { 17653, 0, 0x0000057B }, /* 56.6444 */ { 16949, 0, 0x0000058B }, /* 59.0000 */ { 15873, 0, 0x0000095E }, /* 63.0000 */ { 15384, 0, 0x0000096A }, /* 65.0000 */ { 14814, 0, 0x00000BC2 }, /* 67.5000 */ { 14124, 0, 0x0000098A }, /* 70.8000 */ { 13888, 0, 0x00000BE2 }, /* 72.0000 */ { 13333, 0, 0x00000052 }, /* 75.0000 */ { 12698, 0, 0x00000056 }, /* 78.7500 */ { 12500, 0, 0x0000050A }, /* 80.0000 */ { 11135, 0, 0x0000078E }, /* 89.8000 */ { 10582, 0, 0x000002D2 }, /* 94.5000 */ { 10101, 0, 0x000011F6 }, /* 99.0000 */ { 10000, 0, 0x0000054E }, /* 100.0000 */ { 9259, 0, 0x000007E2 }, /* 108.0000 */ { 8888, 0, 0x000002FA 
}, /* 112.5000 */ { 7692, 0, 0x00000BB1 }, /* 130.0000 */ { 7407, 0, 0x00000975 }, /* 135.0000 */ { 6349, 0, 0x00000055 }, /* 157.5000 */ { 6172, 0, 0x000009C1 }, /* 162.0000 */ { 5698, 0, 0x000002C1 }, /* 175.5000 */ { 5291, 0, 0x00000539 }, /* 189.0000 */ { 4938, 0, 0x00000551 }, /* 202.5000 */ { 4357, 0, 0x0000057D }, /* 229.5000 */ }; void gx_set_dclk_frequency(struct fb_info *info) { const struct gx_pll_entry *pll_table; int pll_table_len; int i, best_i; long min, diff; u64 dotpll, sys_rstpll; int timeout = 1000; /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */ if (cpu_data(0).x86_mask == 1) { pll_table = gx_pll_table_14MHz; pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz); } else { pll_table = gx_pll_table_48MHz; pll_table_len = ARRAY_SIZE(gx_pll_table_48MHz); } /* Search the table for the closest pixclock. */ best_i = 0; min = abs(pll_table[0].pixclock - info->var.pixclock); for (i = 1; i < pll_table_len; i++) { diff = abs(pll_table[i].pixclock - info->var.pixclock); if (diff < min) { min = diff; best_i = i; } } rdmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll); rdmsrl(MSR_GLCP_DOTPLL, dotpll); /* Program new M, N and P. */ dotpll &= 0x00000000ffffffffull; dotpll |= (u64)pll_table[best_i].dotpll_value << 32; dotpll |= MSR_GLCP_DOTPLL_DOTRESET; dotpll &= ~MSR_GLCP_DOTPLL_BYPASS; wrmsrl(MSR_GLCP_DOTPLL, dotpll); /* Program dividers. */ sys_rstpll &= ~( MSR_GLCP_SYS_RSTPLL_DOTPREDIV2 | MSR_GLCP_SYS_RSTPLL_DOTPREMULT2 | MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 ); sys_rstpll |= pll_table[best_i].sys_rstpll_bits; wrmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll); /* Clear reset bit to start PLL. */ dotpll &= ~(MSR_GLCP_DOTPLL_DOTRESET); wrmsrl(MSR_GLCP_DOTPLL, dotpll); /* Wait for LOCK bit. 
*/ do { rdmsrl(MSR_GLCP_DOTPLL, dotpll); } while (timeout-- && !(dotpll & MSR_GLCP_DOTPLL_LOCK)); } static void gx_configure_tft(struct fb_info *info) { struct gxfb_par *par = info->par; unsigned long val; unsigned long fp; /* Set up the DF pad select MSR */ rdmsrl(MSR_GX_MSR_PADSEL, val); val &= ~MSR_GX_MSR_PADSEL_MASK; val |= MSR_GX_MSR_PADSEL_TFT; wrmsrl(MSR_GX_MSR_PADSEL, val); /* Turn off the panel */ fp = read_fp(par, FP_PM); fp &= ~FP_PM_P; write_fp(par, FP_PM, fp); /* Set timing 1 */ fp = read_fp(par, FP_PT1); fp &= FP_PT1_VSIZE_MASK; fp |= info->var.yres << FP_PT1_VSIZE_SHIFT; write_fp(par, FP_PT1, fp); /* Timing 2 */ /* Set bits that are always on for TFT */ fp = 0x0F100000; /* Configure sync polarity */ if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT)) fp |= FP_PT2_VSP; if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT)) fp |= FP_PT2_HSP; write_fp(par, FP_PT2, fp); /* Set the dither control */ write_fp(par, FP_DFC, FP_DFC_NFI); /* Enable the FP data and power (in case the BIOS didn't) */ fp = read_vp(par, VP_DCFG); fp |= VP_DCFG_FP_PWR_EN | VP_DCFG_FP_DATA_EN; write_vp(par, VP_DCFG, fp); /* Unblank the panel */ fp = read_fp(par, FP_PM); fp |= FP_PM_P; write_fp(par, FP_PM, fp); } void gx_configure_display(struct fb_info *info) { struct gxfb_par *par = info->par; u32 dcfg, misc; /* Write the display configuration */ dcfg = read_vp(par, VP_DCFG); /* Disable hsync and vsync */ dcfg &= ~(VP_DCFG_VSYNC_EN | VP_DCFG_HSYNC_EN); write_vp(par, VP_DCFG, dcfg); /* Clear bits from existing mode. */ dcfg &= ~(VP_DCFG_CRT_SYNC_SKW | VP_DCFG_CRT_HSYNC_POL | VP_DCFG_CRT_VSYNC_POL | VP_DCFG_VSYNC_EN | VP_DCFG_HSYNC_EN); /* Set default sync skew. */ dcfg |= VP_DCFG_CRT_SYNC_SKW_DEFAULT; /* Enable hsync and vsync. 
*/ dcfg |= VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN; misc = read_vp(par, VP_MISC); /* Disable gamma correction */ misc |= VP_MISC_GAM_EN; if (par->enable_crt) { /* Power up the CRT DACs */ misc &= ~(VP_MISC_APWRDN | VP_MISC_DACPWRDN); write_vp(par, VP_MISC, misc); /* Only change the sync polarities if we are running * in CRT mode. The FP polarities will be handled in * gxfb_configure_tft */ if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT)) dcfg |= VP_DCFG_CRT_HSYNC_POL; if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT)) dcfg |= VP_DCFG_CRT_VSYNC_POL; } else { /* Power down the CRT DACs if in FP mode */ misc |= (VP_MISC_APWRDN | VP_MISC_DACPWRDN); write_vp(par, VP_MISC, misc); } /* Enable the display logic */ /* Set up the DACS to blank normally */ dcfg |= VP_DCFG_CRT_EN | VP_DCFG_DAC_BL_EN; /* Enable the external DAC VREF? */ write_vp(par, VP_DCFG, dcfg); /* Set up the flat panel (if it is enabled) */ if (par->enable_crt == 0) gx_configure_tft(info); } int gx_blank_display(struct fb_info *info, int blank_mode) { struct gxfb_par *par = info->par; u32 dcfg, fp_pm; int blank, hsync, vsync, crt; /* CRT power saving modes. */ switch (blank_mode) { case FB_BLANK_UNBLANK: blank = 0; hsync = 1; vsync = 1; crt = 1; break; case FB_BLANK_NORMAL: blank = 1; hsync = 1; vsync = 1; crt = 1; break; case FB_BLANK_VSYNC_SUSPEND: blank = 1; hsync = 1; vsync = 0; crt = 1; break; case FB_BLANK_HSYNC_SUSPEND: blank = 1; hsync = 0; vsync = 1; crt = 1; break; case FB_BLANK_POWERDOWN: blank = 1; hsync = 0; vsync = 0; crt = 0; break; default: return -EINVAL; } dcfg = read_vp(par, VP_DCFG); dcfg &= ~(VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN | VP_DCFG_CRT_EN); if (!blank) dcfg |= VP_DCFG_DAC_BL_EN; if (hsync) dcfg |= VP_DCFG_HSYNC_EN; if (vsync) dcfg |= VP_DCFG_VSYNC_EN; if (crt) dcfg |= VP_DCFG_CRT_EN; write_vp(par, VP_DCFG, dcfg); /* Power on/off flat panel. 
*/ if (par->enable_crt == 0) { fp_pm = read_fp(par, FP_PM); if (blank_mode == FB_BLANK_POWERDOWN) fp_pm &= ~FP_PM_P; else fp_pm |= FP_PM_P; write_fp(par, FP_PM, fp_pm); } return 0; }
gpl-2.0
OpenSwift/android_kernel_swift
fs/ncpfs/ioctl.c
139
22395
/* * ioctl.c * * Copyright (C) 1995, 1996 by Volker Lendecke * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache * Modified 1998, 1999 Wolfram Pienkoss for NLS * */ #include <linux/capability.h> #include <linux/compat.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/ioctl.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/mount.h> #include <linux/highuid.h> #include <linux/smp_lock.h> #include <linux/vmalloc.h> #include <linux/sched.h> #include <linux/ncp_fs.h> #include <asm/uaccess.h> #include "ncplib_kernel.h" /* maximum limit for ncp_objectname_ioctl */ #define NCP_OBJECT_NAME_MAX_LEN 4096 /* maximum limit for ncp_privatedata_ioctl */ #define NCP_PRIVATE_DATA_MAX_LEN 8192 /* maximum negotiable packet size */ #define NCP_PACKET_SIZE_INTERNAL 65536 static int ncp_get_fs_info(struct ncp_server * server, struct file *file, struct ncp_fs_info __user *arg) { struct inode *inode = file->f_path.dentry->d_inode; struct ncp_fs_info info; if (file_permission(file, MAY_WRITE) != 0 && current_uid() != server->m.mounted_uid) return -EACCES; if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT; if (info.version != NCP_GET_FS_INFO_VERSION) { DPRINTK("info.version invalid: %d\n", info.version); return -EINVAL; } /* TODO: info.addr = server->m.serv_addr; */ SET_UID(info.mounted_uid, server->m.mounted_uid); info.connection = server->connection; info.buffer_size = server->buffer_size; info.volume_number = NCP_FINFO(inode)->volNumber; info.directory_id = NCP_FINFO(inode)->DosDirNum; if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; return 0; } static int ncp_get_fs_info_v2(struct ncp_server * server, struct file *file, struct ncp_fs_info_v2 __user * arg) { struct inode *inode = file->f_path.dentry->d_inode; struct ncp_fs_info_v2 info2; if (file_permission(file, MAY_WRITE) != 0 && current_uid() != server->m.mounted_uid) return -EACCES; if (copy_from_user(&info2, arg, sizeof(info2))) return -EFAULT; if 
(info2.version != NCP_GET_FS_INFO_VERSION_V2) { DPRINTK("info.version invalid: %d\n", info2.version); return -EINVAL; } info2.mounted_uid = server->m.mounted_uid; info2.connection = server->connection; info2.buffer_size = server->buffer_size; info2.volume_number = NCP_FINFO(inode)->volNumber; info2.directory_id = NCP_FINFO(inode)->DosDirNum; info2.dummy1 = info2.dummy2 = info2.dummy3 = 0; if (copy_to_user(arg, &info2, sizeof(info2))) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT struct compat_ncp_objectname_ioctl { s32 auth_type; u32 object_name_len; compat_caddr_t object_name; /* a userspace data, in most cases user name */ }; struct compat_ncp_fs_info_v2 { s32 version; u32 mounted_uid; u32 connection; u32 buffer_size; u32 volume_number; u32 directory_id; u32 dummy1; u32 dummy2; u32 dummy3; }; struct compat_ncp_ioctl_request { u32 function; u32 size; compat_caddr_t data; }; struct compat_ncp_privatedata_ioctl { u32 len; compat_caddr_t data; /* ~1000 for NDS */ }; #define NCP_IOC_GET_FS_INFO_V2_32 _IOWR('n', 4, struct compat_ncp_fs_info_v2) #define NCP_IOC_NCPREQUEST_32 _IOR('n', 1, struct compat_ncp_ioctl_request) #define NCP_IOC_GETOBJECTNAME_32 _IOWR('n', 9, struct compat_ncp_objectname_ioctl) #define NCP_IOC_SETOBJECTNAME_32 _IOR('n', 9, struct compat_ncp_objectname_ioctl) #define NCP_IOC_GETPRIVATEDATA_32 _IOWR('n', 10, struct compat_ncp_privatedata_ioctl) #define NCP_IOC_SETPRIVATEDATA_32 _IOR('n', 10, struct compat_ncp_privatedata_ioctl) static int ncp_get_compat_fs_info_v2(struct ncp_server * server, struct file *file, struct compat_ncp_fs_info_v2 __user * arg) { struct inode *inode = file->f_path.dentry->d_inode; struct compat_ncp_fs_info_v2 info2; if (file_permission(file, MAY_WRITE) != 0 && current_uid() != server->m.mounted_uid) return -EACCES; if (copy_from_user(&info2, arg, sizeof(info2))) return -EFAULT; if (info2.version != NCP_GET_FS_INFO_VERSION_V2) { DPRINTK("info.version invalid: %d\n", info2.version); return -EINVAL; } info2.mounted_uid = 
server->m.mounted_uid; info2.connection = server->connection; info2.buffer_size = server->buffer_size; info2.volume_number = NCP_FINFO(inode)->volNumber; info2.directory_id = NCP_FINFO(inode)->DosDirNum; info2.dummy1 = info2.dummy2 = info2.dummy3 = 0; if (copy_to_user(arg, &info2, sizeof(info2))) return -EFAULT; return 0; } #endif #define NCP_IOC_GETMOUNTUID16 _IOW('n', 2, u16) #define NCP_IOC_GETMOUNTUID32 _IOW('n', 2, u32) #define NCP_IOC_GETMOUNTUID64 _IOW('n', 2, u64) #ifdef CONFIG_NCPFS_NLS /* Here we are select the iocharset and the codepage for NLS. * Thanks Petr Vandrovec for idea and many hints. */ static int ncp_set_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg) { struct ncp_nls_ioctl user; struct nls_table *codepage; struct nls_table *iocharset; struct nls_table *oldset_io; struct nls_table *oldset_cp; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (server->root_setuped) return -EBUSY; if (copy_from_user(&user, arg, sizeof(user))) return -EFAULT; codepage = NULL; user.codepage[NCP_IOCSNAME_LEN] = 0; if (!user.codepage[0] || !strcmp(user.codepage, "default")) codepage = load_nls_default(); else { codepage = load_nls(user.codepage); if (!codepage) { return -EBADRQC; } } iocharset = NULL; user.iocharset[NCP_IOCSNAME_LEN] = 0; if (!user.iocharset[0] || !strcmp(user.iocharset, "default")) { iocharset = load_nls_default(); NCP_CLR_FLAG(server, NCP_FLAG_UTF8); } else if (!strcmp(user.iocharset, "utf8")) { iocharset = load_nls_default(); NCP_SET_FLAG(server, NCP_FLAG_UTF8); } else { iocharset = load_nls(user.iocharset); if (!iocharset) { unload_nls(codepage); return -EBADRQC; } NCP_CLR_FLAG(server, NCP_FLAG_UTF8); } oldset_cp = server->nls_vol; server->nls_vol = codepage; oldset_io = server->nls_io; server->nls_io = iocharset; if (oldset_cp) unload_nls(oldset_cp); if (oldset_io) unload_nls(oldset_io); return 0; } static int ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg) { struct ncp_nls_ioctl user; int len; 
memset(&user, 0, sizeof(user)); if (server->nls_vol && server->nls_vol->charset) { len = strlen(server->nls_vol->charset); if (len > NCP_IOCSNAME_LEN) len = NCP_IOCSNAME_LEN; strncpy(user.codepage, server->nls_vol->charset, len); user.codepage[len] = 0; } if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) strcpy(user.iocharset, "utf8"); else if (server->nls_io && server->nls_io->charset) { len = strlen(server->nls_io->charset); if (len > NCP_IOCSNAME_LEN) len = NCP_IOCSNAME_LEN; strncpy(user.iocharset, server->nls_io->charset, len); user.iocharset[len] = 0; } if (copy_to_user(arg, &user, sizeof(user))) return -EFAULT; return 0; } #endif /* CONFIG_NCPFS_NLS */ static int __ncp_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { struct ncp_server *server = NCP_SERVER(inode); int result; struct ncp_ioctl_request request; char* bouncebuffer; void __user *argp = (void __user *)arg; uid_t uid = current_uid(); switch (cmd) { #ifdef CONFIG_COMPAT case NCP_IOC_NCPREQUEST_32: #endif case NCP_IOC_NCPREQUEST: if (file_permission(filp, MAY_WRITE) != 0 && uid != server->m.mounted_uid) return -EACCES; #ifdef CONFIG_COMPAT if (cmd == NCP_IOC_NCPREQUEST_32) { struct compat_ncp_ioctl_request request32; if (copy_from_user(&request32, argp, sizeof(request32))) return -EFAULT; request.function = request32.function; request.size = request32.size; request.data = compat_ptr(request32.data); } else #endif if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; if ((request.function > 255) || (request.size > NCP_PACKET_SIZE - sizeof(struct ncp_request_header))) { return -EINVAL; } bouncebuffer = vmalloc(NCP_PACKET_SIZE_INTERNAL); if (!bouncebuffer) return -ENOMEM; if (copy_from_user(bouncebuffer, request.data, request.size)) { vfree(bouncebuffer); return -EFAULT; } ncp_lock_server(server); /* FIXME: We hack around in the server's structures here to be able to use ncp_request */ server->has_subfunction = 0; server->current_size = request.size; 
memcpy(server->packet, bouncebuffer, request.size); result = ncp_request2(server, request.function, bouncebuffer, NCP_PACKET_SIZE_INTERNAL); if (result < 0) result = -EIO; else result = server->reply_size; ncp_unlock_server(server); DPRINTK("ncp_ioctl: copy %d bytes\n", result); if (result >= 0) if (copy_to_user(request.data, bouncebuffer, result)) result = -EFAULT; vfree(bouncebuffer); return result; case NCP_IOC_CONN_LOGGED_IN: if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (!(server->m.int_flags & NCP_IMOUNT_LOGGEDIN_POSSIBLE)) return -EINVAL; if (server->root_setuped) return -EBUSY; server->root_setuped = 1; return ncp_conn_logged_in(inode->i_sb); case NCP_IOC_GET_FS_INFO: return ncp_get_fs_info(server, filp, argp); case NCP_IOC_GET_FS_INFO_V2: return ncp_get_fs_info_v2(server, filp, argp); #ifdef CONFIG_COMPAT case NCP_IOC_GET_FS_INFO_V2_32: return ncp_get_compat_fs_info_v2(server, filp, argp); #endif /* we have too many combinations of CONFIG_COMPAT, * CONFIG_64BIT and CONFIG_UID16, so just handle * any of the possible ioctls */ case NCP_IOC_GETMOUNTUID16: case NCP_IOC_GETMOUNTUID32: case NCP_IOC_GETMOUNTUID64: if (file_permission(filp, MAY_READ) != 0 && uid != server->m.mounted_uid) return -EACCES; if (cmd == NCP_IOC_GETMOUNTUID16) { u16 uid; SET_UID(uid, server->m.mounted_uid); if (put_user(uid, (u16 __user *)argp)) return -EFAULT; } else if (cmd == NCP_IOC_GETMOUNTUID32) { if (put_user(server->m.mounted_uid, (u32 __user *)argp)) return -EFAULT; } else { if (put_user(server->m.mounted_uid, (u64 __user *)argp)) return -EFAULT; } return 0; case NCP_IOC_GETROOT: { struct ncp_setroot_ioctl sr; if (file_permission(filp, MAY_READ) != 0 && uid != server->m.mounted_uid) return -EACCES; if (server->m.mounted_vol[0]) { struct dentry* dentry = inode->i_sb->s_root; if (dentry) { struct inode* s_inode = dentry->d_inode; if (s_inode) { sr.volNumber = NCP_FINFO(s_inode)->volNumber; sr.dirEntNum = NCP_FINFO(s_inode)->dirEntNum; sr.namespace = 
server->name_space[sr.volNumber]; } else DPRINTK("ncpfs: s_root->d_inode==NULL\n"); } else DPRINTK("ncpfs: s_root==NULL\n"); } else { sr.volNumber = -1; sr.namespace = 0; sr.dirEntNum = 0; } if (copy_to_user(argp, &sr, sizeof(sr))) return -EFAULT; return 0; } case NCP_IOC_SETROOT: { struct ncp_setroot_ioctl sr; __u32 vnum; __le32 de; __le32 dosde; struct dentry* dentry; if (!capable(CAP_SYS_ADMIN)) { return -EACCES; } if (server->root_setuped) return -EBUSY; if (copy_from_user(&sr, argp, sizeof(sr))) return -EFAULT; if (sr.volNumber < 0) { server->m.mounted_vol[0] = 0; vnum = NCP_NUMBER_OF_VOLUMES; de = 0; dosde = 0; } else if (sr.volNumber >= NCP_NUMBER_OF_VOLUMES) { return -EINVAL; } else if (ncp_mount_subdir(server, sr.volNumber, sr.namespace, sr.dirEntNum, &vnum, &de, &dosde)) { return -ENOENT; } dentry = inode->i_sb->s_root; server->root_setuped = 1; if (dentry) { struct inode* s_inode = dentry->d_inode; if (inode) { NCP_FINFO(s_inode)->volNumber = vnum; NCP_FINFO(s_inode)->dirEntNum = de; NCP_FINFO(s_inode)->DosDirNum = dosde; } else DPRINTK("ncpfs: s_root->d_inode==NULL\n"); } else DPRINTK("ncpfs: s_root==NULL\n"); return 0; } #ifdef CONFIG_NCPFS_PACKET_SIGNING case NCP_IOC_SIGN_INIT: if (file_permission(filp, MAY_WRITE) != 0 && uid != server->m.mounted_uid) return -EACCES; if (argp) { if (server->sign_wanted) { struct ncp_sign_init sign; if (copy_from_user(&sign, argp, sizeof(sign))) return -EFAULT; memcpy(server->sign_root,sign.sign_root,8); memcpy(server->sign_last,sign.sign_last,16); server->sign_active = 1; } /* ignore when signatures not wanted */ } else { server->sign_active = 0; } return 0; case NCP_IOC_SIGN_WANTED: if (file_permission(filp, MAY_READ) != 0 && uid != server->m.mounted_uid) return -EACCES; if (put_user(server->sign_wanted, (int __user *)argp)) return -EFAULT; return 0; case NCP_IOC_SET_SIGN_WANTED: { int newstate; if (file_permission(filp, MAY_WRITE) != 0 && uid != server->m.mounted_uid) return -EACCES; /* get only low 8 bits... 
*/ if (get_user(newstate, (unsigned char __user *)argp)) return -EFAULT; if (server->sign_active) { /* cannot turn signatures OFF when active */ if (!newstate) return -EINVAL; } else { server->sign_wanted = newstate != 0; } return 0; } #endif /* CONFIG_NCPFS_PACKET_SIGNING */ #ifdef CONFIG_NCPFS_IOCTL_LOCKING case NCP_IOC_LOCKUNLOCK: if (file_permission(filp, MAY_WRITE) != 0 && uid != server->m.mounted_uid) return -EACCES; { struct ncp_lock_ioctl rqdata; if (copy_from_user(&rqdata, argp, sizeof(rqdata))) return -EFAULT; if (rqdata.origin != 0) return -EINVAL; /* check for cmd */ switch (rqdata.cmd) { case NCP_LOCK_EX: case NCP_LOCK_SH: if (rqdata.timeout == 0) rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT) rqdata.timeout = NCP_LOCK_MAX_TIMEOUT; break; case NCP_LOCK_LOG: rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; /* has no effect */ case NCP_LOCK_CLEAR: break; default: return -EINVAL; } /* locking needs both read and write access */ if ((result = ncp_make_open(inode, O_RDWR)) != 0) { return result; } result = -EIO; if (!ncp_conn_valid(server)) goto outrel; result = -EISDIR; if (!S_ISREG(inode->i_mode)) goto outrel; if (rqdata.cmd == NCP_LOCK_CLEAR) { result = ncp_ClearPhysicalRecord(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle, rqdata.offset, rqdata.length); if (result > 0) result = 0; /* no such lock */ } else { int lockcmd; switch (rqdata.cmd) { case NCP_LOCK_EX: lockcmd=1; break; case NCP_LOCK_SH: lockcmd=3; break; default: lockcmd=0; break; } result = ncp_LogPhysicalRecord(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle, lockcmd, rqdata.offset, rqdata.length, rqdata.timeout); if (result > 0) result = -EAGAIN; } outrel: ncp_inode_close(inode); return result; } #endif /* CONFIG_NCPFS_IOCTL_LOCKING */ #ifdef CONFIG_COMPAT case NCP_IOC_GETOBJECTNAME_32: if (uid != server->m.mounted_uid) return -EACCES; { struct compat_ncp_objectname_ioctl user; size_t outl; if (copy_from_user(&user, argp, sizeof(user))) return 
-EFAULT; user.auth_type = server->auth.auth_type; outl = user.object_name_len; user.object_name_len = server->auth.object_name_len; if (outl > user.object_name_len) outl = user.object_name_len; if (outl) { if (copy_to_user(compat_ptr(user.object_name), server->auth.object_name, outl)) return -EFAULT; } if (copy_to_user(argp, &user, sizeof(user))) return -EFAULT; return 0; } #endif case NCP_IOC_GETOBJECTNAME: if (uid != server->m.mounted_uid) return -EACCES; { struct ncp_objectname_ioctl user; size_t outl; if (copy_from_user(&user, argp, sizeof(user))) return -EFAULT; user.auth_type = server->auth.auth_type; outl = user.object_name_len; user.object_name_len = server->auth.object_name_len; if (outl > user.object_name_len) outl = user.object_name_len; if (outl) { if (copy_to_user(user.object_name, server->auth.object_name, outl)) return -EFAULT; } if (copy_to_user(argp, &user, sizeof(user))) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT case NCP_IOC_SETOBJECTNAME_32: #endif case NCP_IOC_SETOBJECTNAME: if (uid != server->m.mounted_uid) return -EACCES; { struct ncp_objectname_ioctl user; void* newname; void* oldname; size_t oldnamelen; void* oldprivate; size_t oldprivatelen; #ifdef CONFIG_COMPAT if (cmd == NCP_IOC_SETOBJECTNAME_32) { struct compat_ncp_objectname_ioctl user32; if (copy_from_user(&user32, argp, sizeof(user32))) return -EFAULT; user.auth_type = user32.auth_type; user.object_name_len = user32.object_name_len; user.object_name = compat_ptr(user32.object_name); } else #endif if (copy_from_user(&user, argp, sizeof(user))) return -EFAULT; if (user.object_name_len > NCP_OBJECT_NAME_MAX_LEN) return -ENOMEM; if (user.object_name_len) { newname = kmalloc(user.object_name_len, GFP_USER); if (!newname) return -ENOMEM; if (copy_from_user(newname, user.object_name, user.object_name_len)) { kfree(newname); return -EFAULT; } } else { newname = NULL; } /* enter critical section */ /* maybe that kfree can sleep so do that this way */ /* it is at least more SMP friendly 
(in future...) */ oldname = server->auth.object_name; oldnamelen = server->auth.object_name_len; oldprivate = server->priv.data; oldprivatelen = server->priv.len; server->auth.auth_type = user.auth_type; server->auth.object_name_len = user.object_name_len; server->auth.object_name = newname; server->priv.len = 0; server->priv.data = NULL; /* leave critical section */ kfree(oldprivate); kfree(oldname); return 0; } #ifdef CONFIG_COMPAT case NCP_IOC_GETPRIVATEDATA_32: #endif case NCP_IOC_GETPRIVATEDATA: if (uid != server->m.mounted_uid) return -EACCES; { struct ncp_privatedata_ioctl user; size_t outl; #ifdef CONFIG_COMPAT if (cmd == NCP_IOC_GETPRIVATEDATA_32) { struct compat_ncp_privatedata_ioctl user32; if (copy_from_user(&user32, argp, sizeof(user32))) return -EFAULT; user.len = user32.len; user.data = compat_ptr(user32.data); } else #endif if (copy_from_user(&user, argp, sizeof(user))) return -EFAULT; outl = user.len; user.len = server->priv.len; if (outl > user.len) outl = user.len; if (outl) { if (copy_to_user(user.data, server->priv.data, outl)) return -EFAULT; } #ifdef CONFIG_COMPAT if (cmd == NCP_IOC_GETPRIVATEDATA_32) { struct compat_ncp_privatedata_ioctl user32; user32.len = user.len; user32.data = (unsigned long) user.data; if (copy_to_user(argp, &user32, sizeof(user32))) return -EFAULT; } else #endif if (copy_to_user(argp, &user, sizeof(user))) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT case NCP_IOC_SETPRIVATEDATA_32: #endif case NCP_IOC_SETPRIVATEDATA: if (uid != server->m.mounted_uid) return -EACCES; { struct ncp_privatedata_ioctl user; void* new; void* old; size_t oldlen; #ifdef CONFIG_COMPAT if (cmd == NCP_IOC_SETPRIVATEDATA_32) { struct compat_ncp_privatedata_ioctl user32; if (copy_from_user(&user32, argp, sizeof(user32))) return -EFAULT; user.len = user32.len; user.data = compat_ptr(user32.data); } else #endif if (copy_from_user(&user, argp, sizeof(user))) return -EFAULT; if (user.len > NCP_PRIVATE_DATA_MAX_LEN) return -ENOMEM; if (user.len) { 
new = kmalloc(user.len, GFP_USER); if (!new) return -ENOMEM; if (copy_from_user(new, user.data, user.len)) { kfree(new); return -EFAULT; } } else { new = NULL; } /* enter critical section */ old = server->priv.data; oldlen = server->priv.len; server->priv.len = user.len; server->priv.data = new; /* leave critical section */ kfree(old); return 0; } #ifdef CONFIG_NCPFS_NLS case NCP_IOC_SETCHARSETS: return ncp_set_charsets(server, argp); case NCP_IOC_GETCHARSETS: return ncp_get_charsets(server, argp); #endif /* CONFIG_NCPFS_NLS */ case NCP_IOC_SETDENTRYTTL: if (file_permission(filp, MAY_WRITE) != 0 && uid != server->m.mounted_uid) return -EACCES; { u_int32_t user; if (copy_from_user(&user, argp, sizeof(user))) return -EFAULT; /* 20 secs at most... */ if (user > 20000) return -EINVAL; user = (user * HZ) / 1000; server->dentry_ttl = user; return 0; } case NCP_IOC_GETDENTRYTTL: { u_int32_t user = (server->dentry_ttl * 1000) / HZ; if (copy_to_user(argp, &user, sizeof(user))) return -EFAULT; return 0; } } return -EINVAL; } static int ncp_ioctl_need_write(unsigned int cmd) { switch (cmd) { case NCP_IOC_GET_FS_INFO: case NCP_IOC_GET_FS_INFO_V2: case NCP_IOC_NCPREQUEST: case NCP_IOC_SETDENTRYTTL: case NCP_IOC_SIGN_INIT: case NCP_IOC_LOCKUNLOCK: case NCP_IOC_SET_SIGN_WANTED: return 1; case NCP_IOC_GETOBJECTNAME: case NCP_IOC_SETOBJECTNAME: case NCP_IOC_GETPRIVATEDATA: case NCP_IOC_SETPRIVATEDATA: case NCP_IOC_SETCHARSETS: case NCP_IOC_GETCHARSETS: case NCP_IOC_CONN_LOGGED_IN: case NCP_IOC_GETDENTRYTTL: case NCP_IOC_GETMOUNTUID2: case NCP_IOC_SIGN_WANTED: case NCP_IOC_GETROOT: case NCP_IOC_SETROOT: return 0; default: /* unkown IOCTL command, assume write */ return 1; } } int ncp_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { int ret; if (ncp_ioctl_need_write(cmd)) { /* * inside the ioctl(), any failures which * are because of file_permission() are * -EACCESS, so it seems consistent to keep * that here. 
*/ if (mnt_want_write(filp->f_path.mnt)) return -EACCES; } ret = __ncp_ioctl(inode, filp, cmd, arg); if (ncp_ioctl_need_write(cmd)) mnt_drop_write(filp->f_path.mnt); return ret; } #ifdef CONFIG_COMPAT long ncp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file->f_path.dentry->d_inode; int ret; lock_kernel(); arg = (unsigned long) compat_ptr(arg); ret = ncp_ioctl(inode, file, cmd, arg); unlock_kernel(); return ret; } #endif
gpl-2.0
Philippe12/linux-3.4-a20
drivers/net/wireless/rtl8189es/core/rtw_mp_ioctl.c
139
90538
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #define _RTW_MP_IOCTL_C_ #include <drv_conf.h> #include <osdep_service.h> #include <drv_types.h> #include <mlme_osdep.h> //#include <rtw_mp.h> #include <rtw_mp_ioctl.h> //**************** oid_rtl_seg_81_85 section start **************** NDIS_STATUS oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv) { NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; if (poid_par_priv->information_buf_len < sizeof(u8)) return NDIS_STATUS_INVALID_LENGTH; if (poid_par_priv->type_of_oid == SET_OID) { Adapter->registrypriv.wireless_mode = *(u8*)poid_par_priv->information_buf; } else if (poid_par_priv->type_of_oid == QUERY_OID) { *(u8*)poid_par_priv->information_buf = Adapter->registrypriv.wireless_mode; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_info_, ("-query Wireless Mode=%d\n", Adapter->registrypriv.wireless_mode)); } else { status = NDIS_STATUS_NOT_ACCEPTED; } _func_exit_; return status; } //**************** oid_rtl_seg_81_87_80 section start **************** NDIS_STATUS 
oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif struct bb_reg_param *pbbreg; u16 offset; u32 value; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_write_bb_reg_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param)) return NDIS_STATUS_INVALID_LENGTH; pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf); offset = (u16)(pbbreg->offset) & 0xFFF; //0ffset :0x800~0xfff if (offset < BB_REG_BASE_ADDR) offset |= BB_REG_BASE_ADDR; value = pbbreg->value; RT_TRACE(_module_mp_, _drv_notice_, ("oid_rt_pro_write_bb_reg_hdl: offset=0x%03X value=0x%08X\n", offset, value)); _irqlevel_changed_(&oldirql, LOWER); write_bbreg(Adapter, offset, 0xFFFFFFFF, value); _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif struct bb_reg_param *pbbreg; u16 offset; u32 value; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_read_bb_reg_hdl\n")); if (poid_par_priv->type_of_oid != QUERY_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param)) return NDIS_STATUS_INVALID_LENGTH; pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf); offset = (u16)(pbbreg->offset) & 0xFFF; //0ffset :0x800~0xfff if (offset < BB_REG_BASE_ADDR) offset |= BB_REG_BASE_ADDR; _irqlevel_changed_(&oldirql, LOWER); value = read_bbreg(Adapter, offset, 0xFFFFFFFF); _irqlevel_changed_(&oldirql, RAISE); pbbreg->value = value; *poid_par_priv->bytes_rw 
= poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_notice_, ("-oid_rt_pro_read_bb_reg_hdl: offset=0x%03X value:0x%08X\n", offset, value)); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif struct rf_reg_param *pbbreg; u8 path; u8 offset; u32 value; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_write_rf_reg_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(struct rf_reg_param)) return NDIS_STATUS_INVALID_LENGTH; pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf); if (pbbreg->path >= MAX_RF_PATH_NUMS) return NDIS_STATUS_NOT_ACCEPTED; if (pbbreg->offset > 0xFF) return NDIS_STATUS_NOT_ACCEPTED; if (pbbreg->value > 0xFFFFF) return NDIS_STATUS_NOT_ACCEPTED; path = (u8)pbbreg->path; offset = (u8)pbbreg->offset; value = pbbreg->value; RT_TRACE(_module_mp_, _drv_notice_, ("oid_rt_pro_write_rf_reg_hdl: path=%d offset=0x%02X value=0x%05X\n", path, offset, value)); _irqlevel_changed_(&oldirql, LOWER); write_rfreg(Adapter, path, offset, value); _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif struct rf_reg_param *pbbreg; u8 path; u8 offset; u32 value; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); NDIS_STATUS status = NDIS_STATUS_SUCCESS; _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_read_rf_reg_hdl\n")); if (poid_par_priv->type_of_oid != QUERY_OID) return NDIS_STATUS_NOT_ACCEPTED; if 
(poid_par_priv->information_buf_len < sizeof(struct rf_reg_param)) return NDIS_STATUS_INVALID_LENGTH; pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf); if (pbbreg->path >= MAX_RF_PATH_NUMS) return NDIS_STATUS_NOT_ACCEPTED; if (pbbreg->offset > 0xFF) return NDIS_STATUS_NOT_ACCEPTED; path = (u8)pbbreg->path; offset = (u8)pbbreg->offset; _irqlevel_changed_(&oldirql, LOWER); value = read_rfreg(Adapter, path, offset); _irqlevel_changed_(&oldirql, RAISE); pbbreg->value = value; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_notice_, ("-oid_rt_pro_read_rf_reg_hdl: path=%d offset=0x%02X value=0x%05X\n", path, offset, value)); _func_exit_; return status; } //**************** oid_rtl_seg_81_87_00 section end**************** //------------------------------------------------------------------------------ //**************** oid_rtl_seg_81_80_00 section start **************** //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u32 ratevalue;//4 NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_set_data_rate_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len != sizeof(u32)) return NDIS_STATUS_INVALID_LENGTH; ratevalue = *((u32*)poid_par_priv->information_buf);//4 RT_TRACE(_module_mp_, _drv_notice_, ("oid_rt_pro_set_data_rate_hdl: data rate idx=%d\n", ratevalue)); if (ratevalue >= MPT_RATE_LAST) return NDIS_STATUS_INVALID_DATA; Adapter->mppriv.rateidx = ratevalue; _irqlevel_changed_(&oldirql, LOWER); SetDataRate(Adapter); _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS 
oid_rt_pro_start_test_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u32 mode; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_start_test_hdl\n")); if (Adapter->registrypriv.mp_mode == 0) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; _irqlevel_changed_(&oldirql, LOWER); //IQCalibrateBcut(Adapter); mode = *((u32*)poid_par_priv->information_buf); Adapter->mppriv.mode = mode;// 1 for loopback if (mp_start_test(Adapter) == _FAIL) { status = NDIS_STATUS_NOT_ACCEPTED; goto exit; } exit: _irqlevel_changed_(&oldirql, RAISE); RT_TRACE(_module_mp_, _drv_notice_, ("-oid_rt_pro_start_test_hdl: mp_mode=%d\n", Adapter->mppriv.mode)); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+Set OID_RT_PRO_STOP_TEST\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; _irqlevel_changed_(&oldirql, LOWER); mp_stop_test(Adapter); _irqlevel_changed_(&oldirql, RAISE); RT_TRACE(_module_mp_, _drv_notice_, ("-Set OID_RT_PRO_STOP_TEST\n")); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u32 Channel; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_set_channel_direct_call_hdl\n")); if 
(poid_par_priv->information_buf_len != sizeof(u32)) return NDIS_STATUS_INVALID_LENGTH; if (poid_par_priv->type_of_oid == QUERY_OID) { *((u32*)poid_par_priv->information_buf) = Adapter->mppriv.channel; return NDIS_STATUS_SUCCESS; } if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; Channel = *((u32*)poid_par_priv->information_buf); RT_TRACE(_module_mp_, _drv_notice_, ("oid_rt_pro_set_channel_direct_call_hdl: Channel=%d\n", Channel)); if (Channel > 14) return NDIS_STATUS_NOT_ACCEPTED; Adapter->mppriv.channel = Channel; _irqlevel_changed_(&oldirql, LOWER); SetChannel(Adapter); _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u16 bandwidth; u16 channel_offset; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER padapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_info_, ("+oid_rt_set_bandwidth_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u32)) return NDIS_STATUS_INVALID_LENGTH; bandwidth = *((u32*)poid_par_priv->information_buf);//4 channel_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE; if (bandwidth != HT_CHANNEL_WIDTH_40) bandwidth = HT_CHANNEL_WIDTH_20; padapter->mppriv.bandwidth = (u8)bandwidth; padapter->mppriv.prime_channel_offset = (u8)channel_offset; _irqlevel_changed_(&oldirql, LOWER); SetBandwidth(padapter); _irqlevel_changed_(&oldirql, RAISE); RT_TRACE(_module_mp_, _drv_notice_, ("-oid_rt_set_bandwidth_hdl: bandwidth=%d channel_offset=%d\n", bandwidth, channel_offset)); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; 
#endif u32 antenna; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_set_antenna_bb_hdl\n")); if (poid_par_priv->information_buf_len != sizeof(u32)) return NDIS_STATUS_INVALID_LENGTH; if (poid_par_priv->type_of_oid == SET_OID) { antenna = *(u32*)poid_par_priv->information_buf; Adapter->mppriv.antenna_tx = (u16)((antenna & 0xFFFF0000) >> 16); Adapter->mppriv.antenna_rx = (u16)(antenna & 0x0000FFFF); RT_TRACE(_module_mp_, _drv_notice_, ("oid_rt_pro_set_antenna_bb_hdl: tx_ant=0x%04x rx_ant=0x%04x\n", Adapter->mppriv.antenna_tx, Adapter->mppriv.antenna_rx)); _irqlevel_changed_(&oldirql, LOWER); SetAntenna(Adapter); _irqlevel_changed_(&oldirql, RAISE); } else { antenna = (Adapter->mppriv.antenna_tx << 16)|Adapter->mppriv.antenna_rx; *(u32*)poid_par_priv->information_buf = antenna; } _func_exit_; return status; } NDIS_STATUS oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u32 tx_pwr_idx; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_info_, ("+oid_rt_pro_set_tx_power_control_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len != sizeof(u32)) return NDIS_STATUS_INVALID_LENGTH; tx_pwr_idx = *((u32*)poid_par_priv->information_buf); if (tx_pwr_idx > MAX_TX_PWR_INDEX_N_MODE) return NDIS_STATUS_NOT_ACCEPTED; Adapter->mppriv.txpoweridx = (u8)tx_pwr_idx; RT_TRACE(_module_mp_, _drv_notice_, ("oid_rt_pro_set_tx_power_control_hdl: idx=0x%2x\n", Adapter->mppriv.txpoweridx)); _irqlevel_changed_(&oldirql, LOWER); SetTxPower(Adapter); _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } //------------------------------------------------------------------------------ //**************** oid_rtl_seg_81_80_20 section 
start **************** //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv) { NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; if (poid_par_priv->type_of_oid !=QUERY_OID) { status = NDIS_STATUS_NOT_ACCEPTED; return status; } if (poid_par_priv->information_buf_len == sizeof(ULONG)) { *(ULONG*)poid_par_priv->information_buf = Adapter->mppriv.tx_pktcount; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; } else { status = NDIS_STATUS_INVALID_LENGTH; } _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv) { NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; if (poid_par_priv->type_of_oid != QUERY_OID) { status = NDIS_STATUS_NOT_ACCEPTED; return status; } RT_TRACE(_module_mp_, _drv_alert_, ("===> oid_rt_pro_query_rx_packet_received_hdl.\n")); if (poid_par_priv->information_buf_len == sizeof(ULONG)) { *(ULONG*)poid_par_priv->information_buf = Adapter->mppriv.rx_pktcount; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_alert_, ("recv_ok:%d \n",Adapter->mppriv.rx_pktcount)); } else { status = NDIS_STATUS_INVALID_LENGTH; } _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv) { NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; if (poid_par_priv->type_of_oid != QUERY_OID) { status = NDIS_STATUS_NOT_ACCEPTED; return status; } RT_TRACE(_module_mp_, _drv_alert_, ("===> oid_rt_pro_query_rx_packet_crc32_error_hdl.\n")); if 
(poid_par_priv->information_buf_len == sizeof(ULONG)) { *(ULONG*)poid_par_priv->information_buf = Adapter->mppriv.rx_crcerrpktcount; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_alert_, ("recv_err:%d \n",Adapter->mppriv.rx_crcerrpktcount)); } else { status = NDIS_STATUS_INVALID_LENGTH; } _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv) { NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; if (poid_par_priv->type_of_oid != SET_OID) { status = NDIS_STATUS_NOT_ACCEPTED; return status; } RT_TRACE(_module_mp_, _drv_alert_, ("===> oid_rt_pro_reset_tx_packet_sent_hdl.\n")); Adapter->mppriv.tx_pktcount = 0; _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv) { NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; if (poid_par_priv->type_of_oid != SET_OID) { status = NDIS_STATUS_NOT_ACCEPTED; return status; } if (poid_par_priv->information_buf_len == sizeof(ULONG)) { Adapter->mppriv.rx_pktcount = 0; Adapter->mppriv.rx_crcerrpktcount = 0; } else { status = NDIS_STATUS_INVALID_LENGTH; } _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; if (poid_par_priv->type_of_oid != SET_OID) { status = NDIS_STATUS_NOT_ACCEPTED; return status; } _irqlevel_changed_(&oldirql, LOWER); ResetPhyRxPktCount(Adapter); 
_irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_info_, ("+oid_rt_get_phy_rx_packet_received_hdl\n")); if (poid_par_priv->type_of_oid != QUERY_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len != sizeof(ULONG)) return NDIS_STATUS_INVALID_LENGTH; _irqlevel_changed_(&oldirql, LOWER); *(ULONG*)poid_par_priv->information_buf = GetPhyRxPktReceived(Adapter); _irqlevel_changed_(&oldirql, RAISE); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_notice_, ("-oid_rt_get_phy_rx_packet_received_hdl: recv_ok=%d\n", *(ULONG*)poid_par_priv->information_buf)); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_info_, ("+oid_rt_get_phy_rx_packet_crc32_error_hdl\n")); if (poid_par_priv->type_of_oid != QUERY_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len != sizeof(ULONG)) return NDIS_STATUS_INVALID_LENGTH; _irqlevel_changed_(&oldirql, LOWER); *(ULONG*)poid_par_priv->information_buf = GetPhyRxPktCRC32Error(Adapter); _irqlevel_changed_(&oldirql, RAISE); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_info_, ("-oid_rt_get_phy_rx_packet_crc32_error_hdl: recv_err=%d\n", *(ULONG*)poid_par_priv->information_buf)); _func_exit_; return status; } 
//**************** oid_rtl_seg_81_80_20 section end **************** NDIS_STATUS oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u32 bStartTest; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_set_continuous_tx_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; bStartTest = *((u32*)poid_par_priv->information_buf); _irqlevel_changed_(&oldirql, LOWER); SetContinuousTx(Adapter,(u8)bStartTest); if (bStartTest) { struct mp_priv *pmp_priv = &Adapter->mppriv; if (pmp_priv->tx.stop == 0) { pmp_priv->tx.stop = 1; DBG_871X("%s: pkt tx is running...\n", __func__); rtw_msleep_os(5); } pmp_priv->tx.stop = 0; pmp_priv->tx.count = 1; SetPacketTx(Adapter); } _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } NDIS_STATUS oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u32 bStartTest; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_alert_, ("+oid_rt_pro_set_single_carrier_tx_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; bStartTest = *((u32*)poid_par_priv->information_buf); _irqlevel_changed_(&oldirql, LOWER); SetSingleCarrierTx(Adapter, (u8)bStartTest); if (bStartTest) { struct mp_priv *pmp_priv = &Adapter->mppriv; if (pmp_priv->tx.stop == 0) { pmp_priv->tx.stop = 1; DBG_871X("%s: pkt tx is running...\n", __func__); rtw_msleep_os(5); } pmp_priv->tx.stop = 0; pmp_priv->tx.count = 1; SetPacketTx(Adapter); } _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } NDIS_STATUS oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u32 bStartTest; NDIS_STATUS status = 
NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_set_carrier_suppression_tx_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; bStartTest = *((u32*)poid_par_priv->information_buf); _irqlevel_changed_(&oldirql, LOWER); SetCarrierSuppressionTx(Adapter, (u8)bStartTest); if (bStartTest) { struct mp_priv *pmp_priv = &Adapter->mppriv; if (pmp_priv->tx.stop == 0) { pmp_priv->tx.stop = 1; DBG_871X("%s: pkt tx is running...\n", __func__); rtw_msleep_os(5); } pmp_priv->tx.stop = 0; pmp_priv->tx.count = 1; SetPacketTx(Adapter); } _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } NDIS_STATUS oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u32 bStartTest; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_alert_, ("+oid_rt_pro_set_single_tone_tx_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; bStartTest = *((u32*)poid_par_priv->information_buf); _irqlevel_changed_(&oldirql, LOWER); SetSingleToneTx(Adapter,(u8)bStartTest); _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } NDIS_STATUS oid_rt_pro_set_modulation_hdl(struct oid_par_priv* poid_par_priv) { return 0; } NDIS_STATUS oid_rt_pro_trigger_gpio_hdl(struct oid_par_priv *poid_par_priv) { PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; _func_enter_; if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; _irqlevel_changed_(&oldirql, LOWER); rtw_hal_set_hwreg(Adapter, HW_VAR_TRIGGER_GPIO_0, 0); _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } //**************** oid_rtl_seg_81_80_00 section end **************** 
//------------------------------------------------------------------------------
/* Disabled (#if 0): would start a join-BSS in MP mode; currently returns 0. */
NDIS_STATUS oid_rt_pro8711_join_bss_hdl(struct oid_par_priv *poid_par_priv)
{
#if 0
	PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context);
#ifdef PLATFORM_OS_XP
	_irqL oldirql;
#endif
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;
	PNDIS_802_11_SSID pssid;

	_func_enter_;

	if (poid_par_priv->type_of_oid != SET_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

	*poid_par_priv->bytes_needed = (u32)sizeof(NDIS_802_11_SSID);
	*poid_par_priv->bytes_rw = 0;
	if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed)
		return NDIS_STATUS_INVALID_LENGTH;

	pssid = (PNDIS_802_11_SSID)poid_par_priv->information_buf;

	_irqlevel_changed_(&oldirql, LOWER);
	if (mp_start_joinbss(Adapter, pssid) == _FAIL)
		status = NDIS_STATUS_NOT_ACCEPTED;
	_irqlevel_changed_(&oldirql, RAISE);

	*poid_par_priv->bytes_rw = sizeof(NDIS_802_11_SSID);

	_func_exit_;

	return status;
#else
	return 0;
#endif
}
//------------------------------------------------------------------------------
/* QUERY handler: read a MAC register of width 1/2/4 bytes at the offset
 * given in the caller-supplied RW_Reg structure. Offsets above 0xFFF are
 * rejected; unknown widths fall back to a 32-bit read. */
NDIS_STATUS oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv)
{
#ifdef PLATFORM_OS_XP
	_irqL oldirql;
#endif
	pRW_Reg RegRWStruct;
	u32 offset, width;
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;
	PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context);

	_func_enter_;

	RT_TRACE(_module_mp_, _drv_info_, ("+oid_rt_pro_read_register_hdl\n"));

	if (poid_par_priv->type_of_oid != QUERY_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

	RegRWStruct = (pRW_Reg)poid_par_priv->information_buf;
	offset = RegRWStruct->offset;
	width = RegRWStruct->width;

	/* Register space is limited to 0x000..0xFFF. */
	if (offset > 0xFFF)
		return NDIS_STATUS_NOT_ACCEPTED;

	_irqlevel_changed_(&oldirql, LOWER);

	switch (width) {
	case 1:
		RegRWStruct->value = rtw_read8(Adapter, offset);
		break;
	case 2:
		RegRWStruct->value = rtw_read16(Adapter, offset);
		break;
	default:
		/* Any other width is treated as a 4-byte read. */
		width = 4;
		RegRWStruct->value = rtw_read32(Adapter, offset);
		break;
	}

	RT_TRACE(_module_mp_, _drv_notice_, ("oid_rt_pro_read_register_hdl: offset:0x%04X value:0x%X\n", offset,
			RegRWStruct->value));

	_irqlevel_changed_(&oldirql, RAISE);

	/* Report the number of bytes actually read. */
	*poid_par_priv->bytes_rw = width;

	_func_exit_;

	return status;
}
//------------------------------------------------------------------------------
/* SET handler: write a MAC register of width 1/2/4 bytes. Values that do
 * not fit the requested width, unknown widths, or offsets above 0xFFF are
 * rejected with NDIS_STATUS_NOT_ACCEPTED. */
NDIS_STATUS oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv)
{
#ifdef PLATFORM_OS_XP
	_irqL oldirql;
#endif
	pRW_Reg RegRWStruct;
	u32 offset, width, value;
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;
	PADAPTER padapter = (PADAPTER)(poid_par_priv->adapter_context);

	_func_enter_;

	RT_TRACE(_module_mp_, _drv_info_, ("+oid_rt_pro_write_register_hdl\n"));

	if (poid_par_priv->type_of_oid != SET_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

	RegRWStruct = (pRW_Reg)poid_par_priv->information_buf;
	offset = RegRWStruct->offset;
	width = RegRWStruct->width;
	value = RegRWStruct->value;

	if (offset > 0xFFF)
		return NDIS_STATUS_NOT_ACCEPTED;

	_irqlevel_changed_(&oldirql, LOWER);

	switch (RegRWStruct->width) {
	case 1:
		/* Value must fit in one byte. */
		if (value > 0xFF) {
			status = NDIS_STATUS_NOT_ACCEPTED;
			break;
		}
		rtw_write8(padapter, offset, (u8)value);
		break;
	case 2:
		/* Value must fit in two bytes. */
		if (value > 0xFFFF) {
			status = NDIS_STATUS_NOT_ACCEPTED;
			break;
		}
		rtw_write16(padapter, offset, (u16)value);
		break;
	case 4:
		rtw_write32(padapter, offset, value);
		break;
	default:
		status = NDIS_STATUS_NOT_ACCEPTED;
		break;
	}

	_irqlevel_changed_(&oldirql, RAISE);

	RT_TRACE(_module_mp_, _drv_info_, ("-oid_rt_pro_write_register_hdl: offset=0x%08X width=%d value=0x%X\n", offset, width, value));

	_func_exit_;

	return status;
}
//------------------------------------------------------------------------------
/* Disabled (#if 0) burst register read; currently returns 0.
 * The body of the #if 0 branch continues past the end of this block. */
NDIS_STATUS oid_rt_pro_burst_read_register_hdl(struct oid_par_priv *poid_par_priv)
{
#if 0
#ifdef PLATFORM_OS_XP
	_irqL oldirql;
#endif
	pBurst_RW_Reg pBstRwReg;
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;
	PADAPTER padapter = (PADAPTER)(poid_par_priv->adapter_context);

	_func_enter_;

	RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_burst_read_register_hdl\n"));

	if (poid_par_priv->type_of_oid != QUERY_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

	pBstRwReg =
(pBurst_RW_Reg)poid_par_priv->information_buf; _irqlevel_changed_(&oldirql, LOWER); rtw_read_mem(padapter, pBstRwReg->offset, (u32)pBstRwReg->len, pBstRwReg->Data); _irqlevel_changed_(&oldirql, RAISE); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_info_, ("-oid_rt_pro_burst_read_register_hdl\n")); _func_exit_; return status; #else return 0; #endif } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_burst_write_register_hdl(struct oid_par_priv *poid_par_priv) { #if 0 #ifdef PLATFORM_OS_XP _irqL oldirql; #endif pBurst_RW_Reg pBstRwReg; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER padapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_burst_write_register_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; pBstRwReg = (pBurst_RW_Reg)poid_par_priv->information_buf; _irqlevel_changed_(&oldirql, LOWER); rtw_write_mem(padapter, pBstRwReg->offset, (u32)pBstRwReg->len, pBstRwReg->Data); _irqlevel_changed_(&oldirql, RAISE); RT_TRACE(_module_mp_, _drv_info_, ("-oid_rt_pro_burst_write_register_hdl\n")); _func_exit_; return status; #else return 0; #endif } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_write_txcmd_hdl(struct oid_par_priv *poid_par_priv) { #if 0 NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)( poid_par_priv->adapter_context); #ifdef PLATFORM_OS_XP _irqL oldirql; #endif TX_CMD_Desc *TxCmd_Info; _func_enter_; if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; RT_TRACE(_module_mp_, _drv_info_, ("+Set OID_RT_PRO_WRITE_TXCMD\n")); TxCmd_Info=(TX_CMD_Desc*)poid_par_priv->information_buf; RT_TRACE(_module_mp_, _drv_info_, ("WRITE_TXCMD:Addr=%.8X\n", TxCmd_Info->offset)); RT_TRACE(_module_mp_, _drv_info_, ("WRITE_TXCMD:1.)%.8X\n", (ULONG)TxCmd_Info->TxCMD.value[0])); 
RT_TRACE(_module_mp_, _drv_info_, ("WRITE_TXCMD:2.)%.8X\n", (ULONG)TxCmd_Info->TxCMD.value[1])); RT_TRACE(_module_mp_, _drv_info_, (("WRITE_TXCMD:3.)%.8X\n", (ULONG)TxCmd_Info->TxCMD.value[2])); RT_TRACE(_module_mp_, _drv_info_, ("WRITE_TXCMD:4.)%.8X\n", (ULONG)TxCmd_Info->TxCMD.value[3])); _irqlevel_changed_(&oldirql, LOWER); rtw_write32(Adapter, TxCmd_Info->offset + 0, (unsigned int)TxCmd_Info->TxCMD.value[0]); rtw_write32(Adapter, TxCmd_Info->offset + 4, (unsigned int)TxCmd_Info->TxCMD.value[1]); _irqlevel_changed_(&oldirql, RAISE); RT_TRACE(_module_mp_, _drv_notice_, ("-Set OID_RT_PRO_WRITE_TXCMD: status=0x%08X\n", status)); _func_exit_; return status; #else return 0; #endif } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv) { #if 0 #ifdef PLATFORM_OS_XP _irqL oldirql; #endif pEEPROM_RWParam pEEPROM; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER padapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_info_, ("+Query OID_RT_PRO_READ16_EEPROM\n")); if (poid_par_priv->type_of_oid != QUERY_OID) return NDIS_STATUS_NOT_ACCEPTED; pEEPROM = (pEEPROM_RWParam)poid_par_priv->information_buf; _irqlevel_changed_(&oldirql, LOWER); pEEPROM->value = eeprom_read16(padapter, (u16)(pEEPROM->offset >> 1)); _irqlevel_changed_(&oldirql, RAISE); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_notice_, ("-Query OID_RT_PRO_READ16_EEPROM: offset=0x%x value=0x%x\n", pEEPROM->offset, pEEPROM->value)); _func_exit_; return status; #else return 0; #endif } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_write16_eeprom_hdl (struct oid_par_priv *poid_par_priv) { #if 0 #ifdef PLATFORM_OS_XP _irqL oldirql; #endif pEEPROM_RWParam pEEPROM; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER padapter = (PADAPTER)(poid_par_priv->adapter_context); 
_func_enter_;

	RT_TRACE(_module_mp_, _drv_notice_, ("+Set OID_RT_PRO_WRITE16_EEPROM\n"));

	if (poid_par_priv->type_of_oid != SET_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

	pEEPROM = (pEEPROM_RWParam)poid_par_priv->information_buf;

	_irqlevel_changed_(&oldirql, LOWER);
	/* The byte offset is halved into a 16-bit word address. */
	eeprom_write16(padapter, (u16)(pEEPROM->offset >> 1), pEEPROM->value);
	_irqlevel_changed_(&oldirql, RAISE);

	*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;

	_func_exit_;

	return status;
#else
	return 0;
#endif
}
//------------------------------------------------------------------------------
/* Disabled (#if 0): would poll the MP work-item result; currently returns 0. */
NDIS_STATUS oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv)
{
#if 0
	PADAPTER Adapter = (PADAPTER)( poid_par_priv->adapter_context);
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;
	struct mp_wiparam *pwi_param;

	_func_enter_;

	if (poid_par_priv->type_of_oid != QUERY_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

	if (poid_par_priv->information_buf_len < sizeof(struct mp_wiparam))
		return NDIS_STATUS_INVALID_LENGTH;

	/* Nothing to report until the work item completes. */
	if (Adapter->mppriv.workparam.bcompleted == _FALSE)
		return NDIS_STATUS_NOT_ACCEPTED;

	pwi_param = (struct mp_wiparam *)poid_par_priv->information_buf;

	_rtw_memcpy(pwi_param, &Adapter->mppriv.workparam, sizeof(struct mp_wiparam));
	Adapter->mppriv.act_in_progress = _FALSE;
//	RT_TRACE(_module_mp_, _drv_info_, ("rf:%x\n", pwiparam->IoValue));
	*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;

	_func_exit_;

	return status;
#else
	return 0;
#endif
}
//------------------------------------------------------------------------------
/* Disabled (#if 0): would report (and optionally reset) the RX packet-loss
 * counter; currently returns 0. */
NDIS_STATUS oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv)
{
#if 0
	PADAPTER Adapter = (PADAPTER)( poid_par_priv->adapter_context);
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;

	_func_enter_;

	RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro8711_pkt_loss_hdl\n"));

	if (poid_par_priv->type_of_oid != QUERY_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

	/* Buffer layout: [0] = init flag (in), [1] = loss count (out). */
	if (poid_par_priv->information_buf_len < sizeof(uint)*2) {
		RT_TRACE(_module_mp_, _drv_err_, ("-oid_rt_pro8711_pkt_loss_hdl: buf_len=%d\n", (int)poid_par_priv->information_buf_len));
		return NDIS_STATUS_INVALID_LENGTH;
	}

	if (*(uint*)poid_par_priv->information_buf == 1)//init==1
		Adapter->mppriv.rx_pktloss = 0;

	*((uint*)poid_par_priv->information_buf+1) = Adapter->mppriv.rx_pktloss;
	*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;

	_func_exit_;

	return status;
#else
	return 0;
#endif
}
//------------------------------------------------------------------------------
/* Disabled (#if 0): would read SDIO attribute memory; currently returns 0. */
NDIS_STATUS oid_rt_rd_attrib_mem_hdl(struct oid_par_priv *poid_par_priv)
{
#if 0
	PADAPTER Adapter = (PADAPTER)( poid_par_priv->adapter_context);
	struct io_queue *pio_queue = (struct io_queue *)Adapter->pio_queue;
	struct intf_hdl *pintfhdl = &pio_queue->intf;
#ifdef PLATFORM_OS_XP
	_irqL oldirql;
#endif
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;
#ifdef CONFIG_SDIO_HCI
	void (*_attrib_read)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
#endif

	_func_enter_;

	RT_TRACE(_module_mp_, _drv_notice_, ("+Query OID_RT_RD_ATTRIB_MEM\n"));

	if (poid_par_priv->type_of_oid != QUERY_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

#ifdef CONFIG_SDIO_HCI
	_irqlevel_changed_(&oldirql, LOWER);
	{
		/* Buffer layout: [0] = address, [1] = count, [2...] = data out. */
		u32 *plmem = (u32*)poid_par_priv->information_buf+2;
		_attrib_read = pintfhdl->io_ops._attrib_read;
		_attrib_read(pintfhdl, *((u32*)poid_par_priv->information_buf), *((u32*)poid_par_priv->information_buf+1), (u8*)plmem);
		*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
	}
	_irqlevel_changed_(&oldirql, RAISE);
#endif

	_func_exit_;

	return status;
#else
	return 0;
#endif
}
//------------------------------------------------------------------------------
/* Disabled (#if 0): would write SDIO attribute memory; the declaration of
 * the function pointer below continues past the end of this block. */
NDIS_STATUS oid_rt_wr_attrib_mem_hdl (struct oid_par_priv *poid_par_priv)
{
#if 0
	PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context);
	struct io_queue *pio_queue = (struct io_queue *)Adapter->pio_queue;
	struct intf_hdl *pintfhdl = &pio_queue->intf;
#ifdef PLATFORM_OS_XP
	_irqL oldirql;
#endif
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;
#ifdef CONFIG_SDIO_HCI
	void (*_attrib_write)(struct
intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
#endif

	_func_enter_;

	if (poid_par_priv->type_of_oid != SET_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

#ifdef CONFIG_SDIO_HCI
	_irqlevel_changed_(&oldirql, LOWER);
	{
		/* Buffer layout: [0] = address, [1] = count, [2...] = data in. */
		u32 *plmem = (u32*)poid_par_priv->information_buf + 2;
		_attrib_write = pintfhdl->io_ops._attrib_write;
		_attrib_write(pintfhdl, *(u32*)poid_par_priv->information_buf, *((u32*)poid_par_priv->information_buf+1), (u8*)plmem);
	}
	_irqlevel_changed_(&oldirql, RAISE);
#endif

	_func_exit_;

	return status;
#else
	return 0;
#endif
}
//------------------------------------------------------------------------------
/* Disabled (#if 0): would select the RF interface; currently returns 0. */
NDIS_STATUS oid_rt_pro_set_rf_intfs_hdl(struct oid_par_priv *poid_par_priv)
{
#if 0
	PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context);
#ifdef PLATFORM_OS_XP
	_irqL oldirql;
#endif
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;

	_func_enter_;

	RT_TRACE(_module_mp_, _drv_notice_, ("+OID_RT_PRO_SET_RF_INTFS\n"));

	if (poid_par_priv->type_of_oid != SET_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

	_irqlevel_changed_(&oldirql, LOWER);
	if (rtw_setrfintfs_cmd(Adapter, *(unsigned char*)poid_par_priv->information_buf) == _FAIL)
		status = NDIS_STATUS_NOT_ACCEPTED;
	_irqlevel_changed_(&oldirql, RAISE);

	_func_exit_;

	return status;
#else
	return 0;
#endif
}
//------------------------------------------------------------------------------
/* Disabled (#if 0): would copy the cached RX status descriptor to the
 * caller; currently returns 0. */
NDIS_STATUS oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv)
{
#if 0
	PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context);
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;

	_func_enter_;

	if (poid_par_priv->type_of_oid != QUERY_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

	_rtw_memcpy(poid_par_priv->information_buf, (unsigned char*)&Adapter->mppriv.rxstat, sizeof(struct recv_stat));
	*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;

	_func_exit_;

	return status;
#else
	return 0;
#endif
}
//------------------------------------------------------------------------------
/* Disabled (#if 0): would get/set the global debug level and component
 * mask; currently returns 0 (the inner body is additionally #if 0'd). */
NDIS_STATUS oid_rt_pro_cfg_debug_message_hdl(struct oid_par_priv *poid_par_priv)
{
#if 0
	PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context);
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;
	PCFG_DBG_MSG_STRUCT pdbg_msg;

	_func_enter_;

//	RT_TRACE(0xffffffffff,_drv_alert_,("===> oid_rt_pro_cfg_debug_message_hdl.\n"));

#if 0//#ifdef CONFIG_DEBUG_RTL871X
	pdbg_msg = (PCFG_DBG_MSG_STRUCT)(poid_par_priv->information_buf);

	if (poid_par_priv->type_of_oid == SET_OID) {
		RT_TRACE(0xffffffffff, _drv_alert_, ("===>Set level :0x%08x, H32:0x%08x L32:0x%08x\n", pdbg_msg->DebugLevel, pdbg_msg->DebugComponent_H32, pdbg_msg->DebugComponent_L32));
		GlobalDebugLevel = pdbg_msg->DebugLevel;
		GlobalDebugComponents = (pdbg_msg->DebugComponent_H32 << 32) | pdbg_msg->DebugComponent_L32;
		RT_TRACE(0xffffffffff, _drv_alert_, ("===> Set level :0x%08x, component:0x%016x\n", GlobalDebugLevel, (u32)GlobalDebugComponents));
	} else {
		pdbg_msg->DebugLevel = GlobalDebugLevel;
		pdbg_msg->DebugComponent_H32 = (u32)(GlobalDebugComponents >> 32);
		pdbg_msg->DebugComponent_L32 = (u32)GlobalDebugComponents;
		*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
		RT_TRACE(0xffffffffff, _drv_alert_, ("===>Query level:0x%08x H32:0x%08x L32:0x%08x\n", (u32)pdbg_msg->DebugLevel, (u32)pdbg_msg->DebugComponent_H32, (u32)pdbg_msg->DebugComponent_L32));
	}
#endif

	_func_exit_;

	return status;
#else
	return 0;
#endif
}
//------------------------------------------------------------------------------
/* SET handler: pass the caller's buffer to the data-rate command. The tail
 * of this function (_func_exit_/return) lies past the end of this block. */
NDIS_STATUS oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv *poid_par_priv)
{
	PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context);
#ifdef PLATFORM_OS_XP
	_irqL oldirql;
#endif
	NDIS_STATUS status = NDIS_STATUS_SUCCESS;

	_func_enter_;

	RT_TRACE(_module_mp_, _drv_notice_, ("+OID_RT_PRO_SET_DATA_RATE_EX\n"));

	if (poid_par_priv->type_of_oid != SET_OID)
		return NDIS_STATUS_NOT_ACCEPTED;

	_irqlevel_changed_(&oldirql, LOWER);

	if (rtw_setdatarate_cmd(Adapter, poid_par_priv->information_buf) !=_SUCCESS)
		status = NDIS_STATUS_NOT_ACCEPTED;

	_irqlevel_changed_(&oldirql, RAISE);
_func_exit_; return status; } //----------------------------------------------------------------------------- NDIS_STATUS oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; u8 thermal = 0; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_get_thermal_meter_hdl\n")); if (poid_par_priv->type_of_oid != QUERY_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u32)) return NDIS_STATUS_INVALID_LENGTH; _irqlevel_changed_(&oldirql, LOWER); GetThermalMeter(Adapter, &thermal); _irqlevel_changed_(&oldirql, RAISE); *(u32*)poid_par_priv->information_buf = (u32)thermal; *poid_par_priv->bytes_rw = sizeof(u32); _func_exit_; return status; } //----------------------------------------------------------------------------- NDIS_STATUS oid_rt_pro_read_tssi_hdl(struct oid_par_priv *poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_read_tssi_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; if (Adapter->mppriv.act_in_progress == _TRUE) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u8)) return NDIS_STATUS_INVALID_LENGTH; //init workparam Adapter->mppriv.act_in_progress = _TRUE; Adapter->mppriv.workparam.bcompleted = _FALSE; Adapter->mppriv.workparam.act_type = MPT_READ_TSSI; Adapter->mppriv.workparam.io_offset = 0; Adapter->mppriv.workparam.io_value = 0xFFFFFFFF; _irqlevel_changed_(&oldirql, LOWER); if (!rtw_gettssi_cmd(Adapter,0, (u8*)&Adapter->mppriv.workparam.io_value)) status = NDIS_STATUS_NOT_ACCEPTED; _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; #else return 0; #endif } 
//------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; // if (poid_par_priv->type_of_oid != SET_OID) // return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u8)) return NDIS_STATUS_INVALID_LENGTH; _irqlevel_changed_(&oldirql, LOWER); if (poid_par_priv->type_of_oid == SET_OID) { u8 enable; enable = *(u8*)poid_par_priv->information_buf; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_set_power_tracking_hdl: enable=%d\n", enable)); SetPowerTracking(Adapter, enable); } else { GetPowerTracking(Adapter, (u8*)poid_par_priv->information_buf); } _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } //----------------------------------------------------------------------------- NDIS_STATUS oid_rt_pro_set_basic_rate_hdl(struct oid_par_priv *poid_par_priv) { #if 0 #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u32 ratevalue; u8 datarates[NumRates]; int i; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER padapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_info_, ("+OID_RT_PRO_SET_BASIC_RATE\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; #if 0 ratevalue = *((u32*)poid_par_priv->information_buf); for (i = 0; i < NumRates; i++) { if (ratevalue == mpdatarate[i]) datarates[i] = mpdatarate[i]; else datarates[i] = 0xff; RT_TRACE(_module_rtl871x_ioctl_c_, _drv_info_, ("basicrate_inx=%d\n", datarates[i])); } _irqlevel_changed_(&oldirql, LOWER); if (rtw_setbasicrate_cmd(padapter, datarates) != _SUCCESS) status = NDIS_STATUS_NOT_ACCEPTED; _irqlevel_changed_(&oldirql, RAISE); #endif RT_TRACE(_module_mp_, _drv_notice_, ("-OID_RT_PRO_SET_BASIC_RATE: status=0x%08X\n", status)); _func_exit_; return status; 
#else return 0; #endif } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_qry_pwrstate_hdl(struct oid_par_priv *poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); NDIS_STATUS status = NDIS_STATUS_SUCCESS; _func_enter_; if (poid_par_priv->type_of_oid != QUERY_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < 8) return NDIS_STATUS_INVALID_LENGTH; *poid_par_priv->bytes_rw = 8; _rtw_memcpy(poid_par_priv->information_buf, &(Adapter->pwrctrlpriv.pwr_mode), 8); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_notice_, ("-oid_rt_pro_qry_pwrstate_hdl: pwr_mode=%d smart_ps=%d\n", Adapter->pwrctrlpriv.pwr_mode, Adapter->pwrctrlpriv.smart_ps)); _func_exit_; return status; #else return 0; #endif } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_set_pwrstate_hdl(struct oid_par_priv *poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); NDIS_STATUS status = NDIS_STATUS_SUCCESS; uint pwr_mode, smart_ps; _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+Set OID_RT_PRO_SET_PWRSTATE\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_rw = 0; *poid_par_priv->bytes_needed = 8; if (poid_par_priv->information_buf_len < 8) return NDIS_STATUS_INVALID_LENGTH; pwr_mode = *(uint *)(poid_par_priv->information_buf); smart_ps = *(uint *)((int)poid_par_priv->information_buf + 4); *poid_par_priv->bytes_rw = 8; _func_exit_; return status; #else return 0; #endif } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_h2c_set_rate_table_hdl(struct oid_par_priv *poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; struct setratable_parm 
*prate_table; u8 res; _func_enter_; if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_needed = sizeof(struct setratable_parm); if (poid_par_priv->information_buf_len < sizeof(struct setratable_parm)) return NDIS_STATUS_INVALID_LENGTH; prate_table = (struct setratable_parm*)poid_par_priv->information_buf; _irqlevel_changed_(&oldirql, LOWER); res = rtw_setrttbl_cmd(Adapter, prate_table); _irqlevel_changed_(&oldirql, RAISE); if (res == _FAIL) status = NDIS_STATUS_FAILURE; _func_exit_; return status; #else return 0; #endif } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_h2c_get_rate_table_hdl(struct oid_par_priv *poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); NDIS_STATUS status = NDIS_STATUS_SUCCESS; _func_enter_; if (poid_par_priv->type_of_oid != QUERY_OID) return NDIS_STATUS_NOT_ACCEPTED; #if 0 struct mp_wi_cntx *pmp_wi_cntx=&(Adapter->mppriv.wi_cntx); u8 res=_SUCCESS; DEBUG_INFO(("===> Set OID_RT_PRO_H2C_GET_RATE_TABLE.\n")); if(pmp_wi_cntx->bmp_wi_progress ==_TRUE){ DEBUG_ERR(("\n mp workitem is progressing, not allow to set another workitem right now!!!\n")); Status = NDIS_STATUS_NOT_ACCEPTED; break; } else{ pmp_wi_cntx->bmp_wi_progress=_TRUE; pmp_wi_cntx->param.bcompleted=_FALSE; pmp_wi_cntx->param.act_type=MPT_GET_RATE_TABLE; pmp_wi_cntx->param.io_offset=0x0; pmp_wi_cntx->param.bytes_cnt=sizeof(struct getratable_rsp); pmp_wi_cntx->param.io_value=0xffffffff; res=rtw_getrttbl_cmd(Adapter,(struct getratable_rsp *)pmp_wi_cntx->param.data); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; if(res != _SUCCESS) { Status = NDIS_STATUS_NOT_ACCEPTED; } } DEBUG_INFO(("\n <=== Set OID_RT_PRO_H2C_GET_RATE_TABLE.\n")); #endif _func_exit_; return status; #else return 0; #endif } //**************** oid_rtl_seg_87_12_00 section start **************** NDIS_STATUS oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv 
*poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); struct security_priv *psecuritypriv = &Adapter->securitypriv; NDIS_STATUS status = NDIS_STATUS_SUCCESS; ENCRY_CTRL_STATE encry_mode; *poid_par_priv->bytes_needed = sizeof(u8); if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed) return NDIS_STATUS_INVALID_LENGTH; if (poid_par_priv->type_of_oid == SET_OID) { encry_mode = *((u8*)poid_par_priv->information_buf); switch (encry_mode) { case HW_CONTROL: #if 0 Adapter->registrypriv.software_decrypt=_FALSE; Adapter->registrypriv.software_encrypt=_FALSE; #else psecuritypriv->sw_decrypt = _FALSE; psecuritypriv->sw_encrypt = _FALSE; #endif break; case SW_CONTROL: #if 0 Adapter->registrypriv.software_decrypt=_TRUE; Adapter->registrypriv.software_encrypt=_TRUE; #else psecuritypriv->sw_decrypt = _TRUE; psecuritypriv->sw_encrypt = _TRUE; #endif break; case HW_ENCRY_SW_DECRY: #if 0 Adapter->registrypriv.software_decrypt=_TRUE; Adapter->registrypriv.software_encrypt=_FALSE; #else psecuritypriv->sw_decrypt = _TRUE; psecuritypriv->sw_encrypt = _FALSE; #endif break; case SW_ENCRY_HW_DECRY: #if 0 Adapter->registrypriv.software_decrypt=_FALSE; Adapter->registrypriv.software_encrypt=_TRUE; #else psecuritypriv->sw_decrypt = _FALSE; psecuritypriv->sw_encrypt = _TRUE; #endif break; } RT_TRACE(_module_rtl871x_ioctl_c_, _drv_notice_, ("-oid_rt_pro_encryption_ctrl_hdl: SET encry_mode=0x%x sw_encrypt=0x%x sw_decrypt=0x%x\n", encry_mode, psecuritypriv->sw_encrypt, psecuritypriv->sw_decrypt)); } else { #if 0 if (Adapter->registrypriv.software_encrypt == _FALSE) { if (Adapter->registrypriv.software_decrypt == _FALSE) encry_mode = HW_CONTROL; else encry_mode = HW_ENCRY_SW_DECRY; } else { if (Adapter->registrypriv.software_decrypt == _FALSE) encry_mode = SW_ENCRY_HW_DECRY; else encry_mode = SW_CONTROL; } #else if ((psecuritypriv->sw_encrypt == _FALSE) && (psecuritypriv->sw_decrypt == _FALSE)) encry_mode = HW_CONTROL; else if 
((psecuritypriv->sw_encrypt == _FALSE) && (psecuritypriv->sw_decrypt == _TRUE)) encry_mode = HW_ENCRY_SW_DECRY; else if ((psecuritypriv->sw_encrypt == _TRUE) && (psecuritypriv->sw_decrypt == _FALSE)) encry_mode = SW_ENCRY_HW_DECRY; else if ((psecuritypriv->sw_encrypt == _TRUE) && (psecuritypriv->sw_decrypt == _TRUE)) encry_mode = SW_CONTROL; #endif *(u8*)poid_par_priv->information_buf = encry_mode; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_notice_, ("-oid_rt_pro_encryption_ctrl_hdl: QUERY encry_mode=0x%x\n", encry_mode)); } return status; #else return 0; #endif } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; struct sta_info *psta = NULL; UCHAR *macaddr; if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_needed = ETH_ALEN; if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed) return NDIS_STATUS_INVALID_LENGTH; macaddr = (UCHAR *) poid_par_priv->information_buf ; RT_TRACE(_module_rtl871x_ioctl_c_,_drv_notice_, ("OID_RT_PRO_ADD_STA_INFO: addr="MAC_FMT"\n", MAC_ARG(macaddr) )); _irqlevel_changed_(&oldirql, LOWER); psta = rtw_get_stainfo(&Adapter->stapriv, macaddr); if (psta == NULL) { // the sta have been in sta_info_queue => do nothing psta = rtw_alloc_stainfo(&Adapter->stapriv, macaddr); if (psta == NULL) { RT_TRACE(_module_rtl871x_ioctl_c_,_drv_err_,("Can't alloc sta_info when OID_RT_PRO_ADD_STA_INFO\n")); status = NDIS_STATUS_FAILURE; } } else { //(between drv has received this event before and fw have not yet to set key to CAM_ENTRY) RT_TRACE(_module_rtl871x_ioctl_c_, _drv_err_, ("Error: OID_RT_PRO_ADD_STA_INFO: sta has been in sta_hash_queue \n")); } _irqlevel_changed_(&oldirql, RAISE); 
return status; #else return 0; #endif } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_dele_sta_info_hdl(struct oid_par_priv *poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; struct sta_info *psta = NULL; UCHAR *macaddr; if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_needed = ETH_ALEN; if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed) return NDIS_STATUS_INVALID_LENGTH; macaddr = (UCHAR *) poid_par_priv->information_buf ; RT_TRACE(_module_rtl871x_ioctl_c_,_drv_notice_, ("+OID_RT_PRO_ADD_STA_INFO: addr="MAC_FMT"\n", MAC_ARG(macaddr) )); psta = rtw_get_stainfo(&Adapter->stapriv, macaddr); if (psta != NULL) { _enter_critical(&(Adapter->stapriv.sta_hash_lock), &irqL); rtw_free_stainfo(Adapter, psta); _exit_critical(&(Adapter->stapriv.sta_hash_lock), &irqL); } return status; #else return 0; #endif } //------------------------------------------------------------------------------ #if 0 #include <sdio_osintf.h> static u32 mp_query_drv_var(_adapter *padapter, u8 offset, u32 var) { #ifdef CONFIG_SDIO_HCI if (offset == 1) { u16 tmp_blk_num; tmp_blk_num = rtw_read16(padapter, SDIO_RX0_RDYBLK_NUM); RT_TRACE(_module_mp_, _drv_err_, ("Query Information, mp_query_drv_var SDIO_RX0_RDYBLK_NUM=0x%x dvobj.rxblknum=0x%x\n", tmp_blk_num, adapter_to_dvobj(padapter)->rxblknum)); if (adapter_to_dvobj(padapter)->rxblknum != tmp_blk_num) { RT_TRACE(_module_mp_,_drv_err_, ("Query Information, mp_query_drv_var call recv rx\n")); // sd_recv_rxfifo(padapter); } } #if 0 if(offset <=100){ //For setting data rate and query data rate if(offset==100){ //For query data rate RT_TRACE(_module_mp_, _drv_emerg_, ("\n mp_query_drv_var: offset(%d): query rate=0x%.2x \n",offset,padapter->registrypriv.tx_rate)); var=padapter->registrypriv.tx_rate; } else 
if(offset<0x1d){ //For setting data rate padapter->registrypriv.tx_rate=offset; var=padapter->registrypriv.tx_rate; padapter->registrypriv.use_rate=_TRUE; RT_TRACE(_module_mp_, _drv_emerg_, ("\n mp_query_drv_var: offset(%d): set rate=0x%.2x \n",offset,padapter->registrypriv.tx_rate)); } else{ //not use the data rate padapter->registrypriv.use_rate=_FALSE; RT_TRACE(_module_mp_, _drv_emerg_, ("\n mp_query_drv_var: offset(%d) out of rate range\n",offset)); } } else if (offset<=110){ //for setting debug level RT_TRACE(_module_mp_, _drv_emerg_, (" mp_query_drv_var: offset(%d) for set debug level\n",offset)); if(offset==110){ //For query data rate RT_TRACE(_module_mp_, _drv_emerg_, (" mp_query_drv_var: offset(%d): query dbg level=0x%.2x \n",offset,padapter->registrypriv.dbg_level)); padapter->registrypriv.dbg_level=GlobalDebugLevel; var=padapter->registrypriv.dbg_level; } else if(offset<110 && offset>100){ RT_TRACE(_module_mp_, _drv_emerg_, (" mp_query_drv_var: offset(%d): set dbg level=0x%.2x \n",offset,offset-100)); padapter->registrypriv.dbg_level=GlobalDebugLevel=offset-100; var=padapter->registrypriv.dbg_level; RT_TRACE(_module_mp_, _drv_emerg_, (" mp_query_drv_var(_drv_emerg_): offset(%d): set dbg level=0x%.2x \n",offset,GlobalDebugLevel)); RT_TRACE(_module_mp_, _drv_alert_, (" mp_query_drv_var(_drv_alert_): offset(%d): set dbg level=0x%.2x \n",offset,GlobalDebugLevel)); RT_TRACE(_module_mp_, _drv_crit_, (" mp_query_drv_var(_drv_crit_): offset(%d): set dbg level=0x%.2x \n",offset,GlobalDebugLevel)); RT_TRACE(_module_mp_, _drv_err_, (" mp_query_drv_var(_drv_err_): offset(%d): set dbg level=0x%.2x \n",offset,GlobalDebugLevel)); RT_TRACE(_module_mp_, _drv_warning_, (" mp_query_drv_var(_drv_warning_): offset(%d): set dbg level=0x%.2x \n",offset,GlobalDebugLevel)); RT_TRACE(_module_mp_, _drv_notice_, (" mp_query_drv_var(_drv_notice_): offset(%d): set dbg level=0x%.2x \n",offset,GlobalDebugLevel)); RT_TRACE(_module_mp_, _drv_info_, (" mp_query_drv_var(_drv_info_): 
offset(%d): set dbg level=0x%.2x \n",offset,GlobalDebugLevel)); RT_TRACE(_module_mp_, _drv_debug_, (" mp_query_drv_var(_drv_debug_): offset(%d): set dbg level=0x%.2x \n",offset,GlobalDebugLevel)); } } else if(offset >110 &&offset <116){ if(115==offset){ RT_TRACE(_module_mp_, _drv_emerg_, (" mp_query_drv_var(_drv_emerg_): offset(%d): query TRX access type: [tx_block_mode=%x,rx_block_mode=%x]\n",\ offset, adapter_to_dvobj(padapter)->tx_block_mode, adapter_to_dvobj(padapter)->rx_block_mode)); } else { switch(offset){ case 111: adapter_to_dvobj(padapter)->tx_block_mode=1; adapter_to_dvobj(padapter)->rx_block_mode=1; RT_TRACE(_module_mp_, _drv_emerg_, \ (" mp_query_drv_var(_drv_emerg_): offset(%d): SET TRX access type:(TX block/RX block) [tx_block_mode=%x,rx_block_mode=%x]\n",\ offset, adapter_to_dvobj(padapter)->tx_block_mode, adapter_to_dvobj(padapter)->rx_block_mode)); break; case 112: adapter_to_dvobj(padapter)->tx_block_mode=1; adapter_to_dvobj(padapter)->rx_block_mode=0; RT_TRACE(_module_mp_, _drv_emerg_, \ (" mp_query_drv_var(_drv_emerg_): offset(%d): SET TRX access type:(TX block/RX byte) [tx_block_mode=%x,rx_block_mode=%x]\n",\ offset, adapter_to_dvobj(padapter)->tx_block_mode, adapter_to_dvobj(padapter)->rx_block_mode)); break; case 113: adapter_to_dvobj(padapter)->tx_block_mode=0; adapter_to_dvobj(padapter)->rx_block_mode=1; RT_TRACE(_module_mp_, _drv_emerg_, \ (" mp_query_drv_var(_drv_emerg_): offset(%d): SET TRX access type:(TX byte/RX block) [tx_block_mode=%x,rx_block_mode=%x]\n",\ offset, adapter_to_dvobj(padapter)->tx_block_mode, adapter_to_dvobj(padapter)->rx_block_mode)); break; case 114: adapter_to_dvobj(padapter)->tx_block_mode=0; adapter_to_dvobj(padapter)->rx_block_mode=0; RT_TRACE(_module_mp_, _drv_emerg_, \ (" mp_query_drv_var(_drv_emerg_): offset(%d): SET TRX access type:(TX byte/RX byte) [tx_block_mode=%x,rx_block_mode=%x]\n",\ offset, adapter_to_dvobj(padapter)->tx_block_mode, adapter_to_dvobj(padapter)->rx_block_mode)); break; default : 
break; } } } else if(offset>=127){ u64 prnt_dbg_comp; u8 chg_idx; u64 tmp_dbg_comp; chg_idx=offset-0x80; tmp_dbg_comp=BIT(chg_idx); prnt_dbg_comp=padapter->registrypriv.dbg_component= GlobalDebugComponents; RT_TRACE(_module_mp_, _drv_emerg_, (" 1: mp_query_drv_var: offset(%d;0x%x):for dbg conpoment prnt_dbg_comp=0x%.16x GlobalDebugComponents=0x%.16x padapter->registrypriv.dbg_component=0x%.16x\n",offset,offset,prnt_dbg_comp,GlobalDebugComponents,padapter->registrypriv.dbg_component)); if(offset==127){ // prnt_dbg_comp=padapter->registrypriv.dbg_component= GlobalDebugComponents; var=(u32)(padapter->registrypriv.dbg_component); RT_TRACE(0xffffffff, _drv_emerg_, ("2: mp_query_drv_var: offset(%d;0x%x):for query dbg conpoment=0x%x(l) 0x%x(h) GlobalDebugComponents=0x%x(l) 0x%x(h) \n",offset,offset,padapter->registrypriv.dbg_component,prnt_dbg_comp)); prnt_dbg_comp=GlobalDebugComponents; RT_TRACE(0xffffffff, _drv_emerg_, ("2-1: mp_query_drv_var: offset(%d;0x%x):for query dbg conpoment=0x%x(l) 0x%x(h) GlobalDebugComponents=0x%x(l) 0x%x(h)\n",offset,offset,padapter->registrypriv.dbg_component,prnt_dbg_comp)); prnt_dbg_comp=GlobalDebugComponents=padapter->registrypriv.dbg_component; RT_TRACE(0xffffffff, _drv_emerg_, ("2-2: mp_query_drv_var: offset(%d;0x%x):for query dbg conpoment=0x%x(l) 0x%x(h) GlobalDebugComponents=0x%x(l) 0x%x(h)\n",offset,offset,padapter->registrypriv.dbg_component,prnt_dbg_comp)); } else{ RT_TRACE(0xffffffff, _drv_emerg_, ("3: mp_query_drv_var: offset(%d;0x%x):for query dbg conpoment=0x%x(l) 0x%x(h) GlobalDebugComponents=0x%x(l) 0x%x(h) chg_idx=%d\n",offset,offset,padapter->registrypriv.dbg_component,prnt_dbg_comp,chg_idx)); prnt_dbg_comp=GlobalDebugComponents; RT_TRACE(0xffffffff, _drv_emerg_,("3-1: mp_query_drv_var: offset(%d;0x%x):for query dbg conpoment=0x%x(l) 0x%x(h) GlobalDebugComponents=0x%x(l) 0x%x(h) chg_idx=%d\n",offset,offset,padapter->registrypriv.dbg_component,prnt_dbg_comp,chg_idx));// ("3-1: mp_query_drv_var: offset(%d;0x%x):before set 
dbg conpoment=0x%x chg_idx=%d or0x%x BIT(chg_idx[%d]=0x%x)\n",offset,offset,prnt_dbg_comp,chg_idx,chg_idx,(chg_idx),tmp_dbg_comp) prnt_dbg_comp=GlobalDebugComponents=padapter->registrypriv.dbg_component; RT_TRACE(0xffffffff, _drv_emerg_, ("3-2: mp_query_drv_var: offset(%d;0x%x):for query dbg conpoment=0x%x(l) 0x%x(h) GlobalDebugComponents=0x%x(l) 0x%x(h)\n",offset,offset,padapter->registrypriv.dbg_component,prnt_dbg_comp)); if(GlobalDebugComponents&tmp_dbg_comp){ //this bit is already set, now clear it GlobalDebugComponents=GlobalDebugComponents&(~tmp_dbg_comp); } else{ //this bit is not set, now set it. GlobalDebugComponents =GlobalDebugComponents|tmp_dbg_comp; } RT_TRACE(0xffffffff, _drv_emerg_, ("4: mp_query_drv_var: offset(%d;0x%x):before set dbg conpoment tmp_dbg_comp=0x%x GlobalDebugComponents=0x%x(l) 0x%x(h)",offset,offset,tmp_dbg_comp,prnt_dbg_comp)); prnt_dbg_comp=GlobalDebugComponents; RT_TRACE(0xffffffff, _drv_emerg_, ("4-1: mp_query_drv_var: offset(%d;0x%x):before set dbg conpoment tmp_dbg_comp=0x%x GlobalDebugComponents=0x%x(l) 0x%x(h)",offset,offset,tmp_dbg_comp,prnt_dbg_comp)); RT_TRACE(_module_rtl871x_xmit_c_, _drv_emerg_, ("0: mp_query_drv_var(_module_rtl871x_xmit_c_:0): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,prnt_dbg_comp)); RT_TRACE(_module_xmit_osdep_c_, _drv_emerg_, ("1: mp_query_drv_var(_module_xmit_osdep_c_:1): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_rtl871x_recv_c_, _drv_emerg_, ("2: mp_query_drv_var(_module_rtl871x_recv_c_:2): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_recv_osdep_c_, _drv_emerg_, ("3: mp_query_drv_var(_module_recv_osdep_c_:3): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_rtl871x_mlme_c_, _drv_emerg_, ("4: mp_query_drv_var(_module_rtl871x_mlme_c_:4): offset(%d;0x%x):before 
set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_mlme_osdep_c_, _drv_emerg_, (" 5:mp_query_drv_var(_module_mlme_osdep_c_:5): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_emerg_, ("6: mp_query_drv_var(_module_rtl871x_sta_mgt_c_:6): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_rtl871x_cmd_c_, _drv_emerg_, ("7: mp_query_drv_var(_module_rtl871x_cmd_c_:7): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_cmd_osdep_c_, _drv_emerg_, ("8: mp_query_drv_var(_module_cmd_osdep_c_:8): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_rtl871x_io_c_, _drv_emerg_, ("9: mp_query_drv_var(_module_rtl871x_io_c_:9): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_io_osdep_c_, _drv_emerg_, ("10: mp_query_drv_var(_module_io_osdep_c_:10): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_os_intfs_c_, _drv_emerg_, ("11: mp_query_drv_var(_module_os_intfs_c_:11): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_rtl871x_security_c_, _drv_emerg_, ("12: mp_query_drv_var(_module_rtl871x_security_c_:12): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_rtl871x_eeprom_c_, _drv_emerg_, ("13: mp_query_drv_var(_module_rtl871x_eeprom_c_:13): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_hal_init_c_, _drv_emerg_, ("14: mp_query_drv_var(_module_hal_init_c_:14): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 
0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_hci_hal_init_c_, _drv_emerg_, ("15: mp_query_drv_var(_module_hci_hal_init_c_:15): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_rtl871x_ioctl_c_, _drv_emerg_, ("16: mp_query_drv_var(_module_rtl871x_ioctl_c_:16): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_emerg_, ("17: mp_query_drv_var(_module_rtl871x_ioctl_set_c_:17): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_rtl871x_ioctl_query_c_, _drv_emerg_, ("18: mp_query_drv_var(_module_rtl871x_ioctl_query_c_:18): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_emerg_, ("19: mp_query_drv_var(_module_rtl871x_pwrctrl_c_:19): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_hci_intfs_c_, _drv_emerg_, ("20: mp_query_drv_var(_module_hci_intfs_c_:20): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_hci_ops_c_, _drv_emerg_, ("21: mp_query_drv_var(_module_hci_ops_c_:21): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_osdep_service_c_, _drv_emerg_, ("22: mp_query_drv_var(_module_osdep_service_c_:22): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_mp_, _drv_emerg_, ("23: mp_query_drv_var(_module_mp_:23): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); RT_TRACE(_module_hci_ops_os_c_, _drv_emerg_, ("24: mp_query_drv_var(_module_hci_ops_os_c_:24): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 
0x%x(h)\n",offset,offset,GlobalDebugComponents)); var=(u32)(GlobalDebugComponents); //GlobalDebugComponents=padapter->registrypriv.dbg_component; RT_TRACE(0xffffffff, _drv_emerg_, (" ==mp_query_drv_var(_module_mp_): offset(%d;0x%x):before set dbg conpoment=0x%x(l) 0x%x(h)\n",offset,offset,GlobalDebugComponents)); } } else{ RT_TRACE(_module_mp_, _drv_emerg_, ("\n mp_query_drv_var: offset(%d) >110\n",offset)); } #endif #endif return var; } #endif NDIS_STATUS oid_rt_pro_query_dr_variable_hdl(struct oid_par_priv *poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; DR_VARIABLE_STRUCT *pdrv_var; if (poid_par_priv->type_of_oid != QUERY_OID) return NDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_needed = sizeof(DR_VARIABLE_STRUCT); if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed) return NDIS_STATUS_INVALID_LENGTH; RT_TRACE(_module_mp_, _drv_notice_, ("+Query Information, OID_RT_PRO_QUERY_DR_VARIABLE\n")); pdrv_var = (struct _DR_VARIABLE_STRUCT_ *)poid_par_priv->information_buf; _irqlevel_changed_(&oldirql, LOWER); pdrv_var->variable = mp_query_drv_var(Adapter, pdrv_var->offset, pdrv_var->variable); _irqlevel_changed_(&oldirql, RAISE); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_notice_, ("-oid_rt_pro_query_dr_variable_hdl: offset=0x%x valule=0x%x\n", pdrv_var->offset, pdrv_var->variable)); return status; #else return 0; #endif } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); NDIS_STATUS status = NDIS_STATUS_SUCCESS; RT_TRACE(_module_mp_, _drv_err_, ("oid_rt_pro_rx_packet_type_hdl...................\n")); if (poid_par_priv->information_buf_len < sizeof (UCHAR)) { status = NDIS_STATUS_INVALID_LENGTH; 
*poid_par_priv->bytes_needed = sizeof(UCHAR); return status; } if (poid_par_priv->type_of_oid == SET_OID) { Adapter->mppriv.rx_with_status = *(UCHAR *) poid_par_priv->information_buf; RT_TRACE(_module_rtl871x_ioctl_c_,_drv_err_, ("Query Information, OID_RT_PRO_RX_PACKET_TYPE:%d \n",\ Adapter->mppriv.rx_with_status)); //*(u32 *)&Adapter->eeprompriv.mac_addr[0]=rtw_read32(Adapter, 0x10250050); //*(u16 *)&Adapter->eeprompriv.mac_addr[4]=rtw_read16(Adapter, 0x10250054); RT_TRACE(_module_rtl871x_ioctl_c_,_drv_err_,("MAC addr=0x%x:0x%x:0x%x:0x%x:0x%x:0x%x \n", Adapter->eeprompriv.mac_addr[0],Adapter->eeprompriv.mac_addr[1],Adapter->eeprompriv.mac_addr[2],\ Adapter->eeprompriv.mac_addr[3],Adapter->eeprompriv.mac_addr[4],Adapter->eeprompriv.mac_addr[5])); } else { *(UCHAR *) poid_par_priv->information_buf = Adapter->mppriv.rx_with_status; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_rtl871x_ioctl_c_,_drv_err_, ("Query Information, OID_RT_PRO_RX_PACKET_TYPE:%d \n", \ Adapter->mppriv.rx_with_status)); //*(u32 *)&Adapter->eeprompriv.mac_addr[0]=rtw_read32(Adapter, 0x10250050); //*(u16 *)&Adapter->eeprompriv.mac_addr[4]=rtw_read16(Adapter, 0x10250054); RT_TRACE(_module_rtl871x_ioctl_c_,_drv_err_,("MAC addr=0x%x:0x%x:0x%x:0x%x:0x%x:0x%x \n", Adapter->eeprompriv.mac_addr[0],Adapter->eeprompriv.mac_addr[1],Adapter->eeprompriv.mac_addr[2],\ Adapter->eeprompriv.mac_addr[3],Adapter->eeprompriv.mac_addr[4],Adapter->eeprompriv.mac_addr[5])); } #endif return NDIS_STATUS_SUCCESS; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif PEFUSE_ACCESS_STRUCT pefuse; u8 *data; u16 addr = 0, cnts = 0, max_available_size = 0; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; if (poid_par_priv->type_of_oid != QUERY_OID) return 
NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(EFUSE_ACCESS_STRUCT)) return NDIS_STATUS_INVALID_LENGTH; pefuse = (PEFUSE_ACCESS_STRUCT)poid_par_priv->information_buf; addr = pefuse->start_addr; cnts = pefuse->cnts; data = pefuse->data; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_read_efuse_hd: buf_len=%d addr=%d cnts=%d\n", poid_par_priv->information_buf_len, addr, cnts)); EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (PVOID)&max_available_size, _FALSE); if ((addr + cnts) > max_available_size) { RT_TRACE(_module_mp_, _drv_err_, ("!oid_rt_pro_read_efuse_hdl: parameter error!\n")); return NDIS_STATUS_NOT_ACCEPTED; } _irqlevel_changed_(&oldirql, LOWER); if (rtw_efuse_access(Adapter, _FALSE, addr, cnts, data) == _FAIL) { RT_TRACE(_module_mp_, _drv_err_, ("!oid_rt_pro_read_efuse_hdl: rtw_efuse_access FAIL!\n")); status = NDIS_STATUS_FAILURE; } else *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif PEFUSE_ACCESS_STRUCT pefuse; u8 *data; u16 addr = 0, cnts = 0, max_available_size = 0; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; pefuse = (PEFUSE_ACCESS_STRUCT)poid_par_priv->information_buf; addr = pefuse->start_addr; cnts = pefuse->cnts; data = pefuse->data; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_write_efuse_hdl: buf_len=%d addr=0x%04x cnts=%d\n", poid_par_priv->information_buf_len, addr, cnts)); EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (PVOID)&max_available_size, _FALSE); if ((addr + cnts) > max_available_size) { 
RT_TRACE(_module_mp_, _drv_err_, ("!oid_rt_pro_write_efuse_hdl: parameter error")); return NDIS_STATUS_NOT_ACCEPTED; } _irqlevel_changed_(&oldirql, LOWER); if (rtw_efuse_access(Adapter, _TRUE, addr, cnts, data) == _FAIL) status = NDIS_STATUS_FAILURE; _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif PPGPKT_STRUCT ppgpkt; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; // RT_TRACE(_module_mp_, _drv_info_, ("+oid_rt_pro_rw_efuse_pgpkt_hdl\n")); *poid_par_priv->bytes_rw = 0; if (poid_par_priv->information_buf_len < sizeof(PGPKT_STRUCT)) return NDIS_STATUS_INVALID_LENGTH; ppgpkt = (PPGPKT_STRUCT)poid_par_priv->information_buf; _irqlevel_changed_(&oldirql, LOWER); if (poid_par_priv->type_of_oid == QUERY_OID) { RT_TRACE(_module_mp_, _drv_notice_, ("oid_rt_pro_rw_efuse_pgpkt_hdl: Read offset=0x%x\n",\ ppgpkt->offset)); Efuse_PowerSwitch(Adapter, _FALSE, _TRUE); if (Efuse_PgPacketRead(Adapter, ppgpkt->offset, ppgpkt->data, _FALSE) == _TRUE) *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; else status = NDIS_STATUS_FAILURE; Efuse_PowerSwitch(Adapter, _FALSE, _FALSE); } else { RT_TRACE(_module_mp_, _drv_notice_, ("oid_rt_pro_rw_efuse_pgpkt_hdl: Write offset=0x%x word_en=0x%x\n",\ ppgpkt->offset, ppgpkt->word_en)); Efuse_PowerSwitch(Adapter, _TRUE, _TRUE); if (Efuse_PgPacketWrite(Adapter, ppgpkt->offset, ppgpkt->word_en, ppgpkt->data, _FALSE) == _TRUE) *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; else status = NDIS_STATUS_FAILURE; Efuse_PowerSwitch(Adapter, _TRUE, _FALSE); } _irqlevel_changed_(&oldirql, RAISE); RT_TRACE(_module_mp_, _drv_info_, ("-oid_rt_pro_rw_efuse_pgpkt_hdl: status=0x%08X\n", status)); _func_exit_; return status; } 
//------------------------------------------------------------------------------ NDIS_STATUS oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u16 size; u8 ret; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; if (poid_par_priv->type_of_oid != QUERY_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len <sizeof(u32)) return NDIS_STATUS_INVALID_LENGTH; _irqlevel_changed_(&oldirql, LOWER); ret = efuse_GetCurrentSize(Adapter, &size); _irqlevel_changed_(&oldirql, RAISE); if (ret == _SUCCESS) { *(u32*)poid_par_priv->information_buf = size; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; } else status = NDIS_STATUS_FAILURE; _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv) { NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; if (poid_par_priv->type_of_oid != QUERY_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u32)) return NDIS_STATUS_INVALID_LENGTH; *(u32*)poid_par_priv->information_buf = efuse_GetMaxSize(Adapter); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; RT_TRACE(_module_mp_, _drv_info_, ("-oid_rt_get_efuse_max_size_hdl: size=%d status=0x%08X\n", *(int*)poid_par_priv->information_buf, status)); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv) { NDIS_STATUS status; _func_enter_; RT_TRACE(_module_mp_, _drv_info_, ("+oid_rt_pro_efuse_hdl\n")); if (poid_par_priv->type_of_oid == QUERY_OID) status = oid_rt_pro_read_efuse_hdl(poid_par_priv); else status = oid_rt_pro_write_efuse_hdl(poid_par_priv); 
RT_TRACE(_module_mp_, _drv_info_, ("-oid_rt_pro_efuse_hdl: status=0x%08X\n", status)); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u8 *data; NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); u16 mapLen=0; _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_pro_efuse_map_hdl\n")); EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (PVOID)&mapLen, _FALSE); *poid_par_priv->bytes_rw = 0; if (poid_par_priv->information_buf_len < mapLen) return NDIS_STATUS_INVALID_LENGTH; data = (u8*)poid_par_priv->information_buf; _irqlevel_changed_(&oldirql, LOWER); if (poid_par_priv->type_of_oid == QUERY_OID) { RT_TRACE(_module_mp_, _drv_info_, ("oid_rt_pro_efuse_map_hdl: READ\n")); if (rtw_efuse_map_read(Adapter, 0, mapLen, data) == _SUCCESS) *poid_par_priv->bytes_rw = mapLen; else { RT_TRACE(_module_mp_, _drv_err_, ("oid_rt_pro_efuse_map_hdl: READ fail\n")); status = NDIS_STATUS_FAILURE; } } else { // SET_OID RT_TRACE(_module_mp_, _drv_info_, ("oid_rt_pro_efuse_map_hdl: WRITE\n")); if (rtw_efuse_map_write(Adapter, 0, mapLen, data) == _SUCCESS) *poid_par_priv->bytes_rw = mapLen; else { RT_TRACE(_module_mp_, _drv_err_, ("oid_rt_pro_efuse_map_hdl: WRITE fail\n")); status = NDIS_STATUS_FAILURE; } } _irqlevel_changed_(&oldirql, RAISE); RT_TRACE(_module_mp_, _drv_info_, ("-oid_rt_pro_efuse_map_hdl: status=0x%08X\n", status)); _func_exit_; return status; } NDIS_STATUS oid_rt_set_crystal_cap_hdl(struct oid_par_priv *poid_par_priv) { NDIS_STATUS status = NDIS_STATUS_SUCCESS; #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u32 crystal_cap = 0; _func_enter_; if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len 
<sizeof(u32)) return NDIS_STATUS_INVALID_LENGTH; crystal_cap = *((u32*)poid_par_priv->information_buf);//4 if (crystal_cap > 0xf) return NDIS_STATUS_NOT_ACCEPTED; Adapter->mppriv.curr_crystalcap = crystal_cap; _irqlevel_changed_(&oldirql,LOWER); SetCrystalCap(Adapter); _irqlevel_changed_(&oldirql,RAISE); _func_exit_; #endif return status; } NDIS_STATUS oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u8 rx_pkt_type; // u32 rcr_val32; NDIS_STATUS status = NDIS_STATUS_SUCCESS; // PADAPTER padapter = (PADAPTER)(poid_par_priv->adapter_context); _func_enter_; RT_TRACE(_module_mp_, _drv_notice_, ("+oid_rt_set_rx_packet_type_hdl\n")); if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u8)) return NDIS_STATUS_INVALID_LENGTH; rx_pkt_type = *((u8*)poid_par_priv->information_buf);//4 RT_TRACE(_module_mp_, _drv_info_, ("rx_pkt_type: %x\n",rx_pkt_type )); #if 0 _irqlevel_changed_(&oldirql, LOWER); #if 0 rcr_val8 = rtw_read8(Adapter, 0x10250048);//RCR rcr_val8 &= ~(RCR_AB|RCR_AM|RCR_APM|RCR_AAP); if(rx_pkt_type == RX_PKT_BROADCAST){ rcr_val8 |= (RCR_AB | RCR_ACRC32 ); } else if(rx_pkt_type == RX_PKT_DEST_ADDR){ rcr_val8 |= (RCR_AAP| RCR_AM |RCR_ACRC32); } else if(rx_pkt_type == RX_PKT_PHY_MATCH){ rcr_val8 |= (RCR_APM|RCR_ACRC32); } else{ rcr_val8 &= ~(RCR_AAP|RCR_APM|RCR_AM|RCR_AB|RCR_ACRC32); } rtw_write8(padapter, 0x10250048,rcr_val8); #else rcr_val32 = rtw_read32(padapter, RCR);//RCR = 0x10250048 rcr_val32 &= ~(RCR_CBSSID|RCR_AB|RCR_AM|RCR_APM|RCR_AAP); #if 0 if(rx_pkt_type == RX_PKT_BROADCAST){ rcr_val32 |= (RCR_AB|RCR_AM|RCR_APM|RCR_AAP|RCR_ACRC32); } else if(rx_pkt_type == RX_PKT_DEST_ADDR){ //rcr_val32 |= (RCR_CBSSID|RCR_AAP|RCR_AM|RCR_ACRC32); rcr_val32 |= (RCR_CBSSID|RCR_APM|RCR_ACRC32); } else if(rx_pkt_type == RX_PKT_PHY_MATCH){ rcr_val32 |= (RCR_APM|RCR_ACRC32); //rcr_val32 |= (RCR_AAP|RCR_ACRC32); } else{ rcr_val32 &= 
~(RCR_AAP|RCR_APM|RCR_AM|RCR_AB|RCR_ACRC32); } #else switch (rx_pkt_type) { case RX_PKT_BROADCAST : rcr_val32 |= (RCR_AB|RCR_AM|RCR_APM|RCR_AAP|RCR_ACRC32); break; case RX_PKT_DEST_ADDR : rcr_val32 |= (RCR_AB|RCR_AM|RCR_APM|RCR_AAP|RCR_ACRC32); break; case RX_PKT_PHY_MATCH: rcr_val32 |= (RCR_APM|RCR_ACRC32); break; default: rcr_val32 &= ~(RCR_AAP|RCR_APM|RCR_AM|RCR_AB|RCR_ACRC32); break; } if (rx_pkt_type == RX_PKT_DEST_ADDR) { padapter->mppriv.check_mp_pkt = 1; } else { padapter->mppriv.check_mp_pkt = 0; } #endif rtw_write32(padapter, RCR, rcr_val32); #endif _irqlevel_changed_(&oldirql, RAISE); #endif _func_exit_; return status; } NDIS_STATUS oid_rt_pro_set_tx_agc_offset_hdl(struct oid_par_priv *poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); #ifdef PLATFORM_OS_XP _irqL oldirql; #endif NDIS_STATUS status = NDIS_STATUS_SUCCESS; u32 txagc; _func_enter_; if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u32)) return NDIS_STATUS_INVALID_LENGTH; txagc = *(u32*)poid_par_priv->information_buf; RT_TRACE(_module_mp_, _drv_info_, ("oid_rt_pro_set_tx_agc_offset_hdl: 0x%08x\n", txagc)); _irqlevel_changed_(&oldirql, LOWER); SetTxAGCOffset(Adapter, txagc); _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; #else return 0; #endif } NDIS_STATUS oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv *poid_par_priv) { #if 0 PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); NDIS_STATUS status = NDIS_STATUS_SUCCESS; struct mlme_priv *pmlmepriv = &Adapter->mlmepriv; struct mp_priv *pmppriv = &Adapter->mppriv; u32 type; _func_enter_; if (poid_par_priv->type_of_oid != SET_OID) return NDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len <sizeof(u32)) return NDIS_STATUS_INVALID_LENGTH; type = *(u32*)poid_par_priv->information_buf; if (_LOOPBOOK_MODE_ == type) { pmppriv->mode = type; set_fwstate(pmlmepriv, WIFI_MP_LPBK_STATE); //append txdesc 
RT_TRACE(_module_mp_, _drv_info_, ("test mode change to loopback mode:0x%08x.\n", get_fwstate(pmlmepriv))); } else if (_2MAC_MODE_ == type){ pmppriv->mode = type; _clr_fwstate_(pmlmepriv, WIFI_MP_LPBK_STATE); RT_TRACE(_module_mp_, _drv_info_, ("test mode change to 2mac mode:0x%08x.\n", get_fwstate(pmlmepriv))); } else status = NDIS_STATUS_NOT_ACCEPTED; _func_exit_; return status; #else return 0; #endif } unsigned int mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv) { PMP_XMIT_PARM pparm; PADAPTER padapter; struct mp_priv *pmp_priv; struct pkt_attrib *pattrib; RT_TRACE(_module_mp_, _drv_notice_, ("+%s\n", __func__)); pparm = (PMP_XMIT_PARM)poid_par_priv->information_buf; padapter = (PADAPTER)poid_par_priv->adapter_context; pmp_priv = &padapter->mppriv; if (poid_par_priv->type_of_oid == QUERY_OID) { pparm->enable = !pmp_priv->tx.stop; pparm->count = pmp_priv->tx.sended; } else { if (pparm->enable == 0) { pmp_priv->tx.stop = 1; } else if (pmp_priv->tx.stop == 1) { pmp_priv->tx.stop = 0; pmp_priv->tx.count = pparm->count; pmp_priv->tx.payload = pparm->payload_type; pattrib = &pmp_priv->tx.attrib; pattrib->pktlen = pparm->length; _rtw_memcpy(pattrib->dst, pparm->da, ETH_ALEN); SetPacketTx(padapter); } else return NDIS_STATUS_FAILURE; } return NDIS_STATUS_SUCCESS; } #if 0 unsigned int mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv) { unsigned char *pframe, *pmp_pkt; struct ethhdr *pethhdr; struct pkt_attrib *pattrib; struct rtw_ieee80211_hdr *pwlanhdr; unsigned short *fctrl; int llc_sz, payload_len; struct mp_xmit_frame *pxframe= NULL; struct mp_xmit_packet *pmp_xmitpkt = (struct mp_xmit_packet*)param; u8 addr3[] = {0x02, 0xE0, 0x4C, 0x87, 0x66, 0x55}; // DBG_871X("+mp_ioctl_xmit_packet_hdl\n"); pxframe = alloc_mp_xmitframe(&padapter->mppriv); if (pxframe == NULL) { DEBUG_ERR(("Can't alloc pmpframe %d:%s\n", __LINE__, __FILE__)); return -1; } //mp_xmit_pkt payload_len = pmp_xmitpkt->len - 14; pmp_pkt = (unsigned char*)pmp_xmitpkt->mem; pethhdr = 
(struct ethhdr *)pmp_pkt; //DBG_871X("payload_len=%d, pkt_mem=0x%x\n", pmp_xmitpkt->len, (void*)pmp_xmitpkt->mem); //DBG_871X("pxframe=0x%x\n", (void*)pxframe); //DBG_871X("pxframe->mem=0x%x\n", (void*)pxframe->mem); //update attribute pattrib = &pxframe->attrib; memset((u8 *)(pattrib), 0, sizeof (struct pkt_attrib)); pattrib->pktlen = pmp_xmitpkt->len; pattrib->ether_type = ntohs(pethhdr->h_proto); pattrib->hdrlen = 24; pattrib->nr_frags = 1; pattrib->priority = 0; #ifndef CONFIG_MP_LINUX if(IS_MCAST(pethhdr->h_dest)) pattrib->mac_id = 4; else pattrib->mac_id = 5; #else pattrib->mac_id = 5; #endif // memset(pxframe->mem, 0 , WLANHDR_OFFSET); pframe = (u8 *)(pxframe->mem) + WLANHDR_OFFSET; pwlanhdr = (struct rtw_ieee80211_hdr *)pframe; fctrl = &(pwlanhdr->frame_ctl); *(fctrl) = 0; SetFrameSubType(pframe, WIFI_DATA); _rtw_memcpy(pwlanhdr->addr1, pethhdr->h_dest, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr2, pethhdr->h_source, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr3, addr3, ETH_ALEN); pwlanhdr->seq_ctl = 0; pframe += pattrib->hdrlen; llc_sz= rtw_put_snap(pframe, pattrib->ether_type); pframe += llc_sz; _rtw_memcpy(pframe, (void*)(pmp_pkt+14), payload_len); pattrib->last_txcmdsz = pattrib->hdrlen + llc_sz + payload_len; DEBUG_INFO(("issuing mp_xmit_frame, tx_len=%d, ether_type=0x%x\n", pattrib->last_txcmdsz, pattrib->ether_type)); xmit_mp_frame(padapter, pxframe); return _SUCCESS; } #endif //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv) { #ifdef PLATFORM_OS_XP _irqL oldirql; #endif u8 bpwrup; NDIS_STATUS status = NDIS_STATUS_SUCCESS; #ifdef PLATFORM_LINUX #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI) PADAPTER padapter = (PADAPTER)(poid_par_priv->adapter_context); #endif #endif _func_enter_; if (poid_par_priv->type_of_oid != SET_OID) { status = NDIS_STATUS_NOT_ACCEPTED; return status; } RT_TRACE(_module_mp_, _drv_info_, ("\n ===> 
Setoid_rt_set_power_down_hdl.\n")); _irqlevel_changed_(&oldirql, LOWER); bpwrup = *(u8 *)poid_par_priv->information_buf; //CALL the power_down function #ifdef PLATFORM_LINUX #if defined(CONFIG_RTL8712) //Linux MP insmod unknown symbol dev_power_down(padapter,bpwrup); #endif #endif _irqlevel_changed_(&oldirql, RAISE); //DEBUG_ERR(("\n <=== Query OID_RT_PRO_READ_REGISTER. // Add:0x%08x Width:%d Value:0x%08x\n",RegRWStruct->offset,RegRWStruct->width,RegRWStruct->value)); _func_exit_; return status; } //------------------------------------------------------------------------------ NDIS_STATUS oid_rt_get_power_mode_hdl(struct oid_par_priv *poid_par_priv) { #if 0 NDIS_STATUS status = NDIS_STATUS_SUCCESS; PADAPTER Adapter = (PADAPTER)(poid_par_priv->adapter_context); //#ifdef PLATFORM_OS_XP // _irqL oldirql; //#endif _func_enter_; if (poid_par_priv->type_of_oid != QUERY_OID) { status = NDIS_STATUS_NOT_ACCEPTED; return status; } if (poid_par_priv->information_buf_len < sizeof(u32)) { status = NDIS_STATUS_INVALID_LENGTH; return status; } RT_TRACE(_module_mp_, _drv_info_, ("\n ===> oid_rt_get_power_mode_hdl.\n")); // _irqlevel_changed_(&oldirql, LOWER); *(int*)poid_par_priv->information_buf = Adapter->registrypriv.low_power ? POWER_LOW : POWER_NORMAL; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; // _irqlevel_changed_(&oldirql, RAISE); _func_exit_; return status; #else return 0; #endif }
gpl-2.0
Red--Code/mt6589_kernel_3.4.67
fs/eventpoll.c
139
54766
/* * fs/eventpoll.c (Efficient event retrieval implementation) * Copyright (C) 2001,...,2009 Davide Libenzi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Davide Libenzi <davidel@xmailserver.org> * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/string.h> #include <linux/list.h> #include <linux/hash.h> #include <linux/spinlock.h> #include <linux/syscalls.h> #include <linux/rbtree.h> #include <linux/wait.h> #include <linux/eventpoll.h> #include <linux/mount.h> #include <linux/bitops.h> #include <linux/mutex.h> #include <linux/anon_inodes.h> #include <linux/device.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/mman.h> #include <linux/atomic.h> /* * LOCKING: * There are three level of locking required by epoll : * * 1) epmutex (mutex) * 2) ep->mtx (mutex) * 3) ep->lock (spinlock) * * The acquire order is the one listed above, from 1 to 3. * We need a spinlock (ep->lock) because we manipulate objects * from inside the poll callback, that might be triggered from * a wake_up() that in turn might be called from IRQ context. * So we can't sleep inside the poll callback and hence we need * a spinlock. During the event transfer loop (from kernel to * user space) we could end up sleeping due a copy_to_user(), so * we need a lock that will allow us to sleep. This lock is a * mutex (ep->mtx). It is acquired during the event transfer loop, * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file(). * Then we also need a global mutex to serialize eventpoll_release_file() * and ep_free(). 
* This mutex is acquired by ep_free() during the epoll file * cleanup path and it is also acquired by eventpoll_release_file() * if a file has been pushed inside an epoll set and it is then * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL). * It is also acquired when inserting an epoll fd onto another epoll * fd. We do this so that we walk the epoll tree and ensure that this * insertion does not create a cycle of epoll file descriptors, which * could lead to deadlock. We need a global mutex to prevent two * simultaneous inserts (A into B and B into A) from racing and * constructing a cycle without either insert observing that it is * going to. * It is necessary to acquire multiple "ep->mtx"es at once in the * case when one epoll fd is added to another. In this case, we * always acquire the locks in the order of nesting (i.e. after * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired * before e2->mtx). Since we disallow cycles of epoll file * descriptors, this ensures that the mutexes are well-ordered. In * order to communicate this nesting to lockdep, when walking a tree * of epoll file descriptors, we use the current recursion depth as * the lockdep subkey. * It is possible to drop the "ep->mtx" and to use the global * mutex "epmutex" (together with "ep->lock") to have it working, * but having "ep->mtx" will make the interface more scalable. * Events that require holding "epmutex" are very rare, while for * normal operations the epoll private "ep->mtx" will guarantee * a better scalability. 
*/ /* Epoll private bits inside the event mask */ #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET) /* Maximum number of nesting allowed inside epoll sets */ #define EP_MAX_NESTS 4 #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event)) #define EP_UNACTIVE_PTR ((void *) -1L) #define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry)) struct epoll_filefd { struct file *file; int fd; }; /* * Structure used to track possible nested calls, for too deep recursions * and loop cycles. */ struct nested_call_node { struct list_head llink; void *cookie; void *ctx; }; /* * This structure is used as collector for nested calls, to check for * maximum recursion dept and loop cycles. */ struct nested_calls { struct list_head tasks_call_list; spinlock_t lock; }; /* * Each file descriptor added to the eventpoll interface will * have an entry of this type linked to the "rbr" RB tree. */ struct epitem { /* RB tree node used to link this structure to the eventpoll RB tree */ struct rb_node rbn; /* List header used to link this structure to the eventpoll ready list */ struct list_head rdllink; /* * Works together "struct eventpoll"->ovflist in keeping the * single linked chain of items. */ struct epitem *next; /* The file descriptor information this item refers to */ struct epoll_filefd ffd; /* Number of active wait queue attached to poll operations */ int nwait; /* List containing poll wait queues */ struct list_head pwqlist; /* The "container" of this item */ struct eventpoll *ep; /* List header used to link this item to the "struct file" items list */ struct list_head fllink; /* wakeup_source used when EPOLLWAKEUP is set */ struct wakeup_source *ws; /* The structure that describe the interested events and the source fd */ struct epoll_event event; }; /* * This structure is stored inside the "private_data" member of the file * structure and represents the main data structure for the eventpoll * interface. 
*/ struct eventpoll { /* Protect the access to this structure */ spinlock_t lock; /* * This mutex is used to ensure that files are not removed * while epoll is using them. This is held during the event * collection loop, the file cleanup path, the epoll file exit * code and the ctl operations. */ struct mutex mtx; /* Wait queue used by sys_epoll_wait() */ wait_queue_head_t wq; /* Wait queue used by file->poll() */ wait_queue_head_t poll_wait; /* List of ready file descriptors */ struct list_head rdllist; /* RB tree root used to store monitored fd structs */ struct rb_root rbr; /* * This is a single linked list that chains all the "struct epitem" that * happened while transferring ready events to userspace w/out * holding ->lock. */ struct epitem *ovflist; /* wakeup_source used when ep_scan_ready_list is running */ struct wakeup_source *ws; /* The user that created the eventpoll descriptor */ struct user_struct *user; struct file *file; /* used to optimize loop detection check */ int visited; struct list_head visited_list_link; }; /* Wait structure used by the poll hooks */ struct eppoll_entry { /* List header used to link this structure to the "struct epitem" */ struct list_head llink; /* The "base" pointer is set to the container "struct epitem" */ struct epitem *base; /* * Wait queue item that will be linked to the target file wait * queue head. */ wait_queue_t wait; /* The wait queue head that linked the "wait" wait queue item */ wait_queue_head_t *whead; }; /* Wrapper struct used by poll queueing */ struct ep_pqueue { poll_table pt; struct epitem *epi; }; /* Used by the ep_send_events() function as callback private data */ struct ep_send_events_data { int maxevents; struct epoll_event __user *events; }; /* * Configuration options available inside /proc/sys/fs/epoll/ */ /* Maximum number of epoll watched descriptors, per user */ static long max_user_watches __read_mostly; /* * This mutex is used to serialize ep_free() and eventpoll_release_file(). 
*/ static DEFINE_MUTEX(epmutex); /* Used to check for epoll file descriptor inclusion loops */ static struct nested_calls poll_loop_ncalls; /* Used for safe wake up implementation */ static struct nested_calls poll_safewake_ncalls; /* Used to call file's f_op->poll() under the nested calls boundaries */ static struct nested_calls poll_readywalk_ncalls; /* Slab cache used to allocate "struct epitem" */ static struct kmem_cache *epi_cache __read_mostly; /* Slab cache used to allocate "struct eppoll_entry" */ static struct kmem_cache *pwq_cache __read_mostly; /* Visited nodes during ep_loop_check(), so we can unset them when we finish */ static LIST_HEAD(visited_list); /* * List of files with newly added links, where we may need to limit the number * of emanating paths. Protected by the epmutex. */ static LIST_HEAD(tfile_check_list); #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> static long zero; static long long_max = LONG_MAX; ctl_table epoll_table[] = { { .procname = "max_user_watches", .data = &max_user_watches, .maxlen = sizeof(max_user_watches), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &zero, .extra2 = &long_max, }, { } }; #endif /* CONFIG_SYSCTL */ static const struct file_operations eventpoll_fops; static inline int is_file_epoll(struct file *f) { return f->f_op == &eventpoll_fops; } /* Setup the structure that is used as key for the RB tree */ static inline void ep_set_ffd(struct epoll_filefd *ffd, struct file *file, int fd) { ffd->file = file; ffd->fd = fd; } /* Compare RB tree keys */ static inline int ep_cmp_ffd(struct epoll_filefd *p1, struct epoll_filefd *p2) { return (p1->file > p2->file ? +1: (p1->file < p2->file ? 
-1 : p1->fd - p2->fd)); } /* Tells us if the item is currently linked */ static inline int ep_is_linked(struct list_head *p) { return !list_empty(p); } static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p) { return container_of(p, struct eppoll_entry, wait); } /* Get the "struct epitem" from a wait queue pointer */ static inline struct epitem *ep_item_from_wait(wait_queue_t *p) { return container_of(p, struct eppoll_entry, wait)->base; } /* Get the "struct epitem" from an epoll queue wrapper */ static inline struct epitem *ep_item_from_epqueue(poll_table *p) { return container_of(p, struct ep_pqueue, pt)->epi; } /* Tells if the epoll_ctl(2) operation needs an event copy from userspace */ static inline int ep_op_has_event(int op) { return op != EPOLL_CTL_DEL; } /* Initialize the poll safe wake up structure */ static void ep_nested_calls_init(struct nested_calls *ncalls) { INIT_LIST_HEAD(&ncalls->tasks_call_list); spin_lock_init(&ncalls->lock); } /** * ep_events_available - Checks if ready events might be available. * * @ep: Pointer to the eventpoll context. * * Returns: Returns a value different than zero if ready events are available, * or zero otherwise. */ static inline int ep_events_available(struct eventpoll *ep) { return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR; } /** * ep_call_nested - Perform a bound (possibly) nested call, by checking * that the recursion limit is not exceeded, and that * the same nested call (by the meaning of same cookie) is * no re-entered. * * @ncalls: Pointer to the nested_calls structure to be used for this call. * @max_nests: Maximum number of allowed nesting calls. * @nproc: Nested call core function pointer. * @priv: Opaque data to be passed to the @nproc callback. * @cookie: Cookie to be used to identify this nested call. * @ctx: This instance context. * * Returns: Returns the code returned by the @nproc callback, or -1 if * the maximum recursion limit has been exceeded. 
*/
static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
			  int (*nproc)(void *, void *, int), void *priv,
			  void *cookie, void *ctx)
{
	int error, call_nests = 0;
	unsigned long flags;
	struct list_head *head = &ncalls->tasks_call_list;
	struct nested_call_node *cursor;
	struct nested_call_node self;

	spin_lock_irqsave(&ncalls->lock, flags);

	/*
	 * Walk the (small) list of calls already in flight.  Only entries
	 * registered by this same context are relevant: a matching cookie
	 * means we looped back onto ourselves, and counting them past
	 * @max_nests means we nested too deep.  Note the ++call_nests side
	 * effect only fires for same-context entries, as in the original
	 * short-circuit expression.
	 */
	list_for_each_entry(cursor, head, llink) {
		if (cursor->ctx != ctx)
			continue;
		if (cursor->cookie == cookie || ++call_nests > max_nests) {
			/* Loop detected or maximum nest level reached. */
			error = -1;
			goto out_unlock;
		}
	}

	/* Register this invocation, then run the callback unlocked. */
	self.ctx = ctx;
	self.cookie = cookie;
	list_add(&self.llink, head);

	spin_unlock_irqrestore(&ncalls->lock, flags);

	error = (*nproc)(priv, cookie, call_nests);

	/* Unregister ourselves on the way out. */
	spin_lock_irqsave(&ncalls->lock, flags);
	list_del(&self.llink);
out_unlock:
	spin_unlock_irqrestore(&ncalls->lock, flags);

	return error;
}

/*
 * As described in commit 0ccf831cb lockdep: annotate epoll
 * the use of wait queues used by epoll is done in a very controlled
 * manner. Wake ups can nest inside each other, but are never done
 * with the same locking. For example:
 *
 *   dfd = socket(...);
 *   efd1 = epoll_create();
 *   efd2 = epoll_create();
 *   epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
 *   epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
 *
 * When a packet arrives to the device underneath "dfd", the net code will
 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
 * callback wakeup entry on that queue, and the wake_up() performed by the
 * "dfd" net code will end up in ep_poll_callback().
At this point epoll * (efd1) notices that it may have some event ready, so it needs to wake up * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake() * that ends up in another wake_up(), after having checked about the * recursion constraints. That are, no more than EP_MAX_POLLWAKE_NESTS, to * avoid stack blasting. * * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle * this special case of epoll. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void ep_wake_up_nested(wait_queue_head_t *wqueue, unsigned long events, int subclass) { unsigned long flags; spin_lock_irqsave_nested(&wqueue->lock, flags, subclass); wake_up_locked_poll(wqueue, events); spin_unlock_irqrestore(&wqueue->lock, flags); } #else static inline void ep_wake_up_nested(wait_queue_head_t *wqueue, unsigned long events, int subclass) { wake_up_poll(wqueue, events); } #endif static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests) { ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN, 1 + call_nests); return 0; } /* * Perform a safe wake up of the poll wait list. The problem is that * with the new callback'd wake up system, it is possible that the * poll callback is reentered from inside the call to wake_up() done * on the poll wait queue head. The rule is that we cannot reenter the * wake up code from the same task more than EP_MAX_NESTS times, * and we cannot reenter the same wait queue head at all. This will * enable to have a hierarchy of epoll file descriptor of no more than * EP_MAX_NESTS deep. 
*/ static void ep_poll_safewake(wait_queue_head_t *wq) { int this_cpu = get_cpu(); ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); put_cpu(); } static void ep_remove_wait_queue(struct eppoll_entry *pwq) { wait_queue_head_t *whead; rcu_read_lock(); /* If it is cleared by POLLFREE, it should be rcu-safe */ whead = rcu_dereference(pwq->whead); if (whead) remove_wait_queue(whead, &pwq->wait); rcu_read_unlock(); } /* * This function unregisters poll callbacks from the associated file * descriptor. Must be called with "mtx" held (or "epmutex" if called from * ep_free). */ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi) { struct list_head *lsthead = &epi->pwqlist; struct eppoll_entry *pwq; while (!list_empty(lsthead)) { pwq = list_first_entry(lsthead, struct eppoll_entry, llink); list_del(&pwq->llink); ep_remove_wait_queue(pwq); kmem_cache_free(pwq_cache, pwq); } } /** * ep_scan_ready_list - Scans the ready list in a way that makes possible for * the scan code, to call f_op->poll(). Also allows for * O(NumReady) performance. * * @ep: Pointer to the epoll private data structure. * @sproc: Pointer to the scan callback. * @priv: Private opaque data passed to the @sproc callback. * @depth: The current depth of recursive f_op->poll calls. * * Returns: The same integer error code returned by the @sproc callback. */ static int ep_scan_ready_list(struct eventpoll *ep, int (*sproc)(struct eventpoll *, struct list_head *, void *), void *priv, int depth) { int error, pwake = 0; unsigned long flags; struct epitem *epi, *nepi; LIST_HEAD(txlist); /* * We need to lock this because we could be hit by * eventpoll_release_file() and epoll_ctl(). */ mutex_lock_nested(&ep->mtx, depth); /* * Steal the ready list, and re-init the original one to the * empty list. Also, set ep->ovflist to NULL so that events * happening while looping w/out locks, are not lost. 
We cannot * have the poll callback to queue directly on ep->rdllist, * because we want the "sproc" callback to be able to do it * in a lockless way. */ spin_lock_irqsave(&ep->lock, flags); list_splice_init(&ep->rdllist, &txlist); ep->ovflist = NULL; spin_unlock_irqrestore(&ep->lock, flags); /* * Now call the callback function. */ error = (*sproc)(ep, &txlist, priv); spin_lock_irqsave(&ep->lock, flags); /* * During the time we spent inside the "sproc" callback, some * other events might have been queued by the poll callback. * We re-insert them inside the main ready-list here. */ for (nepi = ep->ovflist; (epi = nepi) != NULL; nepi = epi->next, epi->next = EP_UNACTIVE_PTR) { /* * We need to check if the item is already in the list. * During the "sproc" callback execution time, items are * queued into ->ovflist but the "txlist" might already * contain them, and the list_splice() below takes care of them. */ if (!ep_is_linked(&epi->rdllink)) { list_add_tail(&epi->rdllink, &ep->rdllist); __pm_stay_awake(epi->ws); } } /* * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after * releasing the lock, events will be queued in the normal way inside * ep->rdllist. */ ep->ovflist = EP_UNACTIVE_PTR; /* * Quickly re-inject items left on "txlist". */ list_splice(&txlist, &ep->rdllist); __pm_relax(ep->ws); if (!list_empty(&ep->rdllist)) { /* * Wake up (if active) both the eventpoll wait list and * the ->poll() wait list (delayed after we release the lock). */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); if (waitqueue_active(&ep->poll_wait)) pwake++; } spin_unlock_irqrestore(&ep->lock, flags); mutex_unlock(&ep->mtx); /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&ep->poll_wait); return error; } /* * Removes a "struct epitem" from the eventpoll RB tree and deallocates * all the associated resources. Must be called with "mtx" held. 
*/ static int ep_remove(struct eventpoll *ep, struct epitem *epi) { unsigned long flags; struct file *file = epi->ffd.file; /* * Removes poll wait queue hooks. We _have_ to do this without holding * the "ep->lock" otherwise a deadlock might occur. This because of the * sequence of the lock acquisition. Here we do "ep->lock" then the wait * queue head lock when unregistering the wait queue. The wakeup callback * will run by holding the wait queue head lock and will call our callback * that will try to get "ep->lock". */ ep_unregister_pollwait(ep, epi); /* Remove the current item from the list of epoll hooks */ spin_lock(&file->f_lock); if (ep_is_linked(&epi->fllink)) list_del_init(&epi->fllink); spin_unlock(&file->f_lock); rb_erase(&epi->rbn, &ep->rbr); spin_lock_irqsave(&ep->lock, flags); if (ep_is_linked(&epi->rdllink)) list_del_init(&epi->rdllink); spin_unlock_irqrestore(&ep->lock, flags); wakeup_source_unregister(epi->ws); /* At this point it is safe to free the eventpoll item */ kmem_cache_free(epi_cache, epi); atomic_long_dec(&ep->user->epoll_watches); return 0; } static void ep_free(struct eventpoll *ep) { struct rb_node *rbp; struct epitem *epi; /* We need to release all tasks waiting for these file */ if (waitqueue_active(&ep->poll_wait)) ep_poll_safewake(&ep->poll_wait); /* * We need to lock this because we could be hit by * eventpoll_release_file() while we're freeing the "struct eventpoll". * We do not need to hold "ep->mtx" here because the epoll file * is on the way to be removed and no one has references to it * anymore. The only hit might come from eventpoll_release_file() but * holding "epmutex" is sufficient here. */ mutex_lock(&epmutex); /* * Walks through the whole tree by unregistering poll callbacks. */ for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { epi = rb_entry(rbp, struct epitem, rbn); ep_unregister_pollwait(ep, epi); } /* * Walks through the whole tree by freeing each "struct epitem". 
At this * point we are sure no poll callbacks will be lingering around, and also by * holding "epmutex" we can be sure that no file cleanup code will hit * us during this operation. So we can avoid the lock on "ep->lock". */ while ((rbp = rb_first(&ep->rbr)) != NULL) { epi = rb_entry(rbp, struct epitem, rbn); ep_remove(ep, epi); } mutex_unlock(&epmutex); mutex_destroy(&ep->mtx); free_uid(ep->user); wakeup_source_unregister(ep->ws); kfree(ep); } static int ep_eventpoll_release(struct inode *inode, struct file *file) { struct eventpoll *ep = file->private_data; if (ep) ep_free(ep); return 0; } static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, void *priv) { struct epitem *epi, *tmp; poll_table pt; init_poll_funcptr(&pt, NULL); list_for_each_entry_safe(epi, tmp, head, rdllink) { pt._key = epi->event.events; if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) & epi->event.events) return POLLIN | POLLRDNORM; else { /* * Item has been dropped into the ready list by the poll * callback, but it's not actually ready, as far as * caller requested events goes. We can remove it here. */ __pm_relax(epi->ws); list_del_init(&epi->rdllink); } } return 0; } static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests) { return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1); } static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait) { int pollflags; struct eventpoll *ep = file->private_data; /* Insert inside our poll wait queue */ poll_wait(file, &ep->poll_wait, wait); /* * Proceed to find out if wanted events are really available inside * the ready list. This need to be done under ep_call_nested() * supervision, since the call to f_op->poll() done on listed files * could re-enter here. */ pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS, ep_poll_readyevents_proc, ep, ep, current); return pollflags != -1 ? 
pollflags : 0; } /* File callbacks that implement the eventpoll file behaviour */ static const struct file_operations eventpoll_fops = { .release = ep_eventpoll_release, .poll = ep_eventpoll_poll, .llseek = noop_llseek, }; /* * This is called from eventpoll_release() to unlink files from the eventpoll * interface. We need to have this facility to cleanup correctly files that are * closed without being removed from the eventpoll interface. */ void eventpoll_release_file(struct file *file) { struct list_head *lsthead = &file->f_ep_links; struct eventpoll *ep; struct epitem *epi; /* * We don't want to get "file->f_lock" because it is not * necessary. It is not necessary because we're in the "struct file" * cleanup path, and this means that no one is using this file anymore. * So, for example, epoll_ctl() cannot hit here since if we reach this * point, the file counter already went to zero and fget() would fail. * The only hit might come from ep_free() but by holding the mutex * will correctly serialize the operation. We do need to acquire * "ep->mtx" after "epmutex" because ep_remove() requires it when called * from anywhere but ep_free(). * * Besides, ep_remove() acquires the lock, so we can't hold it here. 
*/ mutex_lock(&epmutex); while (!list_empty(lsthead)) { epi = list_first_entry(lsthead, struct epitem, fllink); ep = epi->ep; list_del_init(&epi->fllink); mutex_lock_nested(&ep->mtx, 0); ep_remove(ep, epi); mutex_unlock(&ep->mtx); } mutex_unlock(&epmutex); } static int ep_alloc(struct eventpoll **pep) { int error; struct user_struct *user; struct eventpoll *ep; user = get_current_user(); error = -ENOMEM; ep = kzalloc(sizeof(*ep), GFP_KERNEL); if (unlikely(!ep)) goto free_uid; spin_lock_init(&ep->lock); mutex_init(&ep->mtx); init_waitqueue_head(&ep->wq); init_waitqueue_head(&ep->poll_wait); INIT_LIST_HEAD(&ep->rdllist); ep->rbr = RB_ROOT; ep->ovflist = EP_UNACTIVE_PTR; ep->user = user; *pep = ep; return 0; free_uid: free_uid(user); return error; } /* * Search the file inside the eventpoll tree. The RB tree operations * are protected by the "mtx" mutex, and ep_find() must be called with * "mtx" held. */ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd) { int kcmp; struct rb_node *rbp; struct epitem *epi, *epir = NULL; struct epoll_filefd ffd; ep_set_ffd(&ffd, file, fd); for (rbp = ep->rbr.rb_node; rbp; ) { epi = rb_entry(rbp, struct epitem, rbn); kcmp = ep_cmp_ffd(&ffd, &epi->ffd); if (kcmp > 0) rbp = rbp->rb_right; else if (kcmp < 0) rbp = rbp->rb_left; else { epir = epi; break; } } return epir; } /* * This is the callback that is passed to the wait queue wakeup * mechanism. It is called by the stored file descriptors when they * have events to report. */ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key) { int pwake = 0; unsigned long flags; struct epitem *epi = ep_item_from_wait(wait); struct eventpoll *ep = epi->ep; if ((unsigned long)key & POLLFREE) { ep_pwq_from_wait(wait)->whead = NULL; /* * whead = NULL above can race with ep_remove_wait_queue() * which can do another remove_wait_queue() after us, so we * can't use __remove_wait_queue(). whead->lock is held by * the caller. 
*/ list_del_init(&wait->task_list); } spin_lock_irqsave(&ep->lock, flags); /* * If the event mask does not contain any poll(2) event, we consider the * descriptor to be disabled. This condition is likely the effect of the * EPOLLONESHOT bit that disables the descriptor when an event is received, * until the next EPOLL_CTL_MOD will be issued. */ if (!(epi->event.events & ~EP_PRIVATE_BITS)) goto out_unlock; /* * Check the events coming with the callback. At this stage, not * every device reports the events in the "key" parameter of the * callback. We need to be able to handle both cases here, hence the * test for "key" != NULL before the event match test. */ if (key && !((unsigned long) key & epi->event.events)) goto out_unlock; /* * If we are transferring events to userspace, we can hold no locks * (because we're accessing user memory, and because of linux f_op->poll() * semantics). All the events that happen during that period of time are * chained in ep->ovflist and requeued later on. */ if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) { if (epi->next == EP_UNACTIVE_PTR) { epi->next = ep->ovflist; ep->ovflist = epi; if (epi->ws) { /* * Activate ep->ws since epi->ws may get * deactivated at any time. */ __pm_stay_awake(ep->ws); } } goto out_unlock; } /* If this file is already in the ready list we exit soon */ if (!ep_is_linked(&epi->rdllink)) { list_add_tail(&epi->rdllink, &ep->rdllist); __pm_stay_awake(epi->ws); } /* * Wake up ( if active ) both the eventpoll wait list and the ->poll() * wait list. */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); if (waitqueue_active(&ep->poll_wait)) pwake++; out_unlock: spin_unlock_irqrestore(&ep->lock, flags); /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&ep->poll_wait); return 1; } /* * This is the callback that is used to add our wait queue to the * target file wakeup lists. 
*/ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, poll_table *pt) { struct epitem *epi = ep_item_from_epqueue(pt); struct eppoll_entry *pwq; if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) { init_waitqueue_func_entry(&pwq->wait, ep_poll_callback); pwq->whead = whead; pwq->base = epi; add_wait_queue(whead, &pwq->wait); list_add_tail(&pwq->llink, &epi->pwqlist); epi->nwait++; } else { /* We have to signal that an error occurred */ epi->nwait = -1; } } static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi) { int kcmp; struct rb_node **p = &ep->rbr.rb_node, *parent = NULL; struct epitem *epic; while (*p) { parent = *p; epic = rb_entry(parent, struct epitem, rbn); kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd); if (kcmp > 0) p = &parent->rb_right; else p = &parent->rb_left; } rb_link_node(&epi->rbn, parent, p); rb_insert_color(&epi->rbn, &ep->rbr); } #define PATH_ARR_SIZE 5 /* * These are the number paths of length 1 to 5, that we are allowing to emanate * from a single file of interest. For example, we allow 1000 paths of length * 1, to emanate from each file of interest. This essentially represents the * potential wakeup paths, which need to be limited in order to avoid massive * uncontrolled wakeup storms. The common use case should be a single ep which * is connected to n file sources. In this case each file source has 1 path * of length 1. Thus, the numbers below should be more than sufficient. These * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify * and delete can't add additional paths. Protected by the epmutex. 
*/ static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 }; static int path_count[PATH_ARR_SIZE]; static int path_count_inc(int nests) { /* Allow an arbitrary number of depth 1 paths */ if (nests == 0) return 0; if (++path_count[nests] > path_limits[nests]) return -1; return 0; } static void path_count_init(void) { int i; for (i = 0; i < PATH_ARR_SIZE; i++) path_count[i] = 0; } static int reverse_path_check_proc(void *priv, void *cookie, int call_nests) { int error = 0; struct file *file = priv; struct file *child_file; struct epitem *epi; list_for_each_entry(epi, &file->f_ep_links, fllink) { child_file = epi->ep->file; if (is_file_epoll(child_file)) { if (list_empty(&child_file->f_ep_links)) { if (path_count_inc(call_nests)) { error = -1; break; } } else { error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, reverse_path_check_proc, child_file, child_file, current); } if (error != 0) break; } else { printk(KERN_ERR "reverse_path_check_proc: " "file is not an ep!\n"); } } return error; } /** * reverse_path_check - The tfile_check_list is list of file *, which have * links that are proposed to be newly added. We need to * make sure that those added links don't add too many * paths such that we will spend all our time waking up * eventpoll objects. * * Returns: Returns zero if the proposed links don't create too many paths, * -1 otherwise. 
*/ static int reverse_path_check(void) { int error = 0; struct file *current_file; /* let's call this for all tfiles */ list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) { path_count_init(); error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, reverse_path_check_proc, current_file, current_file, current); if (error) break; } return error; } static int ep_create_wakeup_source(struct epitem *epi) { const char *name; if (!epi->ep->ws) { epi->ep->ws = wakeup_source_register("eventpoll"); if (!epi->ep->ws) return -ENOMEM; } name = epi->ffd.file->f_path.dentry->d_name.name; epi->ws = wakeup_source_register(name); if (!epi->ws) return -ENOMEM; return 0; } static void ep_destroy_wakeup_source(struct epitem *epi) { wakeup_source_unregister(epi->ws); epi->ws = NULL; } /* * Must be called with "mtx" held. */ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, struct file *tfile, int fd) { int error, revents, pwake = 0; unsigned long flags; long user_watches; struct epitem *epi; struct ep_pqueue epq; user_watches = atomic_long_read(&ep->user->epoll_watches); if (unlikely(user_watches >= max_user_watches)) return -ENOSPC; if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL))) return -ENOMEM; /* Item initialization follow here ... */ INIT_LIST_HEAD(&epi->rdllink); INIT_LIST_HEAD(&epi->fllink); INIT_LIST_HEAD(&epi->pwqlist); epi->ep = ep; ep_set_ffd(&epi->ffd, tfile, fd); epi->event = *event; epi->nwait = 0; epi->next = EP_UNACTIVE_PTR; if (epi->event.events & EPOLLWAKEUP) { error = ep_create_wakeup_source(epi); if (error) goto error_create_wakeup_source; } else { epi->ws = NULL; } /* Initialize the poll table using the queue callback */ epq.epi = epi; init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); epq.pt._key = event->events; /* * Attach the item to the poll hooks and get current event bits. * We can safely use the file* here because its usage count has * been increased by the caller of this function. 
Note that after * this operation completes, the poll callback can start hitting * the new item. */ revents = tfile->f_op->poll(tfile, &epq.pt); /* * We have to check if something went wrong during the poll wait queue * install process. Namely an allocation for a wait queue failed due * high memory pressure. */ error = -ENOMEM; if (epi->nwait < 0) goto error_unregister; /* Add the current item to the list of active epoll hook for this file */ spin_lock(&tfile->f_lock); list_add_tail(&epi->fllink, &tfile->f_ep_links); spin_unlock(&tfile->f_lock); /* * Add the current item to the RB tree. All RB tree operations are * protected by "mtx", and ep_insert() is called with "mtx" held. */ ep_rbtree_insert(ep, epi); /* now check if we've created too many backpaths */ error = -EINVAL; if (reverse_path_check()) goto error_remove_epi; /* We have to drop the new item inside our item list to keep track of it */ spin_lock_irqsave(&ep->lock, flags); /* If the file is already "ready" we drop it inside the ready list */ if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { list_add_tail(&epi->rdllink, &ep->rdllist); __pm_stay_awake(epi->ws); /* Notify waiting tasks that events are available */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); if (waitqueue_active(&ep->poll_wait)) pwake++; } spin_unlock_irqrestore(&ep->lock, flags); atomic_long_inc(&ep->user->epoll_watches); /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&ep->poll_wait); return 0; error_remove_epi: spin_lock(&tfile->f_lock); if (ep_is_linked(&epi->fllink)) list_del_init(&epi->fllink); spin_unlock(&tfile->f_lock); rb_erase(&epi->rbn, &ep->rbr); error_unregister: ep_unregister_pollwait(ep, epi); /* * We need to do this because an event could have been arrived on some * allocated wait queue. Note that we don't care about the ep->ovflist * list, since that is used/cleaned only inside a section bound by "mtx". * And ep_insert() is called with "mtx" held. 
*/ spin_lock_irqsave(&ep->lock, flags); if (ep_is_linked(&epi->rdllink)) list_del_init(&epi->rdllink); spin_unlock_irqrestore(&ep->lock, flags); wakeup_source_unregister(epi->ws); error_create_wakeup_source: kmem_cache_free(epi_cache, epi); return error; } /* * Modify the interest event mask by dropping an event if the new mask * has a match in the current file status. Must be called with "mtx" held. */ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event) { int pwake = 0; unsigned int revents; poll_table pt; init_poll_funcptr(&pt, NULL); /* * Set the new event interest mask before calling f_op->poll(); * otherwise we might miss an event that happens between the * f_op->poll() call and the new event set registering. */ epi->event.events = event->events; pt._key = event->events; epi->event.data = event->data; /* protected by mtx */ if (epi->event.events & EPOLLWAKEUP) { if (!epi->ws) ep_create_wakeup_source(epi); } else if (epi->ws) { ep_destroy_wakeup_source(epi); } /* * Get current event bits. We can safely use the file* here because * its usage count has been increased by the caller of this function. */ revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt); /* * If the item is "hot" and it is not registered inside the ready * list, push it inside. 
*/ if (revents & event->events) { spin_lock_irq(&ep->lock); if (!ep_is_linked(&epi->rdllink)) { list_add_tail(&epi->rdllink, &ep->rdllist); __pm_stay_awake(epi->ws); /* Notify waiting tasks that events are available */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); if (waitqueue_active(&ep->poll_wait)) pwake++; } spin_unlock_irq(&ep->lock); } /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&ep->poll_wait); return 0; } static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, void *priv) { struct ep_send_events_data *esed = priv; int eventcnt; unsigned int revents; struct epitem *epi; struct epoll_event __user *uevent; poll_table pt; init_poll_funcptr(&pt, NULL); /* * We can loop without lock because we are passed a task private list. * Items cannot vanish during the loop because ep_scan_ready_list() is * holding "mtx" during this call. */ for (eventcnt = 0, uevent = esed->events; !list_empty(head) && eventcnt < esed->maxevents;) { epi = list_first_entry(head, struct epitem, rdllink); /* * Activate ep->ws before deactivating epi->ws to prevent * triggering auto-suspend here (in case we reactive epi->ws * below). * * This could be rearranged to delay the deactivation of epi->ws * instead, but then epi->ws would temporarily be out of sync * with ep_is_linked(). */ if (epi->ws && epi->ws->active) __pm_stay_awake(ep->ws); __pm_relax(epi->ws); list_del_init(&epi->rdllink); pt._key = epi->event.events; revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) & epi->event.events; /* * If the event mask intersect the caller-requested one, * deliver the event to userspace. Again, ep_scan_ready_list() * is holding "mtx", so no operations coming from userspace * can change the item. */ if (revents) { if (__put_user(revents, &uevent->events) || __put_user(epi->event.data, &uevent->data)) { list_add(&epi->rdllink, head); __pm_stay_awake(epi->ws); return eventcnt ? 
eventcnt : -EFAULT; } eventcnt++; uevent++; if (epi->event.events & EPOLLONESHOT) epi->event.events &= EP_PRIVATE_BITS; else if (!(epi->event.events & EPOLLET)) { /* * If this file has been added with Level * Trigger mode, we need to insert back inside * the ready list, so that the next call to * epoll_wait() will check again the events * availability. At this point, no one can insert * into ep->rdllist besides us. The epoll_ctl() * callers are locked out by * ep_scan_ready_list() holding "mtx" and the * poll callback will queue them in ep->ovflist. */ list_add_tail(&epi->rdllink, &ep->rdllist); __pm_stay_awake(epi->ws); } } } return eventcnt; } static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events, int maxevents) { struct ep_send_events_data esed; esed.maxevents = maxevents; esed.events = events; return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0); } static inline struct timespec ep_set_mstimeout(long ms) { struct timespec now, ts = { .tv_sec = ms / MSEC_PER_SEC, .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC), }; ktime_get_ts(&now); return timespec_add_safe(now, ts); } /** * ep_poll - Retrieves ready events, and delivers them to the caller supplied * event buffer. * * @ep: Pointer to the eventpoll context. * @events: Pointer to the userspace buffer where the ready events should be * stored. * @maxevents: Size (in terms of number of events) of the caller event buffer. * @timeout: Maximum timeout for the ready events fetch operation, in * milliseconds. If the @timeout is zero, the function will not block, * while if the @timeout is less than zero, the function will block * until at least one event has been retrieved (or an error * occurred). * * Returns: Returns the number of ready events which have been fetched, or an * error code, in case of error. 
*/ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int maxevents, long timeout) { int res = 0, eavail, timed_out = 0; unsigned long flags; long slack = 0; wait_queue_t wait; ktime_t expires, *to = NULL; if (timeout > 0) { struct timespec end_time = ep_set_mstimeout(timeout); slack = select_estimate_accuracy(&end_time); to = &expires; *to = timespec_to_ktime(end_time); } else if (timeout == 0) { /* * Avoid the unnecessary trip to the wait queue loop, if the * caller specified a non blocking operation. */ timed_out = 1; spin_lock_irqsave(&ep->lock, flags); goto check_events; } fetch_events: spin_lock_irqsave(&ep->lock, flags); if (!ep_events_available(ep)) { /* * We don't have any available event to return to the caller. * We need to sleep here, and we will be wake up by * ep_poll_callback() when events will become available. */ init_waitqueue_entry(&wait, current); __add_wait_queue_exclusive(&ep->wq, &wait); for (;;) { /* * We don't want to sleep if the ep_poll_callback() sends us * a wakeup in between. That's why we set the task state * to TASK_INTERRUPTIBLE before doing the checks. */ set_current_state(TASK_INTERRUPTIBLE); if (ep_events_available(ep) || timed_out) break; if (signal_pending(current)) { res = -EINTR; break; } spin_unlock_irqrestore(&ep->lock, flags); if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) timed_out = 1; spin_lock_irqsave(&ep->lock, flags); } __remove_wait_queue(&ep->wq, &wait); set_current_state(TASK_RUNNING); } check_events: /* Is it worth to try to dig for events ? */ eavail = ep_events_available(ep); spin_unlock_irqrestore(&ep->lock, flags); /* * Try to transfer events to user space. In case we get 0 events and * there's still timeout left over, we go trying again in search of * more luck. 
*/ if (!res && eavail && !(res = ep_send_events(ep, events, maxevents)) && !timed_out) goto fetch_events; return res; } /** * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested() * API, to verify that adding an epoll file inside another * epoll structure, does not violate the constraints, in * terms of closed loops, or too deep chains (which can * result in excessive stack usage). * * @priv: Pointer to the epoll file to be currently checked. * @cookie: Original cookie for this call. This is the top-of-the-chain epoll * data structure pointer. * @call_nests: Current dept of the @ep_call_nested() call stack. * * Returns: Returns zero if adding the epoll @file inside current epoll * structure @ep does not violate the constraints, or -1 otherwise. */ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) { int error = 0; struct file *file = priv; struct eventpoll *ep = file->private_data; struct eventpoll *ep_tovisit; struct rb_node *rbp; struct epitem *epi; mutex_lock_nested(&ep->mtx, call_nests + 1); ep->visited = 1; list_add(&ep->visited_list_link, &visited_list); for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { epi = rb_entry(rbp, struct epitem, rbn); if (unlikely(is_file_epoll(epi->ffd.file))) { ep_tovisit = epi->ffd.file->private_data; if (ep_tovisit->visited) continue; error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, ep_loop_check_proc, epi->ffd.file, ep_tovisit, current); if (error != 0) break; } else { /* * If we've reached a file that is not associated with * an ep, then we need to check if the newly added * links are going to add too many wakeup paths. We do * this by adding it to the tfile_check_list, if it's * not already there, and calling reverse_path_check() * during ep_insert(). 
*/ if (list_empty(&epi->ffd.file->f_tfile_llink)) list_add(&epi->ffd.file->f_tfile_llink, &tfile_check_list); } } mutex_unlock(&ep->mtx); return error; } /** * ep_loop_check - Performs a check to verify that adding an epoll file (@file) * another epoll file (represented by @ep) does not create * closed loops or too deep chains. * * @ep: Pointer to the epoll private data structure. * @file: Pointer to the epoll file to be checked. * * Returns: Returns zero if adding the epoll @file inside current epoll * structure @ep does not violate the constraints, or -1 otherwise. */ static int ep_loop_check(struct eventpoll *ep, struct file *file) { int ret; struct eventpoll *ep_cur, *ep_next; ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, ep_loop_check_proc, file, ep, current); /* clear visited list */ list_for_each_entry_safe(ep_cur, ep_next, &visited_list, visited_list_link) { ep_cur->visited = 0; list_del(&ep_cur->visited_list_link); } return ret; } static void clear_tfile_check_list(void) { struct file *file; /* first clear the tfile_check_list */ while (!list_empty(&tfile_check_list)) { file = list_first_entry(&tfile_check_list, struct file, f_tfile_llink); list_del_init(&file->f_tfile_llink); } INIT_LIST_HEAD(&tfile_check_list); } /* * Open an eventpoll file descriptor. */ SYSCALL_DEFINE1(epoll_create1, int, flags) { int error, fd; struct eventpoll *ep = NULL; struct file *file; /* Check the EPOLL_* constant for consistency. */ BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC); if (flags & ~EPOLL_CLOEXEC) return -EINVAL; /* * Create the internal data structure ("struct eventpoll"). */ error = ep_alloc(&ep); if (error < 0) return error; /* * Creates all the items needed to setup an eventpoll file. That is, * a file structure and a free file descriptor. 
*/ fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC)); if (fd < 0) { error = fd; goto out_free_ep; } file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep, O_RDWR | (flags & O_CLOEXEC)); if (IS_ERR(file)) { error = PTR_ERR(file); goto out_free_fd; } fd_install(fd, file); ep->file = file; return fd; out_free_fd: put_unused_fd(fd); out_free_ep: ep_free(ep); return error; } SYSCALL_DEFINE1(epoll_create, int, size) { if (size <= 0) return -EINVAL; return sys_epoll_create1(0); } /* * The following function implements the controller interface for * the eventpoll file that enables the insertion/removal/change of * file descriptors inside the interest set. */ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event) { int error; int did_lock_epmutex = 0; struct file *file, *tfile; struct eventpoll *ep; struct epitem *epi; struct epoll_event epds; error = -EFAULT; if (ep_op_has_event(op) && copy_from_user(&epds, event, sizeof(struct epoll_event))) goto error_return; /* Get the "struct file *" for the eventpoll file */ error = -EBADF; file = fget(epfd); if (!file) goto error_return; /* Get the "struct file *" for the target file */ tfile = fget(fd); if (!tfile) goto error_fput; /* The target file descriptor must support poll */ error = -EPERM; if (!tfile->f_op || !tfile->f_op->poll) goto error_tgt_fput; /* Check if EPOLLWAKEUP is allowed */ if ((epds.events & EPOLLWAKEUP) && !capable(CAP_EPOLLWAKEUP)) epds.events &= ~EPOLLWAKEUP; /* * We have to check that the file structure underneath the file descriptor * the user passed to us _is_ an eventpoll file. And also we do not permit * adding an epoll file descriptor inside itself. */ error = -EINVAL; if (file == tfile || !is_file_epoll(file)) goto error_tgt_fput; /* * At this point it is safe to assume that the "private_data" contains * our own data structure. 
*/ ep = file->private_data; /* * When we insert an epoll file descriptor, inside another epoll file * descriptor, there is the change of creating closed loops, which are * better be handled here, than in more critical paths. While we are * checking for loops we also determine the list of files reachable * and hang them on the tfile_check_list, so we can check that we * haven't created too many possible wakeup paths. * * We need to hold the epmutex across both ep_insert and ep_remove * b/c we want to make sure we are looking at a coherent view of * epoll network. */ if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) { mutex_lock(&epmutex); did_lock_epmutex = 1; } if (op == EPOLL_CTL_ADD) { if (is_file_epoll(tfile)) { error = -ELOOP; if (ep_loop_check(ep, tfile) != 0) { clear_tfile_check_list(); goto error_tgt_fput; } } else list_add(&tfile->f_tfile_llink, &tfile_check_list); } mutex_lock_nested(&ep->mtx, 0); /* * Try to lookup the file inside our RB tree, Since we grabbed "mtx" * above, we can be sure to be able to use the item looked up by * ep_find() till we release the mutex. */ epi = ep_find(ep, tfile, fd); error = -EINVAL; switch (op) { case EPOLL_CTL_ADD: if (!epi) { epds.events |= POLLERR | POLLHUP; error = ep_insert(ep, &epds, tfile, fd); } else error = -EEXIST; clear_tfile_check_list(); break; case EPOLL_CTL_DEL: if (epi) error = ep_remove(ep, epi); else error = -ENOENT; break; case EPOLL_CTL_MOD: if (epi) { epds.events |= POLLERR | POLLHUP; error = ep_modify(ep, epi, &epds); } else error = -ENOENT; break; } mutex_unlock(&ep->mtx); error_tgt_fput: if (did_lock_epmutex) mutex_unlock(&epmutex); fput(tfile); error_fput: fput(file); error_return: return error; } /* * Implement the event wait interface for the eventpoll file. It is the kernel * part of the user space epoll_wait(2). 
*/ SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout) { int error; struct file *file; struct eventpoll *ep; /* The maximum number of event must be greater than zero */ if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) return -EINVAL; /* Verify that the area passed by the user is writeable */ if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) { error = -EFAULT; goto error_return; } /* Get the "struct file *" for the eventpoll file */ error = -EBADF; file = fget(epfd); if (!file) goto error_return; /* * We have to check that the file structure underneath the fd * the user passed to us _is_ an eventpoll file. */ error = -EINVAL; if (!is_file_epoll(file)) goto error_fput; /* * At this point it is safe to assume that the "private_data" contains * our own data structure. */ ep = file->private_data; /* Time to fish for events ... */ error = ep_poll(ep, events, maxevents, timeout); error_fput: fput(file); error_return: return error; } #ifdef HAVE_SET_RESTORE_SIGMASK /* * Implement the event wait interface for the eventpoll file. It is the kernel * part of the user space epoll_pwait(2). */ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout, const sigset_t __user *, sigmask, size_t, sigsetsize) { int error; sigset_t ksigmask, sigsaved; /* * If the caller wants a certain signal mask to be set during the wait, * we apply it here. */ if (sigmask) { if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask))) return -EFAULT; sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); } error = sys_epoll_wait(epfd, events, maxevents, timeout); /* * If we changed the signal mask, we need to restore the original one. 
* In case we've got a signal while waiting, we do not restore the * signal mask yet, and we allow do_signal() to deliver the signal on * the way back to userspace, before the signal mask is restored. */ if (sigmask) { if (error == -EINTR) { memcpy(&current->saved_sigmask, &sigsaved, sizeof(sigsaved)); set_restore_sigmask(); } else sigprocmask(SIG_SETMASK, &sigsaved, NULL); } return error; } #endif /* HAVE_SET_RESTORE_SIGMASK */ static int __init eventpoll_init(void) { struct sysinfo si; si_meminfo(&si); /* * Allows top 4% of lomem to be allocated for epoll watches (per user). */ max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) / EP_ITEM_COST; BUG_ON(max_user_watches < 0); /* * Initialize the structure used to perform epoll file descriptor * inclusion loops checks. */ ep_nested_calls_init(&poll_loop_ncalls); /* Initialize the structure used to perform safe poll wait head wake ups */ ep_nested_calls_init(&poll_safewake_ncalls); /* Initialize the structure used to perform file's f_op->poll() calls */ ep_nested_calls_init(&poll_readywalk_ncalls); /* Allocates slab cache used to allocate "struct epitem" items */ epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); /* Allocates slab cache used to allocate "struct eppoll_entry" */ pwq_cache = kmem_cache_create("eventpoll_pwq", sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL); return 0; } fs_initcall(eventpoll_init);
gpl-2.0
Mrcl1450/f2fs
net/nfc/nci/rsp.c
395
9598
/*
 *  The NFC Controller Interface is the communication protocol between an
 *  NFC Controller (NFCC) and a Device Host (DH).
 *
 *  Copyright (C) 2011 Texas Instruments, Inc.
 *
 *  Written by Ilan Elias <ilane@ti.com>
 *
 *  Acknowledgements:
 *  This file is based on hci_event.c, which was written
 *  by Maxim Krasnyansky.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>

/* Handle NCI Response packets */

/* CORE_RESET_RSP: on success cache the controller's NCI version, then
 * complete the pending request with the reported status. */
static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
	struct nci_core_reset_rsp *rsp = (void *) skb->data;

	pr_debug("status 0x%x\n", rsp->status);

	if (rsp->status == NCI_STATUS_OK) {
		ndev->nci_ver = rsp->nci_ver;
		pr_debug("nci_ver 0x%x, config_status 0x%x\n",
			 rsp->nci_ver, rsp->config_status);
	}

	nci_req_complete(ndev, rsp->status);
}

/* CORE_INIT_RSP: parse the two-part init response and cache the NFCC's
 * capabilities (features, RF interfaces, connection/routing limits,
 * manufacturer info) on the nci_dev. */
static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
	struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
	struct nci_core_init_rsp_2 *rsp_2;

	pr_debug("status 0x%x\n", rsp_1->status);

	if (rsp_1->status != NCI_STATUS_OK)
		goto exit;

	ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
	ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;

	/* Clamp the interface count so the memcpy below cannot overrun
	 * ndev->supported_rf_interfaces. */
	if (ndev->num_supported_rf_interfaces >
	    NCI_MAX_SUPPORTED_RF_INTERFACES) {
		ndev->num_supported_rf_interfaces =
			NCI_MAX_SUPPORTED_RF_INTERFACES;
	}

	memcpy(ndev->supported_rf_interfaces,
	       rsp_1->supported_rf_interfaces,
	       ndev->num_supported_rf_interfaces);

	/* The second part of the response follows the variable-length
	 * interface list; 6 presumably skips rsp_1's fixed fields.
	 * NOTE(review): the offset uses the NFCC-supplied (unclamped)
	 * count and skb->len is not checked against it here — confirm
	 * the caller guarantees the packet is large enough. */
	rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces);

	ndev->max_logical_connections = rsp_2->max_logical_connections;
	ndev->max_routing_table_size =
		__le16_to_cpu(rsp_2->max_routing_table_size);
	ndev->max_ctrl_pkt_payload_len =
		rsp_2->max_ctrl_pkt_payload_len;
	ndev->max_size_for_large_params =
		__le16_to_cpu(rsp_2->max_size_for_large_params);
	ndev->manufact_id =
		rsp_2->manufact_id;
	ndev->manufact_specific_info =
		__le32_to_cpu(rsp_2->manufact_specific_info);

	pr_debug("nfcc_features 0x%x\n",
		 ndev->nfcc_features);
	pr_debug("num_supported_rf_interfaces %d\n",
		 ndev->num_supported_rf_interfaces);
	pr_debug("supported_rf_interfaces[0] 0x%x\n",
		 ndev->supported_rf_interfaces[0]);
	pr_debug("supported_rf_interfaces[1] 0x%x\n",
		 ndev->supported_rf_interfaces[1]);
	pr_debug("supported_rf_interfaces[2] 0x%x\n",
		 ndev->supported_rf_interfaces[2]);
	pr_debug("supported_rf_interfaces[3] 0x%x\n",
		 ndev->supported_rf_interfaces[3]);
	pr_debug("max_logical_connections %d\n",
		 ndev->max_logical_connections);
	pr_debug("max_routing_table_size %d\n",
		 ndev->max_routing_table_size);
	pr_debug("max_ctrl_pkt_payload_len %d\n",
		 ndev->max_ctrl_pkt_payload_len);
	pr_debug("max_size_for_large_params %d\n",
		 ndev->max_size_for_large_params);
	pr_debug("manufact_id 0x%x\n",
		 ndev->manufact_id);
	pr_debug("manufact_specific_info 0x%x\n",
		 ndev->manufact_specific_info);

exit:
	nci_req_complete(ndev, rsp_1->status);
}

/* CORE_SET_CONFIG_RSP: just complete the request with the status. */
static void nci_core_set_config_rsp_packet(struct nci_dev *ndev,
					   struct sk_buff *skb)
{
	struct nci_core_set_config_rsp *rsp = (void *) skb->data;

	pr_debug("status 0x%x\n", rsp->status);

	nci_req_complete(ndev, rsp->status);
}

/* RF_DISCOVER_MAP_RSP: single status byte, complete the request. */
static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
				       struct sk_buff *skb)
{
	__u8 status = skb->data[0];

	pr_debug("status 0x%x\n", status);

	nci_req_complete(ndev, status);
}

/* RF_DISCOVER_RSP: on success enter NCI_DISCOVERY state and lazily
 * allocate the static RF connection's nci_conn_info (devm-managed, so
 * it is freed with the nfc device). */
static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
	struct nci_conn_info *conn_info;
	__u8 status = skb->data[0];

	pr_debug("status 0x%x\n", status);

	if (status == NCI_STATUS_OK) {
		atomic_set(&ndev->state, NCI_DISCOVERY);

		conn_info = ndev->rf_conn_info;
		if (!conn_info) {
			conn_info = devm_kzalloc(&ndev->nfc_dev->dev,
						 sizeof(struct nci_conn_info),
						 GFP_KERNEL);
			if (!conn_info) {
				status = NCI_STATUS_REJECTED;
				goto exit;
			}
			conn_info->conn_id = NCI_STATIC_RF_CONN_ID;
			INIT_LIST_HEAD(&conn_info->list);
			list_add(&conn_info->list, &ndev->conn_info_list);
			ndev->rf_conn_info = conn_info;
		}
	}

exit:
	nci_req_complete(ndev, status);
}

/* RF_DISCOVER_SELECT_RSP: on success the request is completed later by
 * intf_activated_ntf or generic_error_ntf; only a failure completes it
 * here. */
static void nci_rf_disc_select_rsp_packet(struct nci_dev *ndev,
					  struct sk_buff *skb)
{
	__u8 status = skb->data[0];

	pr_debug("status 0x%x\n", status);

	/* Complete the request on intf_activated_ntf or generic_error_ntf */
	if (status != NCI_STATUS_OK)
		nci_req_complete(ndev, status);
}

/* RF_DEACTIVATE_RSP: if the target was active the request completes in
 * the deactivate notification instead; otherwise clear targets, drop
 * back to NCI_IDLE and complete now. */
static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
					 struct sk_buff *skb)
{
	__u8 status = skb->data[0];

	pr_debug("status 0x%x\n", status);

	/* If target was active, complete the request only in deactivate_ntf */
	if ((status != NCI_STATUS_OK) ||
	    (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
		nci_clear_target_list(ndev);
		atomic_set(&ndev->state, NCI_IDLE);
		nci_req_complete(ndev, status);
	}
}

/* NFCEE_DISCOVER_RSP: a valid response is exactly 2 bytes; when NFCEEs
 * were discovered (num_nfcee != 0, status OK) completion is deferred to
 * the discover notifications. */
static void nci_nfcee_discover_rsp_packet(struct nci_dev *ndev,
					  struct sk_buff *skb)
{
	struct nci_nfcee_discover_rsp *discover_rsp;

	if (skb->len != 2) {
		nci_req_complete(ndev, NCI_STATUS_NFCEE_PROTOCOL_ERROR);
		return;
	}

	discover_rsp = (struct nci_nfcee_discover_rsp *)skb->data;

	if (discover_rsp->status != NCI_STATUS_OK ||
	    discover_rsp->num_nfcee == 0)
		nci_req_complete(ndev, discover_rsp->status);
}

/* NFCEE_MODE_SET_RSP: single status byte, complete the request. */
static void nci_nfcee_mode_set_rsp_packet(struct nci_dev *ndev,
					  struct sk_buff *skb)
{
	__u8 status = skb->data[0];

	pr_debug("status 0x%x\n", status);
	nci_req_complete(ndev, status);
}

/* CORE_CONN_CREATE_RSP: on success allocate and link a nci_conn_info
 * for the new logical connection, recording its id, payload limit and
 * initial credits; the HCI NFCEE connection is also cached on hci_dev. */
static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
					    struct sk_buff *skb)
{
	__u8 status = skb->data[0];
	struct nci_conn_info *conn_info;
	struct nci_core_conn_create_rsp *rsp;

	pr_debug("status 0x%x\n", status);

	if (status == NCI_STATUS_OK) {
		rsp = (struct nci_core_conn_create_rsp *)skb->data;

		conn_info = devm_kzalloc(&ndev->nfc_dev->dev,
					 sizeof(*conn_info), GFP_KERNEL);
		if (!conn_info) {
			status = NCI_STATUS_REJECTED;
			goto exit;
		}

		conn_info->id = ndev->cur_id;
		conn_info->conn_id = rsp->conn_id;

		/* Note: data_exchange_cb and data_exchange_cb_context need to
		 * be specified outside of nci_core_conn_create_rsp_packet
		 */

		INIT_LIST_HEAD(&conn_info->list);
		list_add(&conn_info->list, &ndev->conn_info_list);

		if (ndev->cur_id == ndev->hci_dev->nfcee_id)
			ndev->hci_dev->conn_info = conn_info;

		/* NOTE(review): conn_id was already assigned above; this
		 * second assignment is redundant but harmless. */
		conn_info->conn_id = rsp->conn_id;
		conn_info->max_pkt_payload_len = rsp->max_ctrl_pkt_payload_len;
		atomic_set(&conn_info->credits_cnt, rsp->credits_cnt);
	}

exit:
	nci_req_complete(ndev, status);
}

/* CORE_CONN_CLOSE_RSP: on success unlink and free the nci_conn_info
 * that was created for ndev->cur_id. */
static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev,
					   struct sk_buff *skb)
{
	struct nci_conn_info *conn_info;
	__u8 status = skb->data[0];

	pr_debug("status 0x%x\n", status);
	if (status == NCI_STATUS_OK) {
		conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_id);
		if (conn_info) {
			list_del(&conn_info->list);
			devm_kfree(&ndev->nfc_dev->dev, conn_info);
		}
	}
	nci_req_complete(ndev, status);
}

/* Top-level NCI response dispatcher: stops the command timer, strips
 * the control header, routes proprietary responses to the driver, and
 * otherwise dispatches on the opcode.  Always consumes the skb and
 * re-arms the command credit so the next queued command can go out. */
void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
	__u16 rsp_opcode = nci_opcode(skb->data);

	/* we got a rsp, stop the cmd timer */
	del_timer(&ndev->cmd_timer);

	pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
		 nci_pbf(skb->data),
		 nci_opcode_gid(rsp_opcode),
		 nci_opcode_oid(rsp_opcode),
		 nci_plen(skb->data));

	/* strip the nci control header */
	skb_pull(skb, NCI_CTRL_HDR_SIZE);

	if (nci_opcode_gid(rsp_opcode) == NCI_GID_PROPRIETARY) {
		if (nci_prop_rsp_packet(ndev, rsp_opcode, skb) == -ENOTSUPP) {
			pr_err("unsupported rsp opcode 0x%x\n",
			       rsp_opcode);
		}

		goto end;
	}

	switch (rsp_opcode) {
	case NCI_OP_CORE_RESET_RSP:
		nci_core_reset_rsp_packet(ndev, skb);
		break;

	case NCI_OP_CORE_INIT_RSP:
		nci_core_init_rsp_packet(ndev, skb);
		break;

	case NCI_OP_CORE_SET_CONFIG_RSP:
		nci_core_set_config_rsp_packet(ndev, skb);
		break;

	case NCI_OP_CORE_CONN_CREATE_RSP:
		nci_core_conn_create_rsp_packet(ndev, skb);
		break;

	case NCI_OP_CORE_CONN_CLOSE_RSP:
		nci_core_conn_close_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DISCOVER_MAP_RSP:
		nci_rf_disc_map_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DISCOVER_RSP:
		nci_rf_disc_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DISCOVER_SELECT_RSP:
		nci_rf_disc_select_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DEACTIVATE_RSP:
		nci_rf_deactivate_rsp_packet(ndev, skb);
		break;

	case NCI_OP_NFCEE_DISCOVER_RSP:
		nci_nfcee_discover_rsp_packet(ndev, skb);
		break;

	case NCI_OP_NFCEE_MODE_SET_RSP:
		nci_nfcee_mode_set_rsp_packet(ndev, skb);
		break;

	default:
		pr_err("unknown rsp opcode 0x%x\n", rsp_opcode);
		break;
	}

end:
	kfree_skb(skb);

	/* trigger the next cmd */
	atomic_set(&ndev->cmd_cnt, 1);
	if (!skb_queue_empty(&ndev->cmd_q))
		queue_work(ndev->cmd_wq, &ndev->cmd_work);
}
gpl-2.0
cile381/s7_flat_kernel
drivers/net/wireless/orinoco/orinoco_usb.c
395
45473
/*
 * USB Orinoco driver
 *
 * Copyright (c) 2003 Manuel Estrada Sainz
 *
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.1 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License
 * at http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
 * the License for the specific language governing rights and
 * limitations under the License.
 *
 * Alternatively, the contents of this file may be used under the
 * terms of the GNU General Public License version 2 (the "GPL"), in
 * which case the provisions of the GPL are applicable instead of the
 * above.  If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the MPL, indicate your decision by
 * deleting the provisions above and replace them with the notice and
 * other provisions required by the GPL.  If you do not delete the
 * provisions above, a recipient may use your version of this file
 * under either the MPL or the GPL.
 *
 * Queueing code based on linux-wlan-ng 0.2.1-pre5
 *
 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
 *
 * The license is the same as above.
 *
 * Initialy based on USB Skeleton driver - 0.7
 *
 * Copyright (c) 2001 Greg Kroah-Hartman (greg@kroah.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * NOTE: The original USB Skeleton driver is GPL, but all that code is
 * gone so MPL/GPL applies.
 */

#define DRIVER_NAME "orinoco_usb"
#define PFX DRIVER_NAME ": "

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>

#include "mic.h"
#include "orinoco.h"

/* Compat shim: older kernels had URB_ASYNC_UNLINK; define it away when
 * the flag no longer exists. */
#ifndef URB_ASYNC_UNLINK
#define URB_ASYNC_UNLINK 0
#endif

/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)

/* On-the-wire 802.3 + 802.2 LLC + SNAP header layout. */
struct header_struct {
	/* 802.3 */
	u8 dest[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
	/* 802.2 */
	u8 dsap;
	u8 ssap;
	u8 ctrl;
	/* SNAP */
	u8 oui[3];
	__be16 ethertype;
} __packed;

/* Firmware image loaded via request_firmware(); empty until loaded. */
struct ez_usb_fw {
	u16 size;
	const u8 *code;
};

static struct ez_usb_fw firmware = {
	.size = 0,
	.code = NULL,
};

/* Debugging macros */
#undef err
#define err(format, arg...) \
	do { printk(KERN_ERR PFX format "\n", ## arg); } while (0)

MODULE_FIRMWARE("orinoco_ezusb_fw");

/*
 * Under some conditions, the card gets stuck and stops paying attention
 * to the world (i.e. data communication stalls) until we do something to
 * it. Sending an INQ_TALLIES command seems to be enough and should be
 * harmless otherwise. This behaviour has been observed when using the
 * driver on a systemimager client during installation. In the past a
 * timer was used to send INQ_TALLIES commands when there was no other
 * activity, but it was troublesome and was removed.
 */

#define USB_COMPAQ_VENDOR_ID     0x049f /* Compaq Computer Corp. */
#define USB_COMPAQ_WL215_ID      0x001f /* Compaq WL215 USB Adapter */
#define USB_COMPAQ_W200_ID       0x0076 /* Compaq W200 USB Adapter */
#define USB_HP_WL215_ID          0x0082 /* Compaq WL215 USB Adapter */

#define USB_MELCO_VENDOR_ID      0x0411
#define USB_BUFFALO_L11_ID       0x0006 /* BUFFALO WLI-USB-L11 */
#define USB_BUFFALO_L11G_WR_ID   0x000B /* BUFFALO WLI-USB-L11G-WR */
#define USB_BUFFALO_L11G_ID      0x000D /* BUFFALO WLI-USB-L11G */

#define USB_LUCENT_VENDOR_ID     0x047E /* Lucent Technologies */
#define USB_LUCENT_ORINOCO_ID    0x0300 /* Lucent/Agere Orinoco USB Client */

#define USB_AVAYA8_VENDOR_ID     0x0D98
#define USB_AVAYAE_VENDOR_ID     0x0D9E
#define USB_AVAYA_WIRELESS_ID    0x0300 /* Avaya Wireless USB Card */

#define USB_AGERE_VENDOR_ID      0x0D4E /* Agere Systems */
#define USB_AGERE_MODEL0801_ID   0x1000 /* Wireless USB Card Model 0801 */
#define USB_AGERE_MODEL0802_ID   0x1001 /* Wireless USB Card Model 0802 */
#define USB_AGERE_REBRANDED_ID   0x047A /* WLAN USB Card */

#define USB_ELSA_VENDOR_ID       0x05CC
#define USB_ELSA_AIRLANCER_ID    0x3100 /* ELSA AirLancer USB-11 */

#define USB_LEGEND_VENDOR_ID     0x0E7C
#define USB_LEGEND_JOYNET_ID     0x0300 /* Joynet WLAN USB Card */

#define USB_SAMSUNG_VENDOR_ID    0x04E8
#define USB_SAMSUNG_SEW2001U1_ID 0x5002 /* Samsung SEW-2001u Card */
#define USB_SAMSUNG_SEW2001U2_ID 0x5B11 /* Samsung SEW-2001u Card */
#define USB_SAMSUNG_SEW2003U_ID  0x7011 /* Samsung SEW-2003U Card */

#define USB_IGATE_VENDOR_ID      0x0681
#define USB_IGATE_IGATE_11M_ID   0x0012 /* I-GATE 11M USB Card */

#define USB_FUJITSU_VENDOR_ID    0x0BF8
#define USB_FUJITSU_E1100_ID     0x1002 /* connect2AIR WLAN E-1100 USB */

#define USB_2WIRE_VENDOR_ID      0x1630
#define USB_2WIRE_WIRELESS_ID    0xff81 /* 2Wire Wireless USB adapter */

/* EZ-USB vendor control requests and 8051 control register. */
#define EZUSB_REQUEST_FW_TRANS   0xA0
#define EZUSB_REQUEST_TRIGER     0xAA
#define EZUSB_REQUEST_TRIG_AC    0xAC
#define EZUSB_CPUCS_REG          0x7F92

/* RIDs used on the EZ-USB transport. */
#define EZUSB_RID_TX             0x0700
#define EZUSB_RID_RX             0x0701
#define EZUSB_RID_INIT1          0x0702
#define EZUSB_RID_ACK            0x0710
#define EZUSB_RID_READ_PDA       0x0800
#define EZUSB_RID_PROG_INIT      0x0852
#define EZUSB_RID_PROG_SET_ADDR  0x0853
#define EZUSB_RID_PROG_BYTES     0x0854
#define EZUSB_RID_PROG_END       0x0855
#define EZUSB_RID_DOCMD          0x0860

/* Recognize info frames */
#define EZUSB_IS_INFO(id)        ((id >= 0xF000) && (id <= 0xF2FF))

#define EZUSB_MAGIC              0x0210

#define EZUSB_FRAME_DATA         1
#define EZUSB_FRAME_CONTROL      2

#define DEF_TIMEOUT              (3 * HZ)
#define BULK_BUF_SIZE            2048

#define MAX_DL_SIZE (BULK_BUF_SIZE - sizeof(struct ezusb_packet))

#define FW_BUF_SIZE              64
#define FW_VAR_OFFSET_PTR        0x359
#define FW_VAR_VALUE             0
#define FW_HOLE_START            0x100
#define FW_HOLE_END              0x300

/* Framing header prepended to every bulk transfer to/from the device;
 * data[] is the variable-length payload. */
struct ezusb_packet {
	__le16 magic;		/* 0x0210 */
	u8 req_reply_count;
	u8 ans_reply_count;
	__le16 frame_type;	/* 0x01 for data frames, 0x02 otherwise */
	__le16 size;		/* transport size */
	__le16 crc;		/* CRC up to here */
	__le16 hermes_len;
	__le16 hermes_rid;
	u8 data[0];
} __packed;

/* Table of devices that work or may work with this driver */
static struct usb_device_id ezusb_table[] = {
	{USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_WL215_ID)},
	{USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_HP_WL215_ID)},
	{USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_W200_ID)},
	{USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11_ID)},
	{USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_WR_ID)},
	{USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_ID)},
	{USB_DEVICE(USB_LUCENT_VENDOR_ID, USB_LUCENT_ORINOCO_ID)},
	{USB_DEVICE(USB_AVAYA8_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
	{USB_DEVICE(USB_AVAYAE_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
	{USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0801_ID)},
	{USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0802_ID)},
	{USB_DEVICE(USB_ELSA_VENDOR_ID, USB_ELSA_AIRLANCER_ID)},
	{USB_DEVICE(USB_LEGEND_VENDOR_ID, USB_LEGEND_JOYNET_ID)},
	{USB_DEVICE_VER(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U1_ID, 0, 0)},
	{USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U2_ID)},
	{USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2003U_ID)},
	{USB_DEVICE(USB_IGATE_VENDOR_ID, USB_IGATE_IGATE_11M_ID)},
	{USB_DEVICE(USB_FUJITSU_VENDOR_ID, USB_FUJITSU_E1100_ID)},
	{USB_DEVICE(USB_2WIRE_VENDOR_ID, USB_2WIRE_WIRELESS_ID)},
	{USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_REBRANDED_ID)},
	{}			/* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, ezusb_table);

/* Structure to hold all of our device specific stuff */
struct ezusb_priv {
	struct usb_device *udev;	/* NULL after disconnect */
	struct net_device *dev;
	struct mutex mtx;
	spinlock_t req_lock;		/* protects the two request lists */
	struct list_head req_pending;
	struct list_head req_active;	/* at most one active CTX at a time */
	spinlock_t reply_count_lock;	/* serializes reply_count updates */
	u16 hermes_reg_fake[0x40];
	u8 *bap_buf;
	struct urb *read_urb;
	int read_pipe;
	int write_pipe;
	u8 reply_count;
};

/* Lifecycle states of a request context (see ezusb_ctx_complete). */
enum ezusb_state {
	EZUSB_CTX_START,
	EZUSB_CTX_QUEUED,
	EZUSB_CTX_REQ_SUBMITTED,
	EZUSB_CTX_REQ_COMPLETE,
	EZUSB_CTX_RESP_RECEIVED,
	EZUSB_CTX_REQ_TIMEOUT,
	EZUSB_CTX_REQ_FAILED,
	EZUSB_CTX_RESP_TIMEOUT,
	EZUSB_CTX_REQSUBMIT_FAIL,
	EZUSB_CTX_COMPLETE,
};

/* One outstanding request/response exchange with the device. */
struct request_context {
	struct list_head list;
	atomic_t refcount;
	struct completion done;	/* Signals that CTX is dead */
	int killed;
	struct urb *outurb;	/* OUT for req pkt */
	struct ezusb_priv *upriv;
	struct ezusb_packet *buf;
	int buf_length;
	struct timer_list timer;	/* Timeout handling */
	enum ezusb_state state;	/* Current state */
	/* the RID that we will wait for */
	u16 out_rid;
	u16 in_rid;
};

/* Forward declarations */
static void ezusb_ctx_complete(struct request_context *ctx);
static void ezusb_req_queue_run(struct ezusb_priv *upriv);
static void ezusb_bulk_in_callback(struct urb *urb);

/* Sequence counter wraps within 1..0x7F (0 is never used). */
static inline u8 ezusb_reply_inc(u8 count)
{
	if (count < 0x7F)
		return count + 1;
	else
		return 1;
}

/* Drop one reference; frees the CTX (urb + buffer) on the last put. */
static void ezusb_request_context_put(struct request_context *ctx)
{
	if (!atomic_dec_and_test(&ctx->refcount))
		return;

	WARN_ON(!ctx->done.done);
	BUG_ON(ctx->outurb->status == -EINPROGRESS);
	BUG_ON(timer_pending(&ctx->timer));
	usb_free_urb(ctx->outurb);
	kfree(ctx->buf);
	kfree(ctx);
}

/* mod_timer() wrapper that is a no-op once the device is gone. */
static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
				   struct timer_list *timer,
				   unsigned long expire)
{
	if (!upriv->udev)
		return;
	mod_timer(timer, expire);
}

/* Timer callback: try to unlink the OUT urb; if it is no longer
 * in flight this is a response timeout and the CTX is completed here. */
static void ezusb_request_timerfn(u_long _ctx)
{
	struct request_context *ctx = (void *) _ctx;

	ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
	if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
		ctx->state = EZUSB_CTX_REQ_TIMEOUT;
	} else {
		ctx->state = EZUSB_CTX_RESP_TIMEOUT;
		dev_dbg(&ctx->outurb->dev->dev, "couldn't unlink\n");
		atomic_inc(&ctx->refcount);
		ctx->killed = 1;
		ezusb_ctx_complete(ctx);
		ezusb_request_context_put(ctx);
	}
};

/* Allocate a request context (refcount 1) with its OUT urb and bulk
 * buffer; returns NULL on allocation failure. */
static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
					       u16 out_rid, u16 in_rid)
{
	struct request_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC);
	if (!ctx->buf) {
		kfree(ctx);
		return NULL;
	}
	ctx->outurb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!ctx->outurb) {
		kfree(ctx->buf);
		kfree(ctx);
		return NULL;
	}

	ctx->upriv = upriv;
	ctx->state = EZUSB_CTX_START;
	ctx->out_rid = out_rid;
	ctx->in_rid = in_rid;

	atomic_set(&ctx->refcount, 1);
	init_completion(&ctx->done);

	init_timer(&ctx->timer);
	ctx->timer.function = ezusb_request_timerfn;
	ctx->timer.data = (u_long) ctx;
	return ctx;
}

/* Hopefully the real complete_all will soon be exported, in the mean
 * while this should work. */
static inline void ezusb_complete_all(struct completion *comp)
{
	/* Poor man's complete_all(): wake up to four waiters. */
	complete(comp);
	complete(comp);
	complete(comp);
	complete(comp);
}

/* Terminal handling for a CTX: unlink it from its queue, kick the next
 * request, account TX stats for EZUSB_RID_TX requests, wake waiters and
 * drop the queue's reference.  Non-terminal states here indicate a bug
 * (or a disconnect flush) and the CTX may be deliberately leaked. */
static void ezusb_ctx_complete(struct request_context *ctx)
{
	struct ezusb_priv *upriv = ctx->upriv;
	unsigned long flags;

	spin_lock_irqsave(&upriv->req_lock, flags);

	list_del_init(&ctx->list);
	if (upriv->udev) {
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		ezusb_req_queue_run(upriv);
		spin_lock_irqsave(&upriv->req_lock, flags);
	}

	switch (ctx->state) {
	case EZUSB_CTX_COMPLETE:
	case EZUSB_CTX_REQSUBMIT_FAIL:
	case EZUSB_CTX_REQ_FAILED:
	case EZUSB_CTX_REQ_TIMEOUT:
	case EZUSB_CTX_RESP_TIMEOUT:

		spin_unlock_irqrestore(&upriv->req_lock, flags);

		if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) {
			struct net_device *dev = upriv->dev;
			struct orinoco_private *priv = ndev_priv(dev);
			struct net_device_stats *stats = &priv->stats;

			if (ctx->state != EZUSB_CTX_COMPLETE)
				stats->tx_errors++;
			else
				stats->tx_packets++;

			netif_wake_queue(dev);
		}
		ezusb_complete_all(&ctx->done);
		ezusb_request_context_put(ctx);
		break;

	default:
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		if (!upriv->udev) {
			/* This is normal, as all request contexts get flushed
			 * when the device is disconnected */
			err("Called, CTX not terminating, but device gone");
			ezusb_complete_all(&ctx->done);
			ezusb_request_context_put(ctx);
			break;
		}

		err("Called, CTX not in terminating state.");
		/* Things are really bad if this happens. Just leak
		 * the CTX because it may still be linked to the
		 * queue or the OUT urb may still be active.
		 * Just leaking at least prevents an Oops or Panic.
		 */
		break;
	}
}

/**
 * ezusb_req_queue_run:
 * Description:
 *	Note: Only one active CTX at any one time, because there's no
 *	other (reliable) way to match the response URB to the correct
 *	CTX.
 **/
static void ezusb_req_queue_run(struct ezusb_priv *upriv)
{
	unsigned long flags;
	struct request_context *ctx;
	int result;

	spin_lock_irqsave(&upriv->req_lock, flags);

	if (!list_empty(&upriv->req_active))
		goto unlock;

	if (list_empty(&upriv->req_pending))
		goto unlock;

	ctx =
	    list_entry(upriv->req_pending.next, struct request_context,
		       list);

	if (!ctx->upriv->udev)
		goto unlock;

	/* We need to split this off to avoid a race condition */
	list_move_tail(&ctx->list, &upriv->req_active);

	if (ctx->state == EZUSB_CTX_QUEUED) {
		atomic_inc(&ctx->refcount);
		result = usb_submit_urb(ctx->outurb, GFP_ATOMIC);
		if (result) {
			ctx->state = EZUSB_CTX_REQSUBMIT_FAIL;

			spin_unlock_irqrestore(&upriv->req_lock, flags);

			err("Fatal, failed to submit command urb."
			    " error=%d\n", result);

			ezusb_ctx_complete(ctx);
			ezusb_request_context_put(ctx);
			goto done;
		}

		ctx->state = EZUSB_CTX_REQ_SUBMITTED;
		ezusb_mod_timer(ctx->upriv, &ctx->timer,
				jiffies + DEF_TIMEOUT);
	}

 unlock:
	spin_unlock_irqrestore(&upriv->req_lock, flags);

 done:
	return;
}

/* Queue a CTX on the pending list (taking a queue reference) and try to
 * start it; no-op if the device has been disconnected. */
static void ezusb_req_enqueue_run(struct ezusb_priv *upriv,
				  struct request_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&upriv->req_lock, flags);

	if (!ctx->upriv->udev) {
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		goto done;
	}
	atomic_inc(&ctx->refcount);
	list_add_tail(&ctx->list, &upriv->req_pending);
	spin_unlock_irqrestore(&upriv->req_lock, flags);

	ctx->state = EZUSB_CTX_QUEUED;
	ezusb_req_queue_run(upriv);

 done:
	return;
}

/* Completion handler for the OUT (request) urb: advance the CTX state
 * machine depending on whether a reply is still expected, the reply
 * already arrived, or the submission failed/was cancelled. */
static void ezusb_request_out_callback(struct urb *urb)
{
	unsigned long flags;
	enum ezusb_state state;
	struct request_context *ctx = urb->context;
	struct ezusb_priv *upriv = ctx->upriv;

	spin_lock_irqsave(&upriv->req_lock, flags);

	del_timer(&ctx->timer);

	if (ctx->killed) {
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		pr_warn("interrupt called with dead ctx\n");
		goto out;
	}

	state = ctx->state;

	if (urb->status == 0) {
		switch (state) {
		case EZUSB_CTX_REQ_SUBMITTED:
			if (ctx->in_rid) {
				ctx->state = EZUSB_CTX_REQ_COMPLETE;
				/* reply URB still pending */
				ezusb_mod_timer(upriv, &ctx->timer,
						jiffies + DEF_TIMEOUT);
				spin_unlock_irqrestore(&upriv->req_lock,
						       flags);
				break;
			}
			/* fall through */
		case EZUSB_CTX_RESP_RECEIVED:
			/* IN already received before this OUT-ACK */
			ctx->state = EZUSB_CTX_COMPLETE;
			spin_unlock_irqrestore(&upriv->req_lock, flags);
			ezusb_ctx_complete(ctx);
			break;

		default:
			spin_unlock_irqrestore(&upriv->req_lock, flags);
			err("Unexpected state(0x%x, %d) in OUT URB",
			    state, urb->status);
			break;
		}
	} else {
		/* If someone cancels the OUT URB then its status
		 * should be either -ECONNRESET or -ENOENT.
		 */
		switch (state) {
		case EZUSB_CTX_REQ_SUBMITTED:
		case EZUSB_CTX_RESP_RECEIVED:
			ctx->state = EZUSB_CTX_REQ_FAILED;
			/* fall through */

		case EZUSB_CTX_REQ_FAILED:
		case EZUSB_CTX_REQ_TIMEOUT:
			spin_unlock_irqrestore(&upriv->req_lock, flags);

			ezusb_ctx_complete(ctx);
			break;

		default:
			spin_unlock_irqrestore(&upriv->req_lock, flags);

			err("Unexpected state(0x%x, %d) in OUT URB",
			    state, urb->status);
			break;
		}
	}
 out:
	ezusb_request_context_put(ctx);
}

/* Handle an IN (response) packet: match it to the active CTX by reply
 * counter and RID, swap the transfer buffer into the CTX, and advance
 * the state machine (possibly completing the CTX). */
static void ezusb_request_in_callback(struct ezusb_priv *upriv,
				      struct urb *urb)
{
	struct ezusb_packet *ans = urb->transfer_buffer;
	struct request_context *ctx = NULL;
	enum ezusb_state state;
	unsigned long flags;

	/* Find the CTX on the active queue that requested this URB */
	spin_lock_irqsave(&upriv->req_lock, flags);
	if (upriv->udev) {
		struct list_head *item;

		list_for_each(item, &upriv->req_active) {
			struct request_context *c;
			int reply_count;

			c = list_entry(item, struct request_context, list);
			reply_count =
			    ezusb_reply_inc(c->buf->req_reply_count);
			if ((ans->ans_reply_count == reply_count)
			    && (le16_to_cpu(ans->hermes_rid) == c->in_rid)) {
				ctx = c;
				break;
			}
			netdev_dbg(upriv->dev, "Skipped (0x%x/0x%x) (%d/%d)\n",
				   le16_to_cpu(ans->hermes_rid), c->in_rid,
				   ans->ans_reply_count, reply_count);
		}
	}

	if (ctx == NULL) {
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		err("%s: got unexpected RID: 0x%04X", __func__,
		    le16_to_cpu(ans->hermes_rid));
		ezusb_req_queue_run(upriv);
		return;
	}

	/* The data we want is in the in buffer, exchange */
	urb->transfer_buffer = ctx->buf;
	ctx->buf = (void *) ans;
	ctx->buf_length = urb->actual_length;

	state = ctx->state;
	switch (state) {
	case EZUSB_CTX_REQ_SUBMITTED:
		/* We have received our response URB before
		 * our request has been acknowledged. Do NOT
		 * destroy our CTX yet, because our OUT URB
		 * is still alive ...
		 */
		ctx->state = EZUSB_CTX_RESP_RECEIVED;
		spin_unlock_irqrestore(&upriv->req_lock, flags);

		/* Let the machine continue running. */
		break;

	case EZUSB_CTX_REQ_COMPLETE:
		/* This is the usual path: our request
		 * has already been acknowledged, and
		 * we have now received the reply.
		 */
		ctx->state = EZUSB_CTX_COMPLETE;

		/* Stop the intimer */
		del_timer(&ctx->timer);
		spin_unlock_irqrestore(&upriv->req_lock, flags);

		/* Call the completion handler */
		ezusb_ctx_complete(ctx);
		break;

	default:
		spin_unlock_irqrestore(&upriv->req_lock, flags);

		pr_warn("Matched IN URB, unexpected context state(0x%x)\n",
			state);

		/* Throw this CTX away and try submitting another */
		del_timer(&ctx->timer);
		ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
		usb_unlink_urb(ctx->outurb);
		ezusb_req_queue_run(upriv);
		break;
	}			/* switch */
}

/* Wait for a CTX to reach a terminal state.  NOTE(review): this peeks
 * at struct completion internals (done/wait) rather than using the
 * completion API — confirm against the kernel version this targets. */
static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
			       struct request_context *ctx)
{
	switch (ctx->state) {
	case EZUSB_CTX_QUEUED:
	case EZUSB_CTX_REQ_SUBMITTED:
	case EZUSB_CTX_REQ_COMPLETE:
	case EZUSB_CTX_RESP_RECEIVED:
		if (in_softirq()) {
			/* If we get called from a timer, timeout timers don't
			 * get the chance to run themselves. So we make sure
			 * that we don't sleep for ever */
			int msecs = DEF_TIMEOUT * (1000 / HZ);

			while (!ctx->done.done && msecs--)
				udelay(1000);
		} else {
			wait_event_interruptible(ctx->done.wait,
						 ctx->done.done);
		}
		break;
	default:
		/* Done or failed - nothing to wait for */
		break;
	}
}

/* Weak 8-byte header checksum: shift-and-add over the first 8 bytes. */
static inline u16 build_crc(struct ezusb_packet *data)
{
	u16 crc = 0;
	u8 *bytes = (u8 *)data;
	int i;

	for (i = 0; i < 8; i++)
		crc = (crc << 1) + bytes[i];

	return crc;
}

/**
 * ezusb_fill_req:
 *
 * if data == NULL and length > 0 the data is assumed to be already in
 * the target buffer and only the header is filled.
 *
 */
static int ezusb_fill_req(struct ezusb_packet *req, u16 length, u16 rid,
			  const void *data, u16 frame_type, u8 reply_count)
{
	int total_size = sizeof(*req) + length;

	BUG_ON(total_size > BULK_BUF_SIZE);

	req->magic = cpu_to_le16(EZUSB_MAGIC);
	req->req_reply_count = reply_count;
	req->ans_reply_count = 0;
	req->frame_type = cpu_to_le16(frame_type);
	req->size = cpu_to_le16(length + 4);
	req->crc = cpu_to_le16(build_crc(req));
	req->hermes_len = cpu_to_le16(HERMES_BYTES_TO_RECLEN(length));
	req->hermes_rid = cpu_to_le16(rid);
	if (data)
		memcpy(req->data, data, length);
	return total_size;
}

/* (Re)submit the shared bulk IN urb; -EBUSY if it is already pending. */
static int ezusb_submit_in_urb(struct ezusb_priv *upriv)
{
	int retval = 0;
	void *cur_buf = upriv->read_urb->transfer_buffer;

	if (upriv->read_urb->status == -EINPROGRESS) {
		netdev_dbg(upriv->dev, "urb busy, not resubmiting\n");
		retval = -EBUSY;
		goto exit;
	}
	usb_fill_bulk_urb(upriv->read_urb, upriv->udev, upriv->read_pipe,
			  cur_buf, BULK_BUF_SIZE,
			  ezusb_bulk_in_callback, upriv);
	upriv->read_urb->transfer_flags = 0;
	retval = usb_submit_urb(upriv->read_urb, GFP_ATOMIC);
	if (retval)
		err("%s submit failed %d", __func__, retval);
 exit:
	return retval;
}

/* Hold (reset=1) or release (reset=0) the EZ-USB's 8051 core via the
 * CPUCS register, using a vendor control transfer. */
static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset)
{
	u8 res_val = reset;	/* avoid argument promotion */

	if (!upriv->udev) {
		err("%s: !upriv->udev", __func__);
		return -EFAULT;
	}
	return usb_control_msg(upriv->udev,
			       usb_sndctrlpipe(upriv->udev, 0),
			       EZUSB_REQUEST_FW_TRANS,
			       USB_TYPE_VENDOR | USB_RECIP_DEVICE |
			       USB_DIR_OUT, EZUSB_CPUCS_REG, 0, &res_val,
			       sizeof(res_val), DEF_TIMEOUT);
}

/* Download the 8051 firmware in FW_BUF_SIZE chunks while the CPU is
 * held in reset, skipping the 0x100-0x300 hole and patching the
 * card-variant byte; release the CPU on success. */
static int ezusb_firmware_download(struct ezusb_priv *upriv,
				   struct ez_usb_fw *fw)
{
	u8 *fw_buffer;
	int retval, addr;
	int variant_offset;

	fw_buffer = kmalloc(FW_BUF_SIZE, GFP_KERNEL);
	if (!fw_buffer) {
		printk(KERN_ERR PFX "Out of memory for firmware buffer.\n");
		return -ENOMEM;
	}
	/*
	 * This byte is 1 and should be replaced with 0.  The offset is
	 * 0x10AD in version 0.0.6.  The byte in question should follow
	 * the end of the code pointed to by the jump in the beginning
	 * of the firmware.  Also, it is read by code located at 0x358.
	 */
	variant_offset = be16_to_cpup((__be16 *) &fw->code[FW_VAR_OFFSET_PTR]);
	if (variant_offset >= fw->size) {
		printk(KERN_ERR PFX "Invalid firmware variant offset: "
		       "0x%04x\n", variant_offset);
		retval = -EINVAL;
		goto fail;
	}

	retval = ezusb_8051_cpucs(upriv, 1);
	if (retval < 0)
		goto fail;
	for (addr = 0; addr < fw->size; addr += FW_BUF_SIZE) {
		/* 0x100-0x300 should be left alone, it contains card
		 * specific data, like USB enumeration information */
		if ((addr >= FW_HOLE_START) && (addr < FW_HOLE_END))
			continue;

		memcpy(fw_buffer, &fw->code[addr], FW_BUF_SIZE);
		if (variant_offset >= addr &&
		    variant_offset < addr + FW_BUF_SIZE) {
			netdev_dbg(upriv->dev,
				   "Patching card_variant byte at 0x%04X\n",
				   variant_offset);
			fw_buffer[variant_offset - addr] = FW_VAR_VALUE;
		}
		retval = usb_control_msg(upriv->udev,
					 usb_sndctrlpipe(upriv->udev, 0),
					 EZUSB_REQUEST_FW_TRANS,
					 USB_TYPE_VENDOR | USB_RECIP_DEVICE
					 | USB_DIR_OUT,
					 addr, 0x0,
					 fw_buffer, FW_BUF_SIZE,
					 DEF_TIMEOUT);

		if (retval < 0)
			goto fail;
	}
	retval = ezusb_8051_cpucs(upriv, 0);
	if (retval < 0)
		goto fail;

	goto exit;
 fail:
	printk(KERN_ERR PFX "Firmware download failed, error %d\n",
	       retval);
 exit:
	kfree(fw_buffer);
	return retval;
}

/* Build and queue an LTV access; when a reply is expected, wait for it,
 * validate its length, and copy the answer out.
 * (Function continues beyond this chunk of the file.) */
static int ezusb_access_ltv(struct ezusb_priv *upriv,
			    struct request_context *ctx,
			    u16 length, const void *data, u16 frame_type,
			    void *ans_buff, unsigned ans_size, u16 *ans_length)
{
	int req_size;
	int retval = 0;
	enum ezusb_state state;

	BUG_ON(in_irq());

	if (!upriv->udev) {
		retval = -ENODEV;
		goto exit;
	}

	if (upriv->read_urb->status != -EINPROGRESS)
		err("%s: in urb not pending", __func__);

	/* protect upriv->reply_count, guarantee sequential numbers */
	spin_lock_bh(&upriv->reply_count_lock);
	req_size = ezusb_fill_req(ctx->buf, length, ctx->out_rid, data,
				  frame_type, upriv->reply_count);
	usb_fill_bulk_urb(ctx->outurb, upriv->udev, upriv->write_pipe,
			  ctx->buf, req_size,
			  ezusb_request_out_callback, ctx);

	if (ctx->in_rid)
		upriv->reply_count = ezusb_reply_inc(upriv->reply_count);

	ezusb_req_enqueue_run(upriv, ctx);

	spin_unlock_bh(&upriv->reply_count_lock);

	if (ctx->in_rid)
		ezusb_req_ctx_wait(upriv, ctx);

	state = ctx->state;
	switch (state) {
	case EZUSB_CTX_COMPLETE:
		retval = ctx->outurb->status;
		break;

	case EZUSB_CTX_QUEUED:
	case EZUSB_CTX_REQ_SUBMITTED:
		if (!ctx->in_rid)
			break;
	default:
		err("%s: Unexpected context state %d", __func__,
		    state);
		/* fall through */
	case EZUSB_CTX_REQ_TIMEOUT:
	case EZUSB_CTX_REQ_FAILED:
	case EZUSB_CTX_RESP_TIMEOUT:
	case EZUSB_CTX_REQSUBMIT_FAIL:
		printk(KERN_ERR PFX "Access failed, resetting (state %d,"
		       " reply_count %d)\n", state, upriv->reply_count);
		upriv->reply_count = 0;
		if (state == EZUSB_CTX_REQ_TIMEOUT
		    || state == EZUSB_CTX_RESP_TIMEOUT) {
			printk(KERN_ERR PFX "ctx timed out\n");
			retval = -ETIMEDOUT;
		} else {
			printk(KERN_ERR PFX "ctx failed\n");
			retval = -EFAULT;
		}
		goto exit;
	}
	if (ctx->in_rid) {
		struct ezusb_packet *ans = ctx->buf;
		unsigned exp_len;

		if (ans->hermes_len != 0)
			exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12;
		else
			exp_len = 14;

		if (exp_len != ctx->buf_length) {
			err("%s: length mismatch for RID 0x%04x: "
			    "expected %d, got %d", __func__,
			    ctx->in_rid, exp_len, ctx->buf_length);
			retval = -EIO;
			goto exit;
		}

		if (ans_buff)
			memcpy(ans_buff, ans->data, min(exp_len, ans_size));
		if (ans_length)
			*ans_length = le16_to_cpu(ans->hermes_len);
	}
 exit:
	ezusb_request_context_put(ctx);
return retval; } static int ezusb_write_ltv(struct hermes *hw, int bap, u16 rid, u16 length, const void *data) { struct ezusb_priv *upriv = hw->priv; u16 frame_type; struct request_context *ctx; if (length == 0) return -EINVAL; length = HERMES_RECLEN_TO_BYTES(length); /* On memory mapped devices HERMES_RID_CNFGROUPADDRESSES can be * set to be empty, but the USB bridge doesn't like it */ if (length == 0) return 0; ctx = ezusb_alloc_ctx(upriv, rid, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; if (rid == EZUSB_RID_TX) frame_type = EZUSB_FRAME_DATA; else frame_type = EZUSB_FRAME_CONTROL; return ezusb_access_ltv(upriv, ctx, length, data, frame_type, NULL, 0, NULL); } static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid, unsigned bufsize, u16 *length, void *buf) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; if (bufsize % 2) return -EINVAL; ctx = ezusb_alloc_ctx(upriv, rid, rid); if (!ctx) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL, buf, bufsize, length); } static int ezusb_doicmd_wait(struct hermes *hw, u16 cmd, u16 parm0, u16 parm1, u16 parm2, struct hermes_response *resp) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; __le16 data[4] = { cpu_to_le16(cmd), cpu_to_le16(parm0), cpu_to_le16(parm1), cpu_to_le16(parm2), }; netdev_dbg(upriv->dev, "0x%04X, parm0 0x%04X, parm1 0x%04X, parm2 0x%04X\n", cmd, parm0, parm1, parm2); ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, EZUSB_FRAME_CONTROL, NULL, 0, NULL); } static int ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0, struct hermes_response *resp) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; __le16 data[4] = { cpu_to_le16(cmd), cpu_to_le16(parm0), 0, 0, }; netdev_dbg(upriv->dev, "0x%04X, parm0 0x%04X\n", cmd, parm0); ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; return 
ezusb_access_ltv(upriv, ctx, sizeof(data), &data, EZUSB_FRAME_CONTROL, NULL, 0, NULL); } static int ezusb_bap_pread(struct hermes *hw, int bap, void *buf, int len, u16 id, u16 offset) { struct ezusb_priv *upriv = hw->priv; struct ezusb_packet *ans = (void *) upriv->read_urb->transfer_buffer; int actual_length = upriv->read_urb->actual_length; if (id == EZUSB_RID_RX) { if ((sizeof(*ans) + offset + len) > actual_length) { printk(KERN_ERR PFX "BAP read beyond buffer end " "in rx frame\n"); return -EINVAL; } memcpy(buf, ans->data + offset, len); return 0; } if (EZUSB_IS_INFO(id)) { /* Include 4 bytes for length/type */ if ((sizeof(*ans) + offset + len - 4) > actual_length) { printk(KERN_ERR PFX "BAP read beyond buffer end " "in info frame\n"); return -EFAULT; } memcpy(buf, ans->data + offset - 4, len); } else { printk(KERN_ERR PFX "Unexpected fid 0x%04x\n", id); return -EINVAL; } return 0; } static int ezusb_read_pda(struct hermes *hw, __le16 *pda, u32 pda_addr, u16 pda_len) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; __le16 data[] = { cpu_to_le16(pda_addr & 0xffff), cpu_to_le16(pda_len - 4) }; ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_READ_PDA, EZUSB_RID_READ_PDA); if (!ctx) return -ENOMEM; /* wl_lkm does not include PDA size in the PDA area. 
* We will pad the information into pda, so other routines * don't have to be modified */ pda[0] = cpu_to_le16(pda_len - 2); /* Includes CFG_PROD_DATA but not itself */ pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, EZUSB_FRAME_CONTROL, &pda[2], pda_len - 4, NULL); } static int ezusb_program_init(struct hermes *hw, u32 entry_point) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; __le32 data = cpu_to_le32(entry_point); ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_INIT, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, EZUSB_FRAME_CONTROL, NULL, 0, NULL); } static int ezusb_program_end(struct hermes *hw) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_END, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL, NULL, 0, NULL); } static int ezusb_program_bytes(struct hermes *hw, const char *buf, u32 addr, u32 len) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; __le32 data = cpu_to_le32(addr); int err; ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_SET_ADDR, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; err = ezusb_access_ltv(upriv, ctx, sizeof(data), &data, EZUSB_FRAME_CONTROL, NULL, 0, NULL); if (err) return err; ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_BYTES, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, len, buf, EZUSB_FRAME_CONTROL, NULL, 0, NULL); } static int ezusb_program(struct hermes *hw, const char *buf, u32 addr, u32 len) { u32 ch_addr; u32 ch_len; int err = 0; /* We can only send 2048 bytes out of the bulk xmit at a time, * so we have to split any programming into chunks of <2048 * bytes. */ ch_len = (len < MAX_DL_SIZE) ? len : MAX_DL_SIZE; ch_addr = addr; while (ch_addr < (addr + len)) { pr_debug("Programming subblock of length %d " "to address 0x%08x. 
Data @ %p\n", ch_len, ch_addr, &buf[ch_addr - addr]); err = ezusb_program_bytes(hw, &buf[ch_addr - addr], ch_addr, ch_len); if (err) break; ch_addr += ch_len; ch_len = ((addr + len - ch_addr) < MAX_DL_SIZE) ? (addr + len - ch_addr) : MAX_DL_SIZE; } return err; } static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); struct net_device_stats *stats = &priv->stats; struct ezusb_priv *upriv = priv->card; u8 mic[MICHAEL_MIC_LEN + 1]; int err = 0; int tx_control; unsigned long flags; struct request_context *ctx; u8 *buf; int tx_size; if (!netif_running(dev)) { printk(KERN_ERR "%s: Tx on stopped device!\n", dev->name); return NETDEV_TX_BUSY; } if (netif_queue_stopped(dev)) { printk(KERN_DEBUG "%s: Tx while transmitter busy!\n", dev->name); return NETDEV_TX_BUSY; } if (orinoco_lock(priv, &flags) != 0) { printk(KERN_ERR "%s: ezusb_xmit() called while hw_unavailable\n", dev->name); return NETDEV_TX_BUSY; } if (!netif_carrier_ok(dev) || (priv->iw_mode == NL80211_IFTYPE_MONITOR)) { /* Oops, the firmware hasn't established a connection, silently drop the packet (this seems to be the safest approach). */ goto drop; } /* Check packet length */ if (skb->len < ETH_HLEN) goto drop; ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); if (!ctx) goto busy; memset(ctx->buf, 0, BULK_BUF_SIZE); buf = ctx->buf->data; tx_control = 0; err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control, &mic[0]); if (err) goto drop; { __le16 *tx_cntl = (__le16 *)buf; *tx_cntl = cpu_to_le16(tx_control); buf += sizeof(*tx_cntl); } memcpy(buf, skb->data, skb->len); buf += skb->len; if (tx_control & HERMES_TXCTRL_MIC) { u8 *m = mic; /* Mic has been offset so it can be copied to an even * address. We're copying eveything anyway, so we * don't need to copy that first byte. 
*/ if (skb->len % 2) m++; memcpy(buf, m, MICHAEL_MIC_LEN); buf += MICHAEL_MIC_LEN; } /* Finally, we actually initiate the send */ netif_stop_queue(dev); /* The card may behave better if we send evenly sized usb transfers */ tx_size = ALIGN(buf - ctx->buf->data, 2); err = ezusb_access_ltv(upriv, ctx, tx_size, NULL, EZUSB_FRAME_DATA, NULL, 0, NULL); if (err) { netif_start_queue(dev); if (net_ratelimit()) printk(KERN_ERR "%s: Error %d transmitting packet\n", dev->name, err); goto busy; } dev->trans_start = jiffies; stats->tx_bytes += skb->len; goto ok; drop: stats->tx_errors++; stats->tx_dropped++; ok: orinoco_unlock(priv, &flags); dev_kfree_skb(skb); return NETDEV_TX_OK; busy: orinoco_unlock(priv, &flags); return NETDEV_TX_BUSY; } static int ezusb_allocate(struct hermes *hw, u16 size, u16 *fid) { *fid = EZUSB_RID_TX; return 0; } static int ezusb_hard_reset(struct orinoco_private *priv) { struct ezusb_priv *upriv = priv->card; int retval = ezusb_8051_cpucs(upriv, 1); if (retval < 0) { err("Failed to reset"); return retval; } retval = ezusb_8051_cpucs(upriv, 0); if (retval < 0) { err("Failed to unreset"); return retval; } netdev_dbg(upriv->dev, "sending control message\n"); retval = usb_control_msg(upriv->udev, usb_sndctrlpipe(upriv->udev, 0), EZUSB_REQUEST_TRIGER, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0, 0x0, NULL, 0, DEF_TIMEOUT); if (retval < 0) { err("EZUSB_REQUEST_TRIGER failed retval %d", retval); return retval; } #if 0 dbg("Sending EZUSB_REQUEST_TRIG_AC"); retval = usb_control_msg(upriv->udev, usb_sndctrlpipe(upriv->udev, 0), EZUSB_REQUEST_TRIG_AC, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x00FA, 0x0, NULL, 0, DEF_TIMEOUT); if (retval < 0) { err("EZUSB_REQUEST_TRIG_AC failed retval %d", retval); return retval; } #endif return 0; } static int ezusb_init(struct hermes *hw) { struct ezusb_priv *upriv = hw->priv; int retval; BUG_ON(in_interrupt()); BUG_ON(!upriv); upriv->reply_count = 0; /* Write the MAGIC number on the simulated registers to 
keep * orinoco.c happy */ hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC); hermes_write_regn(hw, RXFID, EZUSB_RID_RX); usb_kill_urb(upriv->read_urb); ezusb_submit_in_urb(upriv); retval = ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1, HERMES_BYTES_TO_RECLEN(2), "\x10\x00"); if (retval < 0) { printk(KERN_ERR PFX "EZUSB_RID_INIT1 error %d\n", retval); return retval; } retval = ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL); if (retval < 0) { printk(KERN_ERR PFX "HERMES_CMD_INIT error %d\n", retval); return retval; } return 0; } static void ezusb_bulk_in_callback(struct urb *urb) { struct ezusb_priv *upriv = (struct ezusb_priv *) urb->context; struct ezusb_packet *ans = urb->transfer_buffer; u16 crc; u16 hermes_rid; if (upriv->udev == NULL) return; if (urb->status == -ETIMEDOUT) { /* When a device gets unplugged we get this every time * we resubmit, flooding the logs. Since we don't use * USB timeouts, it shouldn't happen any other time*/ pr_warn("%s: urb timed out, not resubmitting\n", __func__); return; } if (urb->status == -ECONNABORTED) { pr_warn("%s: connection abort, resubmitting urb\n", __func__); goto resubmit; } if ((urb->status == -EILSEQ) || (urb->status == -ENOENT) || (urb->status == -ECONNRESET)) { netdev_dbg(upriv->dev, "status %d, not resubmiting\n", urb->status); return; } if (urb->status) netdev_dbg(upriv->dev, "status: %d length: %d\n", urb->status, urb->actual_length); if (urb->actual_length < sizeof(*ans)) { err("%s: short read, ignoring", __func__); goto resubmit; } crc = build_crc(ans); if (le16_to_cpu(ans->crc) != crc) { err("CRC error, ignoring packet"); goto resubmit; } hermes_rid = le16_to_cpu(ans->hermes_rid); if ((hermes_rid != EZUSB_RID_RX) && !EZUSB_IS_INFO(hermes_rid)) { ezusb_request_in_callback(upriv, urb); } else if (upriv->dev) { struct net_device *dev = upriv->dev; struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; if (hermes_rid == EZUSB_RID_RX) { __orinoco_ev_rx(dev, hw); } else { hermes_write_regn(hw, INFOFID, 
le16_to_cpu(ans->hermes_rid)); __orinoco_ev_info(dev, hw); } } resubmit: if (upriv->udev) ezusb_submit_in_urb(upriv); } static inline void ezusb_delete(struct ezusb_priv *upriv) { struct net_device *dev; struct list_head *item; struct list_head *tmp_item; unsigned long flags; BUG_ON(in_interrupt()); BUG_ON(!upriv); dev = upriv->dev; mutex_lock(&upriv->mtx); upriv->udev = NULL; /* No timer will be rearmed from here */ usb_kill_urb(upriv->read_urb); spin_lock_irqsave(&upriv->req_lock, flags); list_for_each_safe(item, tmp_item, &upriv->req_active) { struct request_context *ctx; int err; ctx = list_entry(item, struct request_context, list); atomic_inc(&ctx->refcount); ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK; err = usb_unlink_urb(ctx->outurb); spin_unlock_irqrestore(&upriv->req_lock, flags); if (err == -EINPROGRESS) wait_for_completion(&ctx->done); del_timer_sync(&ctx->timer); /* FIXME: there is an slight chance for the irq handler to * be running */ if (!list_empty(&ctx->list)) ezusb_ctx_complete(ctx); ezusb_request_context_put(ctx); spin_lock_irqsave(&upriv->req_lock, flags); } spin_unlock_irqrestore(&upriv->req_lock, flags); list_for_each_safe(item, tmp_item, &upriv->req_pending) ezusb_ctx_complete(list_entry(item, struct request_context, list)); if (upriv->read_urb && upriv->read_urb->status == -EINPROGRESS) printk(KERN_ERR PFX "Some URB in progress\n"); mutex_unlock(&upriv->mtx); if (upriv->read_urb) { kfree(upriv->read_urb->transfer_buffer); usb_free_urb(upriv->read_urb); } kfree(upriv->bap_buf); if (upriv->dev) { struct orinoco_private *priv = ndev_priv(upriv->dev); orinoco_if_del(priv); free_orinocodev(priv); } } static void ezusb_lock_irqsave(spinlock_t *lock, unsigned long *flags) __acquires(lock) { spin_lock_bh(lock); } static void ezusb_unlock_irqrestore(spinlock_t *lock, unsigned long *flags) __releases(lock) { spin_unlock_bh(lock); } static void ezusb_lock_irq(spinlock_t *lock) __acquires(lock) { spin_lock_bh(lock); } static void 
ezusb_unlock_irq(spinlock_t *lock) __releases(lock) { spin_unlock_bh(lock); } static const struct hermes_ops ezusb_ops = { .init = ezusb_init, .cmd_wait = ezusb_docmd_wait, .init_cmd_wait = ezusb_doicmd_wait, .allocate = ezusb_allocate, .read_ltv = ezusb_read_ltv, .write_ltv = ezusb_write_ltv, .bap_pread = ezusb_bap_pread, .read_pda = ezusb_read_pda, .program_init = ezusb_program_init, .program_end = ezusb_program_end, .program = ezusb_program, .lock_irqsave = ezusb_lock_irqsave, .unlock_irqrestore = ezusb_unlock_irqrestore, .lock_irq = ezusb_lock_irq, .unlock_irq = ezusb_unlock_irq, }; static const struct net_device_ops ezusb_netdev_ops = { .ndo_open = orinoco_open, .ndo_stop = orinoco_stop, .ndo_start_xmit = ezusb_xmit, .ndo_set_rx_mode = orinoco_set_multicast_list, .ndo_change_mtu = orinoco_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = orinoco_tx_timeout, .ndo_get_stats = orinoco_get_stats, }; static int ezusb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct orinoco_private *priv; struct hermes *hw; struct ezusb_priv *upriv = NULL; struct usb_interface_descriptor *iface_desc; struct usb_endpoint_descriptor *ep; const struct firmware *fw_entry = NULL; int retval = 0; int i; priv = alloc_orinocodev(sizeof(*upriv), &udev->dev, ezusb_hard_reset, NULL); if (!priv) { err("Couldn't allocate orinocodev"); goto exit; } hw = &priv->hw; upriv = priv->card; mutex_init(&upriv->mtx); spin_lock_init(&upriv->reply_count_lock); spin_lock_init(&upriv->req_lock); INIT_LIST_HEAD(&upriv->req_pending); INIT_LIST_HEAD(&upriv->req_active); upriv->udev = udev; hw->iobase = (void __force __iomem *) &upriv->hermes_reg_fake; hw->reg_spacing = HERMES_16BIT_REGSPACING; hw->priv = upriv; hw->ops = &ezusb_ops; /* set up the endpoint information */ /* check out the endpoints */ iface_desc = &interface->altsetting[0].desc; for (i = 0; i < 
iface_desc->bNumEndpoints; ++i) { ep = &interface->altsetting[0].endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep)) { /* we found a bulk in endpoint */ if (upriv->read_urb != NULL) { pr_warn("Found a second bulk in ep, ignored\n"); continue; } upriv->read_urb = usb_alloc_urb(0, GFP_KERNEL); if (!upriv->read_urb) { err("No free urbs available"); goto error; } if (le16_to_cpu(ep->wMaxPacketSize) != 64) pr_warn("bulk in: wMaxPacketSize!= 64\n"); if (ep->bEndpointAddress != (2 | USB_DIR_IN)) pr_warn("bulk in: bEndpointAddress: %d\n", ep->bEndpointAddress); upriv->read_pipe = usb_rcvbulkpipe(udev, ep-> bEndpointAddress); upriv->read_urb->transfer_buffer = kmalloc(BULK_BUF_SIZE, GFP_KERNEL); if (!upriv->read_urb->transfer_buffer) { err("Couldn't allocate IN buffer"); goto error; } } if (usb_endpoint_is_bulk_out(ep)) { /* we found a bulk out endpoint */ if (upriv->bap_buf != NULL) { pr_warn("Found a second bulk out ep, ignored\n"); continue; } if (le16_to_cpu(ep->wMaxPacketSize) != 64) pr_warn("bulk out: wMaxPacketSize != 64\n"); if (ep->bEndpointAddress != 2) pr_warn("bulk out: bEndpointAddress: %d\n", ep->bEndpointAddress); upriv->write_pipe = usb_sndbulkpipe(udev, ep-> bEndpointAddress); upriv->bap_buf = kmalloc(BULK_BUF_SIZE, GFP_KERNEL); if (!upriv->bap_buf) { err("Couldn't allocate bulk_out_buffer"); goto error; } } } if (!upriv->bap_buf || !upriv->read_urb) { err("Didn't find the required bulk endpoints"); goto error; } if (request_firmware(&fw_entry, "orinoco_ezusb_fw", &interface->dev) == 0) { firmware.size = fw_entry->size; firmware.code = fw_entry->data; } if (firmware.size && firmware.code) { if (ezusb_firmware_download(upriv, &firmware) < 0) goto error; } else { err("No firmware to download"); goto error; } if (ezusb_hard_reset(priv) < 0) { err("Cannot reset the device"); goto error; } /* If the firmware is already downloaded orinoco.c will call * ezusb_init but if the firmware is not already there, that will make * the kernel very unstable, so we try 
initializing here and quit in * case of error */ if (ezusb_init(hw) < 0) { err("Couldn't initialize the device"); err("Firmware may not be downloaded or may be wrong."); goto error; } /* Initialise the main driver */ if (orinoco_init(priv) != 0) { err("orinoco_init() failed\n"); goto error; } if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) { upriv->dev = NULL; err("%s: orinoco_if_add() failed", __func__); goto error; } upriv->dev = priv->ndev; goto exit; error: ezusb_delete(upriv); if (upriv->dev) { /* upriv->dev was 0, so ezusb_delete() didn't free it */ free_orinocodev(priv); } upriv = NULL; retval = -EFAULT; exit: if (fw_entry) { firmware.code = NULL; firmware.size = 0; release_firmware(fw_entry); } usb_set_intfdata(interface, upriv); return retval; } static void ezusb_disconnect(struct usb_interface *intf) { struct ezusb_priv *upriv = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); ezusb_delete(upriv); printk(KERN_INFO PFX "Disconnected\n"); } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver orinoco_driver = { .name = DRIVER_NAME, .probe = ezusb_probe, .disconnect = ezusb_disconnect, .id_table = ezusb_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(orinoco_driver); MODULE_AUTHOR("Manuel Estrada Sainz"); MODULE_DESCRIPTION("Driver for Orinoco wireless LAN cards using EZUSB bridge"); MODULE_LICENSE("Dual MPL/GPL");
gpl-2.0
DirtyUnicorns/android_kernel_sony_apq8064
drivers/net/ethernet/tehuti/tehuti.c
907
68079
/* * Tehuti Networks(R) Network Driver * ethtool interface implementation * Copyright (C) 2007 Tehuti Networks Ltd. All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* * RX HW/SW interaction overview * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * There are 2 types of RX communication channels between driver and NIC. * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming * traffic. This Fifo is filled by SW and is readen by HW. Each descriptor holds * info about buffer's location, size and ID. An ID field is used to identify a * buffer when it's returned with data via RXD Fifo (see below) * 2) RX Data Fifo - RXD - holds descriptors of full buffers. This Fifo is * filled by HW and is readen by SW. Each descriptor holds status and ID. * HW pops descriptor from RXF Fifo, stores ID, fills buffer with incoming data, * via dma moves it into host memory, builds new RXD descriptor with same ID, * pushes it into RXD Fifo and raises interrupt to indicate new RX data. * * Current NIC configuration (registers + firmware) makes NIC use 2 RXF Fifos. * One holds 1.5K packets and another - 26K packets. Depending on incoming * packet size, HW desides on a RXF Fifo to pop buffer from. When packet is * filled with data, HW builds new RXD descriptor for it and push it into single * RXD Fifo. * * RX SW Data Structures * ~~~~~~~~~~~~~~~~~~~~~ * skb db - used to keep track of all skbs owned by SW and their dma addresses. * For RX case, ownership lasts from allocating new empty skb for RXF until * accepting full skb from RXD and passing it to OS. Each RXF Fifo has its own * skb db. Implemented as array with bitmask. * fifo - keeps info about fifo's size and location, relevant HW registers, * usage and skb db. Each RXD and RXF Fifo has its own fifo structure. 
* Implemented as simple struct. * * RX SW Execution Flow * ~~~~~~~~~~~~~~~~~~~~ * Upon initialization (ifconfig up) driver creates RX fifos and initializes * relevant registers. At the end of init phase, driver enables interrupts. * NIC sees that there is no RXF buffers and raises * RD_INTR interrupt, isr fills skbs and Rx begins. * Driver has two receive operation modes: * NAPI - interrupt-driven mixed with polling * interrupt-driven only * * Interrupt-driven only flow is following. When buffer is ready, HW raises * interrupt and isr is called. isr collects all available packets * (bdx_rx_receive), refills skbs (bdx_rx_alloc_skbs) and exit. * Rx buffer allocation note * ~~~~~~~~~~~~~~~~~~~~~~~~~ * Driver cares to feed such amount of RxF descriptors that respective amount of * RxD descriptors can not fill entire RxD fifo. The main reason is lack of * overflow check in Bordeaux for RxD fifo free/used size. * FIXME: this is NOT fully implemented, more work should be done * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "tehuti.h" static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = { { PCI_VDEVICE(TEHUTI, 0x3009), }, { PCI_VDEVICE(TEHUTI, 0x3010), }, { PCI_VDEVICE(TEHUTI, 0x3014), }, { 0 } }; MODULE_DEVICE_TABLE(pci, bdx_pci_tbl); /* Definitions needed by ISR or NAPI functions */ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f); static void bdx_tx_cleanup(struct bdx_priv *priv); static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget); /* Definitions needed by FW loading */ static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size); /* Definitions needed by hw_start */ static int bdx_tx_init(struct bdx_priv *priv); static int bdx_rx_init(struct bdx_priv *priv); /* Definitions needed by bdx_close */ static void bdx_rx_free(struct bdx_priv *priv); static void bdx_tx_free(struct bdx_priv *priv); /* Definitions needed by bdx_probe */ static void bdx_set_ethtool_ops(struct net_device *netdev); 
/************************************************************************* * Print Info * *************************************************************************/ static void print_hw_id(struct pci_dev *pdev) { struct pci_nic *nic = pci_get_drvdata(pdev); u16 pci_link_status = 0; u16 pci_ctrl = 0; pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status); pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl); pr_info("%s%s\n", BDX_NIC_NAME, nic->port_num == 1 ? "" : ", 2-Port"); pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n", readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF, readl(nic->regs + FPGA_SEED), GET_LINK_STATUS_LANES(pci_link_status), GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl)); } static void print_fw_id(struct pci_nic *nic) { pr_info("fw 0x%x\n", readl(nic->regs + FW_VER)); } static void print_eth_id(struct net_device *ndev) { netdev_info(ndev, "%s, Port %c\n", BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B'); } /************************************************************************* * Code * *************************************************************************/ #define bdx_enable_interrupts(priv) \ do { WRITE_REG(priv, regIMR, IR_RUN); } while (0) #define bdx_disable_interrupts(priv) \ do { WRITE_REG(priv, regIMR, 0); } while (0) /* bdx_fifo_init * create TX/RX descriptor fifo for host-NIC communication. 
* 1K extra space is allocated at the end of the fifo to simplify * processing of descriptors that wraps around fifo's end * @priv - NIC private structure * @f - fifo to initialize * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB * @reg_XXX - offsets of registers relative to base address * * Returns 0 on success, negative value on failure * */ static int bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type, u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR) { u16 memsz = FIFO_SIZE * (1 << fsz_type); memset(f, 0, sizeof(struct fifo)); /* pci_alloc_consistent gives us 4k-aligned memory */ f->va = pci_alloc_consistent(priv->pdev, memsz + FIFO_EXTRA_SPACE, &f->da); if (!f->va) { pr_err("pci_alloc_consistent failed\n"); RET(-ENOMEM); } f->reg_CFG0 = reg_CFG0; f->reg_CFG1 = reg_CFG1; f->reg_RPTR = reg_RPTR; f->reg_WPTR = reg_WPTR; f->rptr = 0; f->wptr = 0; f->memsz = memsz; f->size_mask = memsz - 1; WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type)); WRITE_REG(priv, reg_CFG1, H32_64(f->da)); RET(0); } /* bdx_fifo_free - free all resources used by fifo * @priv - NIC private structure * @f - fifo to release */ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f) { ENTER; if (f->va) { pci_free_consistent(priv->pdev, f->memsz + FIFO_EXTRA_SPACE, f->va, f->da); f->va = NULL; } RET(); } /* * bdx_link_changed - notifies OS about hw link state. 
* @bdx_priv - hw adapter structure */ static void bdx_link_changed(struct bdx_priv *priv) { u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT; if (!link) { if (netif_carrier_ok(priv->ndev)) { netif_stop_queue(priv->ndev); netif_carrier_off(priv->ndev); netdev_err(priv->ndev, "Link Down\n"); } } else { if (!netif_carrier_ok(priv->ndev)) { netif_wake_queue(priv->ndev); netif_carrier_on(priv->ndev); netdev_err(priv->ndev, "Link Up\n"); } } } static void bdx_isr_extra(struct bdx_priv *priv, u32 isr) { if (isr & IR_RX_FREE_0) { bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); DBG("RX_FREE_0\n"); } if (isr & IR_LNKCHG0) bdx_link_changed(priv); if (isr & IR_PCIE_LINK) netdev_err(priv->ndev, "PCI-E Link Fault\n"); if (isr & IR_PCIE_TOUT) netdev_err(priv->ndev, "PCI-E Time Out\n"); } /* bdx_isr - Interrupt Service Routine for Bordeaux NIC * @irq - interrupt number * @ndev - network device * @regs - CPU registers * * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise * * It reads ISR register to know interrupt reasons, and proceed them one by one. * Reasons of interest are: * RX_DESC - new packet has arrived and RXD fifo holds its descriptor * RX_FREE - number of free Rx buffers in RXF fifo gets low * TX_FREE - packet was transmited and RXF fifo holds its descriptor */ static irqreturn_t bdx_isr_napi(int irq, void *dev) { struct net_device *ndev = dev; struct bdx_priv *priv = netdev_priv(ndev); u32 isr; ENTER; isr = (READ_REG(priv, regISR) & IR_RUN); if (unlikely(!isr)) { bdx_enable_interrupts(priv); return IRQ_NONE; /* Not our interrupt */ } if (isr & IR_EXTRA) bdx_isr_extra(priv, isr); if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) { if (likely(napi_schedule_prep(&priv->napi))) { __napi_schedule(&priv->napi); RET(IRQ_HANDLED); } else { /* NOTE: we get here if intr has slipped into window * between these lines in bdx_poll: * bdx_enable_interrupts(priv); * return 0; * currently intrs are disabled (since we read ISR), * and we have failed to register next poll. 
* so we read the regs to trigger chip * and allow further interupts. */ READ_REG(priv, regTXF_WPTR_0); READ_REG(priv, regRXD_WPTR_0); } } bdx_enable_interrupts(priv); RET(IRQ_HANDLED); } static int bdx_poll(struct napi_struct *napi, int budget) { struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi); int work_done; ENTER; bdx_tx_cleanup(priv); work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget); if ((work_done < budget) || (priv->napi_stop++ >= 30)) { DBG("rx poll is done. backing to isr-driven\n"); /* from time to time we exit to let NAPI layer release * device lock and allow waiting tasks (eg rmmod) to advance) */ priv->napi_stop = 0; napi_complete(napi); bdx_enable_interrupts(priv); } return work_done; } /* bdx_fw_load - loads firmware to NIC * @priv - NIC private structure * Firmware is loaded via TXD fifo, so it must be initialized first. * Firware must be loaded once per NIC not per PCI device provided by NIC (NIC * can have few of them). So all drivers use semaphore register to choose one * that will actually load FW to NIC. 
*/
static int bdx_fw_load(struct bdx_priv *priv)
{
	const struct firmware *fw = NULL;
	int master, i;
	int rc;

	ENTER;
	/* winner of the semaphore race (master != 0) pushes the firmware;
	 * everybody else just waits for INIT_STATUS below */
	master = READ_REG(priv, regINIT_SEMAPHORE);
	if (!READ_REG(priv, regINIT_STATUS) && master) {
		rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
		if (rc)
			goto out;
		bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
		mdelay(100);
	}
	/* poll up to ~400ms for the NIC to report that FW init finished */
	for (i = 0; i < 200; i++) {
		if (READ_REG(priv, regINIT_STATUS)) {
			rc = 0;
			goto out;
		}
		mdelay(2);
	}
	rc = -EIO;
out:
	/* release the semaphore so other ports/drivers may proceed */
	if (master)
		WRITE_REG(priv, regINIT_SEMAPHORE, 1);
	if (fw)
		release_firmware(fw);

	if (rc) {
		netdev_err(priv->ndev, "firmware loading failed\n");
		if (rc == -EIO)
			DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
			    READ_REG(priv, regVPC),
			    READ_REG(priv, regVIC),
			    READ_REG(priv, regINIT_STATUS), i);
		RET(rc);
	} else {
		DBG("%s: firmware loading success\n", priv->ndev->name);
		RET(0);
	}
}

/* bdx_restore_mac - writes ndev->dev_addr into the three 16-bit unicast
 * MAC address registers (MAC2_A holds bytes 0-1, MAC0_A bytes 4-5). */
static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
{
	u32 val;

	ENTER;
	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A), READ_REG(priv, regUNC_MAC1_A),
	    READ_REG(priv, regUNC_MAC2_A));

	val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
	WRITE_REG(priv, regUNC_MAC2_A, val);
	val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
	WRITE_REG(priv, regUNC_MAC1_A, val);
	val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
	WRITE_REG(priv, regUNC_MAC0_A, val);

	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A), READ_REG(priv, regUNC_MAC1_A),
	    READ_REG(priv, regUNC_MAC2_A));
	RET();
}

/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines
 * @priv - NIC private structure
 * Returns 0 on success, negative value on failure (request_irq error). */
static int bdx_hw_start(struct bdx_priv *priv)
{
	int rc = -EIO;
	struct net_device *ndev = priv->ndev;

	ENTER;
	bdx_link_changed(priv);

	/* 10G overall max length (vlan, eth&ip header, ip payload, crc) */
	WRITE_REG(priv, regFRM_LENGTH, 0X3FE0);
	WRITE_REG(priv, regPAUSE_QUANT, 0x96);
	WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010);
	WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010);
	WRITE_REG(priv, regRX_FULLNESS, 0);
	WRITE_REG(priv, regTX_FULLNESS, 0);
	WRITE_REG(priv, regCTRLST,
		  regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA);

	WRITE_REG(priv, regVGLB, 0);
	WRITE_REG(priv, regMAX_FRAME_A,
		  priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL);

	DBG("RDINTCM=%08x\n", priv->rdintcm);	/*NOTE: test script uses this */
	WRITE_REG(priv, regRDINTCM0, priv->rdintcm);
	WRITE_REG(priv, regRDINTCM2, 0);	/*cpu_to_le32(rcm.val)); */

	DBG("TDINTCM=%08x\n", priv->tdintcm);	/*NOTE: test script uses this */
	WRITE_REG(priv, regTDINTCM0, priv->tdintcm);	/* old val = 0x300064 */

	/* Enable timer interrupt once in 2 secs. */
	/*WRITE_REG(priv, regGTMR0, ((GTMR_SEC * 2) & GTMR_DATA)); */
	bdx_restore_mac(priv->ndev, priv);

	WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
		  GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);

#define BDX_IRQ_TYPE	((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)

	rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
			 ndev->name, ndev);
	if (rc)
		goto err_irq;
	bdx_enable_interrupts(priv);
	RET(0);

err_irq:
	RET(rc);
}

/* bdx_hw_stop - masks interrupts, releases the irq and stops the queue.
 * Counterpart of bdx_hw_start(). */
static void bdx_hw_stop(struct bdx_priv *priv)
{
	ENTER;
	bdx_disable_interrupts(priv);
	free_irq(priv->pdev->irq, priv->ndev);

	netif_carrier_off(priv->ndev);
	netif_stop_queue(priv->ndev);

	RET();
}

/* bdx_hw_reset_direct - PLL-based chip reset working on raw mapped
 * registers (no bdx_priv needed). Returns 0 on success, 1 on timeout. */
static int bdx_hw_reset_direct(void __iomem *regs)
{
	u32 val, i;
	ENTER;

	/* reset sequences: read, write 1, read, write 0 */
	val = readl(regs + regCLKPLL);
	writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL);
	udelay(50);
	val = readl(regs + regCLKPLL);
	writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL);

	/* check that the PLLs are locked and reset ended */
	for (i = 0; i < 70; i++, mdelay(10))
		if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			/* do any PCI-E read transaction */
			readl(regs + regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;		/* failure */
}

/* bdx_hw_reset - same PLL reset as bdx_hw_reset_direct() but via READ_REG /
 * WRITE_REG; only port 0 toggles the PLL, both ports wait for lock.
 * Returns 0 on success, 1 on timeout. */
static int bdx_hw_reset(struct bdx_priv *priv)
{
	u32 val, i;
	ENTER;

	if (priv->port == 0) {
		/* reset sequences: read, write 1, read, write 0 */
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8);
		udelay(50);
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST);
	}
	/* check that the PLLs are locked and reset ended */
	for (i = 0; i < 70; i++, mdelay(10))
		if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			/* do any PCI-E read transaction */
			READ_REG(priv, regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;		/* failure */
}

/* bdx_sw_reset - software reset sequence for chips without the PLL reset;
 * disables rx/tx, resets queues and port, clears all fifo pointers, then
 * re-enables everything except Rx (done later by bdx_hw_start). */
static int bdx_sw_reset(struct bdx_priv *priv)
{
	int i;

	ENTER;
	/* 1. load MAC (obsolete) */
	/* 2. disable Rx (and Tx) */
	WRITE_REG(priv, regGMAC_RXF_A, 0);
	mdelay(100);
	/* 3. disable port */
	WRITE_REG(priv, regDIS_PORT, 1);
	/* 4. disable queue */
	WRITE_REG(priv, regDIS_QU, 1);
	/* 5. wait until hw is disabled */
	for (i = 0; i < 50; i++) {
		if (READ_REG(priv, regRST_PORT) & 1)
			break;
		mdelay(10);
	}
	if (i == 50)
		netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");

	/* 6. disable intrs */
	WRITE_REG(priv, regRDINTCM0, 0);
	WRITE_REG(priv, regTDINTCM0, 0);
	WRITE_REG(priv, regIMR, 0);
	READ_REG(priv, regISR);

	/* 7. reset queue */
	WRITE_REG(priv, regRST_QU, 1);
	/* 8. reset port */
	WRITE_REG(priv, regRST_PORT, 1);
	/* 9. zero all read and write pointers */
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		WRITE_REG(priv, i, 0);
	/* 10. unset port disable */
	WRITE_REG(priv, regDIS_PORT, 0);
	/* 11. unset queue disable */
	WRITE_REG(priv, regDIS_QU, 0);
	/* 12. unset queue reset */
	WRITE_REG(priv, regRST_QU, 0);
	/* 13. unset port reset */
	WRITE_REG(priv, regRST_PORT, 0);
	/* 14. enable Rx */
	/* skipped. will be done later */
	/* 15. save MAC (obsolete) */
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);

	RET(0);
}

/* bdx_reset - performs right type of reset depending on hw type:
 * only PCI device id 0x3009 uses the PLL-based HW reset, all others
 * use the step-by-step SW reset. */
static int bdx_reset(struct bdx_priv *priv)
{
	ENTER;
	RET((priv->pdev->device == 0x3009)
	    ? bdx_hw_reset(priv)
	    : bdx_sw_reset(priv));
}

/**
 * bdx_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int bdx_close(struct net_device *ndev)
{
	struct bdx_priv *priv = NULL;

	ENTER;
	priv = netdev_priv(ndev);

	napi_disable(&priv->napi);

	bdx_reset(priv);
	bdx_hw_stop(priv);
	bdx_rx_free(priv);
	bdx_tx_free(priv);
	RET(0);
}

/**
 * bdx_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
**/ static int bdx_open(struct net_device *ndev) { struct bdx_priv *priv; int rc; ENTER; priv = netdev_priv(ndev); bdx_reset(priv); if (netif_running(ndev)) netif_stop_queue(priv->ndev); if ((rc = bdx_tx_init(priv)) || (rc = bdx_rx_init(priv)) || (rc = bdx_fw_load(priv))) goto err; bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); rc = bdx_hw_start(priv); if (rc) goto err; napi_enable(&priv->napi); print_fw_id(priv->nic); RET(0); err: bdx_close(ndev); RET(rc); } static int bdx_range_check(struct bdx_priv *priv, u32 offset) { return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ? -EINVAL : 0; } static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) { struct bdx_priv *priv = netdev_priv(ndev); u32 data[3]; int error; ENTER; DBG("jiffies=%ld cmd=%d\n", jiffies, cmd); if (cmd != SIOCDEVPRIVATE) { error = copy_from_user(data, ifr->ifr_data, sizeof(data)); if (error) { pr_err("can't copy from user\n"); RET(-EFAULT); } DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]); } if (!capable(CAP_SYS_RAWIO)) return -EPERM; switch (data[0]) { case BDX_OP_READ: error = bdx_range_check(priv, data[1]); if (error < 0) return error; data[2] = READ_REG(priv, data[1]); DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2], data[2]); error = copy_to_user(ifr->ifr_data, data, sizeof(data)); if (error) RET(-EFAULT); break; case BDX_OP_WRITE: error = bdx_range_check(priv, data[1]); if (error < 0) return error; WRITE_REG(priv, data[1], data[2]); DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]); break; default: RET(-EOPNOTSUPP); } return 0; } static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) { ENTER; if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) RET(bdx_ioctl_priv(ndev, ifr, cmd)); else RET(-EOPNOTSUPP); } /* * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid * by passing VLAN filter table to hardware * @ndev network device * @vid VLAN vid * @op add or kill operation */ static void __bdx_vlan_rx_vid(struct net_device 
*ndev, uint16_t vid, int enable)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 reg, bit, val;

	ENTER;
	DBG2("vid=%d value=%d\n", (int)vid, enable);
	if (unlikely(vid >= 4096)) {
		pr_err("invalid VID: %u (> 4096)\n", vid);
		RET();
	}
	/* one bit per vid, 32 vids per 32-bit filter register */
	reg = regVLAN_0 + (vid / 32) * 4;
	bit = 1 << vid % 32;
	val = READ_REG(priv, reg);
	DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
	if (enable)
		val |= bit;
	else
		val &= ~bit;
	DBG2("new val %x\n", val);
	WRITE_REG(priv, reg, val);
	RET();
}

/*
 * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
 * @ndev network device
 * @vid  VLAN vid to add
 */
static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 1);
	return 0;
}

/*
 * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
 * @ndev network device
 * @vid  VLAN vid to kill
 */
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 0);
	return 0;
}

/**
 * bdx_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
	ENTER;

	if (new_mtu == ndev->mtu)
		RET(0);

	/* enforce minimum frame size */
	if (new_mtu < ETH_ZLEN) {
		netdev_err(ndev, "mtu %d is less then minimal %d\n",
			   new_mtu, ETH_ZLEN);
		RET(-EINVAL);
	}

	ndev->mtu = new_mtu;
	/* a running interface must be bounced to re-size rx buffers */
	if (netif_running(ndev)) {
		bdx_close(ndev);
		bdx_open(ndev);
	}
	RET(0);
}

/* bdx_setmulti - programs the hardware rx filters (promiscuous, allmulti,
 * multicast hash, broadcast) according to ndev->flags and the mc list. */
static void bdx_setmulti(struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);

	u32 rxf_val =
	    GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
	int i;

	ENTER;
	/* IMF - imperfect (hash) rx multicast filter */
	/* PMF - perfect rx multicast filter */

	/* FIXME: RXE(OFF) */
	if (ndev->flags & IFF_PROMISC) {
		rxf_val |= GMAC_RX_FILTER_PRM;
	} else if (ndev->flags & IFF_ALLMULTI) {
		/* set IMF to accept all multicast frames */
		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
	} else if (!netdev_mc_empty(ndev)) {
		u8 hash;
		struct netdev_hw_addr *ha;
		u32 reg, val;

		/* set IMF to deny all multicast frames */
		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0);
		/* set PMF to deny all multicast frames */
		for (i = 0; i < MAC_MCST_NUM; i++) {
			WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
			WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
		}

		/* use PMF to accept first MAC_MCST_NUM (15) addresses */
		/* TBD: sort addresses and write them in ascending order
		 * into RX_MAC_MCST regs. we skip this phase now and accept ALL
		 * multicast frames through IMF */
		/* accept the rest of addresses through IMF */
		netdev_for_each_mc_addr(ha, ndev) {
			/* hash = XOR of all address bytes, selects one bit in
			 * the 256-bit imperfect filter */
			hash = 0;
			for (i = 0; i < ETH_ALEN; i++)
				hash ^= ha->addr[i];
			reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
			val = READ_REG(priv, reg);
			val |= (1 << (hash % 32));
			WRITE_REG(priv, reg, val);
		}
	} else {
		DBG("only own mac %d\n", netdev_mc_count(ndev));
		rxf_val |= GMAC_RX_FILTER_AB;
	}
	WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
	/* enable RX */
	/* FIXME: RXE(ON) */
	RET();
}

/* bdx_set_mac - ndo_set_mac_address hook: stores the new address and
 * writes it into the unicast MAC registers. */
static int bdx_set_mac(struct net_device *ndev, void *p)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = p;

	ENTER;
	/*
	   if (netif_running(dev))
	   return -EBUSY
	 */
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	bdx_restore_mac(ndev, priv);
	RET(0);
}

/* bdx_read_mac - reads the permanent MAC address from the chip into
 * ndev->dev_addr. Each register is read twice; presumably the double read
 * flushes a stale latched value - TODO confirm against the datasheet. */
static int bdx_read_mac(struct bdx_priv *priv)
{
	u16 macAddress[3], i;
	ENTER;

	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	for (i = 0; i < 3; i++) {
		priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
		priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
	}
	RET(0);
}

/* bdx_read_l2stat - reads one 64-bit hardware statistics counter:
 * low 32 bits at @reg, high 32 bits at @reg + 8. */
static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
{
	u64 val;

	val = READ_REG(priv, reg);
	val |= ((u64) READ_REG(priv, reg + 8))
	    << 32;
	return val;
}

/*Do the statistics-update work*/
static void bdx_update_stats(struct bdx_priv *priv)
{
	struct bdx_stats *stats = &priv->hw_stats;
	u64 *stats_vector = (u64 *) stats;
	int i;
	int addr;

	/* Fill HW structure: counters live in four register ranges separated
	 * by reserved holes; the BDX_ASSERTs pin the expected layout so a
	 * change in struct bdx_stats is caught immediately. */
	addr = 0x7200;
	/*First 12 statistics - 0x7200 - 0x72B0 */
	for (i = 0; i < 12; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x72C0);
	/* 0x72C0-0x72E0 RSRV */
	addr = 0x72F0;
	for (; i < 16; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7330);
	/* 0x7330-0x7360 RSRV */
	addr = 0x7370;
	for (; i < 19; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x73A0);
	/* 0x73A0-0x73B0 RSRV */
	addr = 0x73C0;
	for (; i < 23; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7400);
	BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
}

static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan);
static void print_rxfd(struct rxf_desc *rxfd);

/*************************************************************************
 *     Rx DB                                                             *
 *************************************************************************/

static void bdx_rxdb_destroy(struct rxdb *db)
{
	vfree(db);
}

/* bdx_rxdb_create - allocates the rx skb database: one struct rxdb header
 * followed by a stack of @nelem free indexes and @nelem rx_map slots. */
static struct rxdb *bdx_rxdb_create(int nelem)
{
	struct rxdb *db;
	int i;

	db = vmalloc(sizeof(struct rxdb)
		     + (nelem * sizeof(int))
		     + (nelem * sizeof(struct rx_map)));
	if (likely(db != NULL)) {
		db->stack = (int *)(db + 1);
		db->elems = (void *)(db->stack + nelem);
		db->nelem = nelem;
		db->top = nelem;
		for (i = 0; i < nelem; i++)
			db->stack[i] = nelem - i - 1;	/* to make first allocs
							   close to db struct */
	}

	return db;
}

/* bdx_rxdb_alloc_elem - pops a free element index off the stack.
 * Caller must ensure the db is not empty (see bdx_rxdb_available). */
static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
{
	BDX_ASSERT(db->top <= 0);
	return db->stack[--(db->top)];
}

/* bdx_rxdb_addr_elem - returns the address of rx_map slot @n */
static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n < 0) || (n >= db->nelem));
	return db->elems + n;
}

/* bdx_rxdb_available - number of free elements left in the db */
static inline int bdx_rxdb_available(struct rxdb *db)
{
	return
db->top;
}

/* bdx_rxdb_free_elem - pushes element index @n back onto the free stack */
static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n >= db->nelem) || (n < 0));
	db->stack[(db->top)++] = n;
}

/*************************************************************************
 *     Rx Init                                                           *
 *************************************************************************/

/* bdx_rx_init - initialize RX all related HW and SW resources
 * @priv - NIC private structure
 *
 * Returns 0 on success, negative value on failure
 *
 * It creates rxf and rxd fifos, update relevant HW registers, preallocate
 * skb for rx. It assumes that Rx is disabled in HW
 * funcs are grouped for better cache usage
 *
 * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be
 * filled and packets will be dropped by nic without getting into host or
 * causing interrupt. Anyway, in that condition, host has no chance to process
 * all packets, but dropping in nic is cheaper, since it takes 0 cpu cycles
 */

/* TBD: ensure proper packet size */

static int bdx_rx_init(struct bdx_priv *priv)
{
	ENTER;

	if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size,
			  regRXD_CFG0_0, regRXD_CFG1_0,
			  regRXD_RPTR_0, regRXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size,
			  regRXF_CFG0_0, regRXF_CFG1_0,
			  regRXF_RPTR_0, regRXF_WPTR_0))
		goto err_mem;
	priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
				     sizeof(struct rxf_desc));
	if (!priv->rxdb)
		goto err_mem;

	priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
	return 0;

err_mem:
	netdev_err(priv->ndev, "Rx init failed\n");
	return -ENOMEM;
}

/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
 * @priv - NIC private structure
 * @f    - RXF fifo
 */
static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct rx_map *dm;
	struct rxdb *db = priv->rxdb;
	u16 i;

	ENTER;
	DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
	    db->nelem - bdx_rxdb_available(db));
	/* mark free slots with dma == 0 so the loop below only touches
	 * elements that still own a mapped skb */
	while (bdx_rxdb_available(db) > 0) {
		i = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, i);
		dm->dma = 0;
	}
	for (i = 0; i < db->nelem; i++) {
		dm = bdx_rxdb_addr_elem(db, i);
		if (dm->dma) {
			pci_unmap_single(priv->pdev,
					 dm->dma, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(dm->skb);
		}
	}
}

/* bdx_rx_free - release all Rx resources
 * @priv - NIC private structure
 * It assumes that Rx is disabled in HW
 */
static void bdx_rx_free(struct bdx_priv *priv)
{
	ENTER;
	if (priv->rxdb) {
		bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
		bdx_rxdb_destroy(priv->rxdb);
		priv->rxdb = NULL;
	}
	bdx_fifo_free(priv, &priv->rxf_fifo0.m);
	bdx_fifo_free(priv, &priv->rxd_fifo0.m);

	RET();
}

/*************************************************************************
 *     Rx Engine                                                         *
 *************************************************************************/

/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs
 * @priv - nic's private structure
 * @f    - RXF fifo that needs skbs
 * It allocates skbs, build rxf descs and push it (rxf descr) into rxf fifo.
 * skb's virtual and physical addresses are stored in skb db.
 * To calculate free space, func uses cached values of RPTR and WPTR
 * When needed, it also updates RPTR and WPTR.
 */

/* TBD: do not update WPTR if no desc were written */

static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct sk_buff *skb;
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	int dno, delta, idx;
	struct rxdb *db = priv->rxdb;

	ENTER;
	dno = bdx_rxdb_available(db) - 1;
	while (dno > 0) {
		skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
		if (!skb) {
			pr_err("NO MEM: netdev_alloc_skb failed\n");
			break;
		}
		skb_reserve(skb, NET_IP_ALIGN);

		idx = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, idx);
		dm->dma = pci_map_single(priv->pdev,
					 skb->data, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
		dm->skb = skb;
		rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
		rxfd->info = CPU_CHIP_SWAP32(0x10003);	/* INFO=1 BC=3 */
		rxfd->va_lo = idx;
		rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
		rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
		rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
		print_rxfd(rxfd);

		/* on wrap, copy the spill-over back to the fifo start */
		f->m.wptr += sizeof(struct rxf_desc);
		delta = f->m.wptr - f->m.memsz;
		if (unlikely(delta >= 0)) {
			f->m.wptr = delta;
			if (delta > 0) {
				memcpy(f->m.va, f->m.va + f->m.memsz, delta);
				DBG("wrapped descriptor\n");
			}
		}
		dno--;
	}
	/*TBD: to do - delayed rxf wptr like in txd */
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	RET();
}

/* NETIF_RX_MUX - hands a received skb to the stack, attaching the hw
 * accel VLAN tag first when the descriptor carries one. */
static inline void NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1,
				u16 rxd_vlan, struct sk_buff *skb)
{
	ENTER;
	DBG("rxdd->flags.bits.vtag=%d\n", GET_RXD_VTAG(rxd_val1));
	if (GET_RXD_VTAG(rxd_val1)) {
		DBG("%s: vlan rcv vlan '%x' vtag '%x'\n",
		    priv->ndev->name,
		    GET_RXD_VLAN_ID(rxd_vlan),
		    GET_RXD_VTAG(rxd_val1));
		__vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan));
	}
	netif_receive_skb(skb);
}

/* bdx_recycle_skb - re-posts an already mapped skb back to the RXF fifo
 * (used for errored or copybreak-copied packets, so no new allocation
 * or dma mapping is needed). */
static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
{
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	struct rxf_fifo *f;
	struct rxdb *db;
	struct sk_buff *skb;
	int delta;

	ENTER;
	DBG("priv=%p rxdd=%p\n", priv, rxdd);
	f = &priv->rxf_fifo0;
	DBG("db=%p f=%p\n", db, f);
	dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
	DBG("dm=%p\n", dm);
	skb = dm->skb;
	rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
	rxfd->info = CPU_CHIP_SWAP32(0x10003);	/* INFO=1 BC=3 */
	rxfd->va_lo = rxdd->va_lo;
	rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
	rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
	rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
	print_rxfd(rxfd);

	/* on wrap, copy the spill-over back to the fifo start */
	f->m.wptr += sizeof(struct rxf_desc);
	delta = f->m.wptr - f->m.memsz;
	if (unlikely(delta >= 0)) {
		f->m.wptr = delta;
		if (delta > 0) {
			memcpy(f->m.va, f->m.va + f->m.memsz, delta);
			DBG("wrapped descriptor\n");
		}
	}
	RET();
}

/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
 * NOTE: a special treatment is given to non-continuous descriptors
 * that start near the end, wraps around and continue at the beginning. a second
 * part is copied right after the first, and then descriptor is interpreted as
 * normal. fifo has an extra space to allow such operations
 * @priv - nic's private structure
 * @f    - RXF fifo that needs skbs
 */

/* TBD: replace memcpy func call by explicite inline asm */

static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb, *skb2;
	struct rxd_desc *rxdd;
	struct rx_map *dm;
	struct rxf_fifo *rxf_fifo;
	int tmp_len, size;
	int done = 0;
	int max_done = BDX_MAX_RX_DONE;
	struct rxdb *db = NULL;
	/* Unmarshalled descriptor - copy of descriptor in host order */
	u32 rxd_val1;
	u16 len;
	u16 rxd_vlan;

	ENTER;
	max_done = budget;

	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;

	size = f->m.wptr - f->m.rptr;
	if (size < 0)
		size = f->m.memsz + size;	/* size is negative :-) */

	while (size > 0) {

		rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
		rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1);

		len = CPU_CHIP_SWAP16(rxdd->len);

		rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan);

		print_rxdd(rxdd, rxd_val1, len, rxd_vlan);

		/* descriptor length in bytes = buffer count * 8 */
		tmp_len = GET_RXD_BC(rxd_val1) << 3;
		BDX_ASSERT(tmp_len <= 0);
		size -= tmp_len;
		if (size < 0)	/* test for partially arrived descriptor */
			break;

		f->m.rptr += tmp_len;

		/* wrap: copy the split tail past the fifo end (extra space
		 * is reserved for exactly this) */
		tmp_len = f->m.rptr - f->m.memsz;
		if (unlikely(tmp_len >= 0)) {
			f->m.rptr = tmp_len;
			if (tmp_len > 0) {
				DBG("wrapped desc rptr=%d tmp_len=%d\n",
				    f->m.rptr, tmp_len);
				memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
			}
		}

		if (unlikely(GET_RXD_ERR(rxd_val1))) {
			DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
			ndev->stats.rx_errors++;
			bdx_recycle_skb(priv, rxdd);
			continue;
		}

		rxf_fifo = &priv->rxf_fifo0;
		db = priv->rxdb;
		dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
		skb = dm->skb;

		/* copybreak: small packets are copied into a fresh skb and
		 * the original (still dma mapped) buffer is recycled */
		if (len < BDX_COPYBREAK &&
		    (skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) {
			skb_reserve(skb2, NET_IP_ALIGN);
			/*skb_put(skb2, len); */
			pci_dma_sync_single_for_cpu(priv->pdev,
						    dm->dma, rxf_fifo->m.pktsz,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb2->data, skb->data, len);
			bdx_recycle_skb(priv, rxdd);
			skb = skb2;
		} else {
			pci_unmap_single(priv->pdev,
					 dm->dma, rxf_fifo->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			bdx_rxdb_free_elem(db, rxdd->va_lo);
		}

		ndev->stats.rx_bytes += len;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		/* Non-IP packets aren't checksum-offloaded */
		if (GET_RXD_PKT_ID(rxd_val1) == 0)
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);

		if (++done >= max_done)
			break;
	}

	ndev->stats.rx_packets += done;

	/* FIXME: do smth to minimize pci accesses    */
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	RET(done);
}

/*************************************************************************
 * Debug / Temporary Code                                                *
 *************************************************************************/
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan)
{
	DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
	    GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
	    GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
	    GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
	    GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan),
	    GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
	    rxdd->va_hi);
}

static void print_rxfd(struct rxf_desc *rxfd)
{
	DBG("=== RxF desc CHIP ORDER/ENDIANESS =============\n"
	    "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
	    rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
}

/*
 * TX HW/SW interaction overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * There are 2 types of TX communication channels between driver and NIC.
 * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
 * 2) TX Data Fifo - TXD - holds descriptors of full buffers.
 *
 * Currently NIC supports TSO, checksuming and gather DMA
 * UFO and IP fragmentation is on the way
 *
 * RX SW Data Structures
 * ~~~~~~~~~~~~~~~~~~~~~
 * txdb - used to keep track of all skbs owned by SW and their dma addresses.
 * For TX case, ownership lasts from getting packet via hard_xmit and until HW
 * acknowledges sent by TXF descriptors.
 * Implemented as cyclic buffer.
 * fifo - keeps info about fifo's size and location, relevant HW registers,
 * usage and skb db. Each RXD and RXF Fifo has its own fifo structure.
 * Implemented as simple struct.
 *
 * TX SW Execution Flow
 * ~~~~~~~~~~~~~~~~~~~~
 * OS calls driver's hard_xmit method with packet to sent.
 * Driver creates DMA mappings, builds TXD descriptors and kicks HW
 * by updating TXD WPTR.
 * When packet is sent, HW write us TXF descriptor and SW frees original skb.
 * To prevent TXD fifo overflow without reading HW registers every time,
 * SW deploys "tx level" technique.
 * Upon start up, tx level is initialized to TXD fifo length.
 * For every sent packet, SW gets its TXD descriptor size
 * (from precalculated array) and subtracts it from tx level.
 * The size is also stored in txdb. When TXF ack arrives, SW fetch size of
 * original TXD descriptor from txdb and adds it to tx level.
 * When Tx level drops under some predefined threshold, the driver
 * stops the TX queue.
When TX level rises above that level,
 * the tx queue is enabled again.
 *
 * This technique avoids excessive reading of RPTR and WPTR registers.
 * As our benchmarks shows, it adds 1.5 Gbit/sec to NIC's throughput.
 */

/*************************************************************************
 *     Tx DB                                                             *
 *************************************************************************/

/* bdx_tx_db_size - number of free tx_map slots left in the cyclic db */
static inline int bdx_tx_db_size(struct txdb *db)
{
	int taken = db->wptr - db->rptr;
	if (taken < 0)
		taken = db->size + 1 + taken;	/* (size + 1) equals memsz */

	return db->size - taken;
}

/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap
 * @d   - tx data base
 * @ptr - read or write pointer
 */
static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
{
	BDX_ASSERT(db == NULL || pptr == NULL);	/* sanity */

	BDX_ASSERT(*pptr != db->rptr &&	/* expect either read */
		   *pptr != db->wptr);	/* or write pointer */

	BDX_ASSERT(*pptr < db->start ||	/* pointer has to be */
		   *pptr >= db->end);	/* in range */

	++*pptr;
	if (unlikely(*pptr == db->end))
		*pptr = db->start;
}

/* bdx_tx_db_inc_rptr - increment read pointer
 * @d - tx data base
 */
static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
	BDX_ASSERT(db->rptr == db->wptr);	/* can't read from empty db */
	__bdx_tx_db_ptr_next(db, &db->rptr);
}

/* bdx_tx_db_inc_wptr - increment write pointer
 * @d - tx data base
 */
static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
	__bdx_tx_db_ptr_next(db, &db->wptr);
	BDX_ASSERT(db->rptr == db->wptr);	/* we can not get empty db as
						   a result of write */
}

/* bdx_tx_db_init - creates and initializes tx db
 * @d       - tx data base
 * @sz_type - size of tx fifo
 * Returns 0 on success, error code otherwise
 */
static int bdx_tx_db_init(struct txdb *d, int sz_type)
{
	int memsz = FIFO_SIZE * (1 << (sz_type + 1));

	d->start = vmalloc(memsz);
	if (!d->start)
		return -ENOMEM;

	/*
	 * In order to differentiate between db is empty and db is full
	 * states at least one element should always be empty in order to
	 * avoid rptr == wptr which means db is empty
	 */
	d->size = memsz / sizeof(struct tx_map) - 1;
	d->end = d->start + d->size + 1;	/* just after last element */

	/* all dbs are created equally empty */
	d->rptr = d->start;
	d->wptr = d->start;

	return 0;
}

/* bdx_tx_db_close - closes tx db and frees all memory
 * @d - tx data base
 */
static void bdx_tx_db_close(struct txdb *d)
{
	BDX_ASSERT(d == NULL);

	vfree(d->start);
	d->start = NULL;
}

/*************************************************************************
 *     Tx Engine                                                         *
 *************************************************************************/

/* sizes of tx desc (including padding if needed) as function
 * of skb's frag number */
static struct {
	u16 bytes;
	u16 qwords;		/* qword = 64 bit */
} txd_sizes[MAX_SKB_FRAGS + 1];

/* txdb_map_skb - creates and stores dma mappings for skb's data blocks
 * @priv - NIC private structure
 * @skb  - socket buffer to map
 *
 * It makes dma mappings for skb's data blocks and writes them to PBL of
 * new tx descriptor. It also stores them in the tx db, so they could be
 * unmapped after data was sent. It is responsibility of a caller to make
 * sure that there is enough space in the tx db. Last element holds pointer
 * to skb itself and marked with zero length
 */
static inline void
bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
	       struct txd_desc *txdd)
{
	struct txdb *db = &priv->txdb;
	struct pbl *pbl = &txdd->pbl[0];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* linear part of the skb goes into the first PBL entry */
	db->wptr->len = skb_headlen(skb);
	db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
					    db->wptr->len, PCI_DMA_TODEVICE);
	pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
	pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
	pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
	DBG("=== pbl len: 0x%x ================\n", pbl->len);
	DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
	DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
	bdx_tx_db_inc_wptr(db);

	for (i = 0; i < nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];
		db->wptr->len = skb_frag_size(frag);
		db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
						      0, skb_frag_size(frag),
						      DMA_TO_DEVICE);

		pbl++;
		pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
		pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
		pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
		bdx_tx_db_inc_wptr(db);
	}

	/* add skb clean up info. */
	db->wptr->len = -txd_sizes[nr_frags].bytes;
	db->wptr->addr.skb = skb;
	bdx_tx_db_inc_wptr(db);
}

/* init_txd_sizes - precalculate sizes of descriptors for skbs up to 16 frags
 * number of frags is used as index to fetch correct descriptors size,
 * instead of calculating it each time */
static void __init init_txd_sizes(void)
{
	int i, lwords;

	/* 7 - is number of lwords in txd with one phys buffer
	 * 3 - is number of lwords used for every additional phys buffer */
	for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
		lwords = 7 + (i * 3);
		if (lwords & 1)
			lwords++;	/* pad it with 1 lword */
		txd_sizes[i].qwords = lwords >> 1;
		txd_sizes[i].bytes = lwords << 2;
	}
}

/* bdx_tx_init - initialize all Tx related stuff.
 * Namely, TXD and TXF fifos, database etc */
static int bdx_tx_init(struct bdx_priv *priv)
{
	if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
			  regTXD_CFG0_0,
			  regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
			  regTXF_CFG0_0,
			  regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
		goto err_mem;

	/* The TX db has to keep mappings for all packets sent (on TxD)
	 * and not yet reclaimed (on TxF) */
	if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
		goto err_mem;

	priv->tx_level = BDX_MAX_TX_LEVEL;
#ifdef BDX_DELAY_WPTR
	priv->tx_update_mark = priv->tx_level - 1024;
#endif
	return 0;

err_mem:
	netdev_err(priv->ndev, "Tx init failed\n");
	return -ENOMEM;
}

/*
 * bdx_tx_space - calculates available space in TX fifo
 * @priv - NIC private structure
 * Returns available space in TX fifo in bytes
 */
static inline int bdx_tx_space(struct bdx_priv *priv)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int fsize;

	f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
	fsize = f->m.rptr - f->m.wptr;
	if (fsize <= 0)
		fsize = f->m.memsz + fsize;
	return fsize;
}

/* bdx_tx_transmit - send packet to NIC
 * @skb - packet to send
 * ndev - network device assigned to NIC
 * Return codes:
 * o NETDEV_TX_OK everything ok.
 * o NETDEV_TX_BUSY Cannot transmit packet, try later
 *   Usually a bug, means queue start/stop flow control is broken in
 *   the driver. Note: the driver must NOT put the skb in its DMA ring.
 * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
*/
static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct txd_fifo *f = &priv->txd_fifo0;
	int txd_checksum = 7;	/* full checksum */
	int txd_lgsnd = 0;
	int txd_vlan_id = 0;
	int txd_vtag = 0;
	int txd_mss = 0;

	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct txd_desc *txdd;
	int len;
	unsigned long flags;

	ENTER;
	local_irq_save(flags);
	if (!spin_trylock(&priv->tx_lock)) {
		local_irq_restore(flags);
		DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
		    BDX_DRV_NAME, ndev->name);
		return NETDEV_TX_LOCKED;
	}

	/* build tx descriptor */
	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* started with valid wptr */
	txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
		txd_checksum = 0;

	if (skb_shinfo(skb)->gso_size) {
		txd_mss = skb_shinfo(skb)->gso_size;
		txd_lgsnd = 1;
		DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
		    txd_mss);
	}

	if (vlan_tx_tag_present(skb)) {
		/*Cut VLAN ID to 12 bits */
		txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12);
		txd_vtag = 1;
	}

	txdd->length = CPU_CHIP_SWAP16(skb->len);
	txdd->mss = CPU_CHIP_SWAP16(txd_mss);
	txdd->txd_val1 =
	    CPU_CHIP_SWAP32(TXD_W1_VAL
			    (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
			     txd_lgsnd, txd_vlan_id));
	DBG("=== TxD desc =====================\n");
	DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
	DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);

	bdx_tx_map_skb(priv, skb, txdd);

	/* increment TXD write pointer. In case of fifo wrapping
	 * copy reminder of the descriptor to the beginning */
	f->m.wptr += txd_sizes[nr_frags].bytes;
	len = f->m.wptr - f->m.memsz;
	if (unlikely(len >= 0)) {
		f->m.wptr = len;
		if (len > 0) {
			BDX_ASSERT(len > f->m.memsz);
			memcpy(f->m.va, f->m.va + f->m.memsz, len);
		}
	}
	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* finished with valid wptr */

	priv->tx_level -= txd_sizes[nr_frags].bytes;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_level > priv->tx_update_mark) {
		/* Force memory writes to complete before letting h/w
		   know there are new descriptors to fetch.
		   (might be needed on platforms like IA64)
		   wmb(); */
		WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	} else {
		if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
			priv->tx_noupd = 0;
			WRITE_REG(priv, f->m.reg_WPTR,
				  f->m.wptr & TXF_WPTR_WR_PTR);
		}
	}
#else
	/* Force memory writes to complete before letting h/w
	   know there are new descriptors to fetch.
	   (might be needed on platforms like IA64)
	   wmb(); */
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);

#endif
#ifdef BDX_LLTX
	ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
#endif
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (priv->tx_level < BDX_MIN_TX_LEVEL) {
		DBG("%s: %s: TX Q STOP level %d\n",
		    BDX_DRV_NAME, ndev->name, priv->tx_level);
		netif_stop_queue(ndev);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}

/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
 * @priv - bdx adapter
 * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
 * that those packets were sent
 */
static void bdx_tx_cleanup(struct bdx_priv *priv)
{
	struct txf_fifo *f = &priv->txf_fifo0;
	struct txdb *db = &priv->txdb;
	int tx_level = 0;

	ENTER;
	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
	BDX_ASSERT(f->m.rptr >= f->m.memsz);	/* started with valid rptr */

	while (f->m.wptr != f->m.rptr) {
		f->m.rptr += BDX_TXF_DESC_SZ;
		f->m.rptr &= f->m.size_mask;

		/* unmap all the fragments */
		/* first has to come tx_maps containing dma */
		BDX_ASSERT(db->rptr->len == 0);
		do {
			BDX_ASSERT(db->rptr->addr.dma == 0);
			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
				       db->rptr->len, PCI_DMA_TODEVICE);
			bdx_tx_db_inc_rptr(db);
		} while (db->rptr->len > 0);
		tx_level -= db->rptr->len;	/* '-' koz len is negative */

		/* now should come skb pointer - free it */
		dev_kfree_skb_irq(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}

	/* let h/w know which TXF descriptors were cleaned */
	BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	/* We reclaimed resources, so in case the Q is stopped by xmit callback,
	 * we resume the transmission and use tx_lock to synchronize with xmit.*/
	spin_lock(&priv->tx_lock);
	priv->tx_level += tx_level;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_noupd) {
		priv->tx_noupd = 0;
		WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
			  priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
	}
#endif

	if (unlikely(netif_queue_stopped(priv->ndev) &&
		     netif_carrier_ok(priv->ndev) &&
		     (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
		DBG("%s: %s: TX Q WAKE level %d\n",
		    BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
		netif_wake_queue(priv->ndev);
	}
	spin_unlock(&priv->tx_lock);
}

/* bdx_tx_free_skbs - frees all skbs from TXD fifo.
* It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod */ static void bdx_tx_free_skbs(struct bdx_priv *priv) { struct txdb *db = &priv->txdb; ENTER; while (db->rptr != db->wptr) { if (likely(db->rptr->len)) pci_unmap_page(priv->pdev, db->rptr->addr.dma, db->rptr->len, PCI_DMA_TODEVICE); else dev_kfree_skb(db->rptr->addr.skb); bdx_tx_db_inc_rptr(db); } RET(); } /* bdx_tx_free - frees all Tx resources */ static void bdx_tx_free(struct bdx_priv *priv) { ENTER; bdx_tx_free_skbs(priv); bdx_fifo_free(priv, &priv->txd_fifo0.m); bdx_fifo_free(priv, &priv->txf_fifo0.m); bdx_tx_db_close(&priv->txdb); } /* bdx_tx_push_desc - push descriptor to TxD fifo * @priv - NIC private structure * @data - desc's data * @size - desc's size * * Pushes desc to TxD fifo and overlaps it if needed. * NOTE: this func does not check for available space. this is responsibility * of the caller. Neither does it check that data size is smaller than * fifo size. */ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size) { struct txd_fifo *f = &priv->txd_fifo0; int i = f->m.memsz - f->m.wptr; if (size == 0) return; if (i > size) { memcpy(f->m.va + f->m.wptr, data, size); f->m.wptr += size; } else { memcpy(f->m.va + f->m.wptr, data, i); f->m.wptr = size - i; memcpy(f->m.va, data + i, f->m.wptr); } WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); } /* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way * @priv - NIC private structure * @data - desc's data * @size - desc's size * * NOTE: this func does check for available space and, if necessary, waits for * NIC to read existing data before writing new one. */ static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size) { int timer = 0; ENTER; while (size > 0) { /* we substruct 8 because when fifo is full rptr == wptr which also means that fifo is empty, we can understand the difference, but could hw do the same ??? 
:) */ int avail = bdx_tx_space(priv) - 8; if (avail <= 0) { if (timer++ > 300) { /* prevent endless loop */ DBG("timeout while writing desc to TxD fifo\n"); break; } udelay(50); /* give hw a chance to clean fifo */ continue; } avail = min(avail, size); DBG("about to push %d bytes starting %p size %d\n", avail, data, size); bdx_tx_push_desc(priv, data, avail); size -= avail; data += avail; } RET(); } static const struct net_device_ops bdx_netdev_ops = { .ndo_open = bdx_open, .ndo_stop = bdx_close, .ndo_start_xmit = bdx_tx_transmit, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = bdx_ioctl, .ndo_set_rx_mode = bdx_setmulti, .ndo_change_mtu = bdx_change_mtu, .ndo_set_mac_address = bdx_set_mac, .ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bdx_vlan_rx_kill_vid, }; /** * bdx_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in bdx_pci_tbl * * Returns 0 on success, negative on failure * * bdx_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. * * functions and their order used as explained in * /usr/src/linux/Documentation/DMA-{API,mapping}.txt * */ /* TBD: netif_msg should be checked and implemented. I disable it for now */ static int __devinit bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *ndev; struct bdx_priv *priv; int err, pci_using_dac, port; unsigned long pciaddr; u32 regionSize; struct pci_nic *nic; ENTER; nic = vmalloc(sizeof(*nic)); if (!nic) RET(-ENOMEM); /************** pci *****************/ err = pci_enable_device(pdev); if (err) /* it triggers interrupt, dunno why. 
*/ goto err_pci; /* it's not a problem though */ if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) && !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) { pci_using_dac = 1; } else { if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) || (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) { pr_err("No usable DMA configuration, aborting\n"); goto err_dma; } pci_using_dac = 0; } err = pci_request_regions(pdev, BDX_DRV_NAME); if (err) goto err_dma; pci_set_master(pdev); pciaddr = pci_resource_start(pdev, 0); if (!pciaddr) { err = -EIO; pr_err("no MMIO resource\n"); goto err_out_res; } regionSize = pci_resource_len(pdev, 0); if (regionSize < BDX_REGS_SIZE) { err = -EIO; pr_err("MMIO resource (%x) too small\n", regionSize); goto err_out_res; } nic->regs = ioremap(pciaddr, regionSize); if (!nic->regs) { err = -EIO; pr_err("ioremap failed\n"); goto err_out_res; } if (pdev->irq < 2) { err = -EIO; pr_err("invalid irq (%d)\n", pdev->irq); goto err_out_iomap; } pci_set_drvdata(pdev, nic); if (pdev->device == 0x3014) nic->port_num = 2; else nic->port_num = 1; print_hw_id(pdev); bdx_hw_reset_direct(nic->regs); nic->irq_type = IRQ_INTX; #ifdef BDX_MSI if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) { err = pci_enable_msi(pdev); if (err) pr_err("Can't eneble msi. 
error is %d\n", err); else nic->irq_type = IRQ_MSI; } else DBG("HW does not support MSI\n"); #endif /************** netdev **************/ for (port = 0; port < nic->port_num; port++) { ndev = alloc_etherdev(sizeof(struct bdx_priv)); if (!ndev) { err = -ENOMEM; goto err_out_iomap; } ndev->netdev_ops = &bdx_netdev_ops; ndev->tx_queue_len = BDX_NDEV_TXQ_LEN; bdx_set_ethtool_ops(ndev); /* ethtool interface */ /* these fields are used for info purposes only * so we can have them same for all ports of the board */ ndev->if_port = port; ndev->base_addr = pciaddr; ndev->mem_start = pciaddr; ndev->mem_end = pciaddr + regionSize; ndev->irq = pdev->irq; ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM ; ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_VLAN_TX; if (pci_using_dac) ndev->features |= NETIF_F_HIGHDMA; /************** priv ****************/ priv = nic->priv[port] = netdev_priv(ndev); priv->pBdxRegs = nic->regs + port * 0x8000; priv->port = port; priv->pdev = pdev; priv->ndev = ndev; priv->nic = nic; priv->msg_enable = BDX_DEF_MSG_ENABLE; netif_napi_add(ndev, &priv->napi, bdx_poll, 64); if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) { DBG("HW statistics not supported\n"); priv->stats_flag = 0; } else { priv->stats_flag = 1; } /* Initialize fifo sizes. */ priv->txd_size = 2; priv->txf_size = 2; priv->rxd_size = 2; priv->rxf_size = 3; /* Initialize the initial coalescing registers. */ priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12); priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12); /* ndev->xmit_lock spinlock is not used. * Private priv->tx_lock is used for synchronization * between transmit and TX irq cleanup. In addition * set multicast list callback has to use priv->tx_lock. 
*/ #ifdef BDX_LLTX ndev->features |= NETIF_F_LLTX; #endif spin_lock_init(&priv->tx_lock); /*bdx_hw_reset(priv); */ if (bdx_read_mac(priv)) { pr_err("load MAC address failed\n"); goto err_out_iomap; } SET_NETDEV_DEV(ndev, &pdev->dev); err = register_netdev(ndev); if (err) { pr_err("register_netdev failed\n"); goto err_out_free; } netif_carrier_off(ndev); netif_stop_queue(ndev); print_eth_id(ndev); } RET(0); err_out_free: free_netdev(ndev); err_out_iomap: iounmap(nic->regs); err_out_res: pci_release_regions(pdev); err_dma: pci_disable_device(pdev); err_pci: vfree(nic); RET(err); } /****************** Ethtool interface *********************/ /* get strings for statistics counters */ static const char bdx_stat_names[][ETH_GSTRING_LEN] = { "InUCast", /* 0x7200 */ "InMCast", /* 0x7210 */ "InBCast", /* 0x7220 */ "InPkts", /* 0x7230 */ "InErrors", /* 0x7240 */ "InDropped", /* 0x7250 */ "FrameTooLong", /* 0x7260 */ "FrameSequenceErrors", /* 0x7270 */ "InVLAN", /* 0x7280 */ "InDroppedDFE", /* 0x7290 */ "InDroppedIntFull", /* 0x72A0 */ "InFrameAlignErrors", /* 0x72B0 */ /* 0x72C0-0x72E0 RSRV */ "OutUCast", /* 0x72F0 */ "OutMCast", /* 0x7300 */ "OutBCast", /* 0x7310 */ "OutPkts", /* 0x7320 */ /* 0x7330-0x7360 RSRV */ "OutVLAN", /* 0x7370 */ "InUCastOctects", /* 0x7380 */ "OutUCastOctects", /* 0x7390 */ /* 0x73A0-0x73B0 RSRV */ "InBCastOctects", /* 0x73C0 */ "OutBCastOctects", /* 0x73D0 */ "InOctects", /* 0x73E0 */ "OutOctects", /* 0x73F0 */ }; /* * bdx_get_settings - get device-specific settings * @netdev * @ecmd */ static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { u32 rdintcm; u32 tdintcm; struct bdx_priv *priv = netdev_priv(netdev); rdintcm = priv->rdintcm; tdintcm = priv->tdintcm; ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); ethtool_cmd_speed_set(ecmd, SPEED_10000); ecmd->duplex = DUPLEX_FULL; ecmd->port = PORT_FIBRE; ecmd->transceiver = XCVR_EXTERNAL; 
/* what does it mean? */ ecmd->autoneg = AUTONEG_DISABLE; /* PCK_TH measures in multiples of FIFO bytes We translate to packets */ ecmd->maxtxpkt = ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ); ecmd->maxrxpkt = ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc)); return 0; } /* * bdx_get_drvinfo - report driver information * @netdev * @drvinfo */ static void bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct bdx_priv *priv = netdev_priv(netdev); strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver)); strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version)); strlcat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcat(drvinfo->bus_info, pci_name(priv->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0); drvinfo->testinfo_len = 0; drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } /* * bdx_get_coalesce - get interrupt coalescing parameters * @netdev * @ecoal */ static int bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) { u32 rdintcm; u32 tdintcm; struct bdx_priv *priv = netdev_priv(netdev); rdintcm = priv->rdintcm; tdintcm = priv->tdintcm; /* PCK_TH measures in multiples of FIFO bytes We translate to packets */ ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT; ecoal->rx_max_coalesced_frames = ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc)); ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT; ecoal->tx_max_coalesced_frames = ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ); /* adaptive parameters ignored */ return 0; } /* * bdx_set_coalesce - set interrupt coalescing parameters * @netdev * @ecoal */ static int bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) { u32 rdintcm; u32 tdintcm; struct bdx_priv *priv = netdev_priv(netdev); int rx_coal; int tx_coal; int rx_max_coal; int tx_max_coal; /* Check for valid input */ 
rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT; tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT; rx_max_coal = ecoal->rx_max_coalesced_frames; tx_max_coal = ecoal->tx_max_coalesced_frames; /* Translate from packets to multiples of FIFO bytes */ rx_max_coal = (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1) / PCK_TH_MULT); tx_max_coal = (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1) / PCK_TH_MULT); if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) || (rx_max_coal > 0xF) || (tx_max_coal > 0xF)) return -EINVAL; rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm), GET_RXF_TH(priv->rdintcm), rx_max_coal); tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0, tx_max_coal); priv->rdintcm = rdintcm; priv->tdintcm = tdintcm; WRITE_REG(priv, regRDINTCM0, rdintcm); WRITE_REG(priv, regTDINTCM0, tdintcm); return 0; } /* Convert RX fifo size to number of pending packets */ static inline int bdx_rx_fifo_size_to_packets(int rx_size) { return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc); } /* Convert TX fifo size to number of pending packets */ static inline int bdx_tx_fifo_size_to_packets(int tx_size) { return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ; } /* * bdx_get_ringparam - report ring sizes * @netdev * @ring */ static void bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct bdx_priv *priv = netdev_priv(netdev); /*max_pending - the maximum-sized FIFO we allow */ ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3); ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3); ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size); ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size); } /* * bdx_set_ringparam - set ring sizes * @netdev * @ring */ static int bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct bdx_priv *priv = netdev_priv(netdev); int rx_size = 0; int tx_size = 0; for (; rx_size < 4; rx_size++) { if 
(bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending) break; } if (rx_size == 4) rx_size = 3; for (; tx_size < 4; tx_size++) { if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending) break; } if (tx_size == 4) tx_size = 3; /*Is there anything to do? */ if ((rx_size == priv->rxf_size) && (tx_size == priv->txd_size)) return 0; priv->rxf_size = rx_size; if (rx_size > 1) priv->rxd_size = rx_size - 1; else priv->rxd_size = rx_size; priv->txf_size = priv->txd_size = tx_size; if (netif_running(netdev)) { bdx_close(netdev); bdx_open(netdev); } return 0; } /* * bdx_get_strings - return a set of strings that describe the requested objects * @netdev * @data */ static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_STATS: memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names)); break; } } /* * bdx_get_sset_count - return number of statistics or tests * @netdev */ static int bdx_get_sset_count(struct net_device *netdev, int stringset) { struct bdx_priv *priv = netdev_priv(netdev); switch (stringset) { case ETH_SS_STATS: BDX_ASSERT(ARRAY_SIZE(bdx_stat_names) != sizeof(struct bdx_stats) / sizeof(u64)); return (priv->stats_flag) ? 
ARRAY_SIZE(bdx_stat_names) : 0; } return -EINVAL; } /* * bdx_get_ethtool_stats - return device's hardware L2 statistics * @netdev * @stats * @data */ static void bdx_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct bdx_priv *priv = netdev_priv(netdev); if (priv->stats_flag) { /* Update stats from HW */ bdx_update_stats(priv); /* Copy data to user buffer */ memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats)); } } /* * bdx_set_ethtool_ops - ethtool interface implementation * @netdev */ static void bdx_set_ethtool_ops(struct net_device *netdev) { static const struct ethtool_ops bdx_ethtool_ops = { .get_settings = bdx_get_settings, .get_drvinfo = bdx_get_drvinfo, .get_link = ethtool_op_get_link, .get_coalesce = bdx_get_coalesce, .set_coalesce = bdx_set_coalesce, .get_ringparam = bdx_get_ringparam, .set_ringparam = bdx_set_ringparam, .get_strings = bdx_get_strings, .get_sset_count = bdx_get_sset_count, .get_ethtool_stats = bdx_get_ethtool_stats, }; SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops); } /** * bdx_remove - Device Removal Routine * @pdev: PCI device information struct * * bdx_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. 
**/ static void __devexit bdx_remove(struct pci_dev *pdev) { struct pci_nic *nic = pci_get_drvdata(pdev); struct net_device *ndev; int port; for (port = 0; port < nic->port_num; port++) { ndev = nic->priv[port]->ndev; unregister_netdev(ndev); free_netdev(ndev); } /*bdx_hw_reset_direct(nic->regs); */ #ifdef BDX_MSI if (nic->irq_type == IRQ_MSI) pci_disable_msi(pdev); #endif iounmap(nic->regs); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); vfree(nic); RET(); } static struct pci_driver bdx_pci_driver = { .name = BDX_DRV_NAME, .id_table = bdx_pci_tbl, .probe = bdx_probe, .remove = __devexit_p(bdx_remove), }; /* * print_driver_id - print parameters of the driver build */ static void __init print_driver_id(void) { pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION); pr_info("Options: hw_csum %s\n", BDX_MSI_STRING); } static int __init bdx_module_init(void) { ENTER; init_txd_sizes(); print_driver_id(); RET(pci_register_driver(&bdx_pci_driver)); } module_init(bdx_module_init); static void __exit bdx_module_exit(void) { ENTER; pci_unregister_driver(&bdx_pci_driver); RET(); } module_exit(bdx_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(BDX_DRV_DESC); MODULE_FIRMWARE("tehuti/bdx.bin");
gpl-2.0
felixsch/linux
arch/avr32/mm/fault.c
1163
6353
/* * Copyright (C) 2004-2006 Atmel Corporation * * Based on linux/arch/sh/mm/fault.c: * Copyright (C) 1999 Niibe Yutaka * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/kdebug.h> #include <linux/kprobes.h> #include <asm/mmu_context.h> #include <asm/sysreg.h> #include <asm/tlb.h> #include <asm/uaccess.h> #ifdef CONFIG_KPROBES static inline int notify_page_fault(struct pt_regs *regs, int trap) { int ret = 0; if (!user_mode(regs)) { if (kprobe_running() && kprobe_fault_handler(regs, trap)) ret = 1; } return ret; } #else static inline int notify_page_fault(struct pt_regs *regs, int trap) { return 0; } #endif int exception_trace = 1; /* * This routine handles page faults. It determines the address and the * problem, and then passes it off to one of the appropriate routines. * * ecr is the Exception Cause Register. Possible values are: * 6: Protection fault (instruction access) * 15: Protection fault (read access) * 16: Protection fault (write access) * 20: Page not found (instruction access) * 24: Page not found (read access) * 28: Page not found (write access) */ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; const struct exception_table_entry *fixup; unsigned long address; unsigned long page; long signr; int code; int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; if (notify_page_fault(regs, ecr)) return; address = sysreg_read(TLBEAR); tsk = current; mm = tsk->mm; signr = SIGSEGV; code = SEGV_MAPERR; /* * If we're in an interrupt or have no user context, we must * not take the fault... 
*/ if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM)) goto no_context; local_irq_enable(); if (user_mode(regs)) flags |= FAULT_FLAG_USER; retry: down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so we * can handle it... */ good_area: code = SEGV_ACCERR; switch (ecr) { case ECR_PROTECTION_X: case ECR_TLB_MISS_X: if (!(vma->vm_flags & VM_EXEC)) goto bad_area; break; case ECR_PROTECTION_R: case ECR_TLB_MISS_R: if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) goto bad_area; break; case ECR_PROTECTION_W: case ECR_TLB_MISS_W: if (!(vma->vm_flags & VM_WRITE)) goto bad_area; flags |= FAULT_FLAG_WRITE; break; default: panic("Unhandled case %lu in do_page_fault!", ecr); } /* * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the * fault. */ fault = handle_mm_fault(mm, vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGSEGV) goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* * No need to up_read(&mm->mmap_sem) as we would have * already released it in __lock_page_or_retry() in * mm/filemap.c. */ goto retry; } } up_read(&mm->mmap_sem); return; /* * Something tried to access memory that isn't in our memory * map. Fix it, but check if it's kernel or user first... 
*/ bad_area: up_read(&mm->mmap_sem); if (user_mode(regs)) { if (exception_trace && printk_ratelimit()) printk("%s%s[%d]: segfault at %08lx pc %08lx " "sp %08lx ecr %lu\n", is_global_init(tsk) ? KERN_EMERG : KERN_INFO, tsk->comm, tsk->pid, address, regs->pc, regs->sp, ecr); _exception(SIGSEGV, regs, code, address); return; } no_context: /* Are we prepared to handle this kernel fault? */ fixup = search_exception_tables(regs->pc); if (fixup) { regs->pc = fixup->fixup; return; } /* * Oops. The kernel tried to access some bad page. We'll have * to terminate things with extreme prejudice. */ if (address < PAGE_SIZE) printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); else printk(KERN_ALERT "Unable to handle kernel paging request"); printk(" at virtual address %08lx\n", address); page = sysreg_read(PTBR); printk(KERN_ALERT "ptbr = %08lx", page); if (address >= TASK_SIZE) page = (unsigned long)swapper_pg_dir; if (page) { page = ((unsigned long *)page)[address >> 22]; printk(" pgd = %08lx", page); if (page & _PAGE_PRESENT) { page &= PAGE_MASK; address &= 0x003ff000; page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT]; printk(" pte = %08lx", page); } } printk("\n"); die("Kernel access of bad area", regs, signr); return; /* * We ran out of memory, or some other thing happened to us * that made us unable to handle the page fault gracefully. */ out_of_memory: up_read(&mm->mmap_sem); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: up_read(&mm->mmap_sem); /* Kernel mode? Handle exceptions or die */ signr = SIGBUS; code = BUS_ADRERR; if (!user_mode(regs)) goto no_context; if (exception_trace) printk("%s%s[%d]: bus error at %08lx pc %08lx " "sp %08lx ecr %lu\n", is_global_init(tsk) ? 
KERN_EMERG : KERN_INFO, tsk->comm, tsk->pid, address, regs->pc, regs->sp, ecr); _exception(SIGBUS, regs, BUS_ADRERR, address); } asmlinkage void do_bus_error(unsigned long addr, int write_access, struct pt_regs *regs) { printk(KERN_ALERT "Bus error at physical address 0x%08lx (%s access)\n", addr, write_access ? "write" : "read"); printk(KERN_INFO "DTLB dump:\n"); dump_dtlb(); die("Bus Error", regs, SIGKILL); }
gpl-2.0
mpokwsths/mpokang_kernel
drivers/scsi/lpfc/lpfc_nportdisc.c
2955
70127
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" /* Called to verify a rcv'ed ADISC was intended for us. */ static int lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_name *nn, struct lpfc_name *pn) { /* Compare the ADISC rsp WWNN / WWPN matches our internal node * table entry for that node. 
*/ if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name))) return 0; if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name))) return 0; /* we match, return success */ return 1; } int lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct serv_parm *sp, uint32_t class, int flogi) { volatile struct serv_parm *hsp = &vport->fc_sparam; uint16_t hsp_value, ssp_value = 0; /* * The receive data field size and buffer-to-buffer receive data field * size entries are 16 bits but are represented as two 8-bit fields in * the driver data structure to account for rsvd bits and other control * bits. Reconstruct and compare the fields as a 16-bit values before * correcting the byte values. */ if (sp->cls1.classValid) { if (!flogi) { hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) | hsp->cls1.rcvDataSizeLsb); ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) | sp->cls1.rcvDataSizeLsb); if (!ssp_value) goto bad_service_param; if (ssp_value > hsp_value) { sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; } } } else if (class == CLASS1) goto bad_service_param; if (sp->cls2.classValid) { if (!flogi) { hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) | hsp->cls2.rcvDataSizeLsb); ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) | sp->cls2.rcvDataSizeLsb); if (!ssp_value) goto bad_service_param; if (ssp_value > hsp_value) { sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; } } } else if (class == CLASS2) goto bad_service_param; if (sp->cls3.classValid) { if (!flogi) { hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) | hsp->cls3.rcvDataSizeLsb); ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) | sp->cls3.rcvDataSizeLsb); if (!ssp_value) goto bad_service_param; if (ssp_value > hsp_value) { sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; } } } else if (class == CLASS3) goto bad_service_param; /* * Preserve the upper four 
bits of the MSB from the PLOGI response. * These bits contain the Buffer-to-Buffer State Change Number * from the target and need to be passed to the FW. */ hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb; ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb; if (ssp_value > hsp_value) { sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb; sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) | (hsp->cmn.bbRcvSizeMsb & 0x0F); } memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name)); return 1; bad_service_param: lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0207 Device %x " "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent " "invalid service parameters. Ignoring device.\n", ndlp->nlp_DID, sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1], sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3], sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5], sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]); return 0; } static void * lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_dmabuf *pcmd, *prsp; uint32_t *lp; void *ptr = NULL; IOCB_t *irsp; irsp = &rspiocb->iocb; pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; /* For lpfc_els_abort, context2 could be zero'ed to delay * freeing associated memory till after ABTS completes. */ if (pcmd) { prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); if (prsp) { lp = (uint32_t *) prsp->virt; ptr = (void *)((uint8_t *)lp + sizeof(uint32_t)); } } else { /* Force ulpStatus error since we are returning NULL ptr */ if (!(irsp->ulpStatus)) { irsp->ulpStatus = IOSTAT_LOCAL_REJECT; irsp->un.ulpWord[4] = IOERR_SLI_ABORTED; } ptr = NULL; } return ptr; } /* * Free resources / clean up outstanding I/Os * associated with a LPFC_NODELIST entry. This * routine effectively results in a "software abort". 
 */
int
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	LIST_HEAD(txcmplq_completions);
	LIST_HEAD(abort_list);
	struct lpfc_sli  *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	struct lpfc_iocbq *iocb, *next_iocb;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
			 "2819 Abort outstanding I/O on NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);

	lpfc_fabric_abort_nport(ndlp);

	/* First check the txq: not yet issued, so they can simply be
	 * moved off and completed with an error.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* It matches, so deque and call compl with anp error */
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq: these were issued to the HBA, so they
	 * must be aborted by iotag rather than completed directly. The
	 * whole queue is spliced out so it can be scanned without the lock.
	 */
	list_splice_init(&pring->txcmplq, &txcmplq_completions);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			list_add_tail(&iocb->dlist, &abort_list);
	}
	spin_lock_irq(&phba->hbalock);
	list_splice(&txcmplq_completions, &pring->txcmplq);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&iocb->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
	return 0;
}

/*
 * Handle a received PLOGI: validate the service parameters, register the
 * RPI with the firmware and queue an ACC (or send an LS_RJT on failure).
 * Returns 1 when the PLOGI was accepted, 0 when it was rejected.
 */
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	       struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba    *phba = vport->phba;
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;
	struct ls_rjt stat;
	int rc;

	memset(&stat, 0, sizeof (struct ls_rjt));
	if (vport->port_state <= LPFC_FDISC) {
		/* Before responding to PLOGI, check for pt2pt mode.
		 * If we are pt2pt, with an outstanding FLOGI, abort
		 * the FLOGI and resend it first.
		 */
		if (vport->fc_flag & FC_PT2PT) {
			lpfc_els_abort_flogi(phba);
			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
				/* If the other side is supposed to initiate
				 * the PLOGI anyway, just ACC it now and
				 * move on with discovery.
				 */
				phba->fc_edtov = FF_DEF_EDTOV;
				phba->fc_ratov = FF_DEF_RATOV;
				/* Start discovery - this should just do
				   CLEAR_LA */
				lpfc_disc_start(vport);
			} else
				lpfc_initial_flogi(vport);
		} else {
			/* Not pt2pt and the link is not up yet: tell the
			 * remote port we are busy so it retries later.
			 */
			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
					    ndlp, NULL);
			return 0;
		}
	}
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0140 PLOGI Reject: invalid nname\n");
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
		return 0;
	}
	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0141 PLOGI Reject: invalid pname\n");
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
		return 0;
	}
	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
		/* Reject this request because invalid parameters */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
		return 0;
	}
	icmd = &cmdiocb->iocb;

	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state,
			 ndlp->nlp_flag, ndlp->nlp_rpi);

	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	/* no need to reg_login if we are already in one of these states */
	switch (ndlp->nlp_state) {
	case NLP_STE_NPR_NODE:
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
			break;
		/* fallthrough */
	case NLP_STE_REG_LOGIN_ISSUE:
	case NLP_STE_PRLI_ISSUE:
	case NLP_STE_UNMAPPED_NODE:
	case NLP_STE_MAPPED_NODE:
		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
		return 1;
	}

	if ((vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
		/* rcv'ed PLOGI decides what our NPortId will be */
		vport->fc_myDID = icmd->un.rcvels.parmRo;
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox == NULL)
			goto out;
		lpfc_config_link(phba, mbox);
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto out;
		}

		lpfc_can_disctmo(vport);
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		goto out;

	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
			  (uint8_t *) sp, mbox, ndlp->nlp_rpi);
	if (rc) {
		mempool_free(mbox, phba->mbox_mem_pool);
		goto out;
	}

	/* ACC PLOGI rsp command needs to execute first,
	 * queue this mbox command to be processed later.
	 */
	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
	/*
	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
	 * command issued in lpfc_cmpl_els_acc().
	 */
	mbox->vport = vport;
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
	spin_unlock_irq(shost->host_lock);

	/*
	 * If there is an outstanding PLOGI issued, abort it before
	 * sending ACC rsp for received PLOGI. If pending plogi
	 * is not canceled here, the plogi will be rejected by
	 * remote port and will be retried. On a configuration with
	 * single discovery thread, this will cause a huge delay in
	 * discovery. Also this will cause multiple state machines
	 * running in parallel for this node.
	 */
	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(phba, ndlp);
	}

	if ((vport->port_type == LPFC_NPIV_PORT &&
	     vport->cfg_restrict_login)) {

		/* In order to preserve RPIs, we want to cleanup
		 * the default RPI the firmware created to rcv
		 * this ELS request. The only way to do this is
		 * to register, then unregister the RPI.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
		spin_unlock_irq(shost->host_lock);
		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
				    ndlp, mbox);
		return 1;
	}
	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
	return 1;
out:
	/* Resource shortage: reject the PLOGI so the remote port retries */
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}

/*
 * Handle a received ADISC or PDISC. On a good address match, ACC it;
 * otherwise reject it and put the node in NPR state with a 1 second
 * delayed-PLOGI retry. Returns 1 on ACC, 0 on reject.
 */
static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_dmabuf *pcmd;
	struct serv_parm   *sp;
	struct lpfc_name   *pnn, *ppn;
	struct ls_rjt stat;
	ADISC *ap;
	IOCB_t *icmd;
	uint32_t *lp;
	uint32_t cmd;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	/* ADISC and PDISC payloads carry the names at different offsets */
	cmd = *lp++;
	if (cmd == ELS_CMD_ADISC) {
		ap = (ADISC *) lp;
		pnn = (struct lpfc_name *) & ap->nodeName;
		ppn = (struct lpfc_name *) & ap->portName;
	} else {
		sp = (struct serv_parm *) lp;
		pnn = (struct lpfc_name *) & sp->nodeName;
		ppn = (struct lpfc_name *) & sp->portName;
	}

	icmd = &cmdiocb->iocb;
	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
		if (cmd == ELS_CMD_ADISC) {
			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
		} else {
			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
					 NULL);
		}
		return 1;
	}
	/* Reject this request because invalid parameters */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	/* 1 sec timeout */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return 0;
}

/*
 * Handle a received LOGO (or PRLO treated as LOGO): ACC it, then either
 * re-instantiate the fabric link (Fabric_DID case) or schedule a delayed
 * PLOGI retry, and finally park the node in NPR state.
 */
static int
lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba    *phba = vport->phba;
	struct lpfc_vport **vports;
	int i, active_vlink_present = 0;

	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
	 * PLOGIs during LOGO storms from a device.
	 */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	if (els_cmd == ELS_CMD_PRLO)
		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	else
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	if (ndlp->nlp_DID == Fabric_DID) {
		/* LOGO from the fabric itself: take the port link down and
		 * decide how to recover based on other active vlinks.
		 */
		if (vport->port_state <= LPFC_FDISC)
			goto out;
		lpfc_linkdown_port(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
		spin_unlock_irq(shost->host_lock);
		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_LOGO_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		if (active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			spin_lock_irq(shost->host_lock);
			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
			lpfc_retry_pport_discovery(phba);
		}
	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
		((ndlp->nlp_type & NLP_FCP_TARGET) ||
		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
		/* Only try to re-login if this is NOT a Fabric Node */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);

		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	}
out:
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(shost->host_lock);
	/* The driver has to wait until the ACC completes before it continues
	 * processing the LOGO.
The action will resume in
	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
	 * unreg_login, the driver waits so the ACC does not get aborted.
	 */
	return 0;
}

/*
 * Parse a received PRLI and refresh the node's FCP role bits
 * (initiator/target/FCP-2). If an rport is attached, propagate the
 * new roles to the FC transport.
 */
static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	PRLI *npr;
	struct fc_rport *rport = ndlp->rport;
	u32 roles;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));

	/* Clear current roles before re-deriving them from the PRLI */
	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	if (npr->prliType == PRLI_FCP_TYPE) {
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc)
			ndlp->nlp_type |= NLP_FCP_TARGET;
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
	}
	if (rport) {
		/* We need to update the rport role values */
		roles = FC_RPORT_ROLE_UNKNOWN;
		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (ndlp->nlp_type & NLP_FCP_TARGET)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			"rport rolechg: role:x%x did:x%x flg:x%x",
			roles, ndlp->nlp_DID, ndlp->nlp_flag);

		fc_remote_port_rolechg(rport, roles);
	}
}

/*
 * Decide whether this node should be verified with ADISC (returns 1 and
 * sets NLP_NPR_ADISC) or must go through a fresh PLOGI, in which case the
 * RPI is unregistered and 0 is returned.
 */
static uint32_t
lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* No registered RPI means there is nothing to verify via ADISC */
	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 0;
	}

	if (!(vport->fc_flag & FC_PT2PT)) {
		/* Check config parameter use-adisc or FCP-2 */
		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
		     (ndlp->nlp_type & NLP_FCP_TARGET))) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			return 1;
		}
	}
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	lpfc_unreg_rpi(vport, ndlp);
	return 0;
}

/**
 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
 * @phba : Pointer to lpfc_hba structure.
 * @vport: Pointer to lpfc_vport structure.
 * @rpi  : rpi to be released.
 *
 * This function will send a unreg_login mailbox command to the firmware
 * to release a rpi.
 **/
void
lpfc_release_rpi(struct lpfc_hba *phba,
		 struct lpfc_vport *vport,
		 uint16_t rpi)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
			GFP_KERNEL);
	if (!pmb)
		/* Allocation failure is only logged; the rpi stays
		 * registered in that case.
		 */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			"2796 mailbox memory allocation failed \n");
	else {
		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(pmb, phba->mbox_mem_pool);
	}
}

/*
 * Catch-all handler for events that are illegal in the node's current
 * state. Logs the bogus transition and leaves the state unchanged. If the
 * event is a successful REG_LOGIN completion, the just-registered RPI is
 * released so it does not leak.
 */
static uint32_t
lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		  void *arg, uint32_t evt)
{
	struct lpfc_hba *phba;
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb;
	uint16_t rpi;

	phba = vport->phba;
	/* Release the RPI if reglogin completing */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
		(!pmb->u.mb.mbxStatus)) {
		mb = &pmb->u.mb;
		rpi = pmb->u.mb.un.varWords[0];
		lpfc_release_rpi(phba, vport, rpi);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0271 Illegal State Transition: node x%x "
			 "event x%x, state x%x Data: x%x x%x\n",
			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
			 ndlp->nlp_flag);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	/* This transition is only legal if we previously
	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
	 * working on the same NPortID, do nothing for this thread
	 * to stop it.
	 */
	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0272 Illegal State Transition: node x%x "
			 "event x%x, state x%x Data: x%x x%x\n",
			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
			 ndlp->nlp_flag);
	}
	return ndlp->nlp_state;
}

/* Start of Discovery State Machine routines */

/* UNUSED_NODE state: accept the PLOGI or free the node */
static uint32_t
lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		return ndlp->nlp_state;
	}
	return NLP_STE_FREED_NODE;
}

/* UNUSED_NODE state: any other ELS gets answered with a LOGO */
static uint32_t
lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	lpfc_issue_els_logo(vport, ndlp, 0);
	return ndlp->nlp_state;
}

/* UNUSED_NODE state: ACC a received LOGO */
static uint32_t
lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}

/*
 * PLOGI_ISSUE state: a PLOGI collision. Per convention only the side with
 * the lower portname accepts the incoming PLOGI; the other side rejects it
 * with "command in progress" so its own PLOGI wins.
 */
static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba    *phba = vport->phba;
	struct lpfc_iocbq  *cmdiocb = arg;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *lp = (uint32_t *) pcmd->virt;
	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
	struct ls_rjt stat;
	int port_cmp;

	memset(&stat, 0, sizeof (struct ls_rjt));

	/* For a PLOGI, we only accept if our portname is less
	 * than the remote portname.
	 */
	phba->fc_stat.elsLogiCol++;
	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
			  sizeof(struct lpfc_name));

	if (port_cmp >= 0) {
		/* Reject this request because the remote node will accept
		   ours */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
			NULL);
	} else {
		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
		    (vport->num_disc_nodes)) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			/* Check if there are more PLOGIs to be sent */
			lpfc_more_plogi(vport);
			if (vport->num_disc_nodes == 0) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
				lpfc_end_rscn(vport);
			}
		}
	} /* If our portname was less */

	return ndlp->nlp_state;
}

/* PLOGI_ISSUE state: PRLI is premature, reject with "logically busy" */
static uint32_t
lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt     stat;

	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(vport->phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

/*
 * PLOGI_ISSUE state: another ELS arrived mid-login. Abort our PLOGI,
 * answer (ACC for LOGO, LOGO otherwise) and retry PLOGI from NPR after
 * a 1 second delay.
 */
static uint32_t
lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	if (evt == NLP_EVT_RCV_LOGO) {
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	} else {
		lpfc_issue_els_logo(vport, ndlp, 0);
	}

	/* Put ndlp in npr state set plogi timer for 1 sec */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	return ndlp->nlp_state;
}

/*
 * PLOGI_ISSUE state: our PLOGI completed. Validate the response service
 * parameters and kick off REG_LOGIN; any failure path frees the node
 * (NLP_STE_FREED_NODE) after marking it NLP_DEFER_RM.
 */
static uint32_t
lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg,
			    uint32_t evt)
{
	struct lpfc_hba    *phba = vport->phba;
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq  *cmdiocb, *rspiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *mp;
	uint32_t *lp;
	IOCB_t *irsp;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
		/* Recovery from PLOGI collision logic */
		return ndlp->nlp_state;
	}

	irsp = &rspiocb->iocb;

	if (irsp->ulpStatus)
		goto out;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	lp = (uint32_t *) prsp->virt;

	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

	/* Some switches have FDMI servers returning 0 for WWN */
	if ((ndlp->nlp_DID != FDMI_DID) &&
		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0142 PLOGI RSP: Invalid WWN.\n");
		goto out;
	}
	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
		goto out;
	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state,
			 ndlp->nlp_flag, ndlp->nlp_rpi);
	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			"0133 PLOGI: no memory for reg_login "
			"Data: x%x x%x x%x x%x\n",
			ndlp->nlp_DID, ndlp->nlp_state,
			ndlp->nlp_flag, ndlp->nlp_rpi);
		goto out;
	}

	lpfc_unreg_rpi(vport, ndlp);

	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
		/* Well-known nodes get dedicated reg_login completions */
		switch (ndlp->nlp_DID) {
		case NameServer_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
			break;
		case FDMI_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
			break;
		default:
			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
		}
		mbox->context2 = lpfc_nlp_get(ndlp);
		mbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    != MBX_NOT_FINISHED) {
			lpfc_nlp_set_state(vport, ndlp,
					   NLP_STE_REG_LOGIN_ISSUE);
			return ndlp->nlp_state;
		}
		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
		/* decrement node reference count to the failed mbox
		 * command
		 */
		lpfc_nlp_put(ndlp);
		mp = (struct lpfc_dmabuf *) mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			"0134 PLOGI: cannot issue reg_login "
			"Data: x%x x%x x%x x%x\n",
			ndlp->nlp_DID, ndlp->nlp_state,
			ndlp->nlp_flag, ndlp->nlp_rpi);
	} else {
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			"0135 PLOGI: cannot format reg_login "
			"Data: x%x x%x x%x x%x\n",
			ndlp->nlp_DID, ndlp->nlp_state,
			ndlp->nlp_flag, ndlp->nlp_rpi);
	}

out:
	if (ndlp->nlp_DID == NameServer_DID) {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			"0261 Cannot Register NameServer login\n");
	}

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DEFER_RM;
	spin_unlock_irq(shost->host_lock);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return ndlp->nlp_state;
}

/* PLOGI_ISSUE state: a stray reg_login completed; release its RPI */
static uint32_t
lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
{
	struct lpfc_hba *phba;
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->u.mb;
	uint16_t rpi;

	phba = vport->phba;
	/* Release the RPI */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
		!mb->mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		lpfc_release_rpi(phba, vport, rpi);
	}
	return ndlp->nlp_state;
}

/* PLOGI_ISSUE state: defer removal while still marked for discovery */
static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);

	return ndlp->nlp_state;
}

/*
 * ADISC_ISSUE state: received a PLOGI while we were verifying with ADISC.
 * Abort the ADISC; on PLOGI accept continue discovery, otherwise issue
 * our own PLOGI.
 */
static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			if (vport->num_disc_nodes)
				lpfc_more_adisc(vport);
		}
		return ndlp->nlp_state;
	}
	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}

/*
 * ADISC_ISSUE state: ADISC completed. On address-check failure the cached
 * names are cleared, the RPI unregistered and a delayed PLOGI is scheduled;
 * on success the node moves to MAPPED/UNMAPPED according to its FCP role.
 */
static uint32_t
lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;
	ADISC *ap;
	int rc;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
	irsp = &rspiocb->iocb;

	if ((irsp->ulpStatus) ||
	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
		/* 1 sec timeout */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));

		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		lpfc_unreg_rpi(vport, ndlp);
		return ndlp->nlp_state;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		rc = lpfc_sli4_resume_rpi(ndlp);
		if (rc) {
			/* Stay in state and retry.
			 */
			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
			return ndlp->nlp_state;
		}
	}

	/* ADISC verified the address: map the node by its FCP role */
	if (ndlp->nlp_type & NLP_FCP_TARGET) {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
	} else {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}

	return ndlp->nlp_state;
}

/* ADISC_ISSUE state: defer removal while still marked for discovery */
static uint32_t
lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding ADISC */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

/*
 * REG_LOGIN_ISSUE state: a LOGO arrived while the REG_LOGIN mailbox is in
 * flight. Scrub this ndlp from the active mailbox and the pending mailbox
 * queue (dropping the references and buffers it holds) before handling the
 * LOGO, so the stale reg_login completion cannot touch a logged-out node.
 */
static uint32_t
lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	LPFC_MBOXQ_t	  *mb;
	LPFC_MBOXQ_t	  *nextmb;
	struct lpfc_dmabuf *mp;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			lpfc_nlp_put(ndlp);
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			lpfc_nlp_put(ndlp);
			list_del(&mb->list);
			phba->sli.mboxq_cnt--;
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
			       struct lpfc_nodelist *ndlp,
			       void *arg,
			       uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;
	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

/*
 * REG_LOGIN_ISSUE state: REG_LOGIN mailbox completed. On failure either
 * park the node (RPI pool exhausted) or LOGO and retry PLOGI after 1 sec;
 * on success record the RPI and continue with PRLI (non-fabric) or go
 * UNMAPPED (fabric).
 */
static uint32_t
lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
				  struct lpfc_nodelist *ndlp,
				  void *arg,
				  uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t did = mb->un.varWords[1];

	if (mb->mbxStatus) {
		/* RegLogin failed */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0246 RegLogin failed Data: x%x x%x x%x x%x "
				 "x%x\n",
				 did, mb->mbxStatus, vport->port_state,
				 mb->un.varRegLogin.vpi,
				 mb->un.varRegLogin.rpi);
		/*
		 * If RegLogin failed due to lack of HBA resources do not
		 * retry discovery.
		 */
		if (mb->mbxStatus == MBXERR_RPI_FULL) {
			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
			return ndlp->nlp_state;
		}

		/* Put ndlp in npr state set plogi timer for 1 sec */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		lpfc_issue_els_logo(vport, ndlp, 0);
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		return ndlp->nlp_state;
	}

	/* SLI4 ports have preallocated logical rpis.
	 */
	if (vport->phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];

	ndlp->nlp_flag |= NLP_RPI_REGISTERED;

	/* Only if we are not a fabric nport do we issue PRLI */
	if (!(ndlp->nlp_type & NLP_FABRIC)) {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
		lpfc_issue_els_prli(vport, ndlp, 0);
	} else {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
	return ndlp->nlp_state;
}

/* REG_LOGIN_ISSUE state: defer removal while still marked for discovery */
static uint32_t
lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
				 struct lpfc_nodelist *ndlp,
				 void *arg,
				 uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
*/ if (vport->fc_flag & FC_RSCN_DEFERRED) return ndlp->nlp_state; ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(shost->host_lock); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_plogi(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Software abort outstanding PRLI before sending acc */ lpfc_els_abort(vport->phba, ndlp); lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } /* This routine is envoked when we rcv a PRLO request from a nport * we are logged into. We should send back a PRLO rsp setting the * appropriate bits. 
* NEXT STATE = PRLI_ISSUE */ static uint32_t lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_iocbq *cmdiocb, *rspiocb; struct lpfc_hba *phba = vport->phba; IOCB_t *irsp; PRLI *npr; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); irsp = &rspiocb->iocb; if (irsp->ulpStatus) { if ((vport->port_type == LPFC_NPIV_PORT) && vport->cfg_restrict_login) { goto out; } ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); return ndlp->nlp_state; } /* Check out PRLI rsp */ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && (npr->prliType == PRLI_FCP_TYPE)) { if (npr->initiatorFunc) ndlp->nlp_type |= NLP_FCP_INITIATOR; if (npr->targetFunc) ndlp->nlp_type |= NLP_FCP_TARGET; if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; } if (!(ndlp->nlp_type & NLP_FCP_TARGET) && (vport->port_type == LPFC_NPIV_PORT) && vport->cfg_restrict_login) { out: spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_TARGET_REMOVE; spin_unlock_irq(shost->host_lock); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return ndlp->nlp_state; } ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; if (ndlp->nlp_type & NLP_FCP_TARGET) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); else lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); return ndlp->nlp_state; } /*! 
lpfc_device_rm_prli_issue * * \pre * \post * \param phba * \param ndlp * \param arg * \param evt * \return uint32_t * * \b Description: * This routine is envoked when we a request to remove a nport we are in the * process of PRLIing. We should software abort outstanding prli, unreg * login, send a logout. We will change node state to UNUSED_NODE, put it * on plogi list so it can be freed when LOGO completes. * */ static uint32_t lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; spin_unlock_irq(shost->host_lock); return ndlp->nlp_state; } else { /* software abort outstanding PLOGI */ lpfc_els_abort(vport->phba, ndlp); lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } /*! lpfc_device_recov_prli_issue * * \pre * \post * \param phba * \param ndlp * \param arg * \param evt * \return uint32_t * * \b Description: * The routine is envoked when the state of a device is unknown, like * during a link down. We should remove the nodelist entry from the * unmapped list, issue a UNREG_LOGIN, do a software abort of the * outstanding PRLI command, then free the node entry. */ static uint32_t lpfc_device_recov_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; /* Don't do anything that will mess up processing of the * previous RSCN. 
*/ if (vport->fc_flag & FC_RSCN_DEFERRED) return ndlp->nlp_state; /* software abort outstanding PRLI */ lpfc_els_abort(phba, ndlp); ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(shost->host_lock); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_plogi(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t lpfc_device_recov_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 
spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(shost->host_lock); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_plogi(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* flush the target */ lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], ndlp->nlp_sid, 0, LPFC_CTX_TGT); /* Treat like rcv logo */ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); return ndlp->nlp_state; } static uint32_t lpfc_device_recov_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 
spin_unlock_irq(shost->host_lock); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Ignore PLOGI if we have an outstanding LOGO */ if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) return ndlp->nlp_state; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { lpfc_cancel_retry_delay_tmo(vport, ndlp); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); spin_unlock_irq(shost->host_lock); } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { /* send PLOGI immediately, move to PLOGI issue state */ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } } return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; struct ls_rjt stat; memset(&stat, 0, sizeof (struct ls_rjt)); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { if (ndlp->nlp_flag & NLP_NPR_ADISC) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; ndlp->nlp_prev_state = NLP_STE_NPR_NODE; spin_unlock_irq(shost->host_lock); lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); lpfc_issue_els_adisc(vport, ndlp, 0); } else { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } } return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, 
struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_padisc(vport, ndlp, cmdiocb); /* * Do not start discovery if discovery is about to start * or discovery in progress for this node. Starting discovery * here will affect the counting of discovery threads. */ if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { if (ndlp->nlp_flag & NLP_NPR_ADISC) { ndlp->nlp_flag &= ~NLP_NPR_ADISC; ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); lpfc_issue_els_adisc(vport, ndlp, 0); } else { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } } return ndlp->nlp_state; } static uint32_t lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_LOGO_ACC; spin_unlock_irq(shost->host_lock); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(shost->host_lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; } else { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(shost->host_lock); } return ndlp->nlp_state; } static uint32_t lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, 
*rspiocb; IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; irsp = &rspiocb->iocb; if (irsp->ulpStatus) { ndlp->nlp_flag |= NLP_DEFER_RM; return NLP_STE_FREED_NODE; } return ndlp->nlp_state; } static uint32_t lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; irsp = &rspiocb->iocb; if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } return ndlp->nlp_state; } static uint32_t lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (ndlp->nlp_DID == Fabric_DID) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); spin_unlock_irq(shost->host_lock); } lpfc_unreg_rpi(vport, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; irsp = &rspiocb->iocb; if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } return ndlp->nlp_state; } static uint32_t lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; MAILBOX_t *mb = &pmb->u.mb; if (!mb->mbxStatus) { /* SLI4 ports have preallocated logical rpis. 
*/ if (vport->phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_REGISTERED; } else { if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } return ndlp->nlp_state; } static uint32_t lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; spin_unlock_irq(shost->host_lock); return ndlp->nlp_state; } lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } static uint32_t lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); /* Don't do anything that will mess up processing of the * previous RSCN. */ if (vport->fc_flag & FC_RSCN_DEFERRED) return ndlp->nlp_state; lpfc_cancel_retry_delay_tmo(vport, ndlp); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(shost->host_lock); return ndlp->nlp_state; } /* This next section defines the NPort Discovery State Machine */ /* There are 4 different double linked lists nodelist entries can reside on. * The plogi list and adisc list are used when Link Up discovery or RSCN * processing is needed. Each list holds the nodes that we will send PLOGI * or ADISC on. These lists will keep track of what nodes will be effected * by an RSCN, or a Link Up (Typically, all nodes are effected on Link Up). * The unmapped_list will contain all nodes that we have successfully logged * into at the Fibre Channel level. The mapped_list will contain all nodes * that are mapped FCP targets. */ /* * The bind list is a list of undiscovered (potentially non-existent) nodes * that we have saved binding information on. 
This information is used when * nodes transition from the unmapped to the mapped list. */ /* For UNUSED_NODE state, the node has just been allocated . * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list * and put on the unmapped list. For ADISC processing, the node is taken off * the ADISC list and placed on either the mapped or unmapped list (depending * on its previous state). Once on the unmapped list, a PRLI is issued and the * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is * changed to UNMAPPED_NODE. If the completion indicates a mapped * node, the node is taken off the unmapped list. The binding list is checked * for a valid binding, or a binding is automatically assigned. If binding * assignment is unsuccessful, the node is left on the unmapped list. If * binding assignment is successful, the associated binding list entry (if * any) is removed, and the node is placed on the mapped list. */ /* * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers * expire, all effected nodes will receive a DEVICE_RM event. */ /* * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap * check, additional nodes may be added or removed (via DEVICE_RM) to / from * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated, * we will first process the ADISC list. 32 entries are processed initially and * ADISC is initited for each one. Completions / Events for each node are * funnelled thru the state machine. As each node finishes ADISC processing, it * starts ADISC for any nodes waiting for ADISC processing. If no nodes are * waiting, and the ADISC list count is identically 0, then we are done. 
For * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI * list. 32 entries are processed initially and PLOGI is initited for each one. * Completions / Events for each node are funnelled thru the state machine. As * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is * indentically 0, then we are done. We have now completed discovery / RSCN * handling. Upon completion, ALL nodes should be on either the mapped or * unmapped lists. */ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = { /* Action routine Event Current State */ lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */ lpfc_rcv_els_unused_node, /* RCV_PRLI */ lpfc_rcv_logo_unused_node, /* RCV_LOGO */ lpfc_rcv_els_unused_node, /* RCV_ADISC */ lpfc_rcv_els_unused_node, /* RCV_PDISC */ lpfc_rcv_els_unused_node, /* RCV_PRLO */ lpfc_disc_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_disc_illegal, /* CMPL_REG_LOGIN */ lpfc_device_rm_unused_node, /* DEVICE_RM */ lpfc_disc_illegal, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */ lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */ lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */ lpfc_rcv_els_plogi_issue, /* RCV_ADISC */ lpfc_rcv_els_plogi_issue, /* RCV_PDISC */ lpfc_rcv_els_plogi_issue, /* RCV_PRLO */ lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_cmpl_logo_plogi_issue, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN */ lpfc_device_rm_plogi_issue, /* DEVICE_RM */ lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */ 
lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */ lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */ lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */ lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */ lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */ lpfc_disc_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */ lpfc_disc_illegal, /* CMPL_REG_LOGIN */ lpfc_device_rm_adisc_issue, /* DEVICE_RM */ lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */ lpfc_rcv_prli_reglogin_issue, /* RCV_PLOGI */ lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */ lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */ lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */ lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */ lpfc_device_rm_reglogin_issue, /* DEVICE_RM */ lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */ lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */ lpfc_rcv_prli_prli_issue, /* RCV_PRLI */ lpfc_rcv_logo_prli_issue, /* RCV_LOGO */ lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */ lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */ lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */ lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_disc_illegal, /* CMPL_REG_LOGIN */ lpfc_device_rm_prli_issue, /* DEVICE_RM */ lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */ lpfc_rcv_prli_unmap_node, /* RCV_PRLI */ lpfc_rcv_logo_unmap_node, /* RCV_LOGO */ lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */ lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */ lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */ lpfc_disc_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ 
lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_disc_illegal, /* CMPL_REG_LOGIN */ lpfc_disc_illegal, /* DEVICE_RM */ lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */ lpfc_rcv_prli_mapped_node, /* RCV_PRLI */ lpfc_rcv_logo_mapped_node, /* RCV_LOGO */ lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */ lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */ lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */ lpfc_disc_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_disc_illegal, /* CMPL_REG_LOGIN */ lpfc_disc_illegal, /* DEVICE_RM */ lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */ lpfc_rcv_prli_npr_node, /* RCV_PRLI */ lpfc_rcv_logo_npr_node, /* RCV_LOGO */ lpfc_rcv_padisc_npr_node, /* RCV_ADISC */ lpfc_rcv_padisc_npr_node, /* RCV_PDISC */ lpfc_rcv_prlo_npr_node, /* RCV_PRLO */ lpfc_cmpl_plogi_npr_node, /* CMPL_PLOGI */ lpfc_cmpl_prli_npr_node, /* CMPL_PRLI */ lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */ lpfc_cmpl_adisc_npr_node, /* CMPL_ADISC */ lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */ lpfc_device_rm_npr_node, /* DEVICE_RM */ lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */ }; int lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { uint32_t cur_state, rc; uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t); uint32_t got_ndlp = 0; if (lpfc_nlp_get(ndlp)) got_ndlp = 1; cur_state = ndlp->nlp_state; /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0211 DSM in event x%x on NPort x%x in " "state %d Data: x%x\n", evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, "DSM in: evt:%d ste:%d did:x%x", evt, cur_state, ndlp->nlp_DID); func = lpfc_disc_action[(cur_state * 
NLP_EVT_MAX_EVENT) + evt]; rc = (func) (vport, ndlp, arg, evt); /* DSM out state <rc> on NPort <nlp_DID> */ if (got_ndlp) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0212 DSM out state %d on NPort x%x Data: x%x\n", rc, ndlp->nlp_DID, ndlp->nlp_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, "DSM out: ste:%d did:x%x flg:x%x", rc, ndlp->nlp_DID, ndlp->nlp_flag); /* Decrement the ndlp reference count held for this function */ lpfc_nlp_put(ndlp); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0213 DSM out state %d on NPort free\n", rc); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, "DSM out: ste:%d did:x%x flg:x%x", rc, 0, 0); } return rc; }
gpl-2.0
flar2/m7-bulletproof
drivers/staging/keucr/smilmain.c
7307
45354
#include <linux/slab.h> #include "usb.h" #include "scsiglue.h" #include "smcommon.h" #include "smil.h" int Check_D_LogCHS (WORD *,BYTE *,BYTE *); void Initialize_D_Media (void); void PowerOff_D_Media (void); int Check_D_MediaPower (void); int Check_D_MediaExist (void); int Check_D_MediaWP (void); int Check_D_MediaFmt (struct us_data *); int Check_D_MediaFmtForEraseAll (struct us_data *); int Conv_D_MediaAddr (struct us_data *, DWORD); int Inc_D_MediaAddr (struct us_data *); int Check_D_FirstSect (void); int Check_D_LastSect (void); int Media_D_ReadOneSect (struct us_data *, WORD, BYTE *); int Media_D_WriteOneSect (struct us_data *, WORD, BYTE *); int Media_D_CopyBlockHead (struct us_data *); int Media_D_CopyBlockTail (struct us_data *); int Media_D_EraseOneBlock (void); int Media_D_EraseAllBlock (void); int Copy_D_BlockAll (struct us_data *, DWORD); int Copy_D_BlockHead (struct us_data *); int Copy_D_BlockTail (struct us_data *); int Reassign_D_BlockHead (struct us_data *); int Assign_D_WriteBlock (void); int Release_D_ReadBlock (struct us_data *); int Release_D_WriteBlock (struct us_data *); int Release_D_CopySector (struct us_data *); int Copy_D_PhyOneSect (struct us_data *); int Read_D_PhyOneSect (struct us_data *, WORD, BYTE *); int Write_D_PhyOneSect (struct us_data *, WORD, BYTE *); int Erase_D_PhyOneBlock (struct us_data *); int Set_D_PhyFmtValue (struct us_data *); int Search_D_CIS (struct us_data *); int Make_D_LogTable (struct us_data *); void Check_D_BlockIsFull (void); int MarkFail_D_PhyOneBlock (struct us_data *); DWORD ErrXDCode; DWORD ErrCode; //BYTE SectBuf[SECTSIZE]; static BYTE WorkBuf[SECTSIZE]; static BYTE Redundant[REDTSIZE]; static BYTE WorkRedund[REDTSIZE]; //WORD Log2Phy[MAX_ZONENUM][MAX_LOGBLOCK]; static WORD *Log2Phy[MAX_ZONENUM]; // 128 x 1000, Log2Phy[MAX_ZONENUM][MAX_LOGBLOCK]; static BYTE Assign[MAX_ZONENUM][MAX_BLOCKNUM/8]; static WORD AssignStart[MAX_ZONENUM]; WORD ReadBlock; WORD WriteBlock; DWORD MediaChange; static DWORD 
SectCopyMode; //BIT Control Macro static BYTE BitData[] = { 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80 } ; #define Set_D_Bit(a,b) (a[(BYTE)((b)/8)]|= BitData[(b)%8]) #define Clr_D_Bit(a,b) (a[(BYTE)((b)/8)]&=~BitData[(b)%8]) #define Chk_D_Bit(a,b) (a[(BYTE)((b)/8)] & BitData[(b)%8]) //extern PBYTE SMHostAddr; BYTE IsSSFDCCompliance; BYTE IsXDCompliance; // ////Power Control & Media Exist Check Function ////----- Init_D_SmartMedia() -------------------------------------------- //int Init_D_SmartMedia(void) //{ // int i; // // EMCR_Print("Init_D_SmartMedia start\n"); // for (i=0; i<MAX_ZONENUM; i++) // { // if (Log2Phy[i]!=NULL) // { // EMCR_Print("ExFreePool Zone = %x, Addr = %x\n", i, Log2Phy[i]); // ExFreePool(Log2Phy[i]); // Log2Phy[i] = NULL; // } // } // // Initialize_D_Media(); // return(NO_ERROR); //} //----- SM_FreeMem() ------------------------------------------------- int SM_FreeMem(void) { int i; pr_info("SM_FreeMem start\n"); for (i=0; i<MAX_ZONENUM; i++) { if (Log2Phy[i]!=NULL) { pr_info("Free Zone = %x, Addr = %p\n", i, Log2Phy[i]); kfree(Log2Phy[i]); Log2Phy[i] = NULL; } } return(NO_ERROR); } ////----- Pwoff_D_SmartMedia() ------------------------------------------- //int Pwoff_D_SmartMedia(void) //{ // PowerOff_D_Media(); // return(NO_ERROR); //} // ////----- Check_D_SmartMedia() ------------------------------------------- //int Check_D_SmartMedia(void) //{ // if (Check_D_MediaExist()) // return(ErrCode); // // return(NO_ERROR); //} // ////----- Check_D_Parameter() -------------------------------------------- //int Check_D_Parameter(PFDO_DEVICE_EXTENSION fdoExt,WORD *pcyl,BYTE *phead,BYTE *psect) //{ // if (Check_D_MediaPower()) // return(ErrCode); // // if (Check_D_MediaFmt(fdoExt)) // return(ErrCode); // // if (Check_D_LogCHS(pcyl,phead,psect)) // return(ErrCode); // // return(NO_ERROR); //} //SmartMedia Read/Write/Erase Function //----- Media_D_ReadSector() ------------------------------------------- int Media_D_ReadSector(struct us_data *us, DWORD 
start,WORD count,BYTE *buf) { WORD len, bn; //if (Check_D_MediaPower()) ; ¦b 6250 don't care // return(ErrCode); //if (Check_D_MediaFmt(fdoExt)) ; // return(ErrCode); if (Conv_D_MediaAddr(us, start)) return(ErrCode); while(1) { len = Ssfdc.MaxSectors - Media.Sector; if (count > len) bn = len; else bn = count; //if (Media_D_ReadOneSect(fdoExt, SectBuf)) //if (Media_D_ReadOneSect(fdoExt, count, buf)) if (Media_D_ReadOneSect(us, bn, buf)) { ErrCode = ERR_EccReadErr; return(ErrCode); } Media.Sector += bn; count -= bn; if (count<=0) break; buf += bn * SECTSIZE; if (Inc_D_MediaAddr(us)) return(ErrCode); } return(NO_ERROR); } // here //----- Media_D_CopySector() ------------------------------------------ int Media_D_CopySector(struct us_data *us, DWORD start,WORD count,BYTE *buf) { //DWORD mode; //int i; WORD len, bn; //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; /* pr_info("Media_D_CopySector !!!\n"); */ if (Conv_D_MediaAddr(us, start)) return(ErrCode); while(1) { if (Assign_D_WriteBlock()) return(ERROR); len = Ssfdc.MaxSectors - Media.Sector; if (count > len) bn = len; else bn = count; //if (Ssfdc_D_CopyBlock(fdoExt,count,buf,Redundant)) if (Ssfdc_D_CopyBlock(us,bn,buf,Redundant)) { ErrCode = ERR_WriteFault; return(ErrCode); } Media.Sector = 0x1F; //if (Release_D_ReadBlock(fdoExt)) if (Release_D_CopySector(us)) { if (ErrCode==ERR_HwError) { ErrCode = ERR_WriteFault; return(ErrCode); } } count -= bn; if (count<=0) break; buf += bn * SECTSIZE; if (Inc_D_MediaAddr(us)) return(ErrCode); } return(NO_ERROR); } //----- Release_D_CopySector() ------------------------------------------ int Release_D_CopySector(struct us_data *us) { //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; Log2Phy[Media.Zone][Media.LogBlock]=WriteBlock; Media.PhyBlock=ReadBlock; if (Media.PhyBlock==NO_ASSIGN) { Media.PhyBlock=WriteBlock; return(SMSUCCESS); } Clr_D_Bit(Assign[Media.Zone],Media.PhyBlock); Media.PhyBlock=WriteBlock; 
return(SMSUCCESS); } /* //----- Media_D_WriteSector() ------------------------------------------ int Media_D_WriteSector(PFDO_DEVICE_EXTENSION fdoExt, DWORD start,WORD count,BYTE *buf) { int i; WORD len, bn; SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; ADDRESS_T bb = (ADDRESS_T) &Media; //if (Check_D_MediaPower()) // return(ErrCode); // //if (Check_D_MediaFmt(fdoExt)) // return(ErrCode); // //if (Check_D_MediaWP()) // return(ErrCode); if (Conv_D_MediaAddr(fdoExt, start)) return(ErrCode); //ENE_Print("Media_D_WriteSector --- Sector = %x\n", Media.Sector); if (Check_D_FirstSect()) { if (Media_D_CopyBlockHead(fdoExt)) { ErrCode = ERR_WriteFault; return(ErrCode); } } while(1) { if (!Check_D_FirstSect()) { if (Assign_D_WriteBlock()) return(ErrCode); } len = Ssfdc.MaxSectors - Media.Sector; if (count > len) bn = len; else bn = count; //for(i=0;i<SECTSIZE;i++) // SectBuf[i]=*buf++; //if (Media_D_WriteOneSect(fdoExt, SectBuf)) if (Media_D_WriteOneSect(fdoExt, bn, buf)) { ErrCode = ERR_WriteFault; return(ErrCode); } Media.Sector += bn - 1; if (!Check_D_LastSect()) { if (Release_D_ReadBlock(fdoExt)) { if (ErrCode==ERR_HwError) { ErrCode = ERR_WriteFault; return(ErrCode); } } } count -= bn; if (count<=0) break; buf += bn * SECTSIZE; //if (--count<=0) // break; if (Inc_D_MediaAddr(fdoExt)) return(ErrCode); } if (!Check_D_LastSect()) return(NO_ERROR); if (Inc_D_MediaAddr(fdoExt)) return(ErrCode); if (Media_D_CopyBlockTail(fdoExt)) { ErrCode = ERR_WriteFault; return(ErrCode); } return(NO_ERROR); } // ////----- Media_D_EraseBlock() ------------------------------------------- //int Media_D_EraseBlock(PFDO_DEVICE_EXTENSION fdoExt, DWORD start,WORD count) //{ // if (Check_D_MediaPower()) // return(ErrCode); // // if (Check_D_MediaFmt(fdoExt)) // return(ErrCode); // // if (Check_D_MediaWP()) // return(ErrCode); // // if (Conv_D_MediaAddr(start)) // return(ErrCode); // // while(Check_D_FirstSect()) { // if (Inc_D_MediaAddr(fdoExt)) // return(ErrCode); // // if (--count<=0) // 
return(NO_ERROR); // } // // while(1) { // if (!Check_D_LastSect()) // if (Media_D_EraseOneBlock()) // if (ErrCode==ERR_HwError) // { // ErrCode = ERR_WriteFault; // return(ErrCode); // } // // if (Inc_D_MediaAddr(fdoExt)) // return(ErrCode); // // if (--count<=0) // return(NO_ERROR); // } //} // ////----- Media_D_EraseAll() --------------------------------------------- //int Media_D_EraseAll(PFDO_DEVICE_EXTENSION fdoExt) //{ // if (Check_D_MediaPower()) // return(ErrCode); // // if (Check_D_MediaFmtForEraseAll(fdoExt)) // return(ErrCode); // // if (Check_D_MediaWP()) // return(ErrCode); // // if (Media_D_EraseAllBlock()) // return(ErrCode); // // return(NO_ERROR); //} //SmartMedia Write Function for One Sector Write Mode //----- Media_D_OneSectWriteStart() ------------------------------------ int Media_D_OneSectWriteStart(PFDO_DEVICE_EXTENSION fdoExt,DWORD start,BYTE *buf) { // int i; // SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; // ADDRESS_T bb = (ADDRESS_T) &Media; // // //if (Check_D_MediaPower()) // // return(ErrCode); // //if (Check_D_MediaFmt(fdoExt)) // // return(ErrCode); // //if (Check_D_MediaWP()) // // return(ErrCode); // if (Conv_D_MediaAddr(fdoExt, start)) // return(ErrCode); // // if (Check_D_FirstSect()) // if (Media_D_CopyBlockHead(fdoExt)) // { // ErrCode = ERR_WriteFault; // return(ErrCode); // } // // if (!Check_D_FirstSect()) // if (Assign_D_WriteBlock()) // return(ErrCode); // // //for(i=0;i<SECTSIZE;i++) // // SectBuf[i]=*buf++; // // //if (Media_D_WriteOneSect(fdoExt, SectBuf)) // if (Media_D_WriteOneSect(fdoExt, buf)) // { // ErrCode = ERR_WriteFault; // return(ErrCode); // } // // if (!Check_D_LastSect()) // { // if (Release_D_ReadBlock(fdoExt)) // if (ErrCode==ERR_HwError) // { // ErrCode = ERR_WriteFault; // return(ErrCode); // } // } return(NO_ERROR); } //----- Media_D_OneSectWriteNext() ------------------------------------- int Media_D_OneSectWriteNext(PFDO_DEVICE_EXTENSION fdoExt, BYTE *buf) { // int i; // SSFDCTYPE_T aa = (SSFDCTYPE_T ) 
&Ssfdc; // ADDRESS_T bb = (ADDRESS_T) &Media; // // if (Inc_D_MediaAddr(fdoExt)) // return(ErrCode); // // if (!Check_D_FirstSect()) // if (Assign_D_WriteBlock()) // return(ErrCode); // // //for(i=0;i<SECTSIZE;i++) // // SectBuf[i]=*buf++; // // //if (Media_D_WriteOneSect(fdoExt, SectBuf)) // if (Media_D_WriteOneSect(fdoExt, buf)) // { // ErrCode = ERR_WriteFault; // return(ErrCode); // } // // if (!Check_D_LastSect()) // { // if (Release_D_ReadBlock(fdoExt)) // if (ErrCode==ERR_HwError) // { // ErrCode = ERR_WriteFault; // return(ErrCode); // } // } return(NO_ERROR); } //----- Media_D_OneSectWriteFlush() ------------------------------------ int Media_D_OneSectWriteFlush(PFDO_DEVICE_EXTENSION fdoExt) { if (!Check_D_LastSect()) return(NO_ERROR); if (Inc_D_MediaAddr(fdoExt)) return(ErrCode); if (Media_D_CopyBlockTail(fdoExt)) { ErrCode = ERR_WriteFault; return(ErrCode); } return(NO_ERROR); } // ////LED Tern On/Off Subroutine ////----- SM_EnableLED() ----------------------------------------------- //void SM_EnableLED(PFDO_DEVICE_EXTENSION fdoExt, BOOLEAN enable) //{ // if (fdoExt->Drive_IsSWLED) // { // if (enable) // Led_D_TernOn(); // else // Led_D_TernOff(); // } //} // ////----- Led_D_TernOn() ------------------------------------------------- //void Led_D_TernOn(void) //{ // if (Check_D_CardStsChg()) // MediaChange=ERROR; // // Cnt_D_LedOn(); //} // ////----- Led_D_TernOff() ------------------------------------------------ //void Led_D_TernOff(void) //{ // if (Check_D_CardStsChg()) // MediaChange=ERROR; // // Cnt_D_LedOff(); //} // ////SmartMedia Logical Format Subroutine ////----- Check_D_LogCHS() ----------------------------------------------- //int Check_D_LogCHS(WORD *c,BYTE *h,BYTE *s) //{ // switch(Ssfdc.Model) { // case SSFDC1MB: *c=125; *h= 4; *s= 4; break; // case SSFDC2MB: *c=125; *h= 4; *s= 8; break; // case SSFDC4MB: *c=250; *h= 4; *s= 8; break; // case SSFDC8MB: *c=250; *h= 4; *s=16; break; // case SSFDC16MB: *c=500; *h= 4; *s=16; break; // case 
SSFDC32MB: *c=500; *h= 8; *s=16; break; // case SSFDC64MB: *c=500; *h= 8; *s=32; break; // case SSFDC128MB: *c=500; *h=16; *s=32; break; // default: *c= 0; *h= 0; *s= 0; ErrCode = ERR_NoSmartMedia; return(ERROR); // } // // return(SMSUCCESS); //} // ////Power Control & Media Exist Check Subroutine ////----- Initialize_D_Media() ------------------------------------------- //void Initialize_D_Media(void) //{ // ErrCode = NO_ERROR; // MediaChange = ERROR; // SectCopyMode = COMPLETED; // Cnt_D_Reset(); //} // ////----- PowerOff_D_Media() --------------------------------------------- //void PowerOff_D_Media(void) //{ // Cnt_D_PowerOff(); //} // ////----- Check_D_MediaPower() ------------------------------------------- //int Check_D_MediaPower(void) //{ // //usleep(56*1024); // if (Check_D_CardStsChg()) // MediaChange = ERROR; // //usleep(56*1024); // if ((!Check_D_CntPower())&&(!MediaChange)) // ¦³ power & Media ¨S³Q change, «h return success // return(SMSUCCESS); // //usleep(56*1024); // // if (Check_D_CardExist()) // Check if card is not exist, return err // { // ErrCode = ERR_NoSmartMedia; // MediaChange = ERROR; // return(ERROR); // } // //usleep(56*1024); // if (Cnt_D_PowerOn()) // { // ErrCode = ERR_NoSmartMedia; // MediaChange = ERROR; // return(ERROR); // } // //usleep(56*1024); // Ssfdc_D_Reset(fdoExt); // //usleep(56*1024); // return(SMSUCCESS); //} // ////-----Check_D_MediaExist() -------------------------------------------- //int Check_D_MediaExist(void) //{ // if (Check_D_CardStsChg()) // MediaChange = ERROR; // // if (!Check_D_CardExist()) // { // if (!MediaChange) // return(SMSUCCESS); // // ErrCode = ERR_ChangedMedia; // return(ERROR); // } // // ErrCode = ERR_NoSmartMedia; // // return(ERROR); //} // ////----- Check_D_MediaWP() ---------------------------------------------- //int Check_D_MediaWP(void) //{ // if (Ssfdc.Attribute &MWP) // { // ErrCode = ERR_WrtProtect; // return(ERROR); // } // // return(SMSUCCESS); //} */ //SmartMedia Physical Format 
Test Subroutine //----- Check_D_MediaFmt() --------------------------------------------- int Check_D_MediaFmt(struct us_data *us) { pr_info("Check_D_MediaFmt\n"); //ULONG i,j, result=FALSE, zone,block; //usleep(56*1024); if (!MediaChange) return(SMSUCCESS); MediaChange = ERROR; SectCopyMode = COMPLETED; //usleep(56*1024); if (Set_D_PhyFmtValue(us)) { ErrCode = ERR_UnknownMedia; return(ERROR); } //usleep(56*1024); if (Search_D_CIS(us)) { ErrCode = ERR_IllegalFmt; return(ERROR); } MediaChange = SMSUCCESS; return(SMSUCCESS); } /* ////----- Check_D_BlockIsFull() ---------------------------------- //void Check_D_BlockIsFull() //{ // ULONG i, block; // // if (IsXDCompliance || IsSSFDCCompliance) // { // // If the blocks are full then return write-protect. // block = Ssfdc.MaxBlocks/8; // for (Media.Zone=0; Media.Zone<Ssfdc.MaxZones; Media.Zone++) // { // if (Log2Phy[Media.Zone]==NULL) // { // if (Make_D_LogTable()) // { // ErrCode = ERR_IllegalFmt; // return; // } // } // // for (i=0; i<block; i++) // { // if (Assign[Media.Zone][i] != 0xFF) // return; // } // } // Ssfdc.Attribute |= WP; // } //} // // ////----- Check_D_MediaFmtForEraseAll() ---------------------------------- //int Check_D_MediaFmtForEraseAll(PFDO_DEVICE_EXTENSION fdoExt) //{ // MediaChange = ERROR; // SectCopyMode = COMPLETED; // // if (Set_D_PhyFmtValue(fdoExt)) // { // ErrCode = ERR_UnknownMedia; // return(ERROR); // } // // if (Search_D_CIS(fdoExt)) // { // ErrCode = ERR_IllegalFmt; // return(ERROR); // } // // return(SMSUCCESS); //} */ //SmartMedia Physical Address Control Subroutine //----- Conv_D_MediaAddr() --------------------------------------------- int Conv_D_MediaAddr(struct us_data *us, DWORD addr) { DWORD temp; //ULONG zz; //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; temp = addr/Ssfdc.MaxSectors; Media.Zone = (BYTE) (temp/Ssfdc.MaxLogBlocks); if (Log2Phy[Media.Zone]==NULL) { if (Make_D_LogTable(us)) { ErrCode = ERR_IllegalFmt; return(ERROR); } } 
Media.Sector = (BYTE) (addr%Ssfdc.MaxSectors); Media.LogBlock = (WORD) (temp%Ssfdc.MaxLogBlocks); if (Media.Zone<Ssfdc.MaxZones) { Clr_D_RedundantData(Redundant); Set_D_LogBlockAddr(Redundant); Media.PhyBlock = Log2Phy[Media.Zone][Media.LogBlock]; return(SMSUCCESS); } ErrCode = ERR_OutOfLBA; return(ERROR); } //----- Inc_D_MediaAddr() ---------------------------------------------- int Inc_D_MediaAddr(struct us_data *us) { WORD LogBlock = Media.LogBlock; //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; if (++Media.Sector<Ssfdc.MaxSectors) return(SMSUCCESS); if (Log2Phy[Media.Zone]==NULL) { if (Make_D_LogTable(us)) { ErrCode = ERR_IllegalFmt; return(ERROR); } } Media.Sector=0; Media.LogBlock = LogBlock; if (++Media.LogBlock<Ssfdc.MaxLogBlocks) { Clr_D_RedundantData(Redundant); Set_D_LogBlockAddr(Redundant); Media.PhyBlock=Log2Phy[Media.Zone][Media.LogBlock]; return(SMSUCCESS); } Media.LogBlock=0; if (++Media.Zone<Ssfdc.MaxZones) { if (Log2Phy[Media.Zone]==NULL) { if (Make_D_LogTable(us)) { ErrCode = ERR_IllegalFmt; return(ERROR); } } Media.LogBlock = 0; Clr_D_RedundantData(Redundant); Set_D_LogBlockAddr(Redundant); Media.PhyBlock=Log2Phy[Media.Zone][Media.LogBlock]; return(SMSUCCESS); } Media.Zone=0; ErrCode = ERR_OutOfLBA; return(ERROR); } /* //----- Check_D_FirstSect() -------------------------------------------- int Check_D_FirstSect(void) { SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; ADDRESS_T bb = (ADDRESS_T) &Media; if (!Media.Sector) return(SMSUCCESS); return(ERROR); } //----- Check_D_LastSect() --------------------------------------------- int Check_D_LastSect(void) { SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; ADDRESS_T bb = (ADDRESS_T) &Media; if (Media.Sector<(Ssfdc.MaxSectors-1)) return(ERROR); return(SMSUCCESS); } */ //SmartMedia Read/Write Subroutine with Retry //----- Media_D_ReadOneSect() ------------------------------------------ int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf) { DWORD err, retry; if 
(!Read_D_PhyOneSect(us, count, buf)) return(SMSUCCESS); if (ErrCode==ERR_HwError) return(ERROR); if (ErrCode==ERR_DataStatus) return(ERROR); #ifdef RDERR_REASSIGN if (Ssfdc.Attribute &MWP) { if (ErrCode==ERR_CorReadErr) return(SMSUCCESS); return(ERROR); } err=ErrCode; for(retry=0; retry<2; retry++) { if (Copy_D_BlockAll(us, (err==ERR_EccReadErr)?REQ_FAIL:REQ_ERASE)) { if (ErrCode==ERR_HwError) return(ERROR); continue; } ErrCode = err; if (ErrCode==ERR_CorReadErr) return(SMSUCCESS); return(ERROR); } MediaChange = ERROR; #else if (ErrCode==ERR_CorReadErr) return(SMSUCCESS); #endif return(ERROR); } /* //----- Media_D_WriteOneSect() ----------------------------------------- int Media_D_WriteOneSect(PFDO_DEVICE_EXTENSION fdoExt, WORD count, BYTE *buf) { DWORD retry; SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; ADDRESS_T bb = (ADDRESS_T) &Media; if (!Write_D_PhyOneSect(fdoExt, count, buf)) return(SMSUCCESS); if (ErrCode==ERR_HwError) return(ERROR); for(retry=1; retry<2; retry++) { if (Reassign_D_BlockHead(fdoExt)) { if (ErrCode==ERR_HwError) return(ERROR); continue; } if (!Write_D_PhyOneSect(fdoExt, count, buf)) return(SMSUCCESS); if (ErrCode==ERR_HwError) return(ERROR); } if (Release_D_WriteBlock(fdoExt)) return(ERROR); ErrCode = ERR_WriteFault; MediaChange = ERROR; return(ERROR); } //SmartMedia Data Copy Subroutine with Retry //----- Media_D_CopyBlockHead() ---------------------------------------- int Media_D_CopyBlockHead(PFDO_DEVICE_EXTENSION fdoExt) { DWORD retry; for(retry=0; retry<2; retry++) { if (!Copy_D_BlockHead(fdoExt)) return(SMSUCCESS); if (ErrCode==ERR_HwError) return(ERROR); } MediaChange = ERROR; return(ERROR); } //----- Media_D_CopyBlockTail() ---------------------------------------- int Media_D_CopyBlockTail(PFDO_DEVICE_EXTENSION fdoExt) { DWORD retry; if (!Copy_D_BlockTail(fdoExt)) return(SMSUCCESS); if (ErrCode==ERR_HwError) return(ERROR); for(retry=1; retry<2; retry++) { if (Reassign_D_BlockHead(fdoExt)) { if (ErrCode==ERR_HwError) return(ERROR); 
continue; } if (!Copy_D_BlockTail(fdoExt)) return(SMSUCCESS); if (ErrCode==ERR_HwError) return(ERROR); } if (Release_D_WriteBlock(fdoExt)) return(ERROR); ErrCode = ERR_WriteFault; MediaChange = ERROR; return(ERROR); } // ////----- Media_D_EraseOneBlock() ---------------------------------------- //int Media_D_EraseOneBlock(void) //{ // WORD LogBlock = Media.LogBlock; // WORD PhyBlock = Media.PhyBlock; // SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; // ADDRESS_T bb = (ADDRESS_T) &Media; // // if (Media.PhyBlock==NO_ASSIGN) // return(SMSUCCESS); // // if (Log2Phy[Media.Zone]==NULL) // { // if (Make_D_LogTable()) // { // ErrCode = ERR_IllegalFmt; // return(ERROR); // } // } // Media.LogBlock = LogBlock; // Media.PhyBlock = PhyBlock; // // Log2Phy[Media.Zone][Media.LogBlock]=NO_ASSIGN; // // if (Erase_D_PhyOneBlock(fdoExt)) // { // if (ErrCode==ERR_HwError) // return(ERROR); // if (MarkFail_D_PhyOneBlock()) // return(ERROR); // // ErrCode = ERR_WriteFault; // return(ERROR); // } // // Clr_D_Bit(Assign[Media.Zone],Media.PhyBlock); // Media.PhyBlock=NO_ASSIGN; // return(SMSUCCESS); //} // ////SmartMedia Erase Subroutine ////----- Media_D_EraseAllBlock() ---------------------------------------- //int Media_D_EraseAllBlock(void) //{ // WORD cis=0; // // SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; // ADDRESS_T bb = (ADDRESS_T) &Media; // // MediaChange = ERROR; // Media.Sector = 0; // // for(Media.Zone=0; Media.Zone<Ssfdc.MaxZones; Media.Zone++) // for(Media.PhyBlock=0; Media.PhyBlock<Ssfdc.MaxBlocks; Media.PhyBlock++) { // if (Ssfdc_D_ReadRedtData(Redundant)) // { // Ssfdc_D_Reset(fdoExt); // return(ERROR); // } // // Ssfdc_D_Reset(fdoExt); // if (!Check_D_FailBlock(Redundant)) // { // if (cis) // { // if (Ssfdc_D_EraseBlock(fdoExt)) // { // ErrCode = ERR_HwError; // return(ERROR); // } // // if (Ssfdc_D_CheckStatus()) // { // if (MarkFail_D_PhyOneBlock()) // return(ERROR); // } // // continue; // } // // if (Media.PhyBlock!=CisArea.PhyBlock) // { // ErrCode = ERR_IllegalFmt; // 
return(ERROR); // } // // cis++; // } // // } // return(SMSUCCESS); //} */ //SmartMedia Physical Sector Data Copy Subroutine //----- Copy_D_BlockAll() ---------------------------------------------- int Copy_D_BlockAll(struct us_data *us, DWORD mode) { BYTE sect; //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; sect=Media.Sector; if (Assign_D_WriteBlock()) return(ERROR); if (mode==REQ_FAIL) SectCopyMode=REQ_FAIL; for(Media.Sector=0; Media.Sector<Ssfdc.MaxSectors; Media.Sector++) { if (Copy_D_PhyOneSect(us)) { if (ErrCode==ERR_HwError) return(ERROR); if (Release_D_WriteBlock(us)) return(ERROR); ErrCode = ERR_WriteFault; Media.PhyBlock=ReadBlock; Media.Sector=sect; return(ERROR); } } if (Release_D_ReadBlock(us)) return(ERROR); Media.PhyBlock=WriteBlock; Media.Sector=sect; return(SMSUCCESS); } /* //----- Copy_D_BlockHead() --------------------------------------------- int Copy_D_BlockHead(PFDO_DEVICE_EXTENSION fdoExt) { BYTE sect; SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; ADDRESS_T bb = (ADDRESS_T) &Media; sect=Media.Sector; if (Assign_D_WriteBlock()) return(ERROR); for(Media.Sector=0; Media.Sector<sect; Media.Sector++) { if (Copy_D_PhyOneSect(fdoExt)) { if (ErrCode==ERR_HwError) return(ERROR); if (Release_D_WriteBlock(fdoExt)) return(ERROR); ErrCode = ERR_WriteFault; Media.PhyBlock=ReadBlock; Media.Sector=sect; return(ERROR); } } Media.PhyBlock=WriteBlock; Media.Sector=sect; return(SMSUCCESS); } //----- Copy_D_BlockTail() --------------------------------------------- int Copy_D_BlockTail(PFDO_DEVICE_EXTENSION fdoExt) { BYTE sect; SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; ADDRESS_T bb = (ADDRESS_T) &Media; for(sect=Media.Sector; Media.Sector<Ssfdc.MaxSectors; Media.Sector++) { if (Copy_D_PhyOneSect(fdoExt)) { if (ErrCode==ERR_HwError) return(ERROR); Media.PhyBlock=WriteBlock; Media.Sector=sect; return(ERROR); } } if (Release_D_ReadBlock(fdoExt)) return(ERROR); Media.PhyBlock=WriteBlock; Media.Sector=sect; return(SMSUCCESS); } //----- 
Reassign_D_BlockHead() ----------------------------------------- int Reassign_D_BlockHead(PFDO_DEVICE_EXTENSION fdoExt) { DWORD mode; WORD block; BYTE sect; SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; ADDRESS_T bb = (ADDRESS_T) &Media; mode=SectCopyMode; block=ReadBlock; sect=Media.Sector; if (Assign_D_WriteBlock()) return(ERROR); SectCopyMode=REQ_FAIL; for(Media.Sector=0; Media.Sector<sect; Media.Sector++) { if (Copy_D_PhyOneSect(fdoExt)) { if (ErrCode==ERR_HwError) return(ERROR); if (Release_D_WriteBlock(fdoExt)) return(ERROR); ErrCode = ERR_WriteFault; SectCopyMode=mode; WriteBlock=ReadBlock; ReadBlock=block; Media.Sector=sect; Media.PhyBlock=WriteBlock; return(ERROR); } } if (Release_D_ReadBlock(fdoExt)) return(ERROR); SectCopyMode=mode; ReadBlock=block; Media.Sector=sect; Media.PhyBlock=WriteBlock; return(SMSUCCESS); } */ //SmartMedia Physical Block Assign/Release Subroutine //----- Assign_D_WriteBlock() ------------------------------------------ int Assign_D_WriteBlock(void) { //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; ReadBlock=Media.PhyBlock; for(WriteBlock=AssignStart[Media.Zone]; WriteBlock<Ssfdc.MaxBlocks; WriteBlock++) { if (!Chk_D_Bit(Assign[Media.Zone],WriteBlock)) { Set_D_Bit(Assign[Media.Zone],WriteBlock); AssignStart[Media.Zone]=WriteBlock+1; Media.PhyBlock=WriteBlock; SectCopyMode=REQ_ERASE; //ErrXDCode = NO_ERROR; return(SMSUCCESS); } } for(WriteBlock=0; WriteBlock<AssignStart[Media.Zone]; WriteBlock++) { if (!Chk_D_Bit(Assign[Media.Zone],WriteBlock)) { Set_D_Bit(Assign[Media.Zone],WriteBlock); AssignStart[Media.Zone]=WriteBlock+1; Media.PhyBlock=WriteBlock; SectCopyMode=REQ_ERASE; //ErrXDCode = NO_ERROR; return(SMSUCCESS); } } WriteBlock=NO_ASSIGN; ErrCode = ERR_WriteFault; // For xD test //Ssfdc.Attribute |= WP; //ErrXDCode = ERR_WrtProtect; return(ERROR); } //----- Release_D_ReadBlock() ------------------------------------------ int Release_D_ReadBlock(struct us_data *us) { DWORD mode; //SSFDCTYPE_T aa = 
(SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; mode=SectCopyMode; SectCopyMode=COMPLETED; if (mode==COMPLETED) return(SMSUCCESS); Log2Phy[Media.Zone][Media.LogBlock]=WriteBlock; Media.PhyBlock=ReadBlock; if (Media.PhyBlock==NO_ASSIGN) { Media.PhyBlock=WriteBlock; return(SMSUCCESS); } if (mode==REQ_ERASE) { if (Erase_D_PhyOneBlock(us)) { if (ErrCode==ERR_HwError) return(ERROR); if (MarkFail_D_PhyOneBlock(us)) return(ERROR); } else Clr_D_Bit(Assign[Media.Zone],Media.PhyBlock); } else if (MarkFail_D_PhyOneBlock(us)) return(ERROR); Media.PhyBlock=WriteBlock; return(SMSUCCESS); } //----- Release_D_WriteBlock() ----------------------------------------- int Release_D_WriteBlock(struct us_data *us) { //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; SectCopyMode=COMPLETED; Media.PhyBlock=WriteBlock; if (MarkFail_D_PhyOneBlock(us)) return(ERROR); Media.PhyBlock=ReadBlock; return(SMSUCCESS); } //SmartMedia Physical Sector Data Copy Subroutine //----- Copy_D_PhyOneSect() -------------------------------------------- int Copy_D_PhyOneSect(struct us_data *us) { int i; DWORD err, retry; //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; /* pr_info("Copy_D_PhyOneSect --- Secotr = %x\n", Media.Sector); */ if (ReadBlock!=NO_ASSIGN) { Media.PhyBlock=ReadBlock; for(retry=0; retry<2; retry++) { if (retry!=0) { Ssfdc_D_Reset(us); if (Ssfdc_D_ReadCisSect(us,WorkBuf,WorkRedund)) { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); } if (Check_D_CISdata(WorkBuf,WorkRedund)) { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); } } if (Ssfdc_D_ReadSect(us,WorkBuf,WorkRedund)) { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); } if (Check_D_DataStatus(WorkRedund)) { err=ERROR; break; } if (!Check_D_ReadError(WorkRedund)) { err=SMSUCCESS; break; } if (!Check_D_Correct(WorkBuf,WorkRedund)) { err=SMSUCCESS; break; } err=ERROR; SectCopyMode=REQ_FAIL; } } else { err=SMSUCCESS; for(i=0; i<SECTSIZE; i++) 
WorkBuf[i]=DUMMY_DATA; Clr_D_RedundantData(WorkRedund); } Set_D_LogBlockAddr(WorkRedund); if (err==ERROR) { Set_D_RightECC(WorkRedund); Set_D_DataStaus(WorkRedund); } Media.PhyBlock=WriteBlock; if (Ssfdc_D_WriteSectForCopy(us, WorkBuf, WorkRedund)) { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); } if (Ssfdc_D_CheckStatus()) { ErrCode = ERR_WriteFault; return(ERROR); } Media.PhyBlock=ReadBlock; return(SMSUCCESS); } //SmartMedia Physical Sector Read/Write/Erase Subroutine //----- Read_D_PhyOneSect() -------------------------------------------- int Read_D_PhyOneSect(struct us_data *us, WORD count, BYTE *buf) { int i; DWORD retry; //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; if (Media.PhyBlock==NO_ASSIGN) { for(i=0; i<SECTSIZE; i++) *buf++=DUMMY_DATA; return(SMSUCCESS); } for(retry=0; retry<2; retry++) { if (retry!=0) { Ssfdc_D_Reset(us); if (Ssfdc_D_ReadCisSect(us,WorkBuf,WorkRedund)) { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); } if (Check_D_CISdata(WorkBuf,WorkRedund)) { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); } } //if (Ssfdc_D_ReadSect(fdoExt,buf,Redundant)) if (Ssfdc_D_ReadBlock(us,count,buf,Redundant)) { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); } if (Check_D_DataStatus(Redundant)) { ErrCode = ERR_DataStatus; return(ERROR); } if (!Check_D_ReadError(Redundant)) return(SMSUCCESS); if (!Check_D_Correct(buf,Redundant)) { ErrCode = ERR_CorReadErr; return(ERROR); } } ErrCode = ERR_EccReadErr; return(ERROR); } /* //----- Write_D_PhyOneSect() ------------------------------------------- int Write_D_PhyOneSect(PFDO_DEVICE_EXTENSION fdoExt, WORD count, BYTE *buf) { SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; ADDRESS_T bb = (ADDRESS_T) &Media; //if (Ssfdc_D_WriteSect(fdoExt,buf,Redundant)) if (Ssfdc_D_WriteBlock(fdoExt,count,buf,Redundant)) { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); } if (Ssfdc_D_CheckStatus()) { ErrCode = ERR_WriteFault; return(ERROR); } return(SMSUCCESS); 
} */ //----- Erase_D_PhyOneBlock() ------------------------------------------ int Erase_D_PhyOneBlock(struct us_data *us) { //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; if (Ssfdc_D_EraseBlock(us)) { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); } if (Ssfdc_D_CheckStatus()) { ErrCode = ERR_WriteFault; return(ERROR); } return(SMSUCCESS); } //SmartMedia Physical Format Check Local Subroutine //----- Set_D_PhyFmtValue() -------------------------------------------- int Set_D_PhyFmtValue(struct us_data *us) { // PPDO_DEVICE_EXTENSION pdoExt; // BYTE idcode[4]; // DWORD UserDefData_1, UserDefData_2, Data, mask; // // //if (!fdoExt->ChildDeviceObject) return(ERROR); // //pdoExt = fdoExt->ChildDeviceObject->DeviceExtension; // // Ssfdc_D_ReadID(idcode, READ_ID_1); // //if (Set_D_SsfdcModel(idcode[1])) if (Set_D_SsfdcModel(us->SM_DeviceID)) return(ERROR); // //Use Multi-function pin to differentiate SM and xD. // UserDefData_1 = ReadPCIReg(fdoExt->BusID, fdoExt->DevID, fdoExt->FuncID, PCI_REG_USER_DEF) & 0x80; // if (UserDefData_1) // { // if ( READ_PORT_BYTE(SM_REG_INT_STATUS) & 0x80 ) fdoExt->DiskType = DISKTYPE_XD; // if ( READ_PORT_BYTE(SM_REG_INT_STATUS) & 0x40 ) fdoExt->DiskType = DISKTYPE_SM; // // if ( IsXDCompliance && (fdoExt->DiskType == DISKTYPE_XD) ) // { // Ssfdc_D_ReadID(idcode, READ_ID_3); // if (idcode[2] != 0xB5) // return(ERROR); // } // } // // //Use GPIO to differentiate SM and xD. 
// UserDefData_2 = ReadPCIReg(fdoExt->BusID, fdoExt->DevID, fdoExt->FuncID, PCI_REG_USER_DEF) >> 8; // if ( UserDefData_2 ) // { // Data = ReadPCIReg(fdoExt->BusID, fdoExt->DevID, 0, 0xAC); // // mask = 1 << (UserDefData_2-1); // // 1 : xD , 0 : SM // if ( Data & mask) // fdoExt->DiskType = DISKTYPE_XD; // else // fdoExt->DiskType = DISKTYPE_SM; // // if ( IsXDCompliance && (fdoExt->DiskType == DISKTYPE_XD) ) // { // Ssfdc_D_ReadID(idcode, READ_ID_3); // if (idcode[2] != 0xB5) // return(ERROR); // } // } // // if ( !(UserDefData_1 | UserDefData_2) ) // { // // Use UserDefine Register to differentiate SM and xD. // Ssfdc_D_ReadID(idcode, READ_ID_3); // // if (idcode[2] == 0xB5) // fdoExt->DiskType = DISKTYPE_XD; // else // { // if (!IsXDCompliance) // fdoExt->DiskType = DISKTYPE_SM; // else // return(ERROR); // } // // if (fdoExt->UserDef_DiskType == 0x04) fdoExt->DiskType = DISKTYPE_XD; // if (fdoExt->UserDef_DiskType == 0x08) fdoExt->DiskType = DISKTYPE_SM; // } // // if (!fdoExt->UserDef_DisableWP) // { // if (fdoExt->DiskType == DISKTYPE_SM) // { // if (Check_D_SsfdcWP()) // Ssfdc.Attribute|=WP; // } // } return(SMSUCCESS); } //----- Search_D_CIS() ------------------------------------------------- int Search_D_CIS(struct us_data *us) { //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; Media.Zone=0; Media.Sector=0; for (Media.PhyBlock=0; Media.PhyBlock<(Ssfdc.MaxBlocks-Ssfdc.MaxLogBlocks-1); Media.PhyBlock++) { if (Ssfdc_D_ReadRedtData(us, Redundant)) { Ssfdc_D_Reset(us); return(ERROR); } if (!Check_D_FailBlock(Redundant)) break; } if (Media.PhyBlock==(Ssfdc.MaxBlocks-Ssfdc.MaxLogBlocks-1)) { Ssfdc_D_Reset(us); return(ERROR); } while (Media.Sector<CIS_SEARCH_SECT) { if (Media.Sector) { if (Ssfdc_D_ReadRedtData(us, Redundant)) { Ssfdc_D_Reset(us); return(ERROR); } } if (!Check_D_DataStatus(Redundant)) { if (Ssfdc_D_ReadSect(us,WorkBuf,Redundant)) { Ssfdc_D_Reset(us); return(ERROR); } if (Check_D_CISdata(WorkBuf,Redundant)) { 
Ssfdc_D_Reset(us); return(ERROR); } CisArea.PhyBlock=Media.PhyBlock; CisArea.Sector=Media.Sector; Ssfdc_D_Reset(us); return(SMSUCCESS); } Media.Sector++; } Ssfdc_D_Reset(us); return(ERROR); } //----- Make_D_LogTable() ---------------------------------------------- int Make_D_LogTable(struct us_data *us) { WORD phyblock,logblock; //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; if (Log2Phy[Media.Zone]==NULL) { Log2Phy[Media.Zone] = kmalloc(MAX_LOGBLOCK*sizeof(WORD), GFP_KERNEL); /* pr_info("ExAllocatePool Zone = %x, Addr = %x\n", Media.Zone, Log2Phy[Media.Zone]); */ if (Log2Phy[Media.Zone]==NULL) return(ERROR); } Media.Sector=0; //for(Media.Zone=0; Media.Zone<MAX_ZONENUM; Media.Zone++) //for(Media.Zone=0; Media.Zone<Ssfdc.MaxZones; Media.Zone++) { /* pr_info("Make_D_LogTable --- MediaZone = 0x%x\n", Media.Zone); */ for(Media.LogBlock=0; Media.LogBlock<Ssfdc.MaxLogBlocks; Media.LogBlock++) Log2Phy[Media.Zone][Media.LogBlock]=NO_ASSIGN; for(Media.PhyBlock=0; Media.PhyBlock<(MAX_BLOCKNUM/8); Media.PhyBlock++) Assign[Media.Zone][Media.PhyBlock]=0x00; for(Media.PhyBlock=0; Media.PhyBlock<Ssfdc.MaxBlocks; Media.PhyBlock++) { if ((!Media.Zone) && (Media.PhyBlock<=CisArea.PhyBlock)) { Set_D_Bit(Assign[Media.Zone],Media.PhyBlock); continue; } if (Ssfdc_D_ReadRedtData(us, Redundant)) { Ssfdc_D_Reset(us); return(ERROR); } if (!Check_D_DataBlank(Redundant)) continue; Set_D_Bit(Assign[Media.Zone],Media.PhyBlock); if (Check_D_FailBlock(Redundant)) continue; //if (Check_D_DataStatus(Redundant)) // continue; if (Load_D_LogBlockAddr(Redundant)) continue; if (Media.LogBlock>=Ssfdc.MaxLogBlocks) continue; if (Log2Phy[Media.Zone][Media.LogBlock]==NO_ASSIGN) { Log2Phy[Media.Zone][Media.LogBlock]=Media.PhyBlock; continue; } phyblock = Media.PhyBlock; logblock = Media.LogBlock; Media.Sector = (BYTE)(Ssfdc.MaxSectors-1); if (Ssfdc_D_ReadRedtData(us, Redundant)) { Ssfdc_D_Reset(us); return(ERROR); } if (!Load_D_LogBlockAddr(Redundant)) { if 
(Media.LogBlock==logblock) { Media.PhyBlock=Log2Phy[Media.Zone][logblock]; if (Ssfdc_D_ReadRedtData(us, Redundant)) { Ssfdc_D_Reset(us); return(ERROR); } Media.PhyBlock=phyblock; if (!Load_D_LogBlockAddr(Redundant)) { if (Media.LogBlock!=logblock) { Media.PhyBlock=Log2Phy[Media.Zone][logblock]; Log2Phy[Media.Zone][logblock]=phyblock; } } else { Media.PhyBlock=Log2Phy[Media.Zone][logblock]; Log2Phy[Media.Zone][logblock]=phyblock; } } } Media.Sector=0; // here Not yet //#ifdef L2P_ERR_ERASE // if (!(Ssfdc.Attribute &MWP)) // { // Ssfdc_D_Reset(fdoExt); // if (Ssfdc_D_EraseBlock(fdoExt)) // return(ERROR); // // if (Ssfdc_D_CheckStatus()) // { // if (MarkFail_D_PhyOneBlock()) // return(ERROR); // } // else // Clr_D_Bit(Assign[Media.Zone],Media.PhyBlock); // } //#else // Ssfdc.Attribute|=MWP; //#endif Media.PhyBlock=phyblock; } // End for (Media.PhyBlock<Ssfdc.MaxBlocks) AssignStart[Media.Zone]=0; } // End for (Media.Zone<MAX_ZONENUM) Ssfdc_D_Reset(us); return(SMSUCCESS); } //----- MarkFail_D_PhyOneBlock() --------------------------------------- int MarkFail_D_PhyOneBlock(struct us_data *us) { BYTE sect; //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc; //ADDRESS_T bb = (ADDRESS_T) &Media; sect=Media.Sector; Set_D_FailBlock(WorkRedund); //Ssfdc_D_WriteRedtMode(); for(Media.Sector=0; Media.Sector<Ssfdc.MaxSectors; Media.Sector++) { if (Ssfdc_D_WriteRedtData(us, WorkRedund)) { Ssfdc_D_Reset(us); Media.Sector = sect; ErrCode = ERR_HwError; MediaChange = ERROR; return(ERROR); } // NO Status Check } Ssfdc_D_Reset(us); Media.Sector=sect; return(SMSUCCESS); } /* // ////----- SM_Init() ---------------------------------------------------- //void SM_Init(void) //{ // _Hw_D_ClrIntCardChg(); // _Hw_D_SetIntMask(); // // For DMA Interrupt // _Hw_D_ClrDMAIntCardChg(); // _Hw_D_SetDMAIntMask(); //} // ////----- Media_D_EraseAllRedtData() ----------------------------------- //int Media_D_EraseAllRedtData(DWORD Index, BOOLEAN CheckBlock) //{ // BYTE i; // // if (Check_D_MediaPower()) // 
return(ErrCode); // // if (Check_D_MediaWP()) // return(ErrCode); // // for (i=0; i<REDTSIZE; i++) // WorkRedund[i] = 0xFF; // // Media.Zone = (BYTE)Index; // for (Media.PhyBlock=0; Media.PhyBlock<Ssfdc.MaxBlocks; Media.PhyBlock++) // { // if ((!Media.Zone) && (Media.PhyBlock<=CisArea.PhyBlock)) // continue; // // if (Ssfdc_D_EraseBlock(fdoExt)) // { // ErrCode = ERR_HwError; // return(ERROR); // } // // for(Media.Sector=0; Media.Sector<Ssfdc.MaxSectors; Media.Sector++) // { // Ssfdc_D_WriteRedtMode(); // // if (Ssfdc_D_WriteRedtData(WorkRedund)) // { // Ssfdc_D_Reset(fdoExt); // ErrCode = ERR_HwError; // MediaChange = ERROR; // return(ERROR); // } // NO Status Check // } // // Ssfdc_D_Reset(fdoExt); // } // // Ssfdc_D_Reset(fdoExt); // // return(SMSUCCESS); //} // ////----- Media_D_GetMediaInfo() --------------------------------------- //DWORD Media_D_GetMediaInfo(PFDO_DEVICE_EXTENSION fdoExt, PIOCTL_MEDIA_INFO_IN pParamIn, PIOCTL_MEDIA_INFO_OUT pParamOut) //{ // pParamOut->ErrCode = STATUS_CMD_FAIL; // // Init_D_SmartMedia(); // // if (Check_D_MediaPower()) // return (ErrCode==ERR_NoSmartMedia) ? STATUS_CMD_NO_MEDIA : STATUS_CMD_FAIL; // // if (Set_D_PhyFmtValue(fdoExt)) // return STATUS_CMD_FAIL; // // //usleep(56*1024); // if (Search_D_CIS(fdoExt)) // return STATUS_CMD_FAIL; // // if (Check_D_MediaWP()) // return STATUS_CMD_MEDIA_WP; // // pParamOut->PageSize = Ssfdc.MaxSectors; // pParamOut->BlockSize = Ssfdc.MaxBlocks; // pParamOut->ZoneSize = Ssfdc.MaxZones; // // return STATUS_CMD_SUCCESS; //}*/
gpl-2.0
gearslam/JB_LS970ZVC_Viper
arch/mips/loongson/common/machtype.c
7819
1756
/* * Copyright (C) 2009 Lemote Inc. * Author: Wu Zhangjin, wuzhangjin@gmail.com * * Copyright (c) 2009 Zhang Le <r0bertz@gentoo.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/errno.h> #include <asm/bootinfo.h> #include <loongson.h> #include <machine.h> /* please ensure the length of the machtype string is less than 50 */ #define MACHTYPE_LEN 50 static const char *system_types[] = { [MACH_LOONGSON_UNKNOWN] "unknown loongson machine", [MACH_LEMOTE_FL2E] "lemote-fuloong-2e-box", [MACH_LEMOTE_FL2F] "lemote-fuloong-2f-box", [MACH_LEMOTE_ML2F7] "lemote-mengloong-2f-7inches", [MACH_LEMOTE_YL2F89] "lemote-yeeloong-2f-8.9inches", [MACH_DEXXON_GDIUM2F10] "dexxon-gdium-2f", [MACH_LEMOTE_NAS] "lemote-nas-2f", [MACH_LEMOTE_LL2F] "lemote-lynloong-2f", [MACH_LOONGSON_END] NULL, }; const char *get_system_type(void) { return system_types[mips_machtype]; } void __weak __init mach_prom_init_machtype(void) { } void __init prom_init_machtype(void) { char *p, str[MACHTYPE_LEN + 1]; int machtype = MACH_LEMOTE_FL2E; mips_machtype = LOONGSON_MACHTYPE; p = strstr(arcs_cmdline, "machtype="); if (!p) { mach_prom_init_machtype(); return; } p += strlen("machtype="); strncpy(str, p, MACHTYPE_LEN); str[MACHTYPE_LEN] = '\0'; p = strstr(str, " "); if (p) *p = '\0'; for (; system_types[machtype]; machtype++) if (strstr(system_types[machtype], str)) { mips_machtype = machtype; break; } }
gpl-2.0
hellobbn/android_kernel_htc_msm8974
security/tomoyo/file.c
8587
29590
/*
 * security/tomoyo/file.c
 *
 * Copyright (C) 2005-2011 NTT DATA CORPORATION
 */

#include "common.h"
#include <linux/slab.h>

/*
 * Mapping table from "enum tomoyo_path_acl_index" to "enum tomoyo_mac_index".
 */
static const u8 tomoyo_p2mac[TOMOYO_MAX_PATH_OPERATION] = {
	[TOMOYO_TYPE_EXECUTE]  = TOMOYO_MAC_FILE_EXECUTE,
	[TOMOYO_TYPE_READ]     = TOMOYO_MAC_FILE_OPEN,
	[TOMOYO_TYPE_WRITE]    = TOMOYO_MAC_FILE_OPEN,
	[TOMOYO_TYPE_APPEND]   = TOMOYO_MAC_FILE_OPEN,
	[TOMOYO_TYPE_UNLINK]   = TOMOYO_MAC_FILE_UNLINK,
	[TOMOYO_TYPE_GETATTR]  = TOMOYO_MAC_FILE_GETATTR,
	[TOMOYO_TYPE_RMDIR]    = TOMOYO_MAC_FILE_RMDIR,
	[TOMOYO_TYPE_TRUNCATE] = TOMOYO_MAC_FILE_TRUNCATE,
	[TOMOYO_TYPE_SYMLINK]  = TOMOYO_MAC_FILE_SYMLINK,
	[TOMOYO_TYPE_CHROOT]   = TOMOYO_MAC_FILE_CHROOT,
	[TOMOYO_TYPE_UMOUNT]   = TOMOYO_MAC_FILE_UMOUNT,
};

/*
 * Mapping table from "enum tomoyo_mkdev_acl_index" to "enum tomoyo_mac_index".
 */
const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION] = {
	[TOMOYO_TYPE_MKBLOCK] = TOMOYO_MAC_FILE_MKBLOCK,
	[TOMOYO_TYPE_MKCHAR]  = TOMOYO_MAC_FILE_MKCHAR,
};

/*
 * Mapping table from "enum tomoyo_path2_acl_index" to "enum tomoyo_mac_index".
 */
const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION] = {
	[TOMOYO_TYPE_LINK]       = TOMOYO_MAC_FILE_LINK,
	[TOMOYO_TYPE_RENAME]     = TOMOYO_MAC_FILE_RENAME,
	[TOMOYO_TYPE_PIVOT_ROOT] = TOMOYO_MAC_FILE_PIVOT_ROOT,
};

/*
 * Mapping table from "enum tomoyo_path_number_acl_index" to
 * "enum tomoyo_mac_index".
 */
const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = {
	[TOMOYO_TYPE_CREATE] = TOMOYO_MAC_FILE_CREATE,
	[TOMOYO_TYPE_MKDIR]  = TOMOYO_MAC_FILE_MKDIR,
	[TOMOYO_TYPE_MKFIFO] = TOMOYO_MAC_FILE_MKFIFO,
	[TOMOYO_TYPE_MKSOCK] = TOMOYO_MAC_FILE_MKSOCK,
	[TOMOYO_TYPE_IOCTL]  = TOMOYO_MAC_FILE_IOCTL,
	[TOMOYO_TYPE_CHMOD]  = TOMOYO_MAC_FILE_CHMOD,
	[TOMOYO_TYPE_CHOWN]  = TOMOYO_MAC_FILE_CHOWN,
	[TOMOYO_TYPE_CHGRP]  = TOMOYO_MAC_FILE_CHGRP,
};

/**
 * tomoyo_put_name_union - Drop reference on "struct tomoyo_name_union".
 *
 * @ptr: Pointer to "struct tomoyo_name_union".
 *
 * Returns nothing.
 */
void tomoyo_put_name_union(struct tomoyo_name_union *ptr)
{
	tomoyo_put_group(ptr->group);
	tomoyo_put_name(ptr->filename);
}

/**
 * tomoyo_compare_name_union - Check whether a name matches "struct tomoyo_name_union" or not.
 *
 * @name: Pointer to "struct tomoyo_path_info".
 * @ptr:  Pointer to "struct tomoyo_name_union".
 *
 * Returns "struct tomoyo_path_info" if @name matches @ptr, NULL otherwise.
 */
const struct tomoyo_path_info *
tomoyo_compare_name_union(const struct tomoyo_path_info *name,
			  const struct tomoyo_name_union *ptr)
{
	/* A group entry delegates matching to the group's members. */
	if (ptr->group)
		return tomoyo_path_matches_group(name, ptr->group);
	return tomoyo_path_matches_pattern(name, ptr->filename) ?
		ptr->filename : NULL;
}

/**
 * tomoyo_put_number_union - Drop reference on "struct tomoyo_number_union".
 *
 * @ptr: Pointer to "struct tomoyo_number_union".
 *
 * Returns nothing.
 */
void tomoyo_put_number_union(struct tomoyo_number_union *ptr)
{
	tomoyo_put_group(ptr->group);
}

/**
 * tomoyo_compare_number_union - Check whether a value matches "struct tomoyo_number_union" or not.
 *
 * @value: Number to check.
 * @ptr:   Pointer to "struct tomoyo_number_union".
 *
 * Returns true if @value matches @ptr, false otherwise.
 */
bool tomoyo_compare_number_union(const unsigned long value,
				 const struct tomoyo_number_union *ptr)
{
	if (ptr->group)
		return tomoyo_number_matches_group(value, value, ptr->group);
	/* Otherwise @ptr holds an inclusive [min, max] range. */
	return value >= ptr->values[0] && value <= ptr->values[1];
}

/**
 * tomoyo_add_slash - Add trailing '/' if needed.
 *
 * @buf: Pointer to "struct tomoyo_path_info".
 *
 * Returns nothing.
 *
 * @buf must have been produced by tomoyo_encode(), which reserves one
 * spare byte, so appending the '/' needs no reallocation.
 */
static void tomoyo_add_slash(struct tomoyo_path_info *buf)
{
	if (buf->is_dir)
		return;
	strcat((char *) buf->name, "/");
	tomoyo_fill_path_info(buf);
}

/**
 * tomoyo_get_realpath - Get realpath.
 *
 * @buf:  Pointer to "struct tomoyo_path_info".
 * @path: Pointer to "struct path".
 *
 * Returns true on success, false otherwise.
 */
static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, struct path *path)
{
	buf->name = tomoyo_realpath_from_path(path);
	if (!buf->name)
		return false;
	tomoyo_fill_path_info(buf);
	return true;
}

/**
 * tomoyo_audit_path_log - Audit single-path request log.
 *
 * @r: Pointer to "struct tomoyo_request_info".
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_audit_path_log(struct tomoyo_request_info *r)
{
	return tomoyo_supervisor(r, "file %s %s\n",
				 tomoyo_path_keyword
				 [r->param.path.operation],
				 r->param.path.filename->name);
}

/**
 * tomoyo_audit_path2_log - Audit path/path request log.
 *
 * @r: Pointer to "struct tomoyo_request_info".
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_audit_path2_log(struct tomoyo_request_info *r)
{
	return tomoyo_supervisor(r, "file %s %s %s\n",
				 tomoyo_mac_keywords
				 [tomoyo_pp2mac[r->param.path2.operation]],
				 r->param.path2.filename1->name,
				 r->param.path2.filename2->name);
}

/**
 * tomoyo_audit_mkdev_log - Audit path/number/number/number request log.
 *
 * @r: Pointer to "struct tomoyo_request_info".
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_audit_mkdev_log(struct tomoyo_request_info *r)
{
	return tomoyo_supervisor(r, "file %s %s 0%o %u %u\n",
				 tomoyo_mac_keywords
				 [tomoyo_pnnn2mac[r->param.mkdev.operation]],
				 r->param.mkdev.filename->name,
				 r->param.mkdev.mode, r->param.mkdev.major,
				 r->param.mkdev.minor);
}

/**
 * tomoyo_audit_path_number_log - Audit path/number request log.
 *
 * @r: Pointer to "struct tomoyo_request_info".
 *
 * Returns 0 on success, negative value otherwise.
*/ static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r) { const u8 type = r->param.path_number.operation; u8 radix; char buffer[64]; switch (type) { case TOMOYO_TYPE_CREATE: case TOMOYO_TYPE_MKDIR: case TOMOYO_TYPE_MKFIFO: case TOMOYO_TYPE_MKSOCK: case TOMOYO_TYPE_CHMOD: radix = TOMOYO_VALUE_TYPE_OCTAL; break; case TOMOYO_TYPE_IOCTL: radix = TOMOYO_VALUE_TYPE_HEXADECIMAL; break; default: radix = TOMOYO_VALUE_TYPE_DECIMAL; break; } tomoyo_print_ulong(buffer, sizeof(buffer), r->param.path_number.number, radix); return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords [tomoyo_pn2mac[type]], r->param.path_number.filename->name, buffer); } /** * tomoyo_check_path_acl - Check permission for path operation. * * @r: Pointer to "struct tomoyo_request_info". * @ptr: Pointer to "struct tomoyo_acl_info". * * Returns true if granted, false otherwise. * * To be able to use wildcard for domain transition, this function sets * matching entry on success. Since the caller holds tomoyo_read_lock(), * it is safe to set matching entry. */ static bool tomoyo_check_path_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { const struct tomoyo_path_acl *acl = container_of(ptr, typeof(*acl), head); if (acl->perm & (1 << r->param.path.operation)) { r->param.path.matched_path = tomoyo_compare_name_union(r->param.path.filename, &acl->name); return r->param.path.matched_path != NULL; } return false; } /** * tomoyo_check_path_number_acl - Check permission for path number operation. * * @r: Pointer to "struct tomoyo_request_info". * @ptr: Pointer to "struct tomoyo_acl_info". * * Returns true if granted, false otherwise. 
*/ static bool tomoyo_check_path_number_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { const struct tomoyo_path_number_acl *acl = container_of(ptr, typeof(*acl), head); return (acl->perm & (1 << r->param.path_number.operation)) && tomoyo_compare_number_union(r->param.path_number.number, &acl->number) && tomoyo_compare_name_union(r->param.path_number.filename, &acl->name); } /** * tomoyo_check_path2_acl - Check permission for path path operation. * * @r: Pointer to "struct tomoyo_request_info". * @ptr: Pointer to "struct tomoyo_acl_info". * * Returns true if granted, false otherwise. */ static bool tomoyo_check_path2_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { const struct tomoyo_path2_acl *acl = container_of(ptr, typeof(*acl), head); return (acl->perm & (1 << r->param.path2.operation)) && tomoyo_compare_name_union(r->param.path2.filename1, &acl->name1) && tomoyo_compare_name_union(r->param.path2.filename2, &acl->name2); } /** * tomoyo_check_mkdev_acl - Check permission for path number number number operation. * * @r: Pointer to "struct tomoyo_request_info". * @ptr: Pointer to "struct tomoyo_acl_info". * * Returns true if granted, false otherwise. */ static bool tomoyo_check_mkdev_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { const struct tomoyo_mkdev_acl *acl = container_of(ptr, typeof(*acl), head); return (acl->perm & (1 << r->param.mkdev.operation)) && tomoyo_compare_number_union(r->param.mkdev.mode, &acl->mode) && tomoyo_compare_number_union(r->param.mkdev.major, &acl->major) && tomoyo_compare_number_union(r->param.mkdev.minor, &acl->minor) && tomoyo_compare_name_union(r->param.mkdev.filename, &acl->name); } /** * tomoyo_same_path_acl - Check for duplicated "struct tomoyo_path_acl" entry. * * @a: Pointer to "struct tomoyo_acl_info". * @b: Pointer to "struct tomoyo_acl_info". * * Returns true if @a == @b except permission bits, false otherwise. 
*/ static bool tomoyo_same_path_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head); const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head); return tomoyo_same_name_union(&p1->name, &p2->name); } /** * tomoyo_merge_path_acl - Merge duplicated "struct tomoyo_path_acl" entry. * * @a: Pointer to "struct tomoyo_acl_info". * @b: Pointer to "struct tomoyo_acl_info". * @is_delete: True for @a &= ~@b, false for @a |= @b. * * Returns true if @a is empty, false otherwise. */ static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a, struct tomoyo_acl_info *b, const bool is_delete) { u16 * const a_perm = &container_of(a, struct tomoyo_path_acl, head) ->perm; u16 perm = *a_perm; const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm; if (is_delete) perm &= ~b_perm; else perm |= b_perm; *a_perm = perm; return !perm; } /** * tomoyo_update_path_acl - Update "struct tomoyo_path_acl" list. * * @perm: Permission. * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ static int tomoyo_update_path_acl(const u16 perm, struct tomoyo_acl_param *param) { struct tomoyo_path_acl e = { .head.type = TOMOYO_TYPE_PATH_ACL, .perm = perm }; int error; if (!tomoyo_parse_name_union(param, &e.name)) error = -EINVAL; else error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_same_path_acl, tomoyo_merge_path_acl); tomoyo_put_name_union(&e.name); return error; } /** * tomoyo_same_mkdev_acl - Check for duplicated "struct tomoyo_mkdev_acl" entry. * * @a: Pointer to "struct tomoyo_acl_info". * @b: Pointer to "struct tomoyo_acl_info". * * Returns true if @a == @b except permission bits, false otherwise. 
*/ static bool tomoyo_same_mkdev_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), head); const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), head); return tomoyo_same_name_union(&p1->name, &p2->name) && tomoyo_same_number_union(&p1->mode, &p2->mode) && tomoyo_same_number_union(&p1->major, &p2->major) && tomoyo_same_number_union(&p1->minor, &p2->minor); } /** * tomoyo_merge_mkdev_acl - Merge duplicated "struct tomoyo_mkdev_acl" entry. * * @a: Pointer to "struct tomoyo_acl_info". * @b: Pointer to "struct tomoyo_acl_info". * @is_delete: True for @a &= ~@b, false for @a |= @b. * * Returns true if @a is empty, false otherwise. */ static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a, struct tomoyo_acl_info *b, const bool is_delete) { u8 *const a_perm = &container_of(a, struct tomoyo_mkdev_acl, head)->perm; u8 perm = *a_perm; const u8 b_perm = container_of(b, struct tomoyo_mkdev_acl, head) ->perm; if (is_delete) perm &= ~b_perm; else perm |= b_perm; *a_perm = perm; return !perm; } /** * tomoyo_update_mkdev_acl - Update "struct tomoyo_mkdev_acl" list. * * @perm: Permission. * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). 
*/ static int tomoyo_update_mkdev_acl(const u8 perm, struct tomoyo_acl_param *param) { struct tomoyo_mkdev_acl e = { .head.type = TOMOYO_TYPE_MKDEV_ACL, .perm = perm }; int error; if (!tomoyo_parse_name_union(param, &e.name) || !tomoyo_parse_number_union(param, &e.mode) || !tomoyo_parse_number_union(param, &e.major) || !tomoyo_parse_number_union(param, &e.minor)) error = -EINVAL; else error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_same_mkdev_acl, tomoyo_merge_mkdev_acl); tomoyo_put_name_union(&e.name); tomoyo_put_number_union(&e.mode); tomoyo_put_number_union(&e.major); tomoyo_put_number_union(&e.minor); return error; } /** * tomoyo_same_path2_acl - Check for duplicated "struct tomoyo_path2_acl" entry. * * @a: Pointer to "struct tomoyo_acl_info". * @b: Pointer to "struct tomoyo_acl_info". * * Returns true if @a == @b except permission bits, false otherwise. */ static bool tomoyo_same_path2_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { const struct tomoyo_path2_acl *p1 = container_of(a, typeof(*p1), head); const struct tomoyo_path2_acl *p2 = container_of(b, typeof(*p2), head); return tomoyo_same_name_union(&p1->name1, &p2->name1) && tomoyo_same_name_union(&p1->name2, &p2->name2); } /** * tomoyo_merge_path2_acl - Merge duplicated "struct tomoyo_path2_acl" entry. * * @a: Pointer to "struct tomoyo_acl_info". * @b: Pointer to "struct tomoyo_acl_info". * @is_delete: True for @a &= ~@b, false for @a |= @b. * * Returns true if @a is empty, false otherwise. */ static bool tomoyo_merge_path2_acl(struct tomoyo_acl_info *a, struct tomoyo_acl_info *b, const bool is_delete) { u8 * const a_perm = &container_of(a, struct tomoyo_path2_acl, head) ->perm; u8 perm = *a_perm; const u8 b_perm = container_of(b, struct tomoyo_path2_acl, head)->perm; if (is_delete) perm &= ~b_perm; else perm |= b_perm; *a_perm = perm; return !perm; } /** * tomoyo_update_path2_acl - Update "struct tomoyo_path2_acl" list. * * @perm: Permission. 
* @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ static int tomoyo_update_path2_acl(const u8 perm, struct tomoyo_acl_param *param) { struct tomoyo_path2_acl e = { .head.type = TOMOYO_TYPE_PATH2_ACL, .perm = perm }; int error; if (!tomoyo_parse_name_union(param, &e.name1) || !tomoyo_parse_name_union(param, &e.name2)) error = -EINVAL; else error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_same_path2_acl, tomoyo_merge_path2_acl); tomoyo_put_name_union(&e.name1); tomoyo_put_name_union(&e.name2); return error; } /** * tomoyo_path_permission - Check permission for single path operation. * * @r: Pointer to "struct tomoyo_request_info". * @operation: Type of operation. * @filename: Filename to check. * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ static int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, const struct tomoyo_path_info *filename) { int error; r->type = tomoyo_p2mac[operation]; r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type); if (r->mode == TOMOYO_CONFIG_DISABLED) return 0; r->param_type = TOMOYO_TYPE_PATH_ACL; r->param.path.filename = filename; r->param.path.operation = operation; do { tomoyo_check_acl(r, tomoyo_check_path_acl); error = tomoyo_audit_path_log(r); } while (error == TOMOYO_RETRY_REQUEST); return error; } /** * tomoyo_execute_permission - Check permission for execute operation. * * @r: Pointer to "struct tomoyo_request_info". * @filename: Filename to check. * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ int tomoyo_execute_permission(struct tomoyo_request_info *r, const struct tomoyo_path_info *filename) { /* * Unlike other permission checks, this check is done regardless of * profile mode settings in order to check for domain transition * preference. 
*/ r->type = TOMOYO_MAC_FILE_EXECUTE; r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type); r->param_type = TOMOYO_TYPE_PATH_ACL; r->param.path.filename = filename; r->param.path.operation = TOMOYO_TYPE_EXECUTE; tomoyo_check_acl(r, tomoyo_check_path_acl); r->ee->transition = r->matched_acl && r->matched_acl->cond ? r->matched_acl->cond->transit : NULL; if (r->mode != TOMOYO_CONFIG_DISABLED) return tomoyo_audit_path_log(r); return 0; } /** * tomoyo_same_path_number_acl - Check for duplicated "struct tomoyo_path_number_acl" entry. * * @a: Pointer to "struct tomoyo_acl_info". * @b: Pointer to "struct tomoyo_acl_info". * * Returns true if @a == @b except permission bits, false otherwise. */ static bool tomoyo_same_path_number_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { const struct tomoyo_path_number_acl *p1 = container_of(a, typeof(*p1), head); const struct tomoyo_path_number_acl *p2 = container_of(b, typeof(*p2), head); return tomoyo_same_name_union(&p1->name, &p2->name) && tomoyo_same_number_union(&p1->number, &p2->number); } /** * tomoyo_merge_path_number_acl - Merge duplicated "struct tomoyo_path_number_acl" entry. * * @a: Pointer to "struct tomoyo_acl_info". * @b: Pointer to "struct tomoyo_acl_info". * @is_delete: True for @a &= ~@b, false for @a |= @b. * * Returns true if @a is empty, false otherwise. */ static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a, struct tomoyo_acl_info *b, const bool is_delete) { u8 * const a_perm = &container_of(a, struct tomoyo_path_number_acl, head)->perm; u8 perm = *a_perm; const u8 b_perm = container_of(b, struct tomoyo_path_number_acl, head) ->perm; if (is_delete) perm &= ~b_perm; else perm |= b_perm; *a_perm = perm; return !perm; } /** * tomoyo_update_path_number_acl - Update ioctl/chmod/chown/chgrp ACL. * * @perm: Permission. * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. 
*/ static int tomoyo_update_path_number_acl(const u8 perm, struct tomoyo_acl_param *param) { struct tomoyo_path_number_acl e = { .head.type = TOMOYO_TYPE_PATH_NUMBER_ACL, .perm = perm }; int error; if (!tomoyo_parse_name_union(param, &e.name) || !tomoyo_parse_number_union(param, &e.number)) error = -EINVAL; else error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_same_path_number_acl, tomoyo_merge_path_number_acl); tomoyo_put_name_union(&e.name); tomoyo_put_number_union(&e.number); return error; } /** * tomoyo_path_number_perm - Check permission for "create", "mkdir", "mkfifo", "mksock", "ioctl", "chmod", "chown", "chgrp". * * @type: Type of operation. * @path: Pointer to "struct path". * @number: Number. * * Returns 0 on success, negative value otherwise. */ int tomoyo_path_number_perm(const u8 type, struct path *path, unsigned long number) { struct tomoyo_request_info r; struct tomoyo_obj_info obj = { .path1 = *path, }; int error = -ENOMEM; struct tomoyo_path_info buf; int idx; if (tomoyo_init_request_info(&r, NULL, tomoyo_pn2mac[type]) == TOMOYO_CONFIG_DISABLED || !path->dentry) return 0; idx = tomoyo_read_lock(); if (!tomoyo_get_realpath(&buf, path)) goto out; r.obj = &obj; if (type == TOMOYO_TYPE_MKDIR) tomoyo_add_slash(&buf); r.param_type = TOMOYO_TYPE_PATH_NUMBER_ACL; r.param.path_number.operation = type; r.param.path_number.filename = &buf; r.param.path_number.number = number; do { tomoyo_check_acl(&r, tomoyo_check_path_number_acl); error = tomoyo_audit_path_number_log(&r); } while (error == TOMOYO_RETRY_REQUEST); kfree(buf.name); out: tomoyo_read_unlock(idx); if (r.mode != TOMOYO_CONFIG_ENFORCING) error = 0; return error; } /** * tomoyo_check_open_permission - Check permission for "read" and "write". * * @domain: Pointer to "struct tomoyo_domain_info". * @path: Pointer to "struct path". * @flag: Flags for open(). * * Returns 0 on success, negative value otherwise. 
*/ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, struct path *path, const int flag) { const u8 acc_mode = ACC_MODE(flag); int error = 0; struct tomoyo_path_info buf; struct tomoyo_request_info r; struct tomoyo_obj_info obj = { .path1 = *path, }; int idx; buf.name = NULL; r.mode = TOMOYO_CONFIG_DISABLED; idx = tomoyo_read_lock(); if (acc_mode && tomoyo_init_request_info(&r, domain, TOMOYO_MAC_FILE_OPEN) != TOMOYO_CONFIG_DISABLED) { if (!tomoyo_get_realpath(&buf, path)) { error = -ENOMEM; goto out; } r.obj = &obj; if (acc_mode & MAY_READ) error = tomoyo_path_permission(&r, TOMOYO_TYPE_READ, &buf); if (!error && (acc_mode & MAY_WRITE)) error = tomoyo_path_permission(&r, (flag & O_APPEND) ? TOMOYO_TYPE_APPEND : TOMOYO_TYPE_WRITE, &buf); } out: kfree(buf.name); tomoyo_read_unlock(idx); if (r.mode != TOMOYO_CONFIG_ENFORCING) error = 0; return error; } /** * tomoyo_path_perm - Check permission for "unlink", "rmdir", "truncate", "symlink", "append", "chroot" and "unmount". * * @operation: Type of operation. * @path: Pointer to "struct path". * @target: Symlink's target if @operation is TOMOYO_TYPE_SYMLINK, * NULL otherwise. * * Returns 0 on success, negative value otherwise. 
*/ int tomoyo_path_perm(const u8 operation, struct path *path, const char *target) { struct tomoyo_request_info r; struct tomoyo_obj_info obj = { .path1 = *path, }; int error; struct tomoyo_path_info buf; bool is_enforce; struct tomoyo_path_info symlink_target; int idx; if (tomoyo_init_request_info(&r, NULL, tomoyo_p2mac[operation]) == TOMOYO_CONFIG_DISABLED) return 0; is_enforce = (r.mode == TOMOYO_CONFIG_ENFORCING); error = -ENOMEM; buf.name = NULL; idx = tomoyo_read_lock(); if (!tomoyo_get_realpath(&buf, path)) goto out; r.obj = &obj; switch (operation) { case TOMOYO_TYPE_RMDIR: case TOMOYO_TYPE_CHROOT: tomoyo_add_slash(&buf); break; case TOMOYO_TYPE_SYMLINK: symlink_target.name = tomoyo_encode(target); if (!symlink_target.name) goto out; tomoyo_fill_path_info(&symlink_target); obj.symlink_target = &symlink_target; break; } error = tomoyo_path_permission(&r, operation, &buf); if (operation == TOMOYO_TYPE_SYMLINK) kfree(symlink_target.name); out: kfree(buf.name); tomoyo_read_unlock(idx); if (!is_enforce) error = 0; return error; } /** * tomoyo_mkdev_perm - Check permission for "mkblock" and "mkchar". * * @operation: Type of operation. (TOMOYO_TYPE_MKCHAR or TOMOYO_TYPE_MKBLOCK) * @path: Pointer to "struct path". * @mode: Create mode. * @dev: Device number. * * Returns 0 on success, negative value otherwise. 
*/ int tomoyo_mkdev_perm(const u8 operation, struct path *path, const unsigned int mode, unsigned int dev) { struct tomoyo_request_info r; struct tomoyo_obj_info obj = { .path1 = *path, }; int error = -ENOMEM; struct tomoyo_path_info buf; int idx; if (tomoyo_init_request_info(&r, NULL, tomoyo_pnnn2mac[operation]) == TOMOYO_CONFIG_DISABLED) return 0; idx = tomoyo_read_lock(); error = -ENOMEM; if (tomoyo_get_realpath(&buf, path)) { r.obj = &obj; dev = new_decode_dev(dev); r.param_type = TOMOYO_TYPE_MKDEV_ACL; r.param.mkdev.filename = &buf; r.param.mkdev.operation = operation; r.param.mkdev.mode = mode; r.param.mkdev.major = MAJOR(dev); r.param.mkdev.minor = MINOR(dev); tomoyo_check_acl(&r, tomoyo_check_mkdev_acl); error = tomoyo_audit_mkdev_log(&r); kfree(buf.name); } tomoyo_read_unlock(idx); if (r.mode != TOMOYO_CONFIG_ENFORCING) error = 0; return error; } /** * tomoyo_path2_perm - Check permission for "rename", "link" and "pivot_root". * * @operation: Type of operation. * @path1: Pointer to "struct path". * @path2: Pointer to "struct path". * * Returns 0 on success, negative value otherwise. 
 */
int tomoyo_path2_perm(const u8 operation, struct path *path1,
		      struct path *path2)
{
	int error = -ENOMEM;
	struct tomoyo_path_info buf1;
	struct tomoyo_path_info buf2;
	struct tomoyo_request_info r;
	struct tomoyo_obj_info obj = {
		.path1 = *path1,
		.path2 = *path2,
	};
	int idx;

	/* Nothing to check when this operation's MAC category is disabled. */
	if (tomoyo_init_request_info(&r, NULL, tomoyo_pp2mac[operation])
	    == TOMOYO_CONFIG_DISABLED)
		return 0;
	buf1.name = NULL;
	buf2.name = NULL;
	idx = tomoyo_read_lock();
	if (!tomoyo_get_realpath(&buf1, path1) ||
	    !tomoyo_get_realpath(&buf2, path2))
		goto out;
	switch (operation) {
		struct dentry *dentry;
	case TOMOYO_TYPE_RENAME:
	case TOMOYO_TYPE_LINK:
		/*
		 * Only append the trailing '/' when the source of a
		 * rename/link is a directory; pivot_root always operates
		 * on directories.
		 */
		dentry = path1->dentry;
		if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode))
			break;
		/* fall through */
	case TOMOYO_TYPE_PIVOT_ROOT:
		tomoyo_add_slash(&buf1);
		tomoyo_add_slash(&buf2);
		break;
	}
	r.obj = &obj;
	r.param_type = TOMOYO_TYPE_PATH2_ACL;
	r.param.path2.operation = operation;
	r.param.path2.filename1 = &buf1;
	r.param.path2.filename2 = &buf2;
	/* Re-evaluate while the supervisor asks us to retry the request. */
	do {
		tomoyo_check_acl(&r, tomoyo_check_path2_acl);
		error = tomoyo_audit_path2_log(&r);
	} while (error == TOMOYO_RETRY_REQUEST);
 out:
	kfree(buf1.name);
	kfree(buf2.name);
	tomoyo_read_unlock(idx);
	/* Outside enforcing mode a denial is logged but not fatal. */
	if (r.mode != TOMOYO_CONFIG_ENFORCING)
		error = 0;
	return error;
}

/**
 * tomoyo_same_mount_acl - Check for duplicated "struct tomoyo_mount_acl" entry.
 *
 * @a: Pointer to "struct tomoyo_acl_info".
 * @b: Pointer to "struct tomoyo_acl_info".
 *
 * Returns true if @a == @b, false otherwise.
 */
static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a,
				  const struct tomoyo_acl_info *b)
{
	const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head);
	const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head);

	/* Two entries are duplicates when all four unions compare equal. */
	return tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) &&
		tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) &&
		tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) &&
		tomoyo_same_number_union(&p1->flags, &p2->flags);
}

/**
 * tomoyo_update_mount_acl - Write "struct tomoyo_mount_acl" list.
 *
 * @param: Pointer to "struct tomoyo_acl_param".
 *
 * Returns 0 on success, negative value otherwise.
 *
 * Caller holds tomoyo_read_lock().
 */
static int tomoyo_update_mount_acl(struct tomoyo_acl_param *param)
{
	struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL };
	int error;

	/* Parse "dev dir fstype flags"; any parse failure rejects the line. */
	if (!tomoyo_parse_name_union(param, &e.dev_name) ||
	    !tomoyo_parse_name_union(param, &e.dir_name) ||
	    !tomoyo_parse_name_union(param, &e.fs_type) ||
	    !tomoyo_parse_number_union(param, &e.flags))
		error = -EINVAL;
	else
		error = tomoyo_update_domain(&e.head, sizeof(e), param,
					     tomoyo_same_mount_acl, NULL);
	/* Drop the references taken by the parse helpers on every path. */
	tomoyo_put_name_union(&e.dev_name);
	tomoyo_put_name_union(&e.dir_name);
	tomoyo_put_name_union(&e.fs_type);
	tomoyo_put_number_union(&e.flags);
	return error;
}

/**
 * tomoyo_write_file - Update file related list.
 *
 * @param: Pointer to "struct tomoyo_acl_param".
 *
 * Returns 0 on success, negative value otherwise.
 *
 * Caller holds tomoyo_read_lock().
 */
int tomoyo_write_file(struct tomoyo_acl_param *param)
{
	u16 perm = 0;
	u8 type;
	const char *operation = tomoyo_read_token(param);

	/*
	 * Try each ACL family in turn.  @perm accumulates one bit per
	 * matching keyword; the first family with any match consumes the
	 * line.  A family is only tried when all previous ones matched
	 * nothing, so @perm is always 0 at the start of each loop.
	 */
	for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++)
		if (tomoyo_permstr(operation, tomoyo_path_keyword[type]))
			perm |= 1 << type;
	if (perm)
		return tomoyo_update_path_acl(perm, param);
	for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++)
		if (tomoyo_permstr(operation,
				   tomoyo_mac_keywords[tomoyo_pp2mac[type]]))
			perm |= 1 << type;
	if (perm)
		return tomoyo_update_path2_acl(perm, param);
	for (type = 0; type < TOMOYO_MAX_PATH_NUMBER_OPERATION; type++)
		if (tomoyo_permstr(operation,
				   tomoyo_mac_keywords[tomoyo_pn2mac[type]]))
			perm |= 1 << type;
	if (perm)
		return tomoyo_update_path_number_acl(perm, param);
	for (type = 0; type < TOMOYO_MAX_MKDEV_OPERATION; type++)
		if (tomoyo_permstr(operation,
				   tomoyo_mac_keywords[tomoyo_pnnn2mac[type]]))
			perm |= 1 << type;
	if (perm)
		return tomoyo_update_mkdev_acl(perm, param);
	if (tomoyo_permstr(operation,
			   tomoyo_mac_keywords[TOMOYO_MAC_FILE_MOUNT]))
		return tomoyo_update_mount_acl(param);
	return -EINVAL;
}
gpl-2.0
jfdsmabalot/kernel_moto-g
arch/microblaze/kernel/asm-offsets.c
13451
5153
/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/thread_info.h>
#include <linux/kbuild.h>
#include <asm/cpuinfo.h>

/*
 * Emit assembler-visible constants (struct sizes and member offsets) using
 * the DEFINE()/BLANK() helpers from <linux/kbuild.h>, so that assembly
 * sources can reference C structure layouts symbolically.
 */
int main(int argc, char *argv[])
{
	/* struct pt_regs */
	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	DEFINE(PT_MSR, offsetof(struct pt_regs, msr));
	DEFINE(PT_EAR, offsetof(struct pt_regs, ear));
	DEFINE(PT_ESR, offsetof(struct pt_regs, esr));
	DEFINE(PT_FSR, offsetof(struct pt_regs, fsr));
	DEFINE(PT_PC, offsetof(struct pt_regs, pc));
	DEFINE(PT_R0, offsetof(struct pt_regs, r0));
	DEFINE(PT_R1, offsetof(struct pt_regs, r1));
	DEFINE(PT_R2, offsetof(struct pt_regs, r2));
	DEFINE(PT_R3, offsetof(struct pt_regs, r3));
	DEFINE(PT_R4, offsetof(struct pt_regs, r4));
	DEFINE(PT_R5, offsetof(struct pt_regs, r5));
	DEFINE(PT_R6, offsetof(struct pt_regs, r6));
	DEFINE(PT_R7, offsetof(struct pt_regs, r7));
	DEFINE(PT_R8, offsetof(struct pt_regs, r8));
	DEFINE(PT_R9, offsetof(struct pt_regs, r9));
	DEFINE(PT_R10, offsetof(struct pt_regs, r10));
	DEFINE(PT_R11, offsetof(struct pt_regs, r11));
	DEFINE(PT_R12, offsetof(struct pt_regs, r12));
	DEFINE(PT_R13, offsetof(struct pt_regs, r13));
	DEFINE(PT_R14, offsetof(struct pt_regs, r14));
	DEFINE(PT_R15, offsetof(struct pt_regs, r15));
	DEFINE(PT_R16, offsetof(struct pt_regs, r16));
	DEFINE(PT_R17, offsetof(struct pt_regs, r17));
	DEFINE(PT_R18, offsetof(struct pt_regs, r18));
	DEFINE(PT_R19, offsetof(struct pt_regs, r19));
	DEFINE(PT_R20, offsetof(struct pt_regs, r20));
	DEFINE(PT_R21, offsetof(struct pt_regs, r21));
	DEFINE(PT_R22, offsetof(struct pt_regs, r22));
	DEFINE(PT_R23, offsetof(struct pt_regs, r23));
	DEFINE(PT_R24, offsetof(struct pt_regs, r24));
	DEFINE(PT_R25, offsetof(struct pt_regs, r25));
	DEFINE(PT_R26, offsetof(struct pt_regs, r26));
	DEFINE(PT_R27, offsetof(struct pt_regs, r27));
	DEFINE(PT_R28, offsetof(struct pt_regs, r28));
	DEFINE(PT_R29, offsetof(struct pt_regs, r29));
	DEFINE(PT_R30, offsetof(struct pt_regs, r30));
	DEFINE(PT_R31, offsetof(struct pt_regs, r31));
	DEFINE(PT_MODE, offsetof(struct pt_regs, pt_mode));
	BLANK();

	/* Magic offsets for PTRACE PEEK/POKE etc */
	DEFINE(PT_TEXT_ADDR, sizeof(struct pt_regs) + 1);
	DEFINE(PT_TEXT_LEN, sizeof(struct pt_regs) + 2);
	DEFINE(PT_DATA_ADDR, sizeof(struct pt_regs) + 3);
	BLANK();

	/* struct task_struct */
	DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
#ifdef CONFIG_MMU
	/* MMU builds additionally need task/thread bookkeeping offsets. */
	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
	DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
	DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
	DEFINE(TASK_PID, offsetof(struct task_struct, pid));
	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
	BLANK();

	DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
	BLANK();
#endif

	/* struct thread_info */
	DEFINE(TI_TASK, offsetof(struct thread_info, task));
	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
	DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
	DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
	BLANK();

	/* struct cpu_context */
	DEFINE(CC_R1, offsetof(struct cpu_context, r1)); /* r1 */
	DEFINE(CC_R2, offsetof(struct cpu_context, r2));
	/* dedicated registers */
	DEFINE(CC_R13, offsetof(struct cpu_context, r13));
	DEFINE(CC_R14, offsetof(struct cpu_context, r14));
	DEFINE(CC_R15, offsetof(struct cpu_context, r15));
	DEFINE(CC_R16, offsetof(struct cpu_context, r16));
	DEFINE(CC_R17, offsetof(struct cpu_context, r17));
	DEFINE(CC_R18, offsetof(struct cpu_context, r18));
	/* non-volatile registers */
	DEFINE(CC_R19, offsetof(struct cpu_context, r19));
	DEFINE(CC_R20, offsetof(struct cpu_context, r20));
	DEFINE(CC_R21, offsetof(struct cpu_context, r21));
	DEFINE(CC_R22, offsetof(struct cpu_context, r22));
	DEFINE(CC_R23, offsetof(struct cpu_context, r23));
	DEFINE(CC_R24, offsetof(struct cpu_context, r24));
	DEFINE(CC_R25, offsetof(struct cpu_context, r25));
	DEFINE(CC_R26, offsetof(struct cpu_context, r26));
	DEFINE(CC_R27, offsetof(struct cpu_context, r27));
	DEFINE(CC_R28, offsetof(struct cpu_context, r28));
	DEFINE(CC_R29, offsetof(struct cpu_context, r29));
	DEFINE(CC_R30, offsetof(struct cpu_context, r30));
	/* special purpose registers */
	DEFINE(CC_MSR, offsetof(struct cpu_context, msr));
	DEFINE(CC_EAR, offsetof(struct cpu_context, ear));
	DEFINE(CC_ESR, offsetof(struct cpu_context, esr));
	DEFINE(CC_FSR, offsetof(struct cpu_context, fsr));
	BLANK();

	return 0;
}
gpl-2.0
ps06756/linux-3.17.2
drivers/net/ethernet/ethoc.c
140
32756
/*
 * linux/drivers/net/ethernet/ethoc.c
 *
 * Copyright (C) 2007-2008 Avionic Design Development GmbH
 * Copyright (C) 2008-2009 Avionic Design GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by Thierry Reding <thierry.reding@avionic-design.de>
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/module.h>
#include <net/ethoc.h>

/* Size of the DMA buffer region; overridable at module load time. */
static int buffer_size = 0x8000; /* 32 KBytes */
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");

/* register offsets */
#define MODER		0x00
#define INT_SOURCE	0x04
#define INT_MASK	0x08
#define IPGT		0x0c
#define IPGR1		0x10
#define IPGR2		0x14
#define PACKETLEN	0x18
#define COLLCONF	0x1c
#define TX_BD_NUM	0x20
#define CTRLMODER	0x24
#define MIIMODER	0x28
#define MIICOMMAND	0x2c
#define MIIADDRESS	0x30
#define MIITX_DATA	0x34
#define MIIRX_DATA	0x38
#define MIISTATUS	0x3c
#define MAC_ADDR0	0x40
#define MAC_ADDR1	0x44
#define ETH_HASH0	0x48
#define ETH_HASH1	0x4c
#define ETH_TXCTRL	0x50
#define ETH_END		0x54

/* mode register */
#define MODER_RXEN	(1 << 0)	/* receive enable */
#define MODER_TXEN	(1 << 1)	/* transmit enable */
#define MODER_NOPRE	(1 << 2)	/* no preamble */
#define MODER_BRO	(1 << 3)	/* broadcast address */
#define MODER_IAM	(1 << 4)	/* individual address mode */
#define MODER_PRO	(1 << 5)	/* promiscuous mode */
#define MODER_IFG	(1 << 6)	/* interframe gap for incoming frames */
#define MODER_LOOP	(1 << 7)	/* loopback */
#define MODER_NBO	(1 << 8)	/* no back-off */
#define MODER_EDE	(1 << 9)	/* excess defer enable */
#define MODER_FULLD	(1 << 10)	/* full duplex */
#define MODER_RESET	(1 << 11)	/* FIXME: reset (undocumented) */
#define MODER_DCRC	(1 << 12)	/* delayed CRC enable */
#define MODER_CRC	(1 << 13)	/* CRC enable */
#define MODER_HUGE	(1 << 14)	/* huge packets enable */
#define MODER_PAD	(1 << 15)	/* padding enabled */
#define MODER_RSM	(1 << 16)	/* receive small packets */

/* interrupt source and mask registers */
#define INT_MASK_TXF	(1 << 0)	/* transmit frame */
#define INT_MASK_TXE	(1 << 1)	/* transmit error */
#define INT_MASK_RXF	(1 << 2)	/* receive frame */
#define INT_MASK_RXE	(1 << 3)	/* receive error */
#define INT_MASK_BUSY	(1 << 4)
#define INT_MASK_TXC	(1 << 5)	/* transmit control frame */
#define INT_MASK_RXC	(1 << 6)	/* receive control frame */

#define INT_MASK_TX	(INT_MASK_TXF | INT_MASK_TXE)
#define INT_MASK_RX	(INT_MASK_RXF | INT_MASK_RXE)

#define INT_MASK_ALL ( \
		INT_MASK_TXF | INT_MASK_TXE | \
		INT_MASK_RXF | INT_MASK_RXE | \
		INT_MASK_TXC | INT_MASK_RXC | \
		INT_MASK_BUSY \
	)

/* packet length register */
#define PACKETLEN_MIN(min)		(((min) & 0xffff) << 16)
#define PACKETLEN_MAX(max)		(((max) & 0xffff) << 0)
#define PACKETLEN_MIN_MAX(min, max)	(PACKETLEN_MIN(min) | \
					PACKETLEN_MAX(max))

/* transmit buffer number register; the core supports at most 0x80 TX BDs */
#define TX_BD_NUM_VAL(x)	(((x) <= 0x80) ? (x) : 0x80)

/* control module mode register */
#define CTRLMODER_PASSALL	(1 << 0)	/* pass all receive frames */
#define CTRLMODER_RXFLOW	(1 << 1)	/* receive control flow */
#define CTRLMODER_TXFLOW	(1 << 2)	/* transmit control flow */

/* MII mode register */
#define MIIMODER_CLKDIV(x)	((x) & 0xfe)	/* needs to be an even number */
#define MIIMODER_NOPRE		(1 << 8)	/* no preamble */

/* MII command register */
#define MIICOMMAND_SCAN		(1 << 0)	/* scan status */
#define MIICOMMAND_READ		(1 << 1)	/* read status */
#define MIICOMMAND_WRITE	(1 << 2)	/* write control data */

/* MII address register */
#define MIIADDRESS_FIAD(x)		(((x) & 0x1f) << 0)
#define MIIADDRESS_RGAD(x)		(((x) & 0x1f) << 8)
#define MIIADDRESS_ADDR(phy, reg)	(MIIADDRESS_FIAD(phy) | \
					MIIADDRESS_RGAD(reg))

/* MII transmit data register */
#define MIITX_DATA_VAL(x)	((x) & 0xffff)

/* MII receive data register */
#define MIIRX_DATA_VAL(x)	((x) & 0xffff)

/* MII status register */
#define MIISTATUS_LINKFAIL	(1 << 0)
#define MIISTATUS_BUSY		(1 << 1)
#define MIISTATUS_INVALID	(1 << 2)

/* TX buffer descriptor */
#define TX_BD_CS		(1 << 0)	/* carrier sense lost */
#define TX_BD_DF		(1 << 1)	/* defer indication */
#define TX_BD_LC		(1 << 2)	/* late collision */
#define TX_BD_RL		(1 << 3)	/* retransmission limit */
#define TX_BD_RETRY_MASK	(0x00f0)
#define TX_BD_RETRY(x)		(((x) & 0x00f0) >> 4)
#define TX_BD_UR		(1 << 8)	/* transmitter underrun */
#define TX_BD_CRC		(1 << 11)	/* TX CRC enable */
#define TX_BD_PAD		(1 << 12)	/* pad enable for short packets */
#define TX_BD_WRAP		(1 << 13)
#define TX_BD_IRQ		(1 << 14)	/* interrupt request enable */
#define TX_BD_READY		(1 << 15)	/* TX buffer ready */
#define TX_BD_LEN(x)		(((x) & 0xffff) << 16)
#define TX_BD_LEN_MASK		(0xffff << 16)

#define TX_BD_STATS		(TX_BD_CS | TX_BD_DF | TX_BD_LC | \
				TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)

/* RX buffer descriptor */
#define RX_BD_LC	(1 << 0)	/* late collision */
#define RX_BD_CRC	(1 << 1)	/* RX CRC error */
#define RX_BD_SF	(1 << 2)	/* short frame 
*/ #define RX_BD_TL (1 << 3) /* too long */ #define RX_BD_DN (1 << 4) /* dribble nibble */ #define RX_BD_IS (1 << 5) /* invalid symbol */ #define RX_BD_OR (1 << 6) /* receiver overrun */ #define RX_BD_MISS (1 << 7) #define RX_BD_CF (1 << 8) /* control frame */ #define RX_BD_WRAP (1 << 13) #define RX_BD_IRQ (1 << 14) /* interrupt request enable */ #define RX_BD_EMPTY (1 << 15) #define RX_BD_LEN(x) (((x) & 0xffff) << 16) #define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \ RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS) #define ETHOC_BUFSIZ 1536 #define ETHOC_ZLEN 64 #define ETHOC_BD_BASE 0x400 #define ETHOC_TIMEOUT (HZ / 2) #define ETHOC_MII_TIMEOUT (1 + (HZ / 5)) /** * struct ethoc - driver-private device structure * @iobase: pointer to I/O memory region * @membase: pointer to buffer memory region * @dma_alloc: dma allocated buffer size * @io_region_size: I/O memory region size * @num_bd: number of buffer descriptors * @num_tx: number of send buffers * @cur_tx: last send buffer written * @dty_tx: last buffer actually sent * @num_rx: number of receive buffers * @cur_rx: current receive buffer * @vma: pointer to array of virtual memory addresses for buffers * @netdev: pointer to network device structure * @napi: NAPI structure * @msg_enable: device state flags * @lock: device lock * @phy: attached PHY * @mdio: MDIO bus for PHY access * @phy_id: address of attached PHY */ struct ethoc { void __iomem *iobase; void __iomem *membase; int dma_alloc; resource_size_t io_region_size; unsigned int num_bd; unsigned int num_tx; unsigned int cur_tx; unsigned int dty_tx; unsigned int num_rx; unsigned int cur_rx; void **vma; struct net_device *netdev; struct napi_struct napi; u32 msg_enable; spinlock_t lock; struct phy_device *phy; struct mii_bus *mdio; struct clk *clk; s8 phy_id; }; /** * struct ethoc_bd - buffer descriptor * @stat: buffer statistics * @addr: physical memory address */ struct ethoc_bd { u32 stat; u32 addr; }; static inline u32 ethoc_read(struct ethoc 
*dev, loff_t offset) { return ioread32(dev->iobase + offset); } static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data) { iowrite32(data, dev->iobase + offset); } static inline void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd) { loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); bd->stat = ethoc_read(dev, offset + 0); bd->addr = ethoc_read(dev, offset + 4); } static inline void ethoc_write_bd(struct ethoc *dev, int index, const struct ethoc_bd *bd) { loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); ethoc_write(dev, offset + 0, bd->stat); ethoc_write(dev, offset + 4, bd->addr); } static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask) { u32 imask = ethoc_read(dev, INT_MASK); imask |= mask; ethoc_write(dev, INT_MASK, imask); } static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask) { u32 imask = ethoc_read(dev, INT_MASK); imask &= ~mask; ethoc_write(dev, INT_MASK, imask); } static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask) { ethoc_write(dev, INT_SOURCE, mask); } static inline void ethoc_enable_rx_and_tx(struct ethoc *dev) { u32 mode = ethoc_read(dev, MODER); mode |= MODER_RXEN | MODER_TXEN; ethoc_write(dev, MODER, mode); } static inline void ethoc_disable_rx_and_tx(struct ethoc *dev) { u32 mode = ethoc_read(dev, MODER); mode &= ~(MODER_RXEN | MODER_TXEN); ethoc_write(dev, MODER, mode); } static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start) { struct ethoc_bd bd; int i; void *vma; dev->cur_tx = 0; dev->dty_tx = 0; dev->cur_rx = 0; ethoc_write(dev, TX_BD_NUM, dev->num_tx); /* setup transmission buffers */ bd.addr = mem_start; bd.stat = TX_BD_IRQ | TX_BD_CRC; vma = dev->membase; for (i = 0; i < dev->num_tx; i++) { if (i == dev->num_tx - 1) bd.stat |= TX_BD_WRAP; ethoc_write_bd(dev, i, &bd); bd.addr += ETHOC_BUFSIZ; dev->vma[i] = vma; vma += ETHOC_BUFSIZ; } bd.stat = RX_BD_EMPTY | RX_BD_IRQ; for (i = 0; i < dev->num_rx; i++) { if (i == dev->num_rx - 1) 
bd.stat |= RX_BD_WRAP; ethoc_write_bd(dev, dev->num_tx + i, &bd); bd.addr += ETHOC_BUFSIZ; dev->vma[dev->num_tx + i] = vma; vma += ETHOC_BUFSIZ; } return 0; } static int ethoc_reset(struct ethoc *dev) { u32 mode; /* TODO: reset controller? */ ethoc_disable_rx_and_tx(dev); /* TODO: setup registers */ /* enable FCS generation and automatic padding */ mode = ethoc_read(dev, MODER); mode |= MODER_CRC | MODER_PAD; ethoc_write(dev, MODER, mode); /* set full-duplex mode */ mode = ethoc_read(dev, MODER); mode |= MODER_FULLD; ethoc_write(dev, MODER, mode); ethoc_write(dev, IPGT, 0x15); ethoc_ack_irq(dev, INT_MASK_ALL); ethoc_enable_irq(dev, INT_MASK_ALL); ethoc_enable_rx_and_tx(dev); return 0; } static unsigned int ethoc_update_rx_stats(struct ethoc *dev, struct ethoc_bd *bd) { struct net_device *netdev = dev->netdev; unsigned int ret = 0; if (bd->stat & RX_BD_TL) { dev_err(&netdev->dev, "RX: frame too long\n"); netdev->stats.rx_length_errors++; ret++; } if (bd->stat & RX_BD_SF) { dev_err(&netdev->dev, "RX: frame too short\n"); netdev->stats.rx_length_errors++; ret++; } if (bd->stat & RX_BD_DN) { dev_err(&netdev->dev, "RX: dribble nibble\n"); netdev->stats.rx_frame_errors++; } if (bd->stat & RX_BD_CRC) { dev_err(&netdev->dev, "RX: wrong CRC\n"); netdev->stats.rx_crc_errors++; ret++; } if (bd->stat & RX_BD_OR) { dev_err(&netdev->dev, "RX: overrun\n"); netdev->stats.rx_over_errors++; ret++; } if (bd->stat & RX_BD_MISS) netdev->stats.rx_missed_errors++; if (bd->stat & RX_BD_LC) { dev_err(&netdev->dev, "RX: late collision\n"); netdev->stats.collisions++; ret++; } return ret; } static int ethoc_rx(struct net_device *dev, int limit) { struct ethoc *priv = netdev_priv(dev); int count; for (count = 0; count < limit; ++count) { unsigned int entry; struct ethoc_bd bd; entry = priv->num_tx + priv->cur_rx; ethoc_read_bd(priv, entry, &bd); if (bd.stat & RX_BD_EMPTY) { ethoc_ack_irq(priv, INT_MASK_RX); /* If packet (interrupt) came in between checking * BD_EMTPY and clearing the 
interrupt source, then we * risk missing the packet as the RX interrupt won't * trigger right away when we reenable it; hence, check * BD_EMTPY here again to make sure there isn't such a * packet waiting for us... */ ethoc_read_bd(priv, entry, &bd); if (bd.stat & RX_BD_EMPTY) break; } if (ethoc_update_rx_stats(priv, &bd) == 0) { int size = bd.stat >> 16; struct sk_buff *skb; size -= 4; /* strip the CRC */ skb = netdev_alloc_skb_ip_align(dev, size); if (likely(skb)) { void *src = priv->vma[entry]; memcpy_fromio(skb_put(skb, size), src, size); skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += size; netif_receive_skb(skb); } else { if (net_ratelimit()) dev_warn(&dev->dev, "low on memory - packet dropped\n"); dev->stats.rx_dropped++; break; } } /* clear the buffer descriptor so it can be reused */ bd.stat &= ~RX_BD_STATS; bd.stat |= RX_BD_EMPTY; ethoc_write_bd(priv, entry, &bd); if (++priv->cur_rx == priv->num_rx) priv->cur_rx = 0; } return count; } static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) { struct net_device *netdev = dev->netdev; if (bd->stat & TX_BD_LC) { dev_err(&netdev->dev, "TX: late collision\n"); netdev->stats.tx_window_errors++; } if (bd->stat & TX_BD_RL) { dev_err(&netdev->dev, "TX: retransmit limit\n"); netdev->stats.tx_aborted_errors++; } if (bd->stat & TX_BD_UR) { dev_err(&netdev->dev, "TX: underrun\n"); netdev->stats.tx_fifo_errors++; } if (bd->stat & TX_BD_CS) { dev_err(&netdev->dev, "TX: carrier sense lost\n"); netdev->stats.tx_carrier_errors++; } if (bd->stat & TX_BD_STATS) netdev->stats.tx_errors++; netdev->stats.collisions += (bd->stat >> 4) & 0xf; netdev->stats.tx_bytes += bd->stat >> 16; netdev->stats.tx_packets++; } static int ethoc_tx(struct net_device *dev, int limit) { struct ethoc *priv = netdev_priv(dev); int count; struct ethoc_bd bd; for (count = 0; count < limit; ++count) { unsigned int entry; entry = priv->dty_tx & (priv->num_tx-1); ethoc_read_bd(priv, entry, &bd); 
if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) { ethoc_ack_irq(priv, INT_MASK_TX); /* If interrupt came in between reading in the BD * and clearing the interrupt source, then we risk * missing the event as the TX interrupt won't trigger * right away when we reenable it; hence, check * BD_EMPTY here again to make sure there isn't such an * event pending... */ ethoc_read_bd(priv, entry, &bd); if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) break; } ethoc_update_tx_stats(priv, &bd); priv->dty_tx++; } if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) netif_wake_queue(dev); return count; } static irqreturn_t ethoc_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct ethoc *priv = netdev_priv(dev); u32 pending; u32 mask; /* Figure out what triggered the interrupt... * The tricky bit here is that the interrupt source bits get * set in INT_SOURCE for an event regardless of whether that * event is masked or not. Thus, in order to figure out what * triggered the interrupt, we need to remove the sources * for all events that are currently masked. This behaviour * is not particularly well documented but reasonable... 
*/ mask = ethoc_read(priv, INT_MASK); pending = ethoc_read(priv, INT_SOURCE); pending &= mask; if (unlikely(pending == 0)) return IRQ_NONE; ethoc_ack_irq(priv, pending); /* We always handle the dropped packet interrupt */ if (pending & INT_MASK_BUSY) { dev_err(&dev->dev, "packet dropped\n"); dev->stats.rx_dropped++; } /* Handle receive/transmit event by switching to polling */ if (pending & (INT_MASK_TX | INT_MASK_RX)) { ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX); napi_schedule(&priv->napi); } return IRQ_HANDLED; } static int ethoc_get_mac_address(struct net_device *dev, void *addr) { struct ethoc *priv = netdev_priv(dev); u8 *mac = (u8 *)addr; u32 reg; reg = ethoc_read(priv, MAC_ADDR0); mac[2] = (reg >> 24) & 0xff; mac[3] = (reg >> 16) & 0xff; mac[4] = (reg >> 8) & 0xff; mac[5] = (reg >> 0) & 0xff; reg = ethoc_read(priv, MAC_ADDR1); mac[0] = (reg >> 8) & 0xff; mac[1] = (reg >> 0) & 0xff; return 0; } static int ethoc_poll(struct napi_struct *napi, int budget) { struct ethoc *priv = container_of(napi, struct ethoc, napi); int rx_work_done = 0; int tx_work_done = 0; rx_work_done = ethoc_rx(priv->netdev, budget); tx_work_done = ethoc_tx(priv->netdev, budget); if (rx_work_done < budget && tx_work_done < budget) { napi_complete(napi); ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX); } return rx_work_done; } static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) { struct ethoc *priv = bus->priv; int i; ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); for (i = 0; i < 5; i++) { u32 status = ethoc_read(priv, MIISTATUS); if (!(status & MIISTATUS_BUSY)) { u32 data = ethoc_read(priv, MIIRX_DATA); /* reset MII command register */ ethoc_write(priv, MIICOMMAND, 0); return data; } usleep_range(100, 200); } return -EBUSY; } static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) { struct ethoc *priv = bus->priv; int i; ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 
ethoc_write(priv, MIITX_DATA, val); ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); for (i = 0; i < 5; i++) { u32 stat = ethoc_read(priv, MIISTATUS); if (!(stat & MIISTATUS_BUSY)) { /* reset MII command register */ ethoc_write(priv, MIICOMMAND, 0); return 0; } usleep_range(100, 200); } return -EBUSY; } static void ethoc_mdio_poll(struct net_device *dev) { } static int ethoc_mdio_probe(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); struct phy_device *phy; int err; if (priv->phy_id != -1) phy = priv->mdio->phy_map[priv->phy_id]; else phy = phy_find_first(priv->mdio); if (!phy) { dev_err(&dev->dev, "no PHY found\n"); return -ENXIO; } err = phy_connect_direct(dev, phy, ethoc_mdio_poll, PHY_INTERFACE_MODE_GMII); if (err) { dev_err(&dev->dev, "could not attach to PHY\n"); return err; } priv->phy = phy; phy->advertising &= ~(ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half); phy->supported &= ~(SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half); return 0; } static int ethoc_open(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); int ret; ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED, dev->name, dev); if (ret) return ret; ethoc_init_ring(priv, dev->mem_start); ethoc_reset(priv); if (netif_queue_stopped(dev)) { dev_dbg(&dev->dev, " resuming queue\n"); netif_wake_queue(dev); } else { dev_dbg(&dev->dev, " starting queue\n"); netif_start_queue(dev); } phy_start(priv->phy); napi_enable(&priv->napi); if (netif_msg_ifup(priv)) { dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", dev->base_addr, dev->mem_start, dev->mem_end); } return 0; } static int ethoc_stop(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); napi_disable(&priv->napi); if (priv->phy) phy_stop(priv->phy); ethoc_disable_rx_and_tx(priv); free_irq(dev->irq, dev); if (!netif_queue_stopped(dev)) netif_stop_queue(dev); return 0; } static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct ethoc *priv = netdev_priv(dev); 
struct mii_ioctl_data *mdio = if_mii(ifr); struct phy_device *phy = NULL; if (!netif_running(dev)) return -EINVAL; if (cmd != SIOCGMIIPHY) { if (mdio->phy_id >= PHY_MAX_ADDR) return -ERANGE; phy = priv->mdio->phy_map[mdio->phy_id]; if (!phy) return -ENODEV; } else { phy = priv->phy; } return phy_mii_ioctl(phy, ifr, cmd); } static void ethoc_do_set_mac_address(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); unsigned char *mac = dev->dev_addr; ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | (mac[5] << 0)); ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0)); } static int ethoc_set_mac_address(struct net_device *dev, void *p) { const struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); ethoc_do_set_mac_address(dev); return 0; } static void ethoc_set_multicast_list(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); u32 mode = ethoc_read(priv, MODER); struct netdev_hw_addr *ha; u32 hash[2] = { 0, 0 }; /* set loopback mode if requested */ if (dev->flags & IFF_LOOPBACK) mode |= MODER_LOOP; else mode &= ~MODER_LOOP; /* receive broadcast frames if requested */ if (dev->flags & IFF_BROADCAST) mode &= ~MODER_BRO; else mode |= MODER_BRO; /* enable promiscuous mode if requested */ if (dev->flags & IFF_PROMISC) mode |= MODER_PRO; else mode &= ~MODER_PRO; ethoc_write(priv, MODER, mode); /* receive multicast frames */ if (dev->flags & IFF_ALLMULTI) { hash[0] = 0xffffffff; hash[1] = 0xffffffff; } else { netdev_for_each_mc_addr(ha, dev) { u32 crc = ether_crc(ETH_ALEN, ha->addr); int bit = (crc >> 26) & 0x3f; hash[bit >> 5] |= 1 << (bit & 0x1f); } } ethoc_write(priv, ETH_HASH0, hash[0]); ethoc_write(priv, ETH_HASH1, hash[1]); } static int ethoc_change_mtu(struct net_device *dev, int new_mtu) { return -ENOSYS; } static void ethoc_tx_timeout(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); u32 pending = 
ethoc_read(priv, INT_SOURCE); if (likely(pending)) ethoc_interrupt(dev->irq, dev); } static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); struct ethoc_bd bd; unsigned int entry; void *dest; if (unlikely(skb->len > ETHOC_BUFSIZ)) { dev->stats.tx_errors++; goto out; } entry = priv->cur_tx % priv->num_tx; spin_lock_irq(&priv->lock); priv->cur_tx++; ethoc_read_bd(priv, entry, &bd); if (unlikely(skb->len < ETHOC_ZLEN)) bd.stat |= TX_BD_PAD; else bd.stat &= ~TX_BD_PAD; dest = priv->vma[entry]; memcpy_toio(dest, skb->data, skb->len); bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); bd.stat |= TX_BD_LEN(skb->len); ethoc_write_bd(priv, entry, &bd); bd.stat |= TX_BD_READY; ethoc_write_bd(priv, entry, &bd); if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) { dev_dbg(&dev->dev, "stopping queue\n"); netif_stop_queue(dev); } spin_unlock_irq(&priv->lock); skb_tx_timestamp(skb); out: dev_kfree_skb(skb); return NETDEV_TX_OK; } static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct ethoc *priv = netdev_priv(dev); struct phy_device *phydev = priv->phy; if (!phydev) return -EOPNOTSUPP; return phy_ethtool_gset(phydev, cmd); } static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct ethoc *priv = netdev_priv(dev); struct phy_device *phydev = priv->phy; if (!phydev) return -EOPNOTSUPP; return phy_ethtool_sset(phydev, cmd); } static int ethoc_get_regs_len(struct net_device *netdev) { return ETH_END; } static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct ethoc *priv = netdev_priv(dev); u32 *regs_buff = p; unsigned i; regs->version = 0; for (i = 0; i < ETH_END / sizeof(u32); ++i) regs_buff[i] = ethoc_read(priv, i * sizeof(u32)); } static void ethoc_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct ethoc *priv = netdev_priv(dev); ring->rx_max_pending = priv->num_bd - 1; 
ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; ring->tx_max_pending = priv->num_bd - 1; ring->rx_pending = priv->num_rx; ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; ring->tx_pending = priv->num_tx; } static int ethoc_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct ethoc *priv = netdev_priv(dev); if (ring->tx_pending < 1 || ring->rx_pending < 1 || ring->tx_pending + ring->rx_pending > priv->num_bd) return -EINVAL; if (ring->rx_mini_pending || ring->rx_jumbo_pending) return -EINVAL; if (netif_running(dev)) { netif_tx_disable(dev); ethoc_disable_rx_and_tx(priv); ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX); synchronize_irq(dev->irq); } priv->num_tx = rounddown_pow_of_two(ring->tx_pending); priv->num_rx = ring->rx_pending; ethoc_init_ring(priv, dev->mem_start); if (netif_running(dev)) { ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX); ethoc_enable_rx_and_tx(priv); netif_wake_queue(dev); } return 0; } const struct ethtool_ops ethoc_ethtool_ops = { .get_settings = ethoc_get_settings, .set_settings = ethoc_set_settings, .get_regs_len = ethoc_get_regs_len, .get_regs = ethoc_get_regs, .get_link = ethtool_op_get_link, .get_ringparam = ethoc_get_ringparam, .set_ringparam = ethoc_set_ringparam, .get_ts_info = ethtool_op_get_ts_info, }; static const struct net_device_ops ethoc_netdev_ops = { .ndo_open = ethoc_open, .ndo_stop = ethoc_stop, .ndo_do_ioctl = ethoc_ioctl, .ndo_set_mac_address = ethoc_set_mac_address, .ndo_set_rx_mode = ethoc_set_multicast_list, .ndo_change_mtu = ethoc_change_mtu, .ndo_tx_timeout = ethoc_tx_timeout, .ndo_start_xmit = ethoc_start_xmit, }; /** * ethoc_probe - initialize OpenCores ethernet MAC * pdev: platform device */ static int ethoc_probe(struct platform_device *pdev) { struct net_device *netdev = NULL; struct resource *res = NULL; struct resource *mmio = NULL; struct resource *mem = NULL; struct ethoc *priv = NULL; unsigned int phy; int num_bd; int ret = 0; bool random_mac = false; 
struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev); u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0; /* allocate networking device */ netdev = alloc_etherdev(sizeof(struct ethoc)); if (!netdev) { ret = -ENOMEM; goto out; } SET_NETDEV_DEV(netdev, &pdev->dev); platform_set_drvdata(pdev, netdev); /* obtain I/O memory space */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "cannot obtain I/O memory space\n"); ret = -ENXIO; goto free; } mmio = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), res->name); if (!mmio) { dev_err(&pdev->dev, "cannot request I/O memory space\n"); ret = -ENXIO; goto free; } netdev->base_addr = mmio->start; /* obtain buffer memory space */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), res->name); if (!mem) { dev_err(&pdev->dev, "cannot request memory space\n"); ret = -ENXIO; goto free; } netdev->mem_start = mem->start; netdev->mem_end = mem->end; } /* obtain device IRQ number */ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "cannot obtain IRQ\n"); ret = -ENXIO; goto free; } netdev->irq = res->start; /* setup driver-private data */ priv = netdev_priv(netdev); priv->netdev = netdev; priv->dma_alloc = 0; priv->io_region_size = resource_size(mmio); priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, resource_size(mmio)); if (!priv->iobase) { dev_err(&pdev->dev, "cannot remap I/O memory space\n"); ret = -ENXIO; goto error; } if (netdev->mem_end) { priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, resource_size(mem)); if (!priv->membase) { dev_err(&pdev->dev, "cannot remap memory space\n"); ret = -ENXIO; goto error; } } else { /* Allocate buffer memory */ priv->membase = dmam_alloc_coherent(&pdev->dev, buffer_size, (void *)&netdev->mem_start, GFP_KERNEL); if (!priv->membase) { dev_err(&pdev->dev, "cannot allocate %dB 
buffer\n", buffer_size); ret = -ENOMEM; goto error; } netdev->mem_end = netdev->mem_start + buffer_size; priv->dma_alloc = buffer_size; } /* calculate the number of TX/RX buffers, maximum 128 supported */ num_bd = min_t(unsigned int, 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); if (num_bd < 4) { ret = -ENODEV; goto error; } priv->num_bd = num_bd; /* num_tx must be a power of two */ priv->num_tx = rounddown_pow_of_two(num_bd >> 1); priv->num_rx = num_bd - priv->num_tx; dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n", priv->num_tx, priv->num_rx); priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL); if (!priv->vma) { ret = -ENOMEM; goto error; } /* Allow the platform setup code to pass in a MAC address. */ if (pdata) { memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); priv->phy_id = pdata->phy_id; } else { priv->phy_id = -1; #ifdef CONFIG_OF { const uint8_t *mac; mac = of_get_property(pdev->dev.of_node, "local-mac-address", NULL); if (mac) memcpy(netdev->dev_addr, mac, IFHWADDRLEN); } #endif } /* Check that the given MAC address is valid. If it isn't, read the * current MAC from the controller. */ if (!is_valid_ether_addr(netdev->dev_addr)) ethoc_get_mac_address(netdev, netdev->dev_addr); /* Check the MAC again for validity, if it still isn't choose and * program a random one. */ if (!is_valid_ether_addr(netdev->dev_addr)) { eth_random_addr(netdev->dev_addr); random_mac = true; } ethoc_do_set_mac_address(netdev); if (random_mac) netdev->addr_assign_type = NET_ADDR_RANDOM; /* Allow the platform setup code to adjust MII management bus clock. 
*/ if (!eth_clkfreq) { struct clk *clk = devm_clk_get(&pdev->dev, NULL); if (!IS_ERR(clk)) { priv->clk = clk; clk_prepare_enable(clk); eth_clkfreq = clk_get_rate(clk); } } if (eth_clkfreq) { u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1); if (!clkdiv) clkdiv = 2; dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv); ethoc_write(priv, MIIMODER, (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) | clkdiv); } /* register MII bus */ priv->mdio = mdiobus_alloc(); if (!priv->mdio) { ret = -ENOMEM; goto free; } priv->mdio->name = "ethoc-mdio"; snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d", priv->mdio->name, pdev->id); priv->mdio->read = ethoc_mdio_read; priv->mdio->write = ethoc_mdio_write; priv->mdio->priv = priv; priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!priv->mdio->irq) { ret = -ENOMEM; goto free_mdio; } for (phy = 0; phy < PHY_MAX_ADDR; phy++) priv->mdio->irq[phy] = PHY_POLL; ret = mdiobus_register(priv->mdio); if (ret) { dev_err(&netdev->dev, "failed to register MDIO bus\n"); goto free_mdio; } ret = ethoc_mdio_probe(netdev); if (ret) { dev_err(&netdev->dev, "failed to probe MDIO bus\n"); goto error; } ether_setup(netdev); /* setup the net_device structure */ netdev->netdev_ops = &ethoc_netdev_ops; netdev->watchdog_timeo = ETHOC_TIMEOUT; netdev->features |= 0; netdev->ethtool_ops = &ethoc_ethtool_ops; /* setup NAPI */ netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); spin_lock_init(&priv->lock); ret = register_netdev(netdev); if (ret < 0) { dev_err(&netdev->dev, "failed to register interface\n"); goto error2; } goto out; error2: netif_napi_del(&priv->napi); error: mdiobus_unregister(priv->mdio); free_mdio: kfree(priv->mdio->irq); mdiobus_free(priv->mdio); free: if (priv->clk) clk_disable_unprepare(priv->clk); free_netdev(netdev); out: return ret; } /** * ethoc_remove - shutdown OpenCores ethernet MAC * @pdev: platform device */ static int ethoc_remove(struct platform_device *pdev) { struct net_device *netdev = 
platform_get_drvdata(pdev); struct ethoc *priv = netdev_priv(netdev); if (netdev) { netif_napi_del(&priv->napi); phy_disconnect(priv->phy); priv->phy = NULL; if (priv->mdio) { mdiobus_unregister(priv->mdio); kfree(priv->mdio->irq); mdiobus_free(priv->mdio); } if (priv->clk) clk_disable_unprepare(priv->clk); unregister_netdev(netdev); free_netdev(netdev); } return 0; } #ifdef CONFIG_PM static int ethoc_suspend(struct platform_device *pdev, pm_message_t state) { return -ENOSYS; } static int ethoc_resume(struct platform_device *pdev) { return -ENOSYS; } #else # define ethoc_suspend NULL # define ethoc_resume NULL #endif static struct of_device_id ethoc_match[] = { { .compatible = "opencores,ethoc", }, {}, }; MODULE_DEVICE_TABLE(of, ethoc_match); static struct platform_driver ethoc_driver = { .probe = ethoc_probe, .remove = ethoc_remove, .suspend = ethoc_suspend, .resume = ethoc_resume, .driver = { .name = "ethoc", .owner = THIS_MODULE, .of_match_table = ethoc_match, }, }; module_platform_driver(ethoc_driver); MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); MODULE_DESCRIPTION("OpenCores Ethernet MAC driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
raulherbster/goldfish
drivers/staging/comedi/drivers/usbdux.c
140
79089
#define DRIVER_VERSION "v2.1" #define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com" #define DRIVER_DESC "Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com" /* comedi/drivers/usbdux.c Copyright (C) 2003-2007 Bernd Porr, Bernd.Porr@f2s.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: usbdux Description: University of Stirling USB DAQ & INCITE Technology Limited Devices: [ITL] USB-DUX (usbdux.o) Author: Bernd Porr <BerndPorr@f2s.com> Updated: 25 Nov 2007 Status: Testing Configuration options: You have to upload firmware with the -i option. The firmware is usually installed under /usr/share/usb or /usr/local/share/usb or /lib/firmware. Connection scheme for the counter at the digital port: 0=/CLK0, 1=UP/DOWN0, 2=RESET0, 4=/CLK1, 5=UP/DOWN1, 6=RESET1. The sampling rate of the counter is approximately 500Hz. Please note that under USB2.0 the length of the channel list determines the max sampling rate. If you sample only one channel you get 8kHz sampling rate. If you sample two channels you get 4kHz and so on. */ /* * I must give credit here to Chris Baugher who * wrote the driver for AT-MIO-16d. I used some parts of this * driver. I also must give credits to David Brownell * who supported me with the USB development. 
* * Bernd Porr * * * Revision history: * 0.94: D/A output should work now with any channel list combinations * 0.95: .owner commented out for kernel vers below 2.4.19 * sanity checks in ai/ao_cmd * 0.96: trying to get it working with 2.6, moved all memory alloc to comedi's * attach final USB IDs * moved memory allocation completely to the corresponding comedi * functions firmware upload is by fxload and no longer by comedi (due to * enumeration) * 0.97: USB IDs received, adjusted table * 0.98: SMP, locking, memroy alloc: moved all usb memory alloc * to the usb subsystem and moved all comedi related memory * alloc to comedi. * | kernel | registration | usbdux-usb | usbdux-comedi | comedi | * 0.99: USB 2.0: changed protocol to isochronous transfer * IRQ transfer is too buggy and too risky in 2.0 * for the high speed ISO transfer is now a working version * available * 0.99b: Increased the iso transfer buffer for high sp.to 10 buffers. Some VIA * chipsets miss out IRQs. Deeper buffering is needed. * 1.00: full USB 2.0 support for the A/D converter. Now: max 8kHz sampling * rate. * Firmware vers 1.00 is needed for this. * Two 16 bit up/down/reset counter with a sampling rate of 1kHz * And loads of cleaning up, in particular streamlining the * bulk transfers. 
* 1.1: moved EP4 transfers to EP1 to make space for a PWM output on EP4 * 1.2: added PWM suport via EP4 * 2.0: PWM seems to be stable and is not interfering with the other functions * 2.1: changed PWM API * */ /* generates loads of debug info */ /* #define NOISY_DUX_DEBUGBUG */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/smp_lock.h> #include <linux/fcntl.h> #include <linux/compiler.h> #include "../comedidev.h" #define BOARDNAME "usbdux" /* timeout for the USB-transfer */ #define EZTIMEOUT 30 /* constants for "firmware" upload and download */ #define USBDUXSUB_FIRMWARE 0xA0 #define VENDOR_DIR_IN 0xC0 #define VENDOR_DIR_OUT 0x40 /* internal adresses of the 8051 processor */ #define USBDUXSUB_CPUCS 0xE600 /* * the minor device number, major is 180 only for debugging purposes and to * upload special firmware (programming the eeprom etc) which is not compatible * with the comedi framwork */ #define USBDUXSUB_MINOR 32 /* max lenghth of the transfer-buffer for software upload */ #define TB_LEN 0x2000 /* Input endpoint number: ISO/IRQ */ #define ISOINEP 6 /* Output endpoint number: ISO/IRQ */ #define ISOOUTEP 2 /* This EP sends DUX commands to USBDUX */ #define COMMAND_OUT_EP 1 /* This EP receives the DUX commands from USBDUX */ #define COMMAND_IN_EP 8 /* Output endpoint for PWM */ #define PWM_EP 4 /* 300Hz max frequ under PWM */ #define MIN_PWM_PERIOD ((long)(1E9/300)) /* Default PWM frequency */ #define PWM_DEFAULT_PERIOD ((long)(1E9/100)) /* Number of channels */ #define NUMCHANNELS 8 /* Size of one A/D value */ #define SIZEADIN ((sizeof(int16_t))) /* * Size of the input-buffer IN BYTES * Always multiple of 8 for 8 microframes which is needed in the highspeed mode */ #define SIZEINBUF ((8*SIZEADIN)) /* 16 bytes. 
*/ #define SIZEINSNBUF 16 /* Number of DA channels */ #define NUMOUTCHANNELS 8 /* size of one value for the D/A converter: channel and value */ #define SIZEDAOUT ((sizeof(int8_t)+sizeof(int16_t))) /* * Size of the output-buffer in bytes * Actually only the first 4 triplets are used but for the * high speed mode we need to pad it to 8 (microframes). */ #define SIZEOUTBUF ((8*SIZEDAOUT)) /* * Size of the buffer for the dux commands: just now max size is determined * by the analogue out + command byte + panic bytes... */ #define SIZEOFDUXBUFFER ((8*SIZEDAOUT+2)) /* Number of in-URBs which receive the data: min=2 */ #define NUMOFINBUFFERSFULL 5 /* Number of out-URBs which send the data: min=2 */ #define NUMOFOUTBUFFERSFULL 5 /* Number of in-URBs which receive the data: min=5 */ /* must have more buffers due to buggy USB ctr */ #define NUMOFINBUFFERSHIGH 10 /* Number of out-URBs which send the data: min=5 */ /* must have more buffers due to buggy USB ctr */ #define NUMOFOUTBUFFERSHIGH 10 /* Total number of usbdux devices */ #define NUMUSBDUX 16 /* Analogue in subdevice */ #define SUBDEV_AD 0 /* Analogue out subdevice */ #define SUBDEV_DA 1 /* Digital I/O */ #define SUBDEV_DIO 2 /* counter */ #define SUBDEV_COUNTER 3 /* timer aka pwm output */ #define SUBDEV_PWM 4 /* number of retries to get the right dux command */ #define RETRIES 10 /**************************************************/ /* comedi constants */ static const comedi_lrange range_usbdux_ai_range = { 4, { BIP_RANGE(4.096), BIP_RANGE(4.096 / 2), UNI_RANGE(4.096), UNI_RANGE(4.096 / 2) } }; static const comedi_lrange range_usbdux_ao_range = { 2, { BIP_RANGE(4.096), UNI_RANGE(4.096), } }; /* * private structure of one subdevice */ /* * This is the structure which holds all the data of * this driver one sub device just now: A/D */ struct usbduxsub { /* attached? */ int attached; /* is it associated with a subdevice? 
*/ int probed; /* pointer to the usb-device */ struct usb_device *usbdev; /* actual number of in-buffers */ int numOfInBuffers; /* actual number of out-buffers */ int numOfOutBuffers; /* ISO-transfer handling: buffers */ struct urb **urbIn; struct urb **urbOut; /* pwm-transfer handling */ struct urb *urbPwm; /* PWM period */ lsampl_t pwmPeriod; /* PWM internal delay for the GPIF in the FX2 */ int8_t pwmDelay; /* size of the PWM buffer which holds the bit pattern */ int sizePwmBuf; /* input buffer for the ISO-transfer */ int16_t *inBuffer; /* input buffer for single insn */ int16_t *insnBuffer; /* output buffer for single DA outputs */ int16_t *outBuffer; /* interface number */ int ifnum; /* interface structure in 2.6 */ struct usb_interface *interface; /* comedi device for the interrupt context */ comedi_device *comedidev; /* is it USB_SPEED_HIGH or not? */ short int high_speed; /* asynchronous command is running */ short int ai_cmd_running; short int ao_cmd_running; /* pwm is running */ short int pwm_cmd_running; /* continous aquisition */ short int ai_continous; short int ao_continous; /* number of samples to aquire */ int ai_sample_count; int ao_sample_count; /* time between samples in units of the timer */ unsigned int ai_timer; unsigned int ao_timer; /* counter between aquisitions */ unsigned int ai_counter; unsigned int ao_counter; /* interval in frames/uframes */ unsigned int ai_interval; /* D/A commands */ int8_t *dac_commands; /* commands */ int8_t *dux_commands; struct semaphore sem; }; /* * The pointer to the private usb-data of the driver is also the private data * for the comedi-device. This has to be global as the usb subsystem needs * global variables. The other reason is that this structure must be there * _before_ any comedi command is issued. The usb subsystem must be initialised * before comedi can access it. 
*/ static struct usbduxsub usbduxsub[NUMUSBDUX]; static DECLARE_MUTEX(start_stop_sem); /* * Stops the data acquision * It should be safe to call this function from any context */ static int usbduxsub_unlink_InURBs(struct usbduxsub *usbduxsub_tmp) { int i = 0; int err = 0; if (usbduxsub_tmp && usbduxsub_tmp->urbIn) { for (i = 0; i < usbduxsub_tmp->numOfInBuffers; i++) { if (usbduxsub_tmp->urbIn[i]) { /* We wait here until all transfers have been * cancelled. */ usb_kill_urb(usbduxsub_tmp->urbIn[i]); } dev_dbg(&usbduxsub_tmp->interface->dev, "comedi: usbdux: unlinked InURB %d, err=%d\n", i, err); } } return err; } /* * This will stop a running acquisition operation * Is called from within this driver from both the * interrupt context and from comedi */ static int usbdux_ai_stop(struct usbduxsub *this_usbduxsub, int do_unlink) { int ret = 0; if (!this_usbduxsub) { dev_err(&this_usbduxsub->interface->dev, "comedi?: usbdux_ai_stop: this_usbduxsub=NULL!\n"); return -EFAULT; } dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_stop\n"); if (do_unlink) { /* stop aquistion */ ret = usbduxsub_unlink_InURBs(this_usbduxsub); } this_usbduxsub->ai_cmd_running = 0; return ret; } /* * This will cancel a running acquisition operation. * This is called by comedi but never from inside the driver. 
*/ static int usbdux_ai_cancel(comedi_device *dev, comedi_subdevice *s) { struct usbduxsub *this_usbduxsub; int res = 0; /* force unlink of all urbs */ this_usbduxsub = dev->private; if (!this_usbduxsub) return -EFAULT; dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_cancel\n"); /* prevent other CPUs from submitting new commands just now */ down(&this_usbduxsub->sem); if (!(this_usbduxsub->probed)) { up(&this_usbduxsub->sem); return -ENODEV; } /* unlink only if the urb really has been submitted */ res = usbdux_ai_stop(this_usbduxsub, this_usbduxsub->ai_cmd_running); up(&this_usbduxsub->sem); return res; } /* analogue IN - interrupt service routine */ static void usbduxsub_ai_IsocIrq(struct urb *urb) { int i, err, n; struct usbduxsub *this_usbduxsub; comedi_device *this_comedidev; comedi_subdevice *s; /* the context variable points to the subdevice */ this_comedidev = urb->context; /* the private structure of the subdevice is struct usbduxsub */ this_usbduxsub = this_comedidev->private; /* subdevice which is the AD converter */ s = this_comedidev->subdevices + SUBDEV_AD; /* first we test if something unusual has just happened */ switch (urb->status) { case 0: /* copy the result in the transfer buffer */ memcpy(this_usbduxsub->inBuffer, urb->transfer_buffer, SIZEINBUF); break; case -EILSEQ: /* error in the ISOchronous data */ /* we don't copy the data into the transfer buffer */ /* and recycle the last data byte */ dev_dbg(&urb->dev->dev, "comedi%d: usbdux: CRC error in ISO IN stream.\n", this_usbduxsub->comedidev->minor); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -ECONNABORTED: /* happens after an unlink command */ if (this_usbduxsub->ai_cmd_running) { /* we are still running a command */ /* tell this comedi */ s->async->events |= COMEDI_CB_EOA; s->async->events |= COMEDI_CB_ERROR; comedi_event(this_usbduxsub->comedidev, s); /* stop the transfer w/o unlink */ usbdux_ai_stop(this_usbduxsub, 0); } return; default: /* a real error on the 
bus */ /* pass error to comedi if we are really running a command */ if (this_usbduxsub->ai_cmd_running) { dev_err(&urb->dev->dev, "Non-zero urb status received in ai intr " "context: %d\n", urb->status); s->async->events |= COMEDI_CB_EOA; s->async->events |= COMEDI_CB_ERROR; comedi_event(this_usbduxsub->comedidev, s); /* don't do an unlink here */ usbdux_ai_stop(this_usbduxsub, 0); } return; } /* * at this point we are reasonably sure that nothing dodgy has happened * are we running a command? */ if (unlikely((!(this_usbduxsub->ai_cmd_running)))) { /* * not running a command, do not continue execution if no * asynchronous command is running in particular not resubmit */ return; } urb->dev = this_usbduxsub->usbdev; /* resubmit the urb */ err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err < 0)) { dev_err(&urb->dev->dev, "comedi_: urb resubmit failed in int-context! err=%d\n", err); if (err == -EL2NSYNC) dev_err(&urb->dev->dev, "buggy USB host controller or bug in IRQ " "handler!\n"); s->async->events |= COMEDI_CB_EOA; s->async->events |= COMEDI_CB_ERROR; comedi_event(this_usbduxsub->comedidev, s); /* don't do an unlink here */ usbdux_ai_stop(this_usbduxsub, 0); return; } this_usbduxsub->ai_counter--; if (likely(this_usbduxsub->ai_counter > 0)) return; /* timer zero, transfer measurements to comedi */ this_usbduxsub->ai_counter = this_usbduxsub->ai_timer; /* test, if we transmit only a fixed number of samples */ if (!(this_usbduxsub->ai_continous)) { /* not continous, fixed number of samples */ this_usbduxsub->ai_sample_count--; /* all samples received? 
*/ if (this_usbduxsub->ai_sample_count < 0) { /* prevent a resubmit next time */ usbdux_ai_stop(this_usbduxsub, 0); /* say comedi that the acquistion is over */ s->async->events |= COMEDI_CB_EOA; comedi_event(this_usbduxsub->comedidev, s); return; } } /* get the data from the USB bus and hand it over to comedi */ n = s->async->cmd.chanlist_len; for (i = 0; i < n; i++) { /* transfer data */ if (CR_RANGE(s->async->cmd.chanlist[i]) <= 1) { comedi_buf_put (s->async, le16_to_cpu(this_usbduxsub-> inBuffer[i]) ^ 0x800); } else { comedi_buf_put (s->async, le16_to_cpu(this_usbduxsub->inBuffer[i])); } } /* tell comedi that data is there */ comedi_event(this_usbduxsub->comedidev, s); } static int usbduxsub_unlink_OutURBs(struct usbduxsub *usbduxsub_tmp) { int i = 0; int err = 0; if (usbduxsub_tmp && usbduxsub_tmp->urbOut) { for (i = 0; i < usbduxsub_tmp->numOfOutBuffers; i++) { if (usbduxsub_tmp->urbOut[i]) usb_kill_urb(usbduxsub_tmp->urbOut[i]); dev_dbg(&usbduxsub_tmp->interface->dev, "comedi: usbdux: unlinked OutURB %d: res=%d\n", i, err); } } return err; } /* This will cancel a running acquisition operation * in any context. 
*/ static int usbdux_ao_stop(struct usbduxsub *this_usbduxsub, int do_unlink) { int ret = 0; if (!this_usbduxsub) return -EFAULT; dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ao_cancel\n"); if (do_unlink) ret = usbduxsub_unlink_OutURBs(this_usbduxsub); this_usbduxsub->ao_cmd_running = 0; return ret; } /* force unlink, is called by comedi */ static int usbdux_ao_cancel(comedi_device *dev, comedi_subdevice *s) { struct usbduxsub *this_usbduxsub = dev->private; int res = 0; if (!this_usbduxsub) return -EFAULT; /* prevent other CPUs from submitting a command just now */ down(&this_usbduxsub->sem); if (!(this_usbduxsub->probed)) { up(&this_usbduxsub->sem); return -ENODEV; } /* unlink only if it is really running */ res = usbdux_ao_stop(this_usbduxsub, this_usbduxsub->ao_cmd_running); up(&this_usbduxsub->sem); return res; } static void usbduxsub_ao_IsocIrq(struct urb *urb) { int i, ret; int8_t *datap; struct usbduxsub *this_usbduxsub; comedi_device *this_comedidev; comedi_subdevice *s; /* the context variable points to the subdevice */ this_comedidev = urb->context; /* the private structure of the subdevice is struct usbduxsub */ this_usbduxsub = this_comedidev->private; s = this_comedidev->subdevices + SUBDEV_DA; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -ECONNABORTED: /* after an unlink command, unplug, ... etc */ /* no unlink needed here. Already shutting down. 
*/ if (this_usbduxsub->ao_cmd_running) { s->async->events |= COMEDI_CB_EOA; comedi_event(this_usbduxsub->comedidev, s); usbdux_ao_stop(this_usbduxsub, 0); } return; default: /* a real error */ if (this_usbduxsub->ao_cmd_running) { dev_err(&urb->dev->dev, "comedi_: Non-zero urb status received in ao " "intr context: %d\n", urb->status); s->async->events |= COMEDI_CB_ERROR; s->async->events |= COMEDI_CB_EOA; comedi_event(this_usbduxsub->comedidev, s); /* we do an unlink if we are in the high speed mode */ usbdux_ao_stop(this_usbduxsub, 0); } return; } /* are we actually running? */ if (!(this_usbduxsub->ao_cmd_running)) return; /* normal operation: executing a command in this subdevice */ this_usbduxsub->ao_counter--; if (this_usbduxsub->ao_counter <= 0) { /* timer zero */ this_usbduxsub->ao_counter = this_usbduxsub->ao_timer; /* handle non continous aquisition */ if (!(this_usbduxsub->ao_continous)) { /* fixed number of samples */ this_usbduxsub->ao_sample_count--; if (this_usbduxsub->ao_sample_count < 0) { /* all samples transmitted */ usbdux_ao_stop(this_usbduxsub, 0); s->async->events |= COMEDI_CB_EOA; comedi_event(this_usbduxsub->comedidev, s); /* no resubmit of the urb */ return; } } /* transmit data to the USB bus */ ((uint8_t *) (urb->transfer_buffer))[0] = s->async->cmd.chanlist_len; for (i = 0; i < s->async->cmd.chanlist_len; i++) { sampl_t temp; if (i >= NUMOUTCHANNELS) break; /* pointer to the DA */ datap = (&(((int8_t *)urb->transfer_buffer)[i * 3 + 1])); /* get the data from comedi */ ret = comedi_buf_get(s->async, &temp); datap[0] = temp; datap[1] = temp >> 8; datap[2] = this_usbduxsub->dac_commands[i]; /* printk("data[0]=%x, data[1]=%x, data[2]=%x\n", */ /* datap[0],datap[1],datap[2]); */ if (ret < 0) { dev_err(&urb->dev->dev, "comedi: buffer underflow\n"); s->async->events |= COMEDI_CB_EOA; s->async->events |= COMEDI_CB_OVERFLOW; } /* transmit data to comedi */ s->async->events |= COMEDI_CB_BLOCK; comedi_event(this_usbduxsub->comedidev, s); } } 
urb->transfer_buffer_length = SIZEOUTBUF; urb->dev = this_usbduxsub->usbdev; urb->status = 0; if (this_usbduxsub->ao_cmd_running) { if (this_usbduxsub->high_speed) { /* uframes */ urb->interval = 8; } else { /* frames */ urb->interval = 1; } urb->number_of_packets = 1; urb->iso_frame_desc[0].offset = 0; urb->iso_frame_desc[0].length = SIZEOUTBUF; urb->iso_frame_desc[0].status = 0; ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { dev_err(&urb->dev->dev, "comedi_: ao urb resubm failed in int-cont. " "ret=%d", ret); if (ret == EL2NSYNC) dev_err(&urb->dev->dev, "buggy USB host controller or bug in " "IRQ handling!\n"); s->async->events |= COMEDI_CB_EOA; s->async->events |= COMEDI_CB_ERROR; comedi_event(this_usbduxsub->comedidev, s); /* don't do an unlink here */ usbdux_ao_stop(this_usbduxsub, 0); } } } static int usbduxsub_start(struct usbduxsub *usbduxsub) { int errcode = 0; uint8_t local_transfer_buffer[16]; if (usbduxsub->probed) { /* 7f92 to zero */ local_transfer_buffer[0] = 0; errcode = usb_control_msg(usbduxsub->usbdev, /* create a pipe for a control transfer */ usb_sndctrlpipe(usbduxsub->usbdev, 0), /* bRequest, "Firmware" */ USBDUXSUB_FIRMWARE, /* bmRequestType */ VENDOR_DIR_OUT, /* Value */ USBDUXSUB_CPUCS, /* Index */ 0x0000, /* address of the transfer buffer */ local_transfer_buffer, /* Length */ 1, /* Timeout */ EZTIMEOUT); if (errcode < 0) { dev_err(&usbduxsub->interface->dev, "comedi_: control msg failed (start)\n"); return errcode; } } return 0; } static int usbduxsub_stop(struct usbduxsub *usbduxsub) { int errcode = 0; uint8_t local_transfer_buffer[16]; if (usbduxsub->probed) { /* 7f92 to one */ local_transfer_buffer[0] = 1; errcode = usb_control_msg(usbduxsub->usbdev, usb_sndctrlpipe(usbduxsub->usbdev, 0), /* bRequest, "Firmware" */ USBDUXSUB_FIRMWARE, /* bmRequestType */ VENDOR_DIR_OUT, /* Value */ USBDUXSUB_CPUCS, /* Index */ 0x0000, local_transfer_buffer, /* Length */ 1, /* Timeout */ EZTIMEOUT); if (errcode < 0) { 
dev_err(&usbduxsub->interface->dev, "comedi_: control msg failed (stop)\n"); return errcode; } } return 0; } static int usbduxsub_upload(struct usbduxsub *usbduxsub, uint8_t *local_transfer_buffer, unsigned int startAddr, unsigned int len) { int errcode; if (usbduxsub->probed) { dev_dbg(&usbduxsub->interface->dev, "comedi%d: usbdux: uploading %d bytes" " to addr %d, first byte=%d.\n", usbduxsub->comedidev->minor, len, startAddr, local_transfer_buffer[0]); errcode = usb_control_msg(usbduxsub->usbdev, usb_sndctrlpipe(usbduxsub->usbdev, 0), /* brequest, firmware */ USBDUXSUB_FIRMWARE, /* bmRequestType */ VENDOR_DIR_OUT, /* value */ startAddr, /* index */ 0x0000, /* our local safe buffer */ local_transfer_buffer, /* length */ len, /* timeout */ EZTIMEOUT); dev_dbg(&usbduxsub->interface->dev, "comedi_: result=%d\n", errcode); if (errcode < 0) { dev_err(&usbduxsub->interface->dev, "comedi_: upload failed\n"); return errcode; } } else { /* no device on the bus for this index */ return -EFAULT; } return 0; } static int firmwareUpload(struct usbduxsub *usbduxsub, uint8_t *firmwareBinary, int sizeFirmware) { int ret; if (!firmwareBinary) return 0; ret = usbduxsub_stop(usbduxsub); if (ret < 0) { dev_err(&usbduxsub->interface->dev, "comedi_: can not stop firmware\n"); return ret; } ret = usbduxsub_upload(usbduxsub, firmwareBinary, 0, sizeFirmware); if (ret < 0) { dev_err(&usbduxsub->interface->dev, "comedi_: firmware upload failed\n"); return ret; } ret = usbduxsub_start(usbduxsub); if (ret < 0) { dev_err(&usbduxsub->interface->dev, "comedi_: can not start firmware\n"); return ret; } return 0; } static int usbduxsub_submit_InURBs(struct usbduxsub *usbduxsub) { int i, errFlag; if (!usbduxsub) return -EFAULT; /* Submit all URBs and start the transfer on the bus */ for (i = 0; i < usbduxsub->numOfInBuffers; i++) { /* in case of a resubmission after an unlink... 
 */
		usbduxsub->urbIn[i]->interval = usbduxsub->ai_interval;
		usbduxsub->urbIn[i]->context = usbduxsub->comedidev;
		usbduxsub->urbIn[i]->dev = usbduxsub->usbdev;
		usbduxsub->urbIn[i]->status = 0;
		usbduxsub->urbIn[i]->transfer_flags = URB_ISO_ASAP;
		dev_dbg(&usbduxsub->interface->dev,
			"comedi%d: submitting in-urb[%d]: %p,%p intv=%d\n",
			usbduxsub->comedidev->minor, i,
			(usbduxsub->urbIn[i]->context),
			(usbduxsub->urbIn[i]->dev),
			(usbduxsub->urbIn[i]->interval));
		/* GFP_ATOMIC: may be called from URB completion context */
		errFlag = usb_submit_urb(usbduxsub->urbIn[i], GFP_ATOMIC);
		if (errFlag) {
			dev_err(&usbduxsub->interface->dev,
				"comedi_: ai: usb_submit_urb(%d) error %d\n",
				i, errFlag);
			return errFlag;
		}
	}
	return 0;
}

/*
 * (Re)submit all analog-output isochronous URBs, refreshing the fields
 * that an unlink may have clobbered.  Returns 0 on success or the
 * usb_submit_urb() error of the first URB that failed.
 */
static int usbduxsub_submit_OutURBs(struct usbduxsub *usbduxsub)
{
	int i, errFlag;

	if (!usbduxsub)
		return -EFAULT;

	for (i = 0; i < usbduxsub->numOfOutBuffers; i++) {
		dev_dbg(&usbduxsub->interface->dev,
			"comedi_: submitting out-urb[%d]\n", i);
		/* in case of a resubmission after an unlink... */
		usbduxsub->urbOut[i]->context = usbduxsub->comedidev;
		usbduxsub->urbOut[i]->dev = usbduxsub->usbdev;
		usbduxsub->urbOut[i]->status = 0;
		usbduxsub->urbOut[i]->transfer_flags = URB_ISO_ASAP;
		errFlag = usb_submit_urb(usbduxsub->urbOut[i], GFP_ATOMIC);
		if (errFlag) {
			dev_err(&usbduxsub->interface->dev,
				"comedi_: ao: usb_submit_urb(%d) error %d\n",
				i, errFlag);
			return errFlag;
		}
	}
	return 0;
}

/*
 * comedi "cmdtest" for the analog input subdevice: validates (and, where
 * needed, clips) an asynchronous acquisition command.  Follows the comedi
 * convention: returns 0 when the command is acceptable, otherwise the
 * number of the validation step (1..3) that found a problem.
 * NOTE(review): this_usbduxsub is not checked for NULL here, unlike the
 * ao variant below — presumably dev->private is guaranteed by attach;
 * confirm against the attach path.
 */
static int usbdux_ai_cmdtest(comedi_device *dev, comedi_subdevice *s,
			     comedi_cmd *cmd)
{
	int err = 0, tmp, i;
	unsigned int tmpTimer;
	struct usbduxsub *this_usbduxsub = dev->private;

	if (!(this_usbduxsub->probed))
		return -ENODEV;

	dev_dbg(&this_usbduxsub->interface->dev,
		"comedi%d: usbdux_ai_cmdtest\n", dev->minor);

	/* step 1: make sure triggers are valid */
	/* Only immediate triggers are allowed */
	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW | TRIG_INT;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;

	/* trigger should happen timed */
	tmp = cmd->scan_begin_src;
	/* start a new _scan_ with a timer */
	cmd->scan_begin_src &= TRIG_TIMER;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;

	/* scanning is continuous */
	tmp = cmd->convert_src;
	cmd->convert_src &= TRIG_NOW;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;

	/* issue a trigger when scan is finished and start a new scan */
	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;

	/* trigger at the end of count events or not, stop condition or not */
	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;

	if (err)
		return 1;

	/*
	 * step 2: make sure trigger sources are unique and mutually compatible
	 * note that mutual compatibility is not an issue here
	 */
	if (cmd->scan_begin_src != TRIG_FOLLOW &&
	    cmd->scan_begin_src != TRIG_EXT &&
	    cmd->scan_begin_src != TRIG_TIMER)
		err++;
	if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
		err++;

	if (err)
		return 2;

	/* step 3: make sure arguments are trivially compatible */
	if (cmd->start_arg != 0) {
		cmd->start_arg = 0;
		err++;
	}

	if (cmd->scan_begin_src == TRIG_FOLLOW) {
		/* internal trigger */
		if (cmd->scan_begin_arg != 0) {
			cmd->scan_begin_arg = 0;
			err++;
		}
	}

	if (cmd->scan_begin_src == TRIG_TIMER) {
		if (this_usbduxsub->high_speed) {
			/*
			 * In high speed mode microframes are possible.
			 * However, during one microframe we can roughly
			 * sample one channel. Thus, the more channels
			 * are in the channel list the more time we need.
			 */
			i = 1;
			/* find a power of 2 for the number of channels */
			while (i < (cmd->chanlist_len))
				i = i * 2;
			/* lower bound: one 125us microframe per channel */
			if (cmd->scan_begin_arg < (1000000 / 8 * i)) {
				cmd->scan_begin_arg = 1000000 / 8 * i;
				err++;
			}
			/* now calc the real sampling rate with all the
			 * rounding errors */
			tmpTimer =
			    ((unsigned int)(cmd->scan_begin_arg / 125000)) *
			    125000;
			if (cmd->scan_begin_arg != tmpTimer) {
				cmd->scan_begin_arg = tmpTimer;
				err++;
			}
		} else {
			/* full speed */
			/* 1kHz scans every USB frame */
			if (cmd->scan_begin_arg < 1000000) {
				cmd->scan_begin_arg = 1000000;
				err++;
			}
			/*
			 * calc the real sampling rate with the rounding errors
			 */
			tmpTimer = ((unsigned int)(cmd->scan_begin_arg /
						   1000000)) * 1000000;
			if (cmd->scan_begin_arg != tmpTimer) {
				cmd->scan_begin_arg = tmpTimer;
				err++;
			}
		}
	}
	/* the same argument */
	if (cmd->scan_end_arg != cmd->chanlist_len) {
		cmd->scan_end_arg = cmd->chanlist_len;
		err++;
	}
	if (cmd->stop_src == TRIG_COUNT) {
		/* any count is allowed */
	} else {
		/* TRIG_NONE */
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
	}

	if (err)
		return 3;

	return 0;
}

/*
 * creates the ADC command for the MAX1271
 * range is the range value from comedi
 */
static int8_t create_adc_command(unsigned int chan, int range)
{
	/* p and r are derived from the comedi range index; the exact bit
	 * semantics are defined by the MAX1271 command word layout */
	int8_t p = (range <= 1);
	int8_t r = ((range % 2) == 0);
	return (chan << 4) | ((p == 1) << 2) | ((r == 1) << 3);
}

/* bulk transfers to usbdux: command codes understood by the firmware */
#define SENDADCOMMANDS 0
#define SENDDACOMMANDS 1
#define SENDDIOCONFIGCOMMAND 2
#define SENDDIOBITSCOMMAND 3
#define SENDSINGLEAD 4
#define READCOUNTERCOMMAND 5
#define WRITECOUNTERCOMMAND 6
#define SENDPWMON 7
#define SENDPWMOFF 8

/*
 * Send the dux_commands packet to the firmware via the bulk OUT
 * endpoint.  cmd_type is one of the command codes defined above and is
 * placed in byte 0 of the packet.
 */
static int send_dux_commands(struct usbduxsub *this_usbduxsub, int cmd_type)
{
	int result, nsent;

	this_usbduxsub->dux_commands[0] = cmd_type;
#ifdef NOISY_DUX_DEBUGBUG
	printk(KERN_DEBUG "comedi%d: usbdux: dux_commands: ",
	       this_usbduxsub->comedidev->minor);
	for (result = 0; result < SIZEOFDUXBUFFER; result++)
		printk(" %02x", this_usbduxsub->dux_commands[result]);
	printk("\n");
#endif
	result =
	    usb_bulk_msg(this_usbduxsub->usbdev,
			 usb_sndbulkpipe(this_usbduxsub->usbdev,
					 COMMAND_OUT_EP),
			 this_usbduxsub->dux_commands, SIZEOFDUXBUFFER,
			 &nsent, 10);
	if (result < 0)
		dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
			"could not transmit dux_command to the usb-device, "
			"err=%d\n", this_usbduxsub->comedidev->minor, result);
	return result;
}

/*
 * Read a reply packet from the firmware via the bulk IN endpoint and
 * verify that it answers the expected command code.  Retries up to
 * RETRIES times; returns the (non-negative) usb_bulk_msg() result on a
 * matching reply, a negative errno on USB error, or -EFAULT if the
 * firmware kept answering with a different command code.
 */
static int receive_dux_commands(struct usbduxsub *this_usbduxsub, int command)
{
	int result = (-EFAULT);
	int nrec;
	int i;

	for (i = 0; i < RETRIES; i++) {
		result = usb_bulk_msg(this_usbduxsub->usbdev,
				      usb_rcvbulkpipe(this_usbduxsub->usbdev,
						      COMMAND_IN_EP),
				      this_usbduxsub->insnBuffer, SIZEINSNBUF,
				      &nrec, 1);
		if (result < 0) {
			dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
				"insn: USB error %d while receiving DUX command"
				"\n", this_usbduxsub->comedidev->minor,
				result);
			return result;
		}
		/* word 0 of the reply echoes the command code */
		if (le16_to_cpu(this_usbduxsub->insnBuffer[0]) == command)
			return result;
	}
	/* this is only reached if the data has been requested a couple of
	 * times */
	dev_err(&this_usbduxsub->interface->dev, "comedi%d: insn: "
		"wrong data returned from firmware: want cmd %d, got cmd %d.\n",
		this_usbduxsub->comedidev->minor, command,
		le16_to_cpu(this_usbduxsub->insnBuffer[0]));
	return -EFAULT;
}

/*
 * Internal-trigger handler for the AI subdevice: starts the acquisition
 * that usbdux_ai_cmd() prepared with start_src == TRIG_INT.
 * Returns 1 on success (comedi inttrig convention) or a negative errno.
 */
static int usbdux_ai_inttrig(comedi_device *dev, comedi_subdevice *s,
			     unsigned int trignum)
{
	int ret;
	struct usbduxsub *this_usbduxsub = dev->private;

	if (!this_usbduxsub)
		return -EFAULT;

	down(&this_usbduxsub->sem);
	if (!(this_usbduxsub->probed)) {
		up(&this_usbduxsub->sem);
		return -ENODEV;
	}
	dev_dbg(&this_usbduxsub->interface->dev,
		"comedi%d: usbdux_ai_inttrig\n", dev->minor);

	if (trignum != 0) {
		dev_err(&this_usbduxsub->interface->dev,
			"comedi%d: usbdux_ai_inttrig: invalid trignum\n",
			dev->minor);
		up(&this_usbduxsub->sem);
		return -EINVAL;
	}
	if (!(this_usbduxsub->ai_cmd_running)) {
		this_usbduxsub->ai_cmd_running = 1;
		ret = usbduxsub_submit_InURBs(this_usbduxsub);
		if (ret < 0) {
			dev_err(&this_usbduxsub->interface->dev,
				"comedi%d: usbdux_ai_inttrig: "
				"urbSubmit: err=%d\n", dev->minor, ret);
			this_usbduxsub->ai_cmd_running = 0;
			up(&this_usbduxsub->sem);
			return ret;
		}
		/* one-shot trigger: disarm it */
		s->async->inttrig = NULL;
	} else {
		dev_err(&this_usbduxsub->interface->dev,
			"comedi%d: ai_inttrig but acqu is already running\n",
			dev->minor);
	}
	up(&this_usbduxsub->sem);
	return 1;
}

/*
 * comedi "cmd" for the AI subdevice: programs the channel list into the
 * firmware, derives the down-counter (ai_timer) from scan_begin_arg and
 * either starts the isochronous IN URBs now (TRIG_NOW) or arms the
 * internal trigger (TRIG_INT).
 */
static int usbdux_ai_cmd(comedi_device *dev, comedi_subdevice *s)
{
	comedi_cmd *cmd = &s->async->cmd;
	unsigned int chan, range;
	int i, ret;
	struct usbduxsub *this_usbduxsub = dev->private;
	int result;

	if (!this_usbduxsub)
		return -EFAULT;

	dev_dbg(&this_usbduxsub->interface->dev,
		"comedi%d: usbdux_ai_cmd\n", dev->minor);

	/* block other CPUs from starting an ai_cmd */
	down(&this_usbduxsub->sem);

	if (!(this_usbduxsub->probed)) {
		up(&this_usbduxsub->sem);
		return -ENODEV;
	}
	if (this_usbduxsub->ai_cmd_running) {
		dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
			"ai_cmd not possible. Another ai_cmd is running.\n",
			dev->minor);
		up(&this_usbduxsub->sem);
		return -EBUSY;
	}
	/* set current channel of the running acquisition to zero */
	s->async->cur_chan = 0;

	/* byte 1 = number of channels, bytes 2.. = one ADC command each */
	this_usbduxsub->dux_commands[1] = cmd->chanlist_len;
	for (i = 0; i < cmd->chanlist_len; ++i) {
		chan = CR_CHAN(cmd->chanlist[i]);
		range = CR_RANGE(cmd->chanlist[i]);
		if (i >= NUMCHANNELS) {
			dev_err(&this_usbduxsub->interface->dev,
				"comedi%d: channel list too long\n",
				dev->minor);
			break;
		}
		this_usbduxsub->dux_commands[i + 2] =
		    create_adc_command(chan, range);
	}

	dev_dbg(&this_usbduxsub->interface->dev,
		"comedi %d: sending commands to the usb device: size=%u\n",
		dev->minor, NUMCHANNELS);

	result = send_dux_commands(this_usbduxsub, SENDADCOMMANDS);
	if (result < 0) {
		up(&this_usbduxsub->sem);
		return result;
	}
	if (this_usbduxsub->high_speed) {
		/*
		 * every channel gets a time window of 125us. Thus, if we
		 * sample all 8 channels we need 1ms. If we sample only one
		 * channel we need only 125us
		 */
		this_usbduxsub->ai_interval = 1;
		/* find a power of 2 for the interval */
		while ((this_usbduxsub->ai_interval) < (cmd->chanlist_len)) {
			this_usbduxsub->ai_interval =
			    (this_usbduxsub->ai_interval) * 2;
		}
		this_usbduxsub->ai_timer = cmd->scan_begin_arg / (125000 *
								  (this_usbduxsub->
								   ai_interval));
	} else {
		/* interval always 1ms */
		this_usbduxsub->ai_interval = 1;
		this_usbduxsub->ai_timer = cmd->scan_begin_arg / 1000000;
	}
	if (this_usbduxsub->ai_timer < 1) {
		dev_err(&this_usbduxsub->interface->dev, "comedi%d: ai_cmd: "
			"timer=%d, scan_begin_arg=%d. "
			"Not properly tested by cmdtest?\n", dev->minor,
			this_usbduxsub->ai_timer, cmd->scan_begin_arg);
		up(&this_usbduxsub->sem);
		return -EINVAL;
	}
	this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;

	if (cmd->stop_src == TRIG_COUNT) {
		/* data arrives as one packet */
		this_usbduxsub->ai_sample_count = cmd->stop_arg;
		this_usbduxsub->ai_continous = 0;
	} else {
		/* continuous acquisition */
		this_usbduxsub->ai_continous = 1;
		this_usbduxsub->ai_sample_count = 0;
	}

	if (cmd->start_src == TRIG_NOW) {
		/* enable this acquisition operation */
		this_usbduxsub->ai_cmd_running = 1;
		ret = usbduxsub_submit_InURBs(this_usbduxsub);
		if (ret < 0) {
			this_usbduxsub->ai_cmd_running = 0;
			/* fixme: unlink here?? */
			up(&this_usbduxsub->sem);
			return ret;
		}
		s->async->inttrig = NULL;
	} else {
		/* TRIG_INT */
		/* don't enable the acquisition operation */
		/* wait for an internal signal */
		s->async->inttrig = usbdux_ai_inttrig;
	}
	up(&this_usbduxsub->sem);
	return 0;
}

/* Mode 0 is used to get a single conversion on demand */
static int usbdux_ai_insn_read(comedi_device *dev, comedi_subdevice *s,
			       comedi_insn *insn, lsampl_t *data)
{
	int i;
	lsampl_t one = 0;
	int chan, range;
	int err;
	struct usbduxsub *this_usbduxsub = dev->private;

	if (!this_usbduxsub)
		return 0;

	dev_dbg(&this_usbduxsub->interface->dev,
		"comedi%d: ai_insn_read, insn->n=%d, insn->subdev=%d\n",
		dev->minor, insn->n, insn->subdev);

	down(&this_usbduxsub->sem);
	if (!(this_usbduxsub->probed)) {
		up(&this_usbduxsub->sem);
		return -ENODEV;
	}
	if (this_usbduxsub->ai_cmd_running) {
		dev_err(&this_usbduxsub->interface->dev,
			"comedi%d: ai_insn_read not possible. "
			"Async Command is running.\n", dev->minor);
		up(&this_usbduxsub->sem);
		return 0;
	}

	/* sample one channel */
	chan = CR_CHAN(insn->chanspec);
	range = CR_RANGE(insn->chanspec);
	/* set command for the first channel */
	this_usbduxsub->dux_commands[1] = create_adc_command(chan, range);

	/* adc commands */
	err = send_dux_commands(this_usbduxsub, SENDSINGLEAD);
	if (err < 0) {
		up(&this_usbduxsub->sem);
		return err;
	}
	for (i = 0; i < insn->n; i++) {
		err = receive_dux_commands(this_usbduxsub, SENDSINGLEAD);
		if (err < 0) {
			up(&this_usbduxsub->sem);
			return 0;
		}
		one = le16_to_cpu(this_usbduxsub->insnBuffer[1]);
		/* bipolar ranges: convert two's complement to offset binary */
		if (CR_RANGE(insn->chanspec) <= 1)
			one = one ^ 0x800;
		data[i] = one;
	}
	up(&this_usbduxsub->sem);
	return i;
}

/************************************/
/* analog out */

/* Return the last value written to the given AO channel (from the
 * software shadow buffer; the hardware is not read back). */
static int usbdux_ao_insn_read(comedi_device *dev, comedi_subdevice *s,
			       comedi_insn *insn, lsampl_t *data)
{
	int i;
	int chan = CR_CHAN(insn->chanspec);
	struct usbduxsub *this_usbduxsub = dev->private;

	if (!this_usbduxsub)
		return -EFAULT;

	down(&this_usbduxsub->sem);
	if (!(this_usbduxsub->probed)) {
		up(&this_usbduxsub->sem);
return -ENODEV; } for (i = 0; i < insn->n; i++) data[i] = this_usbduxsub->outBuffer[chan]; up(&this_usbduxsub->sem); return i; } static int usbdux_ao_insn_write(comedi_device *dev, comedi_subdevice *s, comedi_insn *insn, lsampl_t *data) { int i, err; int chan = CR_CHAN(insn->chanspec); struct usbduxsub *this_usbduxsub = dev->private; if (!this_usbduxsub) return -EFAULT; dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: ao_insn_write\n", dev->minor); down(&this_usbduxsub->sem); if (!(this_usbduxsub->probed)) { up(&this_usbduxsub->sem); return -ENODEV; } if (this_usbduxsub->ao_cmd_running) { dev_err(&this_usbduxsub->interface->dev, "comedi%d: ao_insn_write: " "ERROR: asynchronous ao_cmd is running\n", dev->minor); up(&this_usbduxsub->sem); return 0; } for (i = 0; i < insn->n; i++) { dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: ao_insn_write: data[chan=%d,i=%d]=%d\n", dev->minor, chan, i, data[i]); /* number of channels: 1 */ this_usbduxsub->dux_commands[1] = 1; /* one 16 bit value */ *((int16_t *) (this_usbduxsub->dux_commands + 2)) = cpu_to_le16(data[i]); this_usbduxsub->outBuffer[chan] = data[i]; /* channel number */ this_usbduxsub->dux_commands[4] = (chan << 6); err = send_dux_commands(this_usbduxsub, SENDDACOMMANDS); if (err < 0) { up(&this_usbduxsub->sem); return err; } } up(&this_usbduxsub->sem); return i; } static int usbdux_ao_inttrig(comedi_device *dev, comedi_subdevice *s, unsigned int trignum) { int ret; struct usbduxsub *this_usbduxsub = dev->private; if (!this_usbduxsub) return -EFAULT; down(&this_usbduxsub->sem); if (!(this_usbduxsub->probed)) { up(&this_usbduxsub->sem); return -ENODEV; } if (trignum != 0) { dev_err(&this_usbduxsub->interface->dev, "comedi%d: usbdux_ao_inttrig: invalid trignum\n", dev->minor); return -EINVAL; } if (!(this_usbduxsub->ao_cmd_running)) { this_usbduxsub->ao_cmd_running = 1; ret = usbduxsub_submit_OutURBs(this_usbduxsub); if (ret < 0) { dev_err(&this_usbduxsub->interface->dev, "comedi%d: usbdux_ao_inttrig: 
submitURB: " "err=%d\n", dev->minor, ret); this_usbduxsub->ao_cmd_running = 0; up(&this_usbduxsub->sem); return ret; } s->async->inttrig = NULL; } else { dev_err(&this_usbduxsub->interface->dev, "comedi%d: ao_inttrig but acqu is already running.\n", dev->minor); } up(&this_usbduxsub->sem); return 1; } static int usbdux_ao_cmdtest(comedi_device *dev, comedi_subdevice *s, comedi_cmd *cmd) { int err = 0, tmp; struct usbduxsub *this_usbduxsub = dev->private; if (!this_usbduxsub) return -EFAULT; if (!(this_usbduxsub->probed)) return -ENODEV; dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: usbdux_ao_cmdtest\n", dev->minor); /* make sure triggers are valid */ /* Only immediate triggers are allowed */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW | TRIG_INT; if (!cmd->start_src || tmp != cmd->start_src) err++; /* trigger should happen timed */ tmp = cmd->scan_begin_src; /* just now we scan also in the high speed mode every frame */ /* this is due to ehci driver limitations */ if (0) { /* (this_usbduxsub->high_speed) */ /* start immidiately a new scan */ /* the sampling rate is set by the coversion rate */ cmd->scan_begin_src &= TRIG_FOLLOW; } else { /* start a new scan (output at once) with a timer */ cmd->scan_begin_src &= TRIG_TIMER; } if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; /* scanning is continous */ tmp = cmd->convert_src; /* we always output at 1kHz just now all channels at once */ if (0) { /* (this_usbduxsub->high_speed) */ /* * in usb-2.0 only one conversion it tranmitted but with 8kHz/n */ cmd->convert_src &= TRIG_TIMER; } else { /* all conversion events happen simultaneously with a rate of * 1kHz/n */ cmd->convert_src &= TRIG_NOW; } if (!cmd->convert_src || tmp != cmd->convert_src) err++; /* issue a trigger when scan is finished and start a new scan */ tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; /* trigger at the end of count events or not, stop condition or not 
*/ tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* * step 2: make sure trigger sources are unique and mutually compatible * note that mutual compatiblity is not an issue here */ if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->scan_begin_src != TRIG_EXT && cmd->scan_begin_src != TRIG_TIMER) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_src == TRIG_FOLLOW) { /* internal trigger */ if (cmd->scan_begin_arg != 0) { cmd->scan_begin_arg = 0; err++; } } if (cmd->scan_begin_src == TRIG_TIMER) { /* timer */ if (cmd->scan_begin_arg < 1000000) { cmd->scan_begin_arg = 1000000; err++; } } /* not used now, is for later use */ if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg < 125000) { cmd->convert_arg = 125000; err++; } } /* the same argument */ if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_COUNT) { /* any count is allowed */ } else { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: err=%d, " "scan_begin_src=%d, scan_begin_arg=%d, convert_src=%d, " "convert_arg=%d\n", dev->minor, err, cmd->scan_begin_src, cmd->scan_begin_arg, cmd->convert_src, cmd->convert_arg); if (err) return 3; return 0; } static int usbdux_ao_cmd(comedi_device *dev, comedi_subdevice *s) { comedi_cmd *cmd = &s->async->cmd; unsigned int chan, gain; int i, ret; struct usbduxsub *this_usbduxsub = dev->private; if (!this_usbduxsub) return -EFAULT; down(&this_usbduxsub->sem); if (!(this_usbduxsub->probed)) { up(&this_usbduxsub->sem); return -ENODEV; } dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: %s\n", dev->minor, __func__); /* set current channel of the running aquisition to zero */ 
	s->async->cur_chan = 0;
	for (i = 0; i < cmd->chanlist_len; ++i) {
		chan = CR_CHAN(cmd->chanlist[i]);
		gain = CR_RANGE(cmd->chanlist[i]);
		if (i >= NUMOUTCHANNELS) {
			dev_err(&this_usbduxsub->interface->dev,
				"comedi%d: %s: channel list too long\n",
				dev->minor, __func__);
			break;
		}
		this_usbduxsub->dac_commands[i] = (chan << 6);
		dev_dbg(&this_usbduxsub->interface->dev,
			"comedi%d: dac command for ch %d is %x\n",
			dev->minor, i, this_usbduxsub->dac_commands[i]);
	}

	/* we count in steps of 1ms (125us) */
	/* 125us mode not used yet */
	if (0) {		/* (this_usbduxsub->high_speed) */
		/* 125us */
		/* timing of the conversion itself: every 125 us */
		this_usbduxsub->ao_timer = cmd->convert_arg / 125000;
	} else {
		/* 1ms */
		/* timing of the scan: we get all channels at once */
		this_usbduxsub->ao_timer = cmd->scan_begin_arg / 1000000;
		dev_dbg(&this_usbduxsub->interface->dev,
			"comedi%d: scan_begin_src=%d, scan_begin_arg=%d, "
			"convert_src=%d, convert_arg=%d\n", dev->minor,
			cmd->scan_begin_src, cmd->scan_begin_arg,
			cmd->convert_src, cmd->convert_arg);
		dev_dbg(&this_usbduxsub->interface->dev,
			"comedi%d: ao_timer=%d (ms)\n", dev->minor,
			this_usbduxsub->ao_timer);
		if (this_usbduxsub->ao_timer < 1) {
			dev_err(&this_usbduxsub->interface->dev,
				"comedi%d: usbdux: ao_timer=%d, "
				"scan_begin_arg=%d. "
				"Not properly tested by cmdtest?\n",
				dev->minor, this_usbduxsub->ao_timer,
				cmd->scan_begin_arg);
			up(&this_usbduxsub->sem);
			return -EINVAL;
		}
	}
	this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;

	if (cmd->stop_src == TRIG_COUNT) {
		/* not continuous */
		/* counter */
		/* high speed also scans everything at once */
		if (0) {	/* (this_usbduxsub->high_speed) */
			this_usbduxsub->ao_sample_count =
			    (cmd->stop_arg) * (cmd->scan_end_arg);
		} else {
			/* there's no scan as the scan has been */
			/* perf inside the FX2 */
			/* data arrives as one packet */
			this_usbduxsub->ao_sample_count = cmd->stop_arg;
		}
		this_usbduxsub->ao_continous = 0;
	} else {
		/* continuous acquisition */
		this_usbduxsub->ao_continous = 1;
		this_usbduxsub->ao_sample_count = 0;
	}

	if (cmd->start_src == TRIG_NOW) {
		/* enable this acquisition operation */
		this_usbduxsub->ao_cmd_running = 1;
		ret = usbduxsub_submit_OutURBs(this_usbduxsub);
		if (ret < 0) {
			this_usbduxsub->ao_cmd_running = 0;
			/* fixme: unlink here?? */
			up(&this_usbduxsub->sem);
			return ret;
		}
		s->async->inttrig = NULL;
	} else {
		/* TRIG_INT */
		/* submit the urbs later */
		/* wait for an internal signal */
		s->async->inttrig = usbdux_ao_inttrig;
	}
	up(&this_usbduxsub->sem);
	return 0;
}

/*
 * Configure a DIO line as input or output, or query its direction.
 * Only the software io_bits mask is updated here; the firmware is told
 * later from insn_bits (see comment at the end).
 */
static int usbdux_dio_insn_config(comedi_device *dev, comedi_subdevice *s,
				  comedi_insn *insn, lsampl_t *data)
{
	int chan = CR_CHAN(insn->chanspec);

	/* The input or output configuration of each digital line is
	 * configured by a special insn_config instruction.  chanspec
	 * contains the channel to be changed, and data[0] contains the
	 * value COMEDI_INPUT or COMEDI_OUTPUT. */
	switch (data[0]) {
	case INSN_CONFIG_DIO_OUTPUT:
		s->io_bits |= 1 << chan;	/* 1 means Out */
		break;
	case INSN_CONFIG_DIO_INPUT:
		s->io_bits &= ~(1 << chan);
		break;
	case INSN_CONFIG_DIO_QUERY:
		data[1] =
		    (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
		break;
	default:
		return -EINVAL;
		break;
	}
	/* we don't tell the firmware here as it would take 8 frames */
	/* to submit the information. We do it in the insn_bits. */
	return insn->n;
}

/*
 * Write the DIO output bits (mask in data[0], values in data[1]) and
 * read back the digital input lines into data[1].
 */
static int usbdux_dio_insn_bits(comedi_device *dev, comedi_subdevice *s,
				comedi_insn *insn, lsampl_t *data)
{
	struct usbduxsub *this_usbduxsub = dev->private;
	int err;

	if (!this_usbduxsub)
		return -EFAULT;

	if (insn->n != 2)
		return -EINVAL;

	down(&this_usbduxsub->sem);

	if (!(this_usbduxsub->probed)) {
		up(&this_usbduxsub->sem);
		return -ENODEV;
	}

	/* The insn data is a mask in data[0] and the new data
	 * in data[1], each channel corresponding to a bit. */
	s->state &= ~data[0];
	s->state |= data[0] & data[1];
	this_usbduxsub->dux_commands[1] = s->io_bits;
	this_usbduxsub->dux_commands[2] = s->state;

	/* This command also tells the firmware to return */
	/* the digital input lines */
	err = send_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
	if (err < 0) {
		up(&this_usbduxsub->sem);
		return err;
	}
	err = receive_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
	if (err < 0) {
		up(&this_usbduxsub->sem);
		return err;
	}

	data[1] = le16_to_cpu(this_usbduxsub->insnBuffer[1]);
	up(&this_usbduxsub->sem);
	return 2;
}

/* reads the 4 counters, only two are used just now */
static int usbdux_counter_read(comedi_device *dev, comedi_subdevice *s,
			       comedi_insn *insn, lsampl_t *data)
{
	struct usbduxsub *this_usbduxsub = dev->private;
	int chan = insn->chanspec;
	int err;

	if (!this_usbduxsub)
		return -EFAULT;

	down(&this_usbduxsub->sem);

	if (!(this_usbduxsub->probed)) {
		up(&this_usbduxsub->sem);
		return -ENODEV;
	}

	err = send_dux_commands(this_usbduxsub, READCOUNTERCOMMAND);
	if (err < 0) {
		up(&this_usbduxsub->sem);
		return err;
	}

	err = receive_dux_commands(this_usbduxsub, READCOUNTERCOMMAND);
	if (err < 0) {
		up(&this_usbduxsub->sem);
		return err;
	}

	/* reply words 1..4 hold the counter values */
	data[0] = le16_to_cpu(this_usbduxsub->insnBuffer[chan + 1]);
	up(&this_usbduxsub->sem);
	return 1;
}

/* write a 16 bit value to the counter selected by insn->chanspec */
static int usbdux_counter_write(comedi_device *dev, comedi_subdevice *s,
				comedi_insn *insn, lsampl_t *data)
{
	struct usbduxsub *this_usbduxsub = dev->private;
	int err;

	if (!this_usbduxsub)
		return -EFAULT;

	down(&this_usbduxsub->sem);

	if (!(this_usbduxsub->probed)) {
		up(&this_usbduxsub->sem);
		return -ENODEV;
	}

	this_usbduxsub->dux_commands[1] = insn->chanspec;
	*((int16_t *) (this_usbduxsub->dux_commands + 2)) = cpu_to_le16(*data);

	err = send_dux_commands(this_usbduxsub, WRITECOUNTERCOMMAND);
	if (err < 0) {
		up(&this_usbduxsub->sem);
		return err;
	}

	up(&this_usbduxsub->sem);
	return 1;
}

static int usbdux_counter_config(comedi_device *dev, comedi_subdevice *s,
				 comedi_insn *insn, lsampl_t *data)
{
	/* nothing to do so far */
	return 2;
}

/***********************************/
/* PWM */

/* Kill the PWM URB.  err is never set, so the logged "res" is always 0. */
static int usbduxsub_unlink_PwmURBs(struct usbduxsub *usbduxsub_tmp)
{
	int err = 0;

	if (usbduxsub_tmp && usbduxsub_tmp->urbPwm) {
		if (usbduxsub_tmp->urbPwm)
			usb_kill_urb(usbduxsub_tmp->urbPwm);
		dev_dbg(&usbduxsub_tmp->interface->dev,
			"comedi: unlinked PwmURB: res=%d\n", err);
	}
	return err;
}

/* This cancels a running acquisition operation
 * in any context. */
static int usbdux_pwm_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
{
	int ret = 0;

	if (!this_usbduxsub)
		return -EFAULT;

	dev_dbg(&this_usbduxsub->interface->dev, "comedi: %s\n", __func__);
	if (do_unlink)
		ret = usbduxsub_unlink_PwmURBs(this_usbduxsub);

	this_usbduxsub->pwm_cmd_running = 0;

	return ret;
}

/* force unlink - is called by comedi */
static int usbdux_pwm_cancel(comedi_device *dev, comedi_subdevice *s)
{
	struct usbduxsub *this_usbduxsub = dev->private;
	int res = 0;

	/* unlink only if it is really running */
	res = usbdux_pwm_stop(this_usbduxsub, this_usbduxsub->pwm_cmd_running);

	dev_dbg(&this_usbduxsub->interface->dev,
		"comedi %d: sending pwm off command to the usb device.\n",
		dev->minor);
	res = send_dux_commands(this_usbduxsub, SENDPWMOFF);
	if (res < 0)
		return res;

	return res;
}

/* URB completion handler for the PWM bulk URB: resubmits the buffer as
 * long as pwm_cmd_running is set. */
static void usbduxsub_pwm_irq(struct urb *urb)
{
	int ret;
	struct usbduxsub *this_usbduxsub;
	comedi_device *this_comedidev;
	comedi_subdevice *s;

	/* printk(KERN_DEBUG "PWM: IRQ\n"); */

	/* the context variable points to the subdevice */
	this_comedidev = urb->context;
	/* the private
structure of the subdevice is struct usbduxsub */ this_usbduxsub = this_comedidev->private; s = this_comedidev->subdevices + SUBDEV_DA; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -ECONNABORTED: /* * after an unlink command, unplug, ... etc * no unlink needed here. Already shutting down. */ if (this_usbduxsub->pwm_cmd_running) usbdux_pwm_stop(this_usbduxsub, 0); return; default: /* a real error */ if (this_usbduxsub->pwm_cmd_running) { dev_err(&this_usbduxsub->interface->dev, "comedi_: Non-zero urb status received in " "pwm intr context: %d\n", urb->status); usbdux_pwm_stop(this_usbduxsub, 0); } return; } /* are we actually running? */ if (!(this_usbduxsub->pwm_cmd_running)) return; urb->transfer_buffer_length = this_usbduxsub->sizePwmBuf; urb->dev = this_usbduxsub->usbdev; urb->status = 0; if (this_usbduxsub->pwm_cmd_running) { ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { dev_err(&this_usbduxsub->interface->dev, "comedi_: pwm urb resubm failed in int-cont. " "ret=%d", ret); if (ret == EL2NSYNC) dev_err(&this_usbduxsub->interface->dev, "buggy USB host controller or bug in " "IRQ handling!\n"); /* don't do an unlink here */ usbdux_pwm_stop(this_usbduxsub, 0); } } } static int usbduxsub_submit_PwmURBs(struct usbduxsub *usbduxsub) { int errFlag; if (!usbduxsub) return -EFAULT; dev_dbg(&usbduxsub->interface->dev, "comedi_: submitting pwm-urb\n"); /* in case of a resubmission after an unlink... 
*/ usb_fill_bulk_urb(usbduxsub->urbPwm, usbduxsub->usbdev, usb_sndbulkpipe(usbduxsub->usbdev, PWM_EP), usbduxsub->urbPwm->transfer_buffer, usbduxsub->sizePwmBuf, usbduxsub_pwm_irq, usbduxsub->comedidev); errFlag = usb_submit_urb(usbduxsub->urbPwm, GFP_ATOMIC); if (errFlag) { dev_err(&usbduxsub->interface->dev, "comedi_: usbdux: pwm: usb_submit_urb error %d\n", errFlag); return errFlag; } return 0; } static int usbdux_pwm_period(comedi_device *dev, comedi_subdevice *s, lsampl_t period) { struct usbduxsub *this_usbduxsub = dev->private; int fx2delay = 255; if (period < MIN_PWM_PERIOD) { dev_err(&this_usbduxsub->interface->dev, "comedi%d: illegal period setting for pwm.\n", dev->minor); return -EAGAIN; } else { fx2delay = period / ((int)(6*512*(1.0/0.033))) - 6; if (fx2delay > 255) { dev_err(&this_usbduxsub->interface->dev, "comedi%d: period %d for pwm is too low.\n", dev->minor, period); return -EAGAIN; } } this_usbduxsub->pwmDelay = fx2delay; this_usbduxsub->pwmPeriod = period; dev_dbg(&this_usbduxsub->interface->dev, "%s: frequ=%d, period=%d\n", __func__, period, fx2delay); return 0; } /* is called from insn so there's no need to do all the sanity checks */ static int usbdux_pwm_start(comedi_device *dev, comedi_subdevice *s) { int ret, i; struct usbduxsub *this_usbduxsub = dev->private; dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: %s\n", dev->minor, __func__); if (this_usbduxsub->pwm_cmd_running) { /* already running */ return 0; } this_usbduxsub->dux_commands[1] = ((int8_t) this_usbduxsub->pwmDelay); ret = send_dux_commands(this_usbduxsub, SENDPWMON); if (ret < 0) return ret; /* initalise the buffer */ for (i = 0; i < this_usbduxsub->sizePwmBuf; i++) ((char *)(this_usbduxsub->urbPwm->transfer_buffer))[i] = 0; this_usbduxsub->pwm_cmd_running = 1; ret = usbduxsub_submit_PwmURBs(this_usbduxsub); if (ret < 0) { this_usbduxsub->pwm_cmd_running = 0; return ret; } return 0; } /* generates the bit pattern for PWM with the optional sign bit */ static int 
usbdux_pwm_pattern(comedi_device *dev, comedi_subdevice *s, int channel, lsampl_t value, lsampl_t sign) { struct usbduxsub *this_usbduxsub = dev->private; int i, szbuf; char *pBuf; char pwm_mask; char sgn_mask; char c; if (!this_usbduxsub) return -EFAULT; /* this is the DIO bit which carries the PWM data */ pwm_mask = (1 << channel); /* this is the DIO bit which carries the optional direction bit */ sgn_mask = (16 << channel); /* this is the buffer which will be filled with the with bit */ /* pattern for one period */ szbuf = this_usbduxsub->sizePwmBuf; pBuf = (char *)(this_usbduxsub->urbPwm->transfer_buffer); for (i = 0; i < szbuf; i++) { c = *pBuf; /* reset bits */ c = c & (~pwm_mask); /* set the bit as long as the index is lower than the value */ if (i < value) c = c | pwm_mask; /* set the optional sign bit for a relay */ if (!sign) { /* positive value */ c = c & (~sgn_mask); } else { /* negative value */ c = c | sgn_mask; } *(pBuf++) = c; } return 1; } static int usbdux_pwm_write(comedi_device *dev, comedi_subdevice *s, comedi_insn *insn, lsampl_t *data) { struct usbduxsub *this_usbduxsub = dev->private; if (!this_usbduxsub) return -EFAULT; if ((insn->n) != 1) { /* * doesn't make sense to have more than one value here because * it would just overwrite the PWM buffer a couple of times */ return -EINVAL; } /* * the sign is set via a special INSN only, this gives us 8 bits for * normal operation * relay sign 0 by default */ return usbdux_pwm_pattern(dev, s, CR_CHAN(insn->chanspec), data[0], 0); } static int usbdux_pwm_read(comedi_device *x1, comedi_subdevice *x2, comedi_insn *x3, lsampl_t *x4) { /* not needed */ return -EINVAL; }; /* switches on/off PWM */ static int usbdux_pwm_config(comedi_device *dev, comedi_subdevice *s, comedi_insn *insn, lsampl_t *data) { struct usbduxsub *this_usbduxsub = dev->private; switch (data[0]) { case INSN_CONFIG_ARM: /* switch it on */ dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: %s: pwm on\n", dev->minor, __func__); /* * if 
not zero the PWM is limited to a certain time which is * not supported here */ if (data[1] != 0) return -EINVAL; return usbdux_pwm_start(dev, s); case INSN_CONFIG_DISARM: dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: %s: pwm off\n", dev->minor, __func__); return usbdux_pwm_cancel(dev, s); case INSN_CONFIG_GET_PWM_STATUS: /* * to check if the USB transmission has failed or in case PWM * was limited to n cycles to check if it has terminated */ data[1] = this_usbduxsub->pwm_cmd_running; return 0; case INSN_CONFIG_PWM_SET_PERIOD: dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: %s: setting period\n", dev->minor, __func__); return usbdux_pwm_period(dev, s, data[1]); case INSN_CONFIG_PWM_GET_PERIOD: data[1] = this_usbduxsub->pwmPeriod; return 0; case INSN_CONFIG_PWM_SET_H_BRIDGE: /* value in the first byte and the sign in the second for a relay */ return usbdux_pwm_pattern(dev, s, /* the channel number */ CR_CHAN(insn->chanspec), /* actual PWM data */ data[1], /* just a sign */ (data[2] != 0)); case INSN_CONFIG_PWM_GET_H_BRIDGE: /* values are not kept in this driver, nothing to return here */ return -EINVAL; } return -EINVAL; } /* end of PWM */ /*****************************************************************/ static void tidy_up(struct usbduxsub *usbduxsub_tmp) { int i; if (!usbduxsub_tmp) return; dev_dbg(&usbduxsub_tmp->interface->dev, "comedi_: tiding up\n"); /* shows the usb subsystem that the driver is down */ if (usbduxsub_tmp->interface) usb_set_intfdata(usbduxsub_tmp->interface, NULL); usbduxsub_tmp->probed = 0; if (usbduxsub_tmp->urbIn) { if (usbduxsub_tmp->ai_cmd_running) { usbduxsub_tmp->ai_cmd_running = 0; usbduxsub_unlink_InURBs(usbduxsub_tmp); } for (i = 0; i < usbduxsub_tmp->numOfInBuffers; i++) { kfree(usbduxsub_tmp->urbIn[i]->transfer_buffer); usbduxsub_tmp->urbIn[i]->transfer_buffer = NULL; usb_kill_urb(usbduxsub_tmp->urbIn[i]); usb_free_urb(usbduxsub_tmp->urbIn[i]); usbduxsub_tmp->urbIn[i] = NULL; } kfree(usbduxsub_tmp->urbIn); 
usbduxsub_tmp->urbIn = NULL; } if (usbduxsub_tmp->urbOut) { if (usbduxsub_tmp->ao_cmd_running) { usbduxsub_tmp->ao_cmd_running = 0; usbduxsub_unlink_OutURBs(usbduxsub_tmp); } for (i = 0; i < usbduxsub_tmp->numOfOutBuffers; i++) { if (usbduxsub_tmp->urbOut[i]->transfer_buffer) { kfree(usbduxsub_tmp->urbOut[i]-> transfer_buffer); usbduxsub_tmp->urbOut[i]->transfer_buffer = NULL; } if (usbduxsub_tmp->urbOut[i]) { usb_kill_urb(usbduxsub_tmp->urbOut[i]); usb_free_urb(usbduxsub_tmp->urbOut[i]); usbduxsub_tmp->urbOut[i] = NULL; } } kfree(usbduxsub_tmp->urbOut); usbduxsub_tmp->urbOut = NULL; } if (usbduxsub_tmp->urbPwm) { if (usbduxsub_tmp->pwm_cmd_running) { usbduxsub_tmp->pwm_cmd_running = 0; usbduxsub_unlink_PwmURBs(usbduxsub_tmp); } kfree(usbduxsub_tmp->urbPwm->transfer_buffer); usbduxsub_tmp->urbPwm->transfer_buffer = NULL; usb_kill_urb(usbduxsub_tmp->urbPwm); usb_free_urb(usbduxsub_tmp->urbPwm); usbduxsub_tmp->urbPwm = NULL; } kfree(usbduxsub_tmp->inBuffer); usbduxsub_tmp->inBuffer = NULL; kfree(usbduxsub_tmp->insnBuffer); usbduxsub_tmp->insnBuffer = NULL; kfree(usbduxsub_tmp->inBuffer); usbduxsub_tmp->inBuffer = NULL; kfree(usbduxsub_tmp->dac_commands); usbduxsub_tmp->dac_commands = NULL; kfree(usbduxsub_tmp->dux_commands); usbduxsub_tmp->dux_commands = NULL; usbduxsub_tmp->ai_cmd_running = 0; usbduxsub_tmp->ao_cmd_running = 0; usbduxsub_tmp->pwm_cmd_running = 0; } static unsigned hex2unsigned(char *h) { unsigned hi, lo; if (h[0] > '9') hi = h[0] - 'A' + 0x0a; else hi = h[0] - '0'; if (h[1] > '9') lo = h[1] - 'A' + 0x0a; else lo = h[1] - '0'; return hi * 0x10 + lo; } /* for FX2 */ #define FIRMWARE_MAX_LEN 0x2000 /* taken from David Brownell's fxload and adjusted for this driver */ static int read_firmware(struct usbduxsub *usbduxsub, void *firmwarePtr, long size) { struct device *dev = &usbduxsub->interface->dev; int i = 0; unsigned char *fp = (char *)firmwarePtr; unsigned char *firmwareBinary = NULL; int res = 0; int maxAddr = 0; firmwareBinary = 
kzalloc(FIRMWARE_MAX_LEN, GFP_KERNEL); if (!firmwareBinary) { dev_err(dev, "comedi_: mem alloc for firmware failed\n"); return -ENOMEM; } for (;;) { char buf[256], *cp; char type; int len; int idx, off; int j = 0; /* get one line */ while ((i < size) && (fp[i] != 13) && (fp[i] != 10)) { buf[j] = fp[i]; i++; j++; if (j >= sizeof(buf)) { dev_err(dev, "comedi_: bogus firmware file!\n"); return -1; } } /* get rid of LF/CR/... */ while ((i < size) && ((fp[i] == 13) || (fp[i] == 10) || (fp[i] == 0))) { i++; } buf[j] = 0; /* dev_dbg(dev, "comedi_: buf=%s\n", buf); */ /* * EXTENSION: * "# comment-till-end-of-line", for copyrights etc */ if (buf[0] == '#') continue; if (buf[0] != ':') { dev_err(dev, "comedi_: upload: not an ihex record: %s", buf); return -EFAULT; } /* Read the length field (up to 16 bytes) */ len = hex2unsigned(buf + 1); /* Read the target offset */ off = (hex2unsigned(buf + 3) * 0x0100) + hex2unsigned(buf + 5); if ((off + len) > maxAddr) maxAddr = off + len; if (maxAddr >= FIRMWARE_MAX_LEN) { dev_err(dev, "comedi_: firmware upload goes " "beyond FX2 RAM boundaries.\n"); return -EFAULT; } /* dev_dbg(dev, "comedi_: off=%x, len=%x:\n", off, len); */ /* Read the record type */ type = hex2unsigned(buf + 7); /* If this is an EOF record, then make it so. 
*/ if (type == 1) break; if (type != 0) { dev_err(dev, "comedi_: unsupported record type: %u\n", type); return -EFAULT; } for (idx = 0, cp = buf + 9; idx < len; idx += 1, cp += 2) { firmwareBinary[idx + off] = hex2unsigned(cp); /*printk("%02x ",firmwareBinary[idx+off]); */ } /*printk("\n"); */ if (i >= size) { dev_err(dev, "comedi_: unexpected end of hex file\n"); break; } } res = firmwareUpload(usbduxsub, firmwareBinary, maxAddr + 1); kfree(firmwareBinary); return res; } /* allocate memory for the urbs and initialise them */ static int usbduxsub_probe(struct usb_interface *uinterf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(uinterf); struct device *dev = &uinterf->dev; int i; int index; dev_dbg(dev, "comedi_: usbdux_: " "finding a free structure for the usb-device\n"); down(&start_stop_sem); /* look for a free place in the usbdux array */ index = -1; for (i = 0; i < NUMUSBDUX; i++) { if (!(usbduxsub[i].probed)) { index = i; break; } } /* no more space */ if (index == -1) { dev_err(dev, "Too many usbdux-devices connected.\n"); up(&start_stop_sem); return -EMFILE; } dev_dbg(dev, "comedi_: usbdux: " "usbduxsub[%d] is ready to connect to comedi.\n", index); init_MUTEX(&(usbduxsub[index].sem)); /* save a pointer to the usb device */ usbduxsub[index].usbdev = udev; /* 2.6: save the interface itself */ usbduxsub[index].interface = uinterf; /* get the interface number from the interface */ usbduxsub[index].ifnum = uinterf->altsetting->desc.bInterfaceNumber; /* hand the private data over to the usb subsystem */ /* will be needed for disconnect */ usb_set_intfdata(uinterf, &(usbduxsub[index])); dev_dbg(dev, "comedi_: usbdux: ifnum=%d\n", usbduxsub[index].ifnum); /* test if it is high speed (USB 2.0) */ usbduxsub[index].high_speed = (usbduxsub[index].usbdev->speed == USB_SPEED_HIGH); /* create space for the commands of the DA converter */ usbduxsub[index].dac_commands = kzalloc(NUMOUTCHANNELS, GFP_KERNEL); if 
(!usbduxsub[index].dac_commands) { dev_err(dev, "comedi_: usbdux: " "error alloc space for dac commands\n"); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } /* create space for the commands going to the usb device */ usbduxsub[index].dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL); if (!usbduxsub[index].dux_commands) { dev_err(dev, "comedi_: usbdux: " "error alloc space for dac commands\n"); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } /* create space for the in buffer and set it to zero */ usbduxsub[index].inBuffer = kzalloc(SIZEINBUF, GFP_KERNEL); if (!(usbduxsub[index].inBuffer)) { dev_err(dev, "comedi_: usbdux: " "could not alloc space for inBuffer\n"); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } /* create space of the instruction buffer */ usbduxsub[index].insnBuffer = kzalloc(SIZEINSNBUF, GFP_KERNEL); if (!(usbduxsub[index].insnBuffer)) { dev_err(dev, "comedi_: usbdux: " "could not alloc space for insnBuffer\n"); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } /* create space for the outbuffer */ usbduxsub[index].outBuffer = kzalloc(SIZEOUTBUF, GFP_KERNEL); if (!(usbduxsub[index].outBuffer)) { dev_err(dev, "comedi_: usbdux: " "could not alloc space for outBuffer\n"); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } /* setting to alternate setting 3: enabling iso ep and bulk ep. 
*/ i = usb_set_interface(usbduxsub[index].usbdev, usbduxsub[index].ifnum, 3); if (i < 0) { dev_err(dev, "comedi_: usbdux%d: " "could not set alternate setting 3 in high speed.\n", index); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENODEV; } if (usbduxsub[index].high_speed) usbduxsub[index].numOfInBuffers = NUMOFINBUFFERSHIGH; else usbduxsub[index].numOfInBuffers = NUMOFINBUFFERSFULL; usbduxsub[index].urbIn = kzalloc(sizeof(struct urb *) * usbduxsub[index].numOfInBuffers, GFP_KERNEL); if (!(usbduxsub[index].urbIn)) { dev_err(dev, "comedi_: usbdux: Could not alloc. urbIn array\n"); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } for (i = 0; i < usbduxsub[index].numOfInBuffers; i++) { /* one frame: 1ms */ usbduxsub[index].urbIn[i] = usb_alloc_urb(1, GFP_KERNEL); if (usbduxsub[index].urbIn[i] == NULL) { dev_err(dev, "comedi_: usbdux%d: " "Could not alloc. urb(%d)\n", index, i); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } usbduxsub[index].urbIn[i]->dev = usbduxsub[index].usbdev; /* will be filled later with a pointer to the comedi-device */ /* and ONLY then the urb should be submitted */ usbduxsub[index].urbIn[i]->context = NULL; usbduxsub[index].urbIn[i]->pipe = usb_rcvisocpipe(usbduxsub[index].usbdev, ISOINEP); usbduxsub[index].urbIn[i]->transfer_flags = URB_ISO_ASAP; usbduxsub[index].urbIn[i]->transfer_buffer = kzalloc(SIZEINBUF, GFP_KERNEL); if (!(usbduxsub[index].urbIn[i]->transfer_buffer)) { dev_err(dev, "comedi_: usbdux%d: " "could not alloc. 
transb.\n", index); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } usbduxsub[index].urbIn[i]->complete = usbduxsub_ai_IsocIrq; usbduxsub[index].urbIn[i]->number_of_packets = 1; usbduxsub[index].urbIn[i]->transfer_buffer_length = SIZEINBUF; usbduxsub[index].urbIn[i]->iso_frame_desc[0].offset = 0; usbduxsub[index].urbIn[i]->iso_frame_desc[0].length = SIZEINBUF; } /* out */ if (usbduxsub[index].high_speed) usbduxsub[index].numOfOutBuffers = NUMOFOUTBUFFERSHIGH; else usbduxsub[index].numOfOutBuffers = NUMOFOUTBUFFERSFULL; usbduxsub[index].urbOut = kzalloc(sizeof(struct urb *) * usbduxsub[index].numOfOutBuffers, GFP_KERNEL); if (!(usbduxsub[index].urbOut)) { dev_err(dev, "comedi_: usbdux: " "Could not alloc. urbOut array\n"); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } for (i = 0; i < usbduxsub[index].numOfOutBuffers; i++) { /* one frame: 1ms */ usbduxsub[index].urbOut[i] = usb_alloc_urb(1, GFP_KERNEL); if (usbduxsub[index].urbOut[i] == NULL) { dev_err(dev, "comedi_: usbdux%d: " "Could not alloc. urb(%d)\n", index, i); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } usbduxsub[index].urbOut[i]->dev = usbduxsub[index].usbdev; /* will be filled later with a pointer to the comedi-device */ /* and ONLY then the urb should be submitted */ usbduxsub[index].urbOut[i]->context = NULL; usbduxsub[index].urbOut[i]->pipe = usb_sndisocpipe(usbduxsub[index].usbdev, ISOOUTEP); usbduxsub[index].urbOut[i]->transfer_flags = URB_ISO_ASAP; usbduxsub[index].urbOut[i]->transfer_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL); if (!(usbduxsub[index].urbOut[i]->transfer_buffer)) { dev_err(dev, "comedi_: usbdux%d: " "could not alloc. 
transb.\n", index); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } usbduxsub[index].urbOut[i]->complete = usbduxsub_ao_IsocIrq; usbduxsub[index].urbOut[i]->number_of_packets = 1; usbduxsub[index].urbOut[i]->transfer_buffer_length = SIZEOUTBUF; usbduxsub[index].urbOut[i]->iso_frame_desc[0].offset = 0; usbduxsub[index].urbOut[i]->iso_frame_desc[0].length = SIZEOUTBUF; if (usbduxsub[index].high_speed) { /* uframes */ usbduxsub[index].urbOut[i]->interval = 8; } else { /* frames */ usbduxsub[index].urbOut[i]->interval = 1; } } /* pwm */ if (usbduxsub[index].high_speed) { /* max bulk ep size in high speed */ usbduxsub[index].sizePwmBuf = 512; usbduxsub[index].urbPwm = usb_alloc_urb(0, GFP_KERNEL); if (usbduxsub[index].urbPwm == NULL) { dev_err(dev, "comedi_: usbdux%d: " "Could not alloc. pwm urb\n", index); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } usbduxsub[index].urbPwm->transfer_buffer = kzalloc(usbduxsub[index].sizePwmBuf, GFP_KERNEL); if (!(usbduxsub[index].urbPwm->transfer_buffer)) { dev_err(dev, "comedi_: usbdux%d: " "could not alloc. transb. for pwm\n", index); tidy_up(&(usbduxsub[index])); up(&start_stop_sem); return -ENOMEM; } } else { usbduxsub[index].urbPwm = NULL; usbduxsub[index].sizePwmBuf = 0; } usbduxsub[index].ai_cmd_running = 0; usbduxsub[index].ao_cmd_running = 0; usbduxsub[index].pwm_cmd_running = 0; /* we've reached the bottom of the function */ usbduxsub[index].probed = 1; up(&start_stop_sem); dev_info(dev, "comedi_: usbdux%d " "has been successfully initialised.\n", index); /* success */ return 0; } static void usbduxsub_disconnect(struct usb_interface *intf) { struct usbduxsub *usbduxsub_tmp = usb_get_intfdata(intf); struct usb_device *udev = interface_to_usbdev(intf); if (!usbduxsub_tmp) { dev_err(&intf->dev, "comedi_: disconnect called with null pointer.\n"); return; } if (usbduxsub_tmp->usbdev != udev) { dev_err(&intf->dev, "comedi_: BUG! 
called with wrong ptr!!!\n"); return; } down(&start_stop_sem); down(&usbduxsub_tmp->sem); tidy_up(usbduxsub_tmp); up(&usbduxsub_tmp->sem); up(&start_stop_sem); dev_dbg(&intf->dev, "comedi_: disconnected from the usb\n"); } /* is called when comedi-config is called */ static int usbdux_attach(comedi_device *dev, comedi_devconfig *it) { int ret; int index; int i; struct usbduxsub *udev; comedi_subdevice *s = NULL; dev->private = NULL; down(&start_stop_sem); /* find a valid device which has been detected by the probe function of * the usb */ index = -1; for (i = 0; i < NUMUSBDUX; i++) { if ((usbduxsub[i].probed) && (!usbduxsub[i].attached)) { index = i; break; } } if (index < 0) { printk(KERN_ERR "comedi%d: usbdux: error: attach failed, no " "usbdux devs connected to the usb bus.\n", dev->minor); up(&start_stop_sem); return -ENODEV; } udev = &usbduxsub[index]; down(&udev->sem); /* pointer back to the corresponding comedi device */ udev->comedidev = dev; /* trying to upload the firmware into the chip */ if (comedi_aux_data(it->options, 0) && it->options[COMEDI_DEVCONF_AUX_DATA_LENGTH]) { read_firmware(udev, comedi_aux_data(it->options, 0), it->options[COMEDI_DEVCONF_AUX_DATA_LENGTH]); } dev->board_name = BOARDNAME; /* set number of subdevices */ if (udev->high_speed) { /* with pwm */ dev->n_subdevices = 5; } else { /* without pwm */ dev->n_subdevices = 4; } /* allocate space for the subdevices */ ret = alloc_subdevices(dev, dev->n_subdevices); if (ret < 0) { dev_err(&udev->interface->dev, "comedi%d: error alloc space for subdev\n", dev->minor); up(&start_stop_sem); return ret; } dev_info(&udev->interface->dev, "comedi%d: usb-device %d is attached to comedi.\n", dev->minor, index); /* private structure is also simply the usb-structure */ dev->private = udev; /* the first subdevice is the A/D converter */ s = dev->subdevices + SUBDEV_AD; /* the URBs get the comedi subdevice */ /* which is responsible for reading */ /* this is the subdevice which reads data */ 
dev->read_subdev = s; /* the subdevice receives as private structure the */ /* usb-structure */ s->private = NULL; /* analog input */ s->type = COMEDI_SUBD_AI; /* readable and ref is to ground */ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ; /* 8 channels */ s->n_chan = 8; /* length of the channellist */ s->len_chanlist = 8; /* callback functions */ s->insn_read = usbdux_ai_insn_read; s->do_cmdtest = usbdux_ai_cmdtest; s->do_cmd = usbdux_ai_cmd; s->cancel = usbdux_ai_cancel; /* max value from the A/D converter (12bit) */ s->maxdata = 0xfff; /* range table to convert to physical units */ s->range_table = (&range_usbdux_ai_range); /* analog out */ s = dev->subdevices + SUBDEV_DA; /* analog out */ s->type = COMEDI_SUBD_AO; /* backward pointer */ dev->write_subdev = s; /* the subdevice receives as private structure the */ /* usb-structure */ s->private = NULL; /* are writable */ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE; /* 4 channels */ s->n_chan = 4; /* length of the channellist */ s->len_chanlist = 4; /* 12 bit resolution */ s->maxdata = 0x0fff; /* bipolar range */ s->range_table = (&range_usbdux_ao_range); /* callback */ s->do_cmdtest = usbdux_ao_cmdtest; s->do_cmd = usbdux_ao_cmd; s->cancel = usbdux_ao_cancel; s->insn_read = usbdux_ao_insn_read; s->insn_write = usbdux_ao_insn_write; /* digital I/O */ s = dev->subdevices + SUBDEV_DIO; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 8; s->maxdata = 1; s->range_table = (&range_digital); s->insn_bits = usbdux_dio_insn_bits; s->insn_config = usbdux_dio_insn_config; /* we don't use it */ s->private = NULL; /* counter */ s = dev->subdevices + SUBDEV_COUNTER; s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 4; s->maxdata = 0xFFFF; s->insn_read = usbdux_counter_read; s->insn_write = usbdux_counter_write; s->insn_config = usbdux_counter_config; if (udev->high_speed) { /* timer / pwm */ s = dev->subdevices + 
SUBDEV_PWM; s->type = COMEDI_SUBD_PWM; s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE; s->n_chan = 8; /* this defines the max duty cycle resolution */ s->maxdata = udev->sizePwmBuf; s->insn_write = usbdux_pwm_write; s->insn_read = usbdux_pwm_read; s->insn_config = usbdux_pwm_config; usbdux_pwm_period(dev, s, PWM_DEFAULT_PERIOD); } /* finally decide that it's attached */ udev->attached = 1; up(&udev->sem); up(&start_stop_sem); dev_info(&udev->interface->dev, "comedi%d: attached to usbdux.\n", dev->minor); return 0; } static int usbdux_detach(comedi_device *dev) { struct usbduxsub *usbduxsub_tmp; if (!dev) { printk(KERN_ERR "comedi?: usbdux: detach without dev variable...\n"); return -EFAULT; } usbduxsub_tmp = dev->private; if (!usbduxsub_tmp) { printk(KERN_ERR "comedi?: usbdux: detach without ptr to usbduxsub[]\n"); return -EFAULT; } dev_dbg(&usbduxsub_tmp->interface->dev, "comedi%d: detach usb device\n", dev->minor); down(&usbduxsub_tmp->sem); /* Don't allow detach to free the private structure */ /* It's one entry of of usbduxsub[] */ dev->private = NULL; usbduxsub_tmp->attached = 0; usbduxsub_tmp->comedidev = NULL; dev_dbg(&usbduxsub_tmp->interface->dev, "comedi%d: detach: successfully removed\n", dev->minor); up(&usbduxsub_tmp->sem); return 0; } /* main driver struct */ static comedi_driver driver_usbdux = { .driver_name = "usbdux", .module = THIS_MODULE, .attach = usbdux_attach, .detach = usbdux_detach, }; static void init_usb_devices(void) { int index; /* all devices entries are invalid to begin with */ /* they will become valid by the probe function */ /* and then finally by the attach-function */ for (index = 0; index < NUMUSBDUX; index++) { memset(&(usbduxsub[index]), 0x00, sizeof(usbduxsub[index])); init_MUTEX(&(usbduxsub[index].sem)); } } /* Table with the USB-devices: just now only testing IDs */ static struct usb_device_id usbduxsub_table[] = { {USB_DEVICE(0x13d8, 0x0001) }, {USB_DEVICE(0x13d8, 0x0002) }, {} /* Terminating entry */ }; 
MODULE_DEVICE_TABLE(usb, usbduxsub_table); /* The usbduxsub-driver */ static struct usb_driver usbduxsub_driver = { .name = BOARDNAME, .probe = usbduxsub_probe, .disconnect = usbduxsub_disconnect, .id_table = usbduxsub_table, }; /* Can't use the nice macro as I have also to initialise the USB */ /* subsystem: */ /* registering the usb-system _and_ the comedi-driver */ static int init_usbdux(void) { printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); init_usb_devices(); usb_register(&usbduxsub_driver); comedi_driver_register(&driver_usbdux); return 0; } /* deregistering the comedi driver and the usb-subsystem */ static void exit_usbdux(void) { comedi_driver_unregister(&driver_usbdux); usb_deregister(&usbduxsub_driver); } module_init(init_usbdux); module_exit(exit_usbdux); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
broonie/regulator-2.6
drivers/w1/w1_netlink.c
140
5928
/* * w1_netlink.c * * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/connector.h> #include "w1.h" #include "w1_log.h" #include "w1_netlink.h" #if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE))) void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg) { char buf[sizeof(struct cn_msg) + sizeof(struct w1_netlink_msg)]; struct cn_msg *m = (struct cn_msg *)buf; struct w1_netlink_msg *w = (struct w1_netlink_msg *)(m+1); memset(buf, 0, sizeof(buf)); m->id.idx = CN_W1_IDX; m->id.val = CN_W1_VAL; m->seq = dev->seq++; m->len = sizeof(struct w1_netlink_msg); memcpy(w, msg, sizeof(struct w1_netlink_msg)); cn_netlink_send(m, 0, GFP_KERNEL); } static int w1_process_command_master(struct w1_master *dev, struct cn_msg *msg, struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) { dev_dbg(&dev->dev, "%s: %s: cmd=%02x, len=%u.\n", __func__, dev->name, cmd->cmd, cmd->len); if (cmd->cmd != W1_CMD_SEARCH && cmd->cmd != W1_CMD_ALARM_SEARCH) return -EINVAL; w1_search_process(dev, (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH); return 0; } static int w1_send_read_reply(struct w1_slave *sl, struct cn_msg *msg, struct 
w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) { void *data; struct w1_netlink_msg *h; struct w1_netlink_cmd *c; struct cn_msg *cm; int err; data = kzalloc(sizeof(struct cn_msg) + sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd) + cmd->len, GFP_KERNEL); if (!data) return -ENOMEM; cm = (struct cn_msg *)(data); h = (struct w1_netlink_msg *)(cm + 1); c = (struct w1_netlink_cmd *)(h + 1); memcpy(cm, msg, sizeof(struct cn_msg)); memcpy(h, hdr, sizeof(struct w1_netlink_msg)); memcpy(c, cmd, sizeof(struct w1_netlink_cmd)); cm->ack = msg->seq+1; cm->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd) + cmd->len; h->len = sizeof(struct w1_netlink_cmd) + cmd->len; memcpy(c->data, cmd->data, c->len); err = cn_netlink_send(cm, 0, GFP_KERNEL); kfree(data); return err; } static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg, struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) { int err = 0; dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n", __func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id, sl->reg_num.crc, cmd->cmd, cmd->len); switch (cmd->cmd) { case W1_CMD_READ: w1_read_block(sl->master, cmd->data, cmd->len); w1_send_read_reply(sl, msg, hdr, cmd); break; case W1_CMD_WRITE: w1_write_block(sl->master, cmd->data, cmd->len); break; case W1_CMD_SEARCH: case W1_CMD_ALARM_SEARCH: w1_search_process(sl->master, (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH); break; default: err = -1; break; } return err; } static void w1_cn_callback(void *data) { struct cn_msg *msg = data; struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1); struct w1_netlink_cmd *cmd; struct w1_slave *sl; struct w1_master *dev; int err = 0; while (msg->len && !err) { struct w1_reg_num id; u16 mlen = m->len; u8 *cmd_data = m->data; dev = NULL; sl = NULL; memcpy(&id, m->id.id, sizeof(id)); #if 0 printk("%s: %02x.%012llx.%02x: type=%02x, len=%u.\n", __func__, id.family, (unsigned long long)id.id, id.crc, 
m->type, m->len); #endif if (m->len + sizeof(struct w1_netlink_msg) > msg->len) { err = -E2BIG; break; } if (!mlen) goto out_cont; if (m->type == W1_MASTER_CMD) { dev = w1_search_master_id(m->id.mst.id); } else if (m->type == W1_SLAVE_CMD) { sl = w1_search_slave(&id); if (sl) dev = sl->master; } if (!dev) { err = -ENODEV; goto out_cont; } mutex_lock(&dev->mutex); if (sl && w1_reset_select_slave(sl)) { err = -ENODEV; goto out_up; } while (mlen) { cmd = (struct w1_netlink_cmd *)cmd_data; if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) { err = -E2BIG; break; } if (sl) w1_process_command_slave(sl, msg, m, cmd); else w1_process_command_master(dev, msg, m, cmd); cmd_data += cmd->len + sizeof(struct w1_netlink_cmd); mlen -= cmd->len + sizeof(struct w1_netlink_cmd); } out_up: atomic_dec(&dev->refcnt); if (sl) atomic_dec(&sl->refcnt); mutex_unlock(&dev->mutex); out_cont: msg->len -= sizeof(struct w1_netlink_msg) + m->len; m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len); /* * Let's allow requests for nonexisting devices. */ if (err == -ENODEV) err = 0; } #if 0 if (err) { printk("%s: malformed message. Dropping.\n", __func__); } #endif } int w1_init_netlink(void) { struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL}; return cn_add_callback(&w1_id, "w1", &w1_cn_callback); } void w1_fini_netlink(void) { struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL}; cn_del_callback(&w1_id); } #else void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg) { } int w1_init_netlink(void) { return 0; } void w1_fini_netlink(void) { } #endif
gpl-2.0
matianfu/barcelona-3.2.40
drivers/usb/gadget/mv_udc_core.c
140
59670
/* * Copyright (C) 2011 Marvell International Ltd. All rights reserved. * Author: Chao Xie <chao.xie@marvell.com> * Neil Zhang <zhangwm@marvell.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/otg.h> #include <linux/pm.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/platform_data/mv_usb.h> #include <asm/system.h> #include <asm/unaligned.h> #include "mv_udc.h" #define DRIVER_DESC "Marvell PXA USB Device Controller driver" #define DRIVER_VERSION "8 Nov 2010" #define ep_dir(ep) (((ep)->ep_num == 0) ? 
\ ((ep)->udc->ep0_dir) : ((ep)->direction)) /* timeout value -- usec */ #define RESET_TIMEOUT 10000 #define FLUSH_TIMEOUT 10000 #define EPSTATUS_TIMEOUT 10000 #define PRIME_TIMEOUT 10000 #define READSAFE_TIMEOUT 1000 #define DTD_TIMEOUT 1000 #define LOOPS_USEC_SHIFT 4 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT) #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT) static DECLARE_COMPLETION(release_done); static const char driver_name[] = "mv_udc"; static const char driver_desc[] = DRIVER_DESC; /* controller device global variable */ static struct mv_udc *the_controller; int mv_usb_otgsc; static void nuke(struct mv_ep *ep, int status); static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver); /* for endpoint 0 operations */ static const struct usb_endpoint_descriptor mv_ep0_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 0, .bmAttributes = USB_ENDPOINT_XFER_CONTROL, .wMaxPacketSize = EP0_MAX_PKT_SIZE, }; static void ep0_reset(struct mv_udc *udc) { struct mv_ep *ep; u32 epctrlx; int i = 0; /* ep0 in and out */ for (i = 0; i < 2; i++) { ep = &udc->eps[i]; ep->udc = udc; /* ep0 dQH */ ep->dqh = &udc->ep_dqh[i]; /* configure ep0 endpoint capabilities in dQH */ ep->dqh->max_packet_length = (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS) | EP_QUEUE_HEAD_IOS; ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE; epctrlx = readl(&udc->op_regs->epctrlx[0]); if (i) { /* TX */ epctrlx |= EPCTRL_TX_ENABLE | (USB_ENDPOINT_XFER_CONTROL << EPCTRL_TX_EP_TYPE_SHIFT); } else { /* RX */ epctrlx |= EPCTRL_RX_ENABLE | (USB_ENDPOINT_XFER_CONTROL << EPCTRL_RX_EP_TYPE_SHIFT); } writel(epctrlx, &udc->op_regs->epctrlx[0]); } } /* protocol ep0 stall, will automatically be cleared on new transaction */ static void ep0_stall(struct mv_udc *udc) { u32 epctrlx; /* set TX and RX to stall */ epctrlx = readl(&udc->op_regs->epctrlx[0]); epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL; writel(epctrlx, 
&udc->op_regs->epctrlx[0]); /* update ep0 state */ udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = EP_DIR_OUT; } static int process_ep_req(struct mv_udc *udc, int index, struct mv_req *curr_req) { struct mv_dtd *curr_dtd; struct mv_dqh *curr_dqh; int td_complete, actual, remaining_length; int i, direction; int retval = 0; u32 errors; u32 bit_pos; curr_dqh = &udc->ep_dqh[index]; direction = index % 2; curr_dtd = curr_req->head; td_complete = 0; actual = curr_req->req.length; for (i = 0; i < curr_req->dtd_count; i++) { if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) { dev_dbg(&udc->dev->dev, "%s, dTD not completed\n", udc->eps[index].name); return 1; } errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK; if (!errors) { remaining_length = (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE) >> DTD_LENGTH_BIT_POS; actual -= remaining_length; if (remaining_length) { if (direction) { dev_dbg(&udc->dev->dev, "TX dTD remains data\n"); retval = -EPROTO; break; } else break; } } else { dev_info(&udc->dev->dev, "complete_tr error: ep=%d %s: error = 0x%x\n", index >> 1, direction ? "SEND" : "RECV", errors); if (errors & DTD_STATUS_HALTED) { /* Clear the errors and Halt condition */ curr_dqh->size_ioc_int_sts &= ~errors; retval = -EPIPE; } else if (errors & DTD_STATUS_DATA_BUFF_ERR) { retval = -EPROTO; } else if (errors & DTD_STATUS_TRANSACTION_ERR) { retval = -EILSEQ; } } if (i != curr_req->dtd_count - 1) curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt; } if (retval) return retval; if (direction == EP_DIR_OUT) bit_pos = 1 << curr_req->ep->ep_num; else bit_pos = 1 << (16 + curr_req->ep->ep_num); while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) { if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) { while (readl(&udc->op_regs->epstatus) & bit_pos) udelay(1); break; } udelay(1); } curr_req->req.actual = actual; return 0; } /* * done() - retire a request; caller blocked irqs * @status : request status to be set, only works when * request is still in progress. 
*/ static void done(struct mv_ep *ep, struct mv_req *req, int status) { struct mv_udc *udc = NULL; unsigned char stopped = ep->stopped; struct mv_dtd *curr_td, *next_td; int j; udc = (struct mv_udc *)ep->udc; /* Removed the req from fsl_ep->queue */ list_del_init(&req->queue); /* req.status should be set as -EINPROGRESS in ep_queue() */ if (req->req.status == -EINPROGRESS) req->req.status = status; else status = req->req.status; /* Free dtd for the request */ next_td = req->head; for (j = 0; j < req->dtd_count; j++) { curr_td = next_td; if (j != req->dtd_count - 1) next_td = curr_td->next_dtd_virt; dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma); } if (req->mapped) { dma_unmap_single(ep->udc->gadget.dev.parent, req->req.dma, req->req.length, ((ep_dir(ep) == EP_DIR_IN) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); req->req.dma = DMA_ADDR_INVALID; req->mapped = 0; } else dma_sync_single_for_cpu(ep->udc->gadget.dev.parent, req->req.dma, req->req.length, ((ep_dir(ep) == EP_DIR_IN) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); if (status && (status != -ESHUTDOWN)) dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u", ep->ep.name, &req->req, status, req->req.actual, req->req.length); ep->stopped = 1; spin_unlock(&ep->udc->lock); /* * complete() is from gadget layer, * eg fsg->bulk_in_complete() */ if (req->req.complete) req->req.complete(&ep->ep, &req->req); spin_lock(&ep->udc->lock); ep->stopped = stopped; } static int queue_dtd(struct mv_ep *ep, struct mv_req *req) { u32 tmp, epstatus, bit_pos, direction; struct mv_udc *udc; struct mv_dqh *dqh; unsigned int loops; int readsafe, retval = 0; udc = ep->udc; direction = ep_dir(ep); dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]); bit_pos = 1 << (((direction == EP_DIR_OUT) ? 
0 : 16) + ep->ep_num); /* check if the pipe is empty */ if (!(list_empty(&ep->queue))) { struct mv_req *lastreq; lastreq = list_entry(ep->queue.prev, struct mv_req, queue); lastreq->tail->dtd_next = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; if (readl(&udc->op_regs->epprime) & bit_pos) { loops = LOOPS(PRIME_TIMEOUT); while (readl(&udc->op_regs->epprime) & bit_pos) { if (loops == 0) { retval = -ETIME; goto done; } udelay(LOOPS_USEC); loops--; } if (readl(&udc->op_regs->epstatus) & bit_pos) goto done; } readsafe = 0; loops = LOOPS(READSAFE_TIMEOUT); while (readsafe == 0) { if (loops == 0) { retval = -ETIME; goto done; } /* start with setting the semaphores */ tmp = readl(&udc->op_regs->usbcmd); tmp |= USBCMD_ATDTW_TRIPWIRE_SET; writel(tmp, &udc->op_regs->usbcmd); /* read the endpoint status */ epstatus = readl(&udc->op_regs->epstatus) & bit_pos; /* * Reread the ATDTW semaphore bit to check if it is * cleared. When hardware see a hazard, it will clear * the bit or else we remain set to 1 and we can * proceed with priming of endpoint if not already * primed. */ if (readl(&udc->op_regs->usbcmd) & USBCMD_ATDTW_TRIPWIRE_SET) { readsafe = 1; } loops--; udelay(LOOPS_USEC); } /* Clear the semaphore */ tmp = readl(&udc->op_regs->usbcmd); tmp &= USBCMD_ATDTW_TRIPWIRE_CLEAR; writel(tmp, &udc->op_regs->usbcmd); /* If endpoint is not active, we activate it now. */ if (!epstatus) { if (direction == EP_DIR_IN) { struct mv_dtd *curr_dtd = dma_to_virt( &udc->dev->dev, dqh->curr_dtd_ptr); loops = LOOPS(DTD_TIMEOUT); while (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) { if (loops == 0) { retval = -ETIME; goto done; } loops--; udelay(LOOPS_USEC); } } /* No other transfers on the queue */ /* Write dQH next pointer and terminate bit to 0 */ dqh->next_dtd_ptr = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; dqh->size_ioc_int_sts = 0; /* * Ensure that updates to the QH will * occur before priming. 
*/ wmb(); /* Prime the Endpoint */ writel(bit_pos, &udc->op_regs->epprime); } } else { /* Write dQH next pointer and terminate bit to 0 */ dqh->next_dtd_ptr = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; dqh->size_ioc_int_sts = 0; /* Ensure that updates to the QH will occur before priming. */ wmb(); /* Prime the Endpoint */ writel(bit_pos, &udc->op_regs->epprime); if (direction == EP_DIR_IN) { /* FIXME add status check after prime the IN ep */ int prime_again; u32 curr_dtd_ptr = dqh->curr_dtd_ptr; loops = LOOPS(DTD_TIMEOUT); prime_again = 0; while ((curr_dtd_ptr != req->head->td_dma)) { curr_dtd_ptr = dqh->curr_dtd_ptr; if (loops == 0) { dev_err(&udc->dev->dev, "failed to prime %s\n", ep->name); retval = -ETIME; goto done; } loops--; udelay(LOOPS_USEC); if (loops == (LOOPS(DTD_TIMEOUT) >> 2)) { if (prime_again) goto done; dev_info(&udc->dev->dev, "prime again\n"); writel(bit_pos, &udc->op_regs->epprime); prime_again = 1; } } } } done: return retval; } static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length, dma_addr_t *dma, int *is_last) { u32 temp; struct mv_dtd *dtd; struct mv_udc *udc; /* how big will this transfer be? 
*/ *length = min(req->req.length - req->req.actual, (unsigned)EP_MAX_LENGTH_TRANSFER); udc = req->ep->udc; /* * Be careful that no _GFP_HIGHMEM is set, * or we can not use dma_to_virt */ dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma); if (dtd == NULL) return dtd; dtd->td_dma = *dma; /* initialize buffer page pointers */ temp = (u32)(req->req.dma + req->req.actual); dtd->buff_ptr0 = cpu_to_le32(temp); temp &= ~0xFFF; dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000); dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000); dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000); dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000); req->req.actual += *length; /* zlp is needed if req->req.zero is set */ if (req->req.zero) { if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) *is_last = 1; else *is_last = 0; } else if (req->req.length == req->req.actual) *is_last = 1; else *is_last = 0; /* Fill in the transfer size; set active bit */ temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE); /* Enable interrupt for the last dtd of a request */ if (*is_last && !req->req.no_interrupt) temp |= DTD_IOC; dtd->size_ioc_sts = temp; mb(); return dtd; } /* generate dTD linked list for a request */ static int req_to_dtd(struct mv_req *req) { unsigned count; int is_last, is_first = 1; struct mv_dtd *dtd, *last_dtd = NULL; struct mv_udc *udc; dma_addr_t dma; udc = req->ep->udc; do { dtd = build_dtd(req, &count, &dma, &is_last); if (dtd == NULL) return -ENOMEM; if (is_first) { is_first = 0; req->head = dtd; } else { last_dtd->dtd_next = dma; last_dtd->next_dtd_virt = dtd; } last_dtd = dtd; req->dtd_count++; } while (!is_last); /* set terminate bit to 1 for the last dTD */ dtd->dtd_next = DTD_NEXT_TERMINATE; req->tail = dtd; return 0; } static int mv_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct mv_udc *udc; struct mv_ep *ep; struct mv_dqh *dqh; u16 max = 0; u32 bit_pos, epctrlx, direction; unsigned char zlt = 0, ios = 0, mult = 0; unsigned long flags; ep = 
container_of(_ep, struct mv_ep, ep);
	/*
	 * NOTE(review): ep->udc is dereferenced before the !_ep check below;
	 * if _ep were NULL this would fault before the check runs — the
	 * validation should precede the dereference. TODO confirm callers
	 * never pass NULL.
	 */
	udc = ep->udc;

	if (!_ep || !desc || ep->desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	direction = ep_dir(ep);
	max = usb_endpoint_maxp(desc);

	/*
	 * disable HW zero length termination select
	 * driver handles zero length packet through req->req.zero
	 */
	zlt = 1;

	/* OUT endpoints use bits 0..15 of ENDPTPRIME/STATUS, IN use 16..31 */
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Check if the Endpoint is Primed */
	if ((readl(&udc->op_regs->epprime) & bit_pos)
		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
		dev_info(&udc->dev->dev,
			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
			(unsigned)readl(&udc->op_regs->epprime),
			(unsigned)readl(&udc->op_regs->epstatus),
			(unsigned)bit_pos);
		goto en_done;
	}
	/* Set the max packet length, interrupt on Setup and Mult fields */
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		zlt = 1;
		mult = 0;
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		ios = 1;
		/* fallthrough: control endpoints also use mult = 0 */
	case USB_ENDPOINT_XFER_INT:
		mult = 0;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Calculate transactions needed for high bandwidth iso */
		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
		max = max & 0x7ff;	/* bit 0~10 */
		/* 3 transactions at most */
		if (mult > 3)
			goto en_done;
		break;
	default:
		goto en_done;
	}

	spin_lock_irqsave(&udc->lock, flags);
	/* Get the endpoint queue head address */
	dqh = ep->dqh;
	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
		| (mult << EP_QUEUE_HEAD_MULT_POS)
		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
		| (ios ?
EP_QUEUE_HEAD_IOS : 0);
	/* empty dQH: next pointer carries the terminate bit */
	dqh->next_dtd_ptr = 1;
	dqh->size_ioc_int_sts = 0;

	ep->ep.maxpacket = max;
	ep->desc = desc;
	ep->stopped = 0;

	/* Enable the endpoint for Rx or Tx and set the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (direction == EP_DIR_IN) {
		epctrlx &= ~EPCTRL_TX_ALL_MASK;
		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_TX_EP_TYPE_SHIFT);
	} else {
		epctrlx &= ~EPCTRL_RX_ALL_MASK;
		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_RX_EP_TYPE_SHIFT);
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/*
	 * Implement Guideline (GL# USB-7) The unused endpoint type must
	 * be programmed to bulk.
	 */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
				<< EPCTRL_RX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
				<< EPCTRL_TX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
en_done:
	return -EINVAL;
}

/* tear down an endpoint: flush its requests and disable it in hardware */
static int mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 bit_pos, epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN) ?
(EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE) : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE)); writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); /* nuke all pending requests (does flush) */ nuke(ep, -ESHUTDOWN); ep->desc = NULL; ep->stopped = 1; spin_unlock_irqrestore(&udc->lock, flags); return 0; } static struct usb_request * mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { struct mv_req *req = NULL; req = kzalloc(sizeof *req, gfp_flags); if (!req) return NULL; req->req.dma = DMA_ADDR_INVALID; INIT_LIST_HEAD(&req->queue); return &req->req; } static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req) { struct mv_req *req = NULL; req = container_of(_req, struct mv_req, req); if (_req) kfree(req); } static void mv_ep_fifo_flush(struct usb_ep *_ep) { struct mv_udc *udc; u32 bit_pos, direction; struct mv_ep *ep; unsigned int loops; if (!_ep) return; ep = container_of(_ep, struct mv_ep, ep); if (!ep->desc) return; udc = ep->udc; direction = ep_dir(ep); if (ep->ep_num == 0) bit_pos = (1 << 16) | 1; else if (direction == EP_DIR_OUT) bit_pos = 1 << ep->ep_num; else bit_pos = 1 << (16 + ep->ep_num); loops = LOOPS(EPSTATUS_TIMEOUT); do { unsigned int inter_loops; if (loops == 0) { dev_err(&udc->dev->dev, "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n", (unsigned)readl(&udc->op_regs->epstatus), (unsigned)bit_pos); return; } /* Write 1 to the Flush register */ writel(bit_pos, &udc->op_regs->epflush); /* Wait until flushing completed */ inter_loops = LOOPS(FLUSH_TIMEOUT); while (readl(&udc->op_regs->epflush)) { /* * ENDPTFLUSH bit should be cleared to indicate this * operation is complete */ if (inter_loops == 0) { dev_err(&udc->dev->dev, "TIMEOUT for ENDPTFLUSH=0x%x," "bit_pos=0x%x\n", (unsigned)readl(&udc->op_regs->epflush), (unsigned)bit_pos); return; } inter_loops--; udelay(LOOPS_USEC); } loops--; } while (readl(&udc->op_regs->epstatus) & bit_pos); } /* queues (submits) an I/O request to an endpoint */ static int mv_ep_queue(struct usb_ep *_ep, struct usb_request 
*_req, gfp_t gfp_flags)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc = ep->udc;
	unsigned long flags;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(&udc->dev->dev, "%s, bad params", __func__);
		return -EINVAL;
	}
	if (unlikely(!_ep || !ep->desc)) {
		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
		return -EINVAL;
	}
	/* an iso request may not exceed one packet per dTD chain */
	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		if (req->req.length > ep->ep.maxpacket)
			return -EMSGSIZE;
	}

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	req->ep = ep;

	/* map virtual address to hardware */
	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
					req->req.buf,
					req->req.length, ep_dir(ep)
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 1;
	} else {
		/* caller pre-mapped the buffer; just sync it for the device */
		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
					req->req.dma, req->req.length,
					ep_dir(ep)
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 0;
	}

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->dtd_count = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* build dtds and push them to device queue */
	if (!req_to_dtd(req)) {
		int retval;
		retval = queue_dtd(ep, req);
		if (retval) {
			spin_unlock_irqrestore(&udc->lock, flags);
			return retval;
		}
	} else {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -ENOMEM;
	}

	/* Update ep0 state */
	if (ep->ep_num == 0)
		udc->ep0_state = DATA_STATE_XMIT;

	/* irq handler advances the queue */
	if (req != NULL)
		list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_dqh *qh;
			struct mv_req *next_req;

			qh = ep->dqh;
			next_req = list_entry(req->queue.next, struct mv_req,
					queue);

			/*
			 * Point the QH to the first TD of next request.
			 * NOTE(review): this stores the *virtual* address of
			 * next_req->head via writel into the coherent-memory
			 * dQH — presumably next_req->head->td_dma was meant;
			 * verify against the controller programming model.
			 */
			writel((u32) next_req->head, &qh->curr_dtd_ptr);
		} else
{
			struct mv_dqh *qh;

			qh = ep->dqh;
			/* queue now empty: terminate bit set, status cleared */
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		/* unlink: previous tail inherits this request's next pointer */
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);
	}

	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}

/* set or clear the STALL bit for one direction of an endpoint */
static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (stall) {
		if (direction == EP_DIR_IN)
			epctrlx |= EPCTRL_TX_EP_STALL;
		else
			epctrlx |= EPCTRL_RX_EP_STALL;
	} else {
		/* clearing a stall also resets the data toggle */
		if (direction == EP_DIR_IN) {
			epctrlx &= ~EPCTRL_TX_EP_STALL;
			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
		} else {
			epctrlx &= ~EPCTRL_RX_EP_STALL;
			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
		}
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
}

/* returns 1 when the given direction of the endpoint is stalled */
static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (direction == EP_DIR_OUT)
		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
	else
		return (epctrlx & EPCTRL_TX_EP_STALL) ?
1 : 0;
}

/*
 * Common implementation for halt and wedge: a wedged endpoint stays
 * halted until the next ep enable, even across CLEAR_FEATURE(HALT).
 */
static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	struct mv_ep *ep;
	unsigned long flags = 0;
	int status = 0;
	struct mv_udc *udc;

	/*
	 * NOTE(review): ep->udc is read before the !_ep check below —
	 * same check-after-dereference pattern as mv_ep_enable; confirm
	 * callers never pass NULL.
	 */
	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;
	if (!_ep || !ep->desc) {
		status = -EINVAL;
		goto out;
	}

	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		status = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * Attempt to halt IN ep will fail if any transfer requests
	 * are still queue
	 */
	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
	if (halt && wedge)
		ep->wedge = 1;
	else if (!halt)
		ep->wedge = 0;
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	/* halting ep0 restarts the control state machine */
	if (ep->ep_num == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = EP_DIR_OUT;
	}
out:
	return status;
}

static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}

static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}

static struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,

	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,

	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,

	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};

/* gate on all controller clocks */
static void udc_clock_enable(struct mv_udc *udc)
{
	unsigned int i;

	for (i = 0; i < udc->clknum; i++)
		clk_enable(udc->clk[i]);
}

/* gate off all controller clocks */
static void udc_clock_disable(struct mv_udc *udc)
{
	unsigned int i;

	for (i = 0; i < udc->clknum; i++)
		clk_disable(udc->clk[i]);
}

/* mask interrupts and clear the Run/Stop bit to halt the controller */
static void udc_stop(struct mv_udc *udc)
{
	u32 tmp;

	/* Disable interrupts */
	tmp = readl(&udc->op_regs->usbintr);
	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
	writel(tmp, &udc->op_regs->usbintr);

	/* Reset the Run the bit in the command register to stop VUSB */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
writel(tmp, &udc->op_regs->usbcmd); } static void udc_start(struct mv_udc *udc) { u32 usbintr; usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN | USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND; /* Enable interrupts */ writel(usbintr, &udc->op_regs->usbintr); /* Set the Run bit in the command register */ writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd); } static int udc_reset(struct mv_udc *udc) { unsigned int loops; u32 tmp, portsc; /* Stop the controller */ tmp = readl(&udc->op_regs->usbcmd); tmp &= ~USBCMD_RUN_STOP; writel(tmp, &udc->op_regs->usbcmd); /* Reset the controller to get default values */ writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd); /* wait for reset to complete */ loops = LOOPS(RESET_TIMEOUT); while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) { if (loops == 0) { dev_err(&udc->dev->dev, "Wait for RESET completed TIMEOUT\n"); return -ETIMEDOUT; } loops--; udelay(LOOPS_USEC); } /* set controller to device mode */ tmp = readl(&udc->op_regs->usbmode); tmp |= USBMODE_CTRL_MODE_DEVICE; /* turn setup lockout off, require setup tripwire in usbcmd */ tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE; writel(tmp, &udc->op_regs->usbmode); writel(0x0, &udc->op_regs->epsetupstat); /* Configure the Endpoint List Address */ writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK, &udc->op_regs->eplistaddr); portsc = readl(&udc->op_regs->portsc[0]); if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC) portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER); if (udc->force_fs) portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT; else portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT); writel(portsc, &udc->op_regs->portsc[0]); tmp = readl(&udc->op_regs->epctrlx[0]); tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL); writel(tmp, &udc->op_regs->epctrlx[0]); return 0; } static int mv_udc_enable(struct mv_udc *udc) { int retval; if (udc->clock_gating == 0 || udc->active) return 0; dev_dbg(&udc->dev->dev, "enable udc\n"); udc_clock_enable(udc); if 
(udc->pdata->phy_init) { retval = udc->pdata->phy_init(udc->phy_regs); if (retval) { dev_err(&udc->dev->dev, "init phy error %d\n", retval); udc_clock_disable(udc); return retval; } } udc->active = 1; return 0; } static void mv_udc_disable(struct mv_udc *udc) { if (udc->clock_gating && udc->active) { dev_dbg(&udc->dev->dev, "disable udc\n"); if (udc->pdata->phy_deinit) udc->pdata->phy_deinit(udc->phy_regs); udc_clock_disable(udc); udc->active = 0; } } static int mv_udc_get_frame(struct usb_gadget *gadget) { struct mv_udc *udc; u16 retval; if (!gadget) return -ENODEV; udc = container_of(gadget, struct mv_udc, gadget); retval = readl(udc->op_regs->frindex) & USB_FRINDEX_MASKS; return retval; } /* Tries to wake up the host connected to this gadget */ static int mv_udc_wakeup(struct usb_gadget *gadget) { struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget); u32 portsc; /* Remote wakeup feature not enabled by host */ if (!udc->remote_wakeup) return -ENOTSUPP; portsc = readl(&udc->op_regs->portsc); /* not suspended? 
*/ if (!(portsc & PORTSCX_PORT_SUSPEND)) return 0; /* trigger force resume */ portsc |= PORTSCX_PORT_FORCE_RESUME; writel(portsc, &udc->op_regs->portsc[0]); return 0; } static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active) { struct mv_udc *udc; unsigned long flags; int retval = 0; udc = container_of(gadget, struct mv_udc, gadget); spin_lock_irqsave(&udc->lock, flags); dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n", __func__, udc->softconnect, udc->vbus_active); udc->vbus_active = (is_active != 0); if (udc->driver && udc->softconnect && udc->vbus_active) { retval = mv_udc_enable(udc); if (retval == 0) { /* Clock is disabled, need re-init registers */ udc_reset(udc); ep0_reset(udc); udc_start(udc); } } else if (udc->driver && udc->softconnect) { /* stop all the transfer in queue*/ stop_activity(udc, udc->driver); udc_stop(udc); mv_udc_disable(udc); } spin_unlock_irqrestore(&udc->lock, flags); return retval; } static int mv_udc_pullup(struct usb_gadget *gadget, int is_on) { struct mv_udc *udc; unsigned long flags; int retval = 0; udc = container_of(gadget, struct mv_udc, gadget); spin_lock_irqsave(&udc->lock, flags); dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n", __func__, udc->softconnect, udc->vbus_active); udc->softconnect = (is_on != 0); if (udc->driver && udc->softconnect && udc->vbus_active) { retval = mv_udc_enable(udc); if (retval == 0) { /* Clock is disabled, need re-init registers */ udc_reset(udc); ep0_reset(udc); udc_start(udc); } } else if (udc->driver && udc->vbus_active) { /* stop all the transfer in queue*/ stop_activity(udc, udc->driver); udc_stop(udc); mv_udc_disable(udc); } spin_unlock_irqrestore(&udc->lock, flags); return retval; } static int mv_udc_start(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)); static int mv_udc_stop(struct usb_gadget_driver *driver); /* device controller usb_gadget_ops structure */ static const struct usb_gadget_ops mv_ops = { /* returns the current 
frame number */ .get_frame = mv_udc_get_frame, /* tries to wake up the host connected to this gadget */ .wakeup = mv_udc_wakeup, /* notify controller that VBUS is powered or not */ .vbus_session = mv_udc_vbus_session, /* D+ pullup, software-controlled connect/disconnect to USB host */ .pullup = mv_udc_pullup, .start = mv_udc_start, .stop = mv_udc_stop, }; static int eps_init(struct mv_udc *udc) { struct mv_ep *ep; char name[14]; int i; /* initialize ep0 */ ep = &udc->eps[0]; ep->udc = udc; strncpy(ep->name, "ep0", sizeof(ep->name)); ep->ep.name = ep->name; ep->ep.ops = &mv_ep_ops; ep->wedge = 0; ep->stopped = 0; ep->ep.maxpacket = EP0_MAX_PKT_SIZE; ep->ep_num = 0; ep->desc = &mv_ep0_desc; INIT_LIST_HEAD(&ep->queue); ep->ep_type = USB_ENDPOINT_XFER_CONTROL; /* initialize other endpoints */ for (i = 2; i < udc->max_eps * 2; i++) { ep = &udc->eps[i]; if (i % 2) { snprintf(name, sizeof(name), "ep%din", i / 2); ep->direction = EP_DIR_IN; } else { snprintf(name, sizeof(name), "ep%dout", i / 2); ep->direction = EP_DIR_OUT; } ep->udc = udc; strncpy(ep->name, name, sizeof(ep->name)); ep->ep.name = ep->name; ep->ep.ops = &mv_ep_ops; ep->stopped = 0; ep->ep.maxpacket = (unsigned short) ~0; ep->ep_num = i / 2; INIT_LIST_HEAD(&ep->queue); list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); ep->dqh = &udc->ep_dqh[i]; } return 0; } /* delete all endpoint requests, called with spinlock held */ static void nuke(struct mv_ep *ep, int status) { /* called with spinlock held */ ep->stopped = 1; /* endpoint fifo flush */ mv_ep_fifo_flush(&ep->ep); while (!list_empty(&ep->queue)) { struct mv_req *req = NULL; req = list_entry(ep->queue.next, struct mv_req, queue); done(ep, req, status); } } /* stop all USB activities */ static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver) { struct mv_ep *ep; nuke(&udc->eps[0], -ESHUTDOWN); list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { nuke(ep, -ESHUTDOWN); } /* report disconnect; the driver is already quiesced */ 
if (driver) { spin_unlock(&udc->lock); driver->disconnect(&udc->gadget); spin_lock(&udc->lock); } } static int mv_udc_start(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)) { struct mv_udc *udc = the_controller; int retval = 0; unsigned long flags; if (!udc) return -ENODEV; if (udc->driver) return -EBUSY; spin_lock_irqsave(&udc->lock, flags); /* hook up the driver ... */ driver->driver.bus = NULL; udc->driver = driver; udc->gadget.dev.driver = &driver->driver; udc->usb_state = USB_STATE_ATTACHED; udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = EP_DIR_OUT; spin_unlock_irqrestore(&udc->lock, flags); retval = bind(&udc->gadget); if (retval) { dev_err(&udc->dev->dev, "bind to driver %s --> %d\n", driver->driver.name, retval); udc->driver = NULL; udc->gadget.dev.driver = NULL; return retval; } /* pullup is always on */ mv_udc_pullup(&udc->gadget, 1); /* When boot with cable attached, there will be no vbus irq occurred */ if (udc->qwork) queue_work(udc->qwork, &udc->vbus_work); return 0; } static int mv_udc_stop(struct usb_gadget_driver *driver) { struct mv_udc *udc = the_controller; unsigned long flags; if (!udc) return -ENODEV; spin_lock_irqsave(&udc->lock, flags); mv_udc_enable(udc); udc_stop(udc); /* stop all usb activities */ udc->gadget.speed = USB_SPEED_UNKNOWN; stop_activity(udc, driver); mv_udc_disable(udc); spin_unlock_irqrestore(&udc->lock, flags); /* unbind gadget driver */ driver->unbind(&udc->gadget); udc->gadget.dev.driver = NULL; udc->driver = NULL; return 0; } static void mv_set_ptc(struct mv_udc *udc, u32 mode) { u32 portsc; portsc = readl(&udc->op_regs->portsc[0]); portsc |= mode << 16; writel(portsc, &udc->op_regs->portsc[0]); } static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req) { struct mv_udc *udc = the_controller; struct mv_req *req = container_of(_req, struct mv_req, req); unsigned long flags; dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode); spin_lock_irqsave(&udc->lock, flags); 
if (req->test_mode) { mv_set_ptc(udc, req->test_mode); req->test_mode = 0; } spin_unlock_irqrestore(&udc->lock, flags); } static int udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty) { int retval = 0; struct mv_req *req; struct mv_ep *ep; ep = &udc->eps[0]; udc->ep0_dir = direction; udc->ep0_state = WAIT_FOR_OUT_STATUS; req = udc->status_req; /* fill in the reqest structure */ if (empty == false) { *((u16 *) req->req.buf) = cpu_to_le16(status); req->req.length = 2; } else req->req.length = 0; req->ep = ep; req->req.status = -EINPROGRESS; req->req.actual = 0; if (udc->test_mode) { req->req.complete = prime_status_complete; req->test_mode = udc->test_mode; udc->test_mode = 0; } else req->req.complete = NULL; req->dtd_count = 0; if (req->req.dma == DMA_ADDR_INVALID) { req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, req->req.buf, req->req.length, ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->mapped = 1; } /* prime the data phase */ if (!req_to_dtd(req)) retval = queue_dtd(ep, req); else{ /* no mem */ retval = -ENOMEM; goto out; } if (retval) { dev_err(&udc->dev->dev, "response error on GET_STATUS request\n"); goto out; } list_add_tail(&req->queue, &ep->queue); return 0; out: return retval; } static void mv_udc_testmode(struct mv_udc *udc, u16 index) { if (index <= TEST_FORCE_EN) { udc->test_mode = index; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); } else dev_err(&udc->dev->dev, "This test mode(%d) is not supported\n", index); } static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup) { udc->dev_addr = (u8)setup->wValue; /* update usb state */ udc->usb_state = USB_STATE_ADDRESS; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); } static void ch9getstatus(struct mv_udc *udc, u8 ep_num, struct usb_ctrlrequest *setup) { u16 status = 0; int retval; if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) != (USB_DIR_IN | USB_TYPE_STANDARD)) return; if ((setup->bRequestType & 
USB_RECIP_MASK) == USB_RECIP_DEVICE) { status = 1 << USB_DEVICE_SELF_POWERED; status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP; } else if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_INTERFACE) { /* get interface status */ status = 0; } else if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) { u8 ep_num, direction; ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) ? EP_DIR_IN : EP_DIR_OUT; status = ep_is_stall(udc, ep_num, direction) << USB_ENDPOINT_HALT; } retval = udc_prime_status(udc, EP_DIR_IN, status, false); if (retval) ep0_stall(udc); else udc->ep0_state = DATA_STATE_XMIT; } static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup) { u8 ep_num; u8 direction; struct mv_ep *ep; if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) { switch (setup->wValue) { case USB_DEVICE_REMOTE_WAKEUP: udc->remote_wakeup = 0; break; default: goto out; } } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) { switch (setup->wValue) { case USB_ENDPOINT_HALT: ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) ? 
EP_DIR_IN : EP_DIR_OUT; if (setup->wValue != 0 || setup->wLength != 0 || ep_num > udc->max_eps) goto out; ep = &udc->eps[ep_num * 2 + direction]; if (ep->wedge == 1) break; spin_unlock(&udc->lock); ep_set_stall(udc, ep_num, direction, 0); spin_lock(&udc->lock); break; default: goto out; } } else goto out; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); out: return; } static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup) { u8 ep_num; u8 direction; if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) { switch (setup->wValue) { case USB_DEVICE_REMOTE_WAKEUP: udc->remote_wakeup = 1; break; case USB_DEVICE_TEST_MODE: if (setup->wIndex & 0xFF || udc->gadget.speed != USB_SPEED_HIGH) ep0_stall(udc); if (udc->usb_state != USB_STATE_CONFIGURED && udc->usb_state != USB_STATE_ADDRESS && udc->usb_state != USB_STATE_DEFAULT) ep0_stall(udc); mv_udc_testmode(udc, (setup->wIndex >> 8)); goto out; default: goto out; } } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) { switch (setup->wValue) { case USB_ENDPOINT_HALT: ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) ? 
EP_DIR_IN : EP_DIR_OUT; if (setup->wValue != 0 || setup->wLength != 0 || ep_num > udc->max_eps) goto out; spin_unlock(&udc->lock); ep_set_stall(udc, ep_num, direction, 1); spin_lock(&udc->lock); break; default: goto out; } } else goto out; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); out: return; } static void handle_setup_packet(struct mv_udc *udc, u8 ep_num, struct usb_ctrlrequest *setup) { bool delegate = false; nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN); dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n", setup->bRequestType, setup->bRequest, setup->wValue, setup->wIndex, setup->wLength); /* We process some stardard setup requests here */ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (setup->bRequest) { case USB_REQ_GET_STATUS: ch9getstatus(udc, ep_num, setup); break; case USB_REQ_SET_ADDRESS: ch9setaddress(udc, setup); break; case USB_REQ_CLEAR_FEATURE: ch9clearfeature(udc, setup); break; case USB_REQ_SET_FEATURE: ch9setfeature(udc, setup); break; default: delegate = true; } } else delegate = true; /* delegate USB standard requests to the gadget driver */ if (delegate == true) { /* USB requests handled by gadget */ if (setup->wLength) { /* DATA phase from gadget, STATUS phase from udc */ udc->ep0_dir = (setup->bRequestType & USB_DIR_IN) ? EP_DIR_IN : EP_DIR_OUT; spin_unlock(&udc->lock); if (udc->driver->setup(&udc->gadget, &udc->local_setup_buff) < 0) ep0_stall(udc); spin_lock(&udc->lock); udc->ep0_state = (setup->bRequestType & USB_DIR_IN) ? 
DATA_STATE_XMIT : DATA_STATE_RECV; } else { /* no DATA phase, IN STATUS phase from gadget */ udc->ep0_dir = EP_DIR_IN; spin_unlock(&udc->lock); if (udc->driver->setup(&udc->gadget, &udc->local_setup_buff) < 0) ep0_stall(udc); spin_lock(&udc->lock); udc->ep0_state = WAIT_FOR_OUT_STATUS; } } } /* complete DATA or STATUS phase of ep0 prime status phase if needed */ static void ep0_req_complete(struct mv_udc *udc, struct mv_ep *ep0, struct mv_req *req) { u32 new_addr; if (udc->usb_state == USB_STATE_ADDRESS) { /* set the new address */ new_addr = (u32)udc->dev_addr; writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT, &udc->op_regs->deviceaddr); } done(ep0, req, 0); switch (udc->ep0_state) { case DATA_STATE_XMIT: /* receive status phase */ if (udc_prime_status(udc, EP_DIR_OUT, 0, true)) ep0_stall(udc); break; case DATA_STATE_RECV: /* send status phase */ if (udc_prime_status(udc, EP_DIR_IN, 0 , true)) ep0_stall(udc); break; case WAIT_FOR_OUT_STATUS: udc->ep0_state = WAIT_FOR_SETUP; break; case WAIT_FOR_SETUP: dev_err(&udc->dev->dev, "unexpect ep0 packets\n"); break; default: ep0_stall(udc); break; } } static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr) { u32 temp; struct mv_dqh *dqh; dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT]; /* Clear bit in ENDPTSETUPSTAT */ writel((1 << ep_num), &udc->op_regs->epsetupstat); /* while a hazard exists when setup package arrives */ do { /* Set Setup Tripwire */ temp = readl(&udc->op_regs->usbcmd); writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd); /* Copy the setup packet to local buffer */ memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8); } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET)); /* Clear Setup Tripwire */ temp = readl(&udc->op_regs->usbcmd); writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd); } static void irq_process_tr_complete(struct mv_udc *udc) { u32 tmp, bit_pos; int i, ep_num = 0, direction = 0; struct mv_ep *curr_ep; struct mv_req *curr_req, *temp_req; 
int status; /* * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE * because the setup packets are to be read ASAP */ /* Process all Setup packet received interrupts */ tmp = readl(&udc->op_regs->epsetupstat); if (tmp) { for (i = 0; i < udc->max_eps; i++) { if (tmp & (1 << i)) { get_setup_data(udc, i, (u8 *)(&udc->local_setup_buff)); handle_setup_packet(udc, i, &udc->local_setup_buff); } } } /* Don't clear the endpoint setup status register here. * It is cleared as a setup packet is read out of the buffer */ /* Process non-setup transaction complete interrupts */ tmp = readl(&udc->op_regs->epcomplete); if (!tmp) return; writel(tmp, &udc->op_regs->epcomplete); for (i = 0; i < udc->max_eps * 2; i++) { ep_num = i >> 1; direction = i % 2; bit_pos = 1 << (ep_num + 16 * direction); if (!(bit_pos & tmp)) continue; if (i == 1) curr_ep = &udc->eps[0]; else curr_ep = &udc->eps[i]; /* process the req queue until an uncomplete request */ list_for_each_entry_safe(curr_req, temp_req, &curr_ep->queue, queue) { status = process_ep_req(udc, i, curr_req); if (status) break; /* write back status to req */ curr_req->req.status = status; /* ep0 request completion */ if (ep_num == 0) { ep0_req_complete(udc, curr_ep, curr_req); break; } else { done(curr_ep, curr_req, status); } } } } void irq_process_reset(struct mv_udc *udc) { u32 tmp; unsigned int loops; udc->ep0_dir = EP_DIR_OUT; udc->ep0_state = WAIT_FOR_SETUP; udc->remote_wakeup = 0; /* default to 0 on reset */ /* The address bits are past bit 25-31. 
Set the address */ tmp = readl(&udc->op_regs->deviceaddr); tmp &= ~(USB_DEVICE_ADDRESS_MASK); writel(tmp, &udc->op_regs->deviceaddr); /* Clear all the setup token semaphores */ tmp = readl(&udc->op_regs->epsetupstat); writel(tmp, &udc->op_regs->epsetupstat); /* Clear all the endpoint complete status bits */ tmp = readl(&udc->op_regs->epcomplete); writel(tmp, &udc->op_regs->epcomplete); /* wait until all endptprime bits cleared */ loops = LOOPS(PRIME_TIMEOUT); while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) { if (loops == 0) { dev_err(&udc->dev->dev, "Timeout for ENDPTPRIME = 0x%x\n", readl(&udc->op_regs->epprime)); break; } loops--; udelay(LOOPS_USEC); } /* Write 1s to the Flush register */ writel((u32)~0, &udc->op_regs->epflush); if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) { dev_info(&udc->dev->dev, "usb bus reset\n"); udc->usb_state = USB_STATE_DEFAULT; /* reset all the queues, stop all USB activities */ stop_activity(udc, udc->driver); } else { dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n", readl(&udc->op_regs->portsc)); /* * re-initialize * controller reset */ udc_reset(udc); /* reset all the queues, stop all USB activities */ stop_activity(udc, udc->driver); /* reset ep0 dQH and endptctrl */ ep0_reset(udc); /* enable interrupt and set controller to run state */ udc_start(udc); udc->usb_state = USB_STATE_ATTACHED; } } static void handle_bus_resume(struct mv_udc *udc) { udc->usb_state = udc->resume_state; udc->resume_state = 0; /* report resume to the driver */ if (udc->driver) { if (udc->driver->resume) { spin_unlock(&udc->lock); udc->driver->resume(&udc->gadget); spin_lock(&udc->lock); } } } static void irq_process_suspend(struct mv_udc *udc) { udc->resume_state = udc->usb_state; udc->usb_state = USB_STATE_SUSPENDED; if (udc->driver->suspend) { spin_unlock(&udc->lock); udc->driver->suspend(&udc->gadget); spin_lock(&udc->lock); } } static void irq_process_port_change(struct mv_udc *udc) { u32 portsc; portsc = 
readl(&udc->op_regs->portsc[0]); if (!(portsc & PORTSCX_PORT_RESET)) { /* Get the speed */ u32 speed = portsc & PORTSCX_PORT_SPEED_MASK; switch (speed) { case PORTSCX_PORT_SPEED_HIGH: udc->gadget.speed = USB_SPEED_HIGH; break; case PORTSCX_PORT_SPEED_FULL: udc->gadget.speed = USB_SPEED_FULL; break; case PORTSCX_PORT_SPEED_LOW: udc->gadget.speed = USB_SPEED_LOW; break; default: udc->gadget.speed = USB_SPEED_UNKNOWN; break; } } if (portsc & PORTSCX_PORT_SUSPEND) { udc->resume_state = udc->usb_state; udc->usb_state = USB_STATE_SUSPENDED; if (udc->driver->suspend) { spin_unlock(&udc->lock); udc->driver->suspend(&udc->gadget); spin_lock(&udc->lock); } } if (!(portsc & PORTSCX_PORT_SUSPEND) && udc->usb_state == USB_STATE_SUSPENDED) { handle_bus_resume(udc); } if (!udc->resume_state) udc->usb_state = USB_STATE_DEFAULT; } static void irq_process_error(struct mv_udc *udc) { /* Increment the error count */ udc->errors++; } static irqreturn_t mv_udc_irq(int irq, void *dev) { struct mv_udc *udc = (struct mv_udc *)dev; u32 status, intr; spin_lock(&udc->lock); status = readl(&udc->op_regs->usbsts); intr = readl(&udc->op_regs->usbintr); status &= intr; if (status == 0) { spin_unlock(&udc->lock); return IRQ_NONE; } /* Clear all the interrupts occurred */ writel(status, &udc->op_regs->usbsts); if (status & USBSTS_ERR) irq_process_error(udc); if (status & USBSTS_RESET) irq_process_reset(udc); if (status & USBSTS_PORT_CHANGE) irq_process_port_change(udc); if (status & USBSTS_INT) irq_process_tr_complete(udc); if (status & USBSTS_SUSPEND) irq_process_suspend(udc); spin_unlock(&udc->lock); return IRQ_HANDLED; } static irqreturn_t mv_udc_vbus_irq(int irq, void *dev) { struct mv_udc *udc = (struct mv_udc *)dev; /* polling VBUS and init phy may cause too much time*/ if (udc->qwork) queue_work(udc->qwork, &udc->vbus_work); return IRQ_HANDLED; } static void mv_udc_vbus_work(struct work_struct *work) { struct mv_udc *udc; unsigned int vbus; udc = container_of(work, struct mv_udc, vbus_work); 
if (!udc->pdata->vbus) return; vbus = udc->pdata->vbus->poll(); dev_info(&udc->dev->dev, "vbus is %d\n", vbus); if (vbus == VBUS_HIGH) mv_udc_vbus_session(&udc->gadget, 1); else if (vbus == VBUS_LOW) mv_udc_vbus_session(&udc->gadget, 0); } /* release device structure */ static void gadget_release(struct device *_dev) { struct mv_udc *udc = the_controller; complete(udc->done); } static int __devexit mv_udc_remove(struct platform_device *dev) { struct mv_udc *udc = the_controller; int clk_i; usb_del_gadget_udc(&udc->gadget); if (udc->qwork) { flush_workqueue(udc->qwork); destroy_workqueue(udc->qwork); } if (udc->pdata && udc->pdata->vbus && udc->clock_gating) free_irq(udc->pdata->vbus->irq, &dev->dev); /* free memory allocated in probe */ if (udc->dtd_pool) dma_pool_destroy(udc->dtd_pool); if (udc->ep_dqh) dma_free_coherent(&dev->dev, udc->ep_dqh_size, udc->ep_dqh, udc->ep_dqh_dma); kfree(udc->eps); if (udc->irq) free_irq(udc->irq, &dev->dev); mv_udc_disable(udc); if (udc->cap_regs) iounmap(udc->cap_regs); udc->cap_regs = NULL; if (udc->phy_regs) iounmap((void *)udc->phy_regs); udc->phy_regs = 0; if (udc->status_req) { kfree(udc->status_req->req.buf); kfree(udc->status_req); } for (clk_i = 0; clk_i <= udc->clknum; clk_i++) clk_put(udc->clk[clk_i]); device_unregister(&udc->gadget.dev); /* free dev, wait for the release() finished */ wait_for_completion(udc->done); kfree(udc); the_controller = NULL; return 0; } static int __devinit mv_udc_probe(struct platform_device *dev) { struct mv_usb_platform_data *pdata = dev->dev.platform_data; struct mv_udc *udc; int retval = 0; int clk_i = 0; struct resource *r; size_t size; if (pdata == NULL) { dev_err(&dev->dev, "missing platform_data\n"); return -ENODEV; } size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum; udc = kzalloc(size, GFP_KERNEL); if (udc == NULL) { dev_err(&dev->dev, "failed to allocate memory for udc\n"); return -ENOMEM; } the_controller = udc; udc->done = &release_done; udc->pdata = 
dev->dev.platform_data; spin_lock_init(&udc->lock); udc->dev = dev; udc->clknum = pdata->clknum; for (clk_i = 0; clk_i < udc->clknum; clk_i++) { udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]); if (IS_ERR(udc->clk[clk_i])) { retval = PTR_ERR(udc->clk[clk_i]); goto err_put_clk; } } r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs"); if (r == NULL) { dev_err(&dev->dev, "no I/O memory resource defined\n"); retval = -ENODEV; goto err_put_clk; } udc->cap_regs = (struct mv_cap_regs __iomem *) ioremap(r->start, resource_size(r)); if (udc->cap_regs == NULL) { dev_err(&dev->dev, "failed to map I/O memory\n"); retval = -EBUSY; goto err_put_clk; } r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs"); if (r == NULL) { dev_err(&dev->dev, "no phy I/O memory resource defined\n"); retval = -ENODEV; goto err_iounmap_capreg; } udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r)); if (udc->phy_regs == 0) { dev_err(&dev->dev, "failed to map phy I/O memory\n"); retval = -EBUSY; goto err_iounmap_capreg; } /* we will acces controller register, so enable the clk */ udc_clock_enable(udc); if (pdata->phy_init) { retval = pdata->phy_init(udc->phy_regs); if (retval) { dev_err(&dev->dev, "phy init error %d\n", retval); goto err_iounmap_phyreg; } } udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs + (readl(&udc->cap_regs->caplength_hciversion) & CAPLENGTH_MASK)); udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK; /* * some platform will use usb to download image, it may not disconnect * usb gadget before loading kernel. So first stop udc here. 
*/ udc_stop(udc); writel(0xFFFFFFFF, &udc->op_regs->usbsts); size = udc->max_eps * sizeof(struct mv_dqh) *2; size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1); udc->ep_dqh = dma_alloc_coherent(&dev->dev, size, &udc->ep_dqh_dma, GFP_KERNEL); if (udc->ep_dqh == NULL) { dev_err(&dev->dev, "allocate dQH memory failed\n"); retval = -ENOMEM; goto err_disable_clock; } udc->ep_dqh_size = size; /* create dTD dma_pool resource */ udc->dtd_pool = dma_pool_create("mv_dtd", &dev->dev, sizeof(struct mv_dtd), DTD_ALIGNMENT, DMA_BOUNDARY); if (!udc->dtd_pool) { retval = -ENOMEM; goto err_free_dma; } size = udc->max_eps * sizeof(struct mv_ep) *2; udc->eps = kzalloc(size, GFP_KERNEL); if (udc->eps == NULL) { dev_err(&dev->dev, "allocate ep memory failed\n"); retval = -ENOMEM; goto err_destroy_dma; } /* initialize ep0 status request structure */ udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL); if (!udc->status_req) { dev_err(&dev->dev, "allocate status_req memory failed\n"); retval = -ENOMEM; goto err_free_eps; } INIT_LIST_HEAD(&udc->status_req->queue); /* allocate a small amount of memory to get valid address */ udc->status_req->req.buf = kzalloc(8, GFP_KERNEL); udc->status_req->req.dma = DMA_ADDR_INVALID; udc->resume_state = USB_STATE_NOTATTACHED; udc->usb_state = USB_STATE_POWERED; udc->ep0_dir = EP_DIR_OUT; udc->remote_wakeup = 0; r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0); if (r == NULL) { dev_err(&dev->dev, "no IRQ resource defined\n"); retval = -ENODEV; goto err_free_status_req; } udc->irq = r->start; if (request_irq(udc->irq, mv_udc_irq, IRQF_SHARED, driver_name, udc)) { dev_err(&dev->dev, "Request irq %d for UDC failed\n", udc->irq); retval = -ENODEV; goto err_free_status_req; } /* initialize gadget structure */ udc->gadget.ops = &mv_ops; /* usb_gadget_ops */ udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */ INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */ udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */ udc->gadget.is_dualspeed = 1; /* 
support dual speed */ /* the "gadget" abstracts/virtualizes the controller */ dev_set_name(&udc->gadget.dev, "gadget"); udc->gadget.dev.parent = &dev->dev; udc->gadget.dev.dma_mask = dev->dev.dma_mask; udc->gadget.dev.release = gadget_release; udc->gadget.name = driver_name; /* gadget name */ retval = device_register(&udc->gadget.dev); if (retval) goto err_free_irq; eps_init(udc); /* VBUS detect: we can disable/enable clock on demand.*/ if (pdata->vbus) { udc->clock_gating = 1; retval = request_threaded_irq(pdata->vbus->irq, NULL, mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc); if (retval) { dev_info(&dev->dev, "Can not request irq for VBUS, " "disable clock gating\n"); udc->clock_gating = 0; } udc->qwork = create_singlethread_workqueue("mv_udc_queue"); if (!udc->qwork) { dev_err(&dev->dev, "cannot create workqueue\n"); retval = -ENOMEM; goto err_unregister; } INIT_WORK(&udc->vbus_work, mv_udc_vbus_work); } /* * When clock gating is supported, we can disable clk and phy. * If not, it means that VBUS detection is not supported, we * have to enable vbus active all the time to let controller work. */ if (udc->clock_gating) { if (udc->pdata->phy_deinit) udc->pdata->phy_deinit(udc->phy_regs); udc_clock_disable(udc); } else udc->vbus_active = 1; retval = usb_add_gadget_udc(&dev->dev, &udc->gadget); if (retval) goto err_unregister; dev_info(&dev->dev, "successful probe UDC device %s clock gating.\n", udc->clock_gating ? 
"with" : "without"); return 0; err_unregister: if (udc->pdata && udc->pdata->vbus && udc->clock_gating) free_irq(pdata->vbus->irq, &dev->dev); device_unregister(&udc->gadget.dev); err_free_irq: free_irq(udc->irq, &dev->dev); err_free_status_req: kfree(udc->status_req->req.buf); kfree(udc->status_req); err_free_eps: kfree(udc->eps); err_destroy_dma: dma_pool_destroy(udc->dtd_pool); err_free_dma: dma_free_coherent(&dev->dev, udc->ep_dqh_size, udc->ep_dqh, udc->ep_dqh_dma); err_disable_clock: if (udc->pdata->phy_deinit) udc->pdata->phy_deinit(udc->phy_regs); udc_clock_disable(udc); err_iounmap_phyreg: iounmap((void *)udc->phy_regs); err_iounmap_capreg: iounmap(udc->cap_regs); err_put_clk: for (clk_i--; clk_i >= 0; clk_i--) clk_put(udc->clk[clk_i]); the_controller = NULL; kfree(udc); return retval; } #ifdef CONFIG_PM static int mv_udc_suspend(struct device *_dev) { struct mv_udc *udc = the_controller; udc_stop(udc); return 0; } static int mv_udc_resume(struct device *_dev) { struct mv_udc *udc = the_controller; int retval; if (udc->pdata->phy_init) { retval = udc->pdata->phy_init(udc->phy_regs); if (retval) { dev_err(&udc->dev->dev, "init phy error %d when resume back\n", retval); return retval; } } udc_reset(udc); ep0_reset(udc); udc_start(udc); return 0; } static const struct dev_pm_ops mv_udc_pm_ops = { .suspend = mv_udc_suspend, .resume = mv_udc_resume, }; #endif static void mv_udc_shutdown(struct platform_device *dev) { struct mv_udc *udc = the_controller; u32 mode; /* reset controller mode to IDLE */ mode = readl(&udc->op_regs->usbmode); mode &= ~3; writel(mode, &udc->op_regs->usbmode); } static struct platform_driver udc_driver = { .probe = mv_udc_probe, .remove = __exit_p(mv_udc_remove), .shutdown = mv_udc_shutdown, .driver = { .owner = THIS_MODULE, .name = "pxa-u2o", #ifdef CONFIG_PM .pm = &mv_udc_pm_ops, #endif }, }; MODULE_ALIAS("platform:pxa-u2o"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>"); 
MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); static int __init init(void) { return platform_driver_register(&udc_driver); } module_init(init); static void __exit cleanup(void) { platform_driver_unregister(&udc_driver); } module_exit(cleanup);
gpl-2.0
nycbjr/android_kernel_asus_tf700_3_1
arch/powerpc/sysdev/fsl_rio.c
396
47036
/* * Freescale MPC85xx/MPC86xx RapidIO support * * Copyright 2009 Sysgo AG * Thomas Moll <thomas.moll@sysgo.com> * - fixed maintenance access routines, check for aligned access * * Copyright 2009 Integrated Device Technology, Inc. * Alex Bounine <alexandre.bounine@idt.com> * - Added Port-Write message handling * - Added Machine Check exception handling * * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc. * Zhang Wei <wei.zhang@freescale.com> * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/of_platform.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/kfifo.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/uaccess.h> #undef DEBUG_PW /* Port-Write debugging */ /* RapidIO definition irq, which read from OF-tree */ #define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq) #define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq) #define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) #define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) #define IPWSR_CLEAR 0x98 #define OMSR_CLEAR 0x1cb3 #define IMSR_CLEAR 0x491 #define IDSR_CLEAR 0x91 #define ODSR_CLEAR 0x1c00 #define LTLEECSR_ENABLE_ALL 0xFFC000FC #define ESCSR_CLEAR 0x07120204 #define IECSR_CLEAR 0x80000000 #define RIO_PORT1_EDCSR 0x0640 #define RIO_PORT2_EDCSR 0x0680 #define RIO_PORT1_IECSR 0x10130 #define RIO_PORT2_IECSR 0x101B0 #define RIO_IM0SR 0x13064 #define RIO_IM1SR 0x13164 #define RIO_OM0SR 0x13004 #define RIO_OM1SR 0x13104 #define 
RIO_ATMU_REGS_OFFSET 0x10c00 #define RIO_P_MSG_REGS_OFFSET 0x11000 #define RIO_S_MSG_REGS_OFFSET 0x13000 #define RIO_GCCSR 0x13c #define RIO_ESCSR 0x158 #define RIO_PORT2_ESCSR 0x178 #define RIO_CCSR 0x15c #define RIO_LTLEDCSR 0x0608 #define RIO_LTLEDCSR_IER 0x80000000 #define RIO_LTLEDCSR_PRT 0x01000000 #define RIO_LTLEECSR 0x060c #define RIO_EPWISR 0x10010 #define RIO_ISR_AACR 0x10120 #define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ #define RIO_MAINT_WIN_SIZE 0x400000 #define RIO_DBELL_WIN_SIZE 0x1000 #define RIO_MSG_OMR_MUI 0x00000002 #define RIO_MSG_OSR_TE 0x00000080 #define RIO_MSG_OSR_QOI 0x00000020 #define RIO_MSG_OSR_QFI 0x00000010 #define RIO_MSG_OSR_MUB 0x00000004 #define RIO_MSG_OSR_EOMI 0x00000002 #define RIO_MSG_OSR_QEI 0x00000001 #define RIO_MSG_IMR_MI 0x00000002 #define RIO_MSG_ISR_TE 0x00000080 #define RIO_MSG_ISR_QFI 0x00000010 #define RIO_MSG_ISR_DIQI 0x00000001 #define RIO_IPWMR_SEN 0x00100000 #define RIO_IPWMR_QFIE 0x00000100 #define RIO_IPWMR_EIE 0x00000020 #define RIO_IPWMR_CQ 0x00000002 #define RIO_IPWMR_PWE 0x00000001 #define RIO_IPWSR_QF 0x00100000 #define RIO_IPWSR_TE 0x00000080 #define RIO_IPWSR_QFI 0x00000010 #define RIO_IPWSR_PWD 0x00000008 #define RIO_IPWSR_PWB 0x00000004 /* EPWISR Error match value */ #define RIO_EPWISR_PINT1 0x80000000 #define RIO_EPWISR_PINT2 0x40000000 #define RIO_EPWISR_MU 0x00000002 #define RIO_EPWISR_PW 0x00000001 #define RIO_MSG_DESC_SIZE 32 #define RIO_MSG_BUFFER_SIZE 4096 #define RIO_MIN_TX_RING_SIZE 2 #define RIO_MAX_TX_RING_SIZE 2048 #define RIO_MIN_RX_RING_SIZE 2 #define RIO_MAX_RX_RING_SIZE 2048 #define DOORBELL_DMR_DI 0x00000002 #define DOORBELL_DSR_TE 0x00000080 #define DOORBELL_DSR_QFI 0x00000010 #define DOORBELL_DSR_DIQI 0x00000001 #define DOORBELL_TID_OFFSET 0x02 #define DOORBELL_SID_OFFSET 0x04 #define DOORBELL_INFO_OFFSET 0x06 #define DOORBELL_MESSAGE_SIZE 0x08 #define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET)) #define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET)) #define DBELL_INF(x) 
(*(u16 *)(x + DOORBELL_INFO_OFFSET)) struct rio_atmu_regs { u32 rowtar; u32 rowtear; u32 rowbar; u32 pad2; u32 rowar; u32 pad3[3]; }; struct rio_msg_regs { u32 omr; /* 0xD_3000 - Outbound message 0 mode register */ u32 osr; /* 0xD_3004 - Outbound message 0 status register */ u32 pad1; u32 odqdpar; /* 0xD_300C - Outbound message 0 descriptor queue dequeue pointer address register */ u32 pad2; u32 osar; /* 0xD_3014 - Outbound message 0 source address register */ u32 odpr; /* 0xD_3018 - Outbound message 0 destination port register */ u32 odatr; /* 0xD_301C - Outbound message 0 destination attributes Register*/ u32 odcr; /* 0xD_3020 - Outbound message 0 double-word count register */ u32 pad3; u32 odqepar; /* 0xD_3028 - Outbound message 0 descriptor queue enqueue pointer address register */ u32 pad4[13]; u32 imr; /* 0xD_3060 - Inbound message 0 mode register */ u32 isr; /* 0xD_3064 - Inbound message 0 status register */ u32 pad5; u32 ifqdpar; /* 0xD_306C - Inbound message 0 frame queue dequeue pointer address register*/ u32 pad6; u32 ifqepar; /* 0xD_3074 - Inbound message 0 frame queue enqueue pointer address register */ u32 pad7[226]; u32 odmr; /* 0xD_3400 - Outbound doorbell mode register */ u32 odsr; /* 0xD_3404 - Outbound doorbell status register */ u32 res0[4]; u32 oddpr; /* 0xD_3418 - Outbound doorbell destination port register */ u32 oddatr; /* 0xD_341c - Outbound doorbell destination attributes register */ u32 res1[3]; u32 odretcr; /* 0xD_342C - Outbound doorbell retry error threshold configuration register */ u32 res2[12]; u32 dmr; /* 0xD_3460 - Inbound doorbell mode register */ u32 dsr; /* 0xD_3464 - Inbound doorbell status register */ u32 pad8; u32 dqdpar; /* 0xD_346C - Inbound doorbell queue dequeue Pointer address register */ u32 pad9; u32 dqepar; /* 0xD_3474 - Inbound doorbell Queue enqueue pointer address register */ u32 pad10[26]; u32 pwmr; /* 0xD_34E0 - Inbound port-write mode register */ u32 pwsr; /* 0xD_34E4 - Inbound port-write status register */ u32 
epwqbar; /* 0xD_34E8 - Extended Port-Write Queue Base Address register */ u32 pwqbar; /* 0xD_34EC - Inbound port-write queue base address register */ }; struct rio_tx_desc { u32 res1; u32 saddr; u32 dport; u32 dattr; u32 res2; u32 res3; u32 dwcnt; u32 res4; }; struct rio_dbell_ring { void *virt; dma_addr_t phys; }; struct rio_msg_tx_ring { void *virt; dma_addr_t phys; void *virt_buffer[RIO_MAX_TX_RING_SIZE]; dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE]; int tx_slot; int size; void *dev_id; }; struct rio_msg_rx_ring { void *virt; dma_addr_t phys; void *virt_buffer[RIO_MAX_RX_RING_SIZE]; int rx_slot; int size; void *dev_id; }; struct rio_port_write_msg { void *virt; dma_addr_t phys; u32 msg_count; u32 err_count; u32 discard_count; }; struct rio_priv { struct device *dev; void __iomem *regs_win; struct rio_atmu_regs __iomem *atmu_regs; struct rio_atmu_regs __iomem *maint_atmu_regs; struct rio_atmu_regs __iomem *dbell_atmu_regs; void __iomem *dbell_win; void __iomem *maint_win; struct rio_msg_regs __iomem *msg_regs; struct rio_dbell_ring dbell_ring; struct rio_msg_tx_ring msg_tx_ring; struct rio_msg_rx_ring msg_rx_ring; struct rio_port_write_msg port_write_msg; int bellirq; int txirq; int rxirq; int pwirq; struct work_struct pw_work; struct kfifo pw_fifo; spinlock_t pw_fifo_lock; }; #define __fsl_read_rio_config(x, addr, err, op) \ __asm__ __volatile__( \ "1: "op" %1,0(%2)\n" \ " eieio\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %1,-1\n" \ " li %0,%3\n" \ " b 2b\n" \ ".section __ex_table,\"a\"\n" \ " .align 2\n" \ " .long 1b,3b\n" \ ".text" \ : "=r" (err), "=r" (x) \ : "b" (addr), "i" (-EFAULT), "0" (err)) static void __iomem *rio_regs_win; #ifdef CONFIG_E500 int fsl_rio_mcheck_exception(struct pt_regs *regs) { const struct exception_table_entry *entry; unsigned long reason; if (!rio_regs_win) return 0; reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { /* Check if we are prepared to handle this fault */ 
entry = search_exception_tables(regs->nip); if (entry) { pr_debug("RIO: %s - MC Exception handled\n", __func__); out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); regs->msr |= MSR_RI; regs->nip = entry->fixup; return 1; } } return 0; } EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception); #endif /** * fsl_rio_doorbell_send - Send a MPC85xx doorbell message * @mport: RapidIO master port info * @index: ID of RapidIO interface * @destid: Destination ID of target device * @data: 16-bit info field of RapidIO doorbell message * * Sends a MPC85xx doorbell message. Returns %0 on success or * %-EINVAL on failure. */ static int fsl_rio_doorbell_send(struct rio_mport *mport, int index, u16 destid, u16 data) { struct rio_priv *priv = mport->priv; pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", index, destid, data); switch (mport->phy_type) { case RIO_PHY_PARALLEL: out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22); out_be16(priv->dbell_win, data); break; case RIO_PHY_SERIAL: /* In the serial version silicons, such as MPC8548, MPC8641, * below operations is must be. */ out_be32(&priv->msg_regs->odmr, 0x00000000); out_be32(&priv->msg_regs->odretcr, 0x00000004); out_be32(&priv->msg_regs->oddpr, destid << 16); out_be32(&priv->msg_regs->oddatr, data); out_be32(&priv->msg_regs->odmr, 0x00000001); break; } return 0; } /** * fsl_local_config_read - Generate a MPC85xx local config space read * @mport: RapidIO master port info * @index: ID of RapdiIO interface * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @data: Value to be read into * * Generates a MPC85xx local configuration space read. Returns %0 on * success or %-EINVAL on failure. 
*/ static int fsl_local_config_read(struct rio_mport *mport, int index, u32 offset, int len, u32 *data) { struct rio_priv *priv = mport->priv; pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index, offset); *data = in_be32(priv->regs_win + offset); return 0; } /** * fsl_local_config_write - Generate a MPC85xx local config space write * @mport: RapidIO master port info * @index: ID of RapdiIO interface * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @data: Value to be written * * Generates a MPC85xx local configuration space write. Returns %0 on * success or %-EINVAL on failure. */ static int fsl_local_config_write(struct rio_mport *mport, int index, u32 offset, int len, u32 data) { struct rio_priv *priv = mport->priv; pr_debug ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n", index, offset, data); out_be32(priv->regs_win + offset, data); return 0; } /** * fsl_rio_config_read - Generate a MPC85xx read maintenance transaction * @mport: RapidIO master port info * @index: ID of RapdiIO interface * @destid: Destination ID of transaction * @hopcount: Number of hops to target device * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @val: Location to be read into * * Generates a MPC85xx read maintenance transaction. Returns %0 on * success or %-EINVAL on failure. 
*/ static int fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 *val) { struct rio_priv *priv = mport->priv; u8 *data; u32 rval, err = 0; pr_debug ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n", index, destid, hopcount, offset, len); /* 16MB maintenance window possible */ /* allow only aligned access to maintenance registers */ if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) return -EINVAL; out_be32(&priv->maint_atmu_regs->rowtar, (destid << 22) | (hopcount << 12) | (offset >> 12)); out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); switch (len) { case 1: __fsl_read_rio_config(rval, data, err, "lbz"); break; case 2: __fsl_read_rio_config(rval, data, err, "lhz"); break; case 4: __fsl_read_rio_config(rval, data, err, "lwz"); break; default: return -EINVAL; } if (err) { pr_debug("RIO: cfg_read error %d for %x:%x:%x\n", err, destid, hopcount, offset); } *val = rval; return err; } /** * fsl_rio_config_write - Generate a MPC85xx write maintenance transaction * @mport: RapidIO master port info * @index: ID of RapdiIO interface * @destid: Destination ID of transaction * @hopcount: Number of hops to target device * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @val: Value to be written * * Generates an MPC85xx write maintenance transaction. Returns %0 on * success or %-EINVAL on failure. 
*/ static int fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 val) { struct rio_priv *priv = mport->priv; u8 *data; pr_debug ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", index, destid, hopcount, offset, len, val); /* 16MB maintenance windows possible */ /* allow only aligned access to maintenance registers */ if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) return -EINVAL; out_be32(&priv->maint_atmu_regs->rowtar, (destid << 22) | (hopcount << 12) | (offset >> 12)); out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); switch (len) { case 1: out_8((u8 *) data, val); break; case 2: out_be16((u16 *) data, val); break; case 4: out_be32((u32 *) data, val); break; default: return -EINVAL; } return 0; } /** * fsl_add_outb_message - Add message to the MPC85xx outbound message queue * @mport: Master port with outbound message queue * @rdev: Target of outbound message * @mbox: Outbound mailbox * @buffer: Message to add to outbound queue * @len: Length of message * * Adds the @buffer message to the MPC85xx outbound message queue. Returns * %0 on success or %-EINVAL on failure. 
*/ static int fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, void *buffer, size_t len) { struct rio_priv *priv = mport->priv; u32 omr; struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt + priv->msg_tx_ring.tx_slot; int ret = 0; pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \ "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len); if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { ret = -EINVAL; goto out; } /* Copy and clear rest of buffer */ memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer, len); if (len < (RIO_MAX_MSG_SIZE - 4)) memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot] + len, 0, RIO_MAX_MSG_SIZE - len); switch (mport->phy_type) { case RIO_PHY_PARALLEL: /* Set mbox field for message */ desc->dport = mbox & 0x3; /* Enable EOMI interrupt, set priority, and set destid */ desc->dattr = 0x28000000 | (rdev->destid << 2); break; case RIO_PHY_SERIAL: /* Set mbox field for message, and set destid */ desc->dport = (rdev->destid << 16) | (mbox & 0x3); /* Enable EOMI interrupt and priority */ desc->dattr = 0x28000000; break; } /* Set transfer size aligned to next power of 2 (in double words) */ desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len); /* Set snooping and source buffer address */ desc->saddr = 0x00000004 | priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot]; /* Increment enqueue pointer */ omr = in_be32(&priv->msg_regs->omr); out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI); /* Go to next descriptor */ if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size) priv->msg_tx_ring.tx_slot = 0; out: return ret; } /** * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler * @irq: Linux interrupt number * @dev_instance: Pointer to interrupt-specific data * * Handles outbound message interrupts. Executes a register outbound * mailbox event handler and acks the interrupt occurrence. 
*/ static irqreturn_t fsl_rio_tx_handler(int irq, void *dev_instance) { int osr; struct rio_mport *port = (struct rio_mport *)dev_instance; struct rio_priv *priv = port->priv; osr = in_be32(&priv->msg_regs->osr); if (osr & RIO_MSG_OSR_TE) { pr_info("RIO: outbound message transmission error\n"); out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE); goto out; } if (osr & RIO_MSG_OSR_QOI) { pr_info("RIO: outbound message queue overflow\n"); out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI); goto out; } if (osr & RIO_MSG_OSR_EOMI) { u32 dqp = in_be32(&priv->msg_regs->odqdpar); int slot = (dqp - priv->msg_tx_ring.phys) >> 5; port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1, slot); /* Ack the end-of-message interrupt */ out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI); } out: return IRQ_HANDLED; } /** * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox * @mport: Master port implementing the outbound message unit * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox to open * @entries: Number of entries in the outbound mailbox ring * * Initializes buffer ring, request the outbound message interrupt, * and enables the outbound message unit. Returns %0 on success and * %-EINVAL or %-ENOMEM on failure. 
 */
static int fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, j, rc = 0;
	struct rio_priv *priv = mport->priv;

	/* Ring size must be a power of two within the supported bounds. */
	if ((entries < RIO_MIN_TX_RING_SIZE) ||
	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize shadow copy ring */
	priv->msg_tx_ring.dev_id = dev_id;
	priv->msg_tx_ring.size = entries;

	/* One DMA-coherent message buffer per ring slot. */
	for (i = 0; i < priv->msg_tx_ring.size; i++) {
		priv->msg_tx_ring.virt_buffer[i] =
			dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
					   &priv->msg_tx_ring.phys_buffer[i],
					   GFP_KERNEL);
		if (!priv->msg_tx_ring.virt_buffer[i]) {
			rc = -ENOMEM;
			/*
			 * Unwind the buffers allocated so far.  The whole
			 * ring is scanned; the NULL check skips slots never
			 * reached (presumably priv is zero-initialized by
			 * the caller's kzalloc — TODO confirm).
			 */
			for (j = 0; j < priv->msg_tx_ring.size; j++)
				if (priv->msg_tx_ring.virt_buffer[j])
					dma_free_coherent(priv->dev,
							  RIO_MSG_BUFFER_SIZE,
							  priv->msg_tx_ring.virt_buffer[j],
							  priv->msg_tx_ring.phys_buffer[j]);
			goto out;
		}
	}

	/* Initialize outbound message descriptor ring */
	priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
						    priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
						    &priv->msg_tx_ring.phys,
						    GFP_KERNEL);
	if (!priv->msg_tx_ring.virt) {
		rc = -ENOMEM;
		goto out_dma;
	}
	memset(priv->msg_tx_ring.virt, 0,
	       priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
	priv->msg_tx_ring.tx_slot = 0;

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys);
	out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys);

	/* Configure for snooping */
	out_be32(&priv->msg_regs->osar, 0x00000004);

	/* Clear interrupt status */
	out_be32(&priv->msg_regs->osr, 0x000000b3);

	/* Hook up outbound message handler */
	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
			 "msg_tx", (void *)mport);
	if (rc < 0)
		goto out_irq;

	/*
	 * Configure outbound message unit
	 *      Snooping
	 *      Interrupts (all enabled, except QEIE)
	 *      Chaining mode
	 *      Disable
	 */
	out_be32(&priv->msg_regs->omr, 0x00100220);

	/* Set number of entries */
	out_be32(&priv->msg_regs->omr,
		 in_be32(&priv->msg_regs->omr) |
		 ((get_bitmask_order(entries) - 2) << 12));

	/* Now enable the unit */
	out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1);

out:
	return rc;

out_irq:
	/* Descriptor ring was allocated: release it, then the buffers. */
	dma_free_coherent(priv->dev,
			  priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);

out_dma:
	for (i = 0; i < priv->msg_tx_ring.size; i++)
		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				  priv->msg_tx_ring.virt_buffer[i],
				  priv->msg_tx_ring.phys_buffer[i]);

	return rc;
}

/**
 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the outbound message unit, free all buffers, and
 * frees the outbound message interrupt.
 */
static void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;

	/* Disable outbound message unit (omr is the *outbound* mode reg). */
	out_be32(&priv->msg_regs->omr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev,
			  priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_TX(mport), (void *)mport);
}

/**
 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles inbound message interrupts. Executes a registered inbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t fsl_rio_rx_handler(int irq, void *dev_instance)
{
	int isr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;

	/* Read the inbound message unit status. */
	isr = in_be32(&priv->msg_regs->isr);

	if (isr & RIO_MSG_ISR_TE) {
		pr_info("RIO: inbound message reception error\n");
		out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (isr & RIO_MSG_ISR_DIQI) {
		/*
		 * We implement *only* mailbox 0, but can receive messages
		 * for any mailbox/letter to that mailbox destination. So,
		 * make the callback with an unknown/invalid mailbox number
		 * argument.
*/ port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1); /* Ack the queueing interrupt */ out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI); } out: return IRQ_HANDLED; } /** * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox * @mport: Master port implementing the inbound message unit * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox to open * @entries: Number of entries in the inbound mailbox ring * * Initializes buffer ring, request the inbound message interrupt, * and enables the inbound message unit. Returns %0 on success * and %-EINVAL or %-ENOMEM on failure. */ static int fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) { int i, rc = 0; struct rio_priv *priv = mport->priv; if ((entries < RIO_MIN_RX_RING_SIZE) || (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) { rc = -EINVAL; goto out; } /* Initialize client buffer ring */ priv->msg_rx_ring.dev_id = dev_id; priv->msg_rx_ring.size = entries; priv->msg_rx_ring.rx_slot = 0; for (i = 0; i < priv->msg_rx_ring.size; i++) priv->msg_rx_ring.virt_buffer[i] = NULL; /* Initialize inbound message ring */ priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE, &priv->msg_rx_ring.phys, GFP_KERNEL); if (!priv->msg_rx_ring.virt) { rc = -ENOMEM; goto out; } /* Point dequeue/enqueue pointers at first entry in ring */ out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys); out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys); /* Clear interrupt status */ out_be32(&priv->msg_regs->isr, 0x00000091); /* Hook up inbound message handler */ rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0, "msg_rx", (void *)mport); if (rc < 0) { dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, priv->msg_tx_ring.virt_buffer[i], priv->msg_tx_ring.phys_buffer[i]); goto out; } /* * Configure inbound message unit: * Snooping * 4KB max message size * Unmask all interrupt sources * Disable */ 
out_be32(&priv->msg_regs->imr, 0x001b0060); /* Set number of queue entries */ setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12); /* Now enable the unit */ setbits32(&priv->msg_regs->imr, 0x1); out: return rc; } /** * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox * @mport: Master port implementing the inbound message unit * @mbox: Mailbox to close * * Disables the inbound message unit, free all buffers, and * frees the inbound message interrupt. */ static void fsl_close_inb_mbox(struct rio_mport *mport, int mbox) { struct rio_priv *priv = mport->priv; /* Disable inbound message unit */ out_be32(&priv->msg_regs->imr, 0); /* Free ring */ dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE, priv->msg_rx_ring.virt, priv->msg_rx_ring.phys); /* Free interrupt */ free_irq(IRQ_RIO_RX(mport), (void *)mport); } /** * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue * @mport: Master port implementing the inbound message unit * @mbox: Inbound mailbox number * @buf: Buffer to add to inbound queue * * Adds the @buf buffer to the MPC85xx inbound message queue. Returns * %0 on success or %-EINVAL on failure. 
 */
static int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	int rc = 0;
	struct rio_priv *priv = mport->priv;

	pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
		 priv->msg_rx_ring.rx_slot);

	/* Refuse to overwrite a slot that still holds an unconsumed buffer. */
	if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) {
		printk(KERN_ERR
		       "RIO: error adding inbound buffer %d, buffer exists\n",
		       priv->msg_rx_ring.rx_slot);
		rc = -EINVAL;
		goto out;
	}

	priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf;
	/* Advance the slot index, wrapping at the ring size. */
	if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size)
		priv->msg_rx_ring.rx_slot = 0;

out:
	return rc;
}

/**
 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
static void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	u32 phys_buf, virt_buf;
	void *buf = NULL;
	int buf_idx;

	phys_buf = in_be32(&priv->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&priv->msg_regs->ifqepar))
		goto out2;

	/*
	 * Translate the hardware dequeue pointer into both the CPU address
	 * of the ring entry and the index of the client buffer registered
	 * for that slot.
	 */
	virt_buf = (u32) priv->msg_rx_ring.virt +
		   (phys_buf - priv->msg_rx_ring.phys);
	buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = priv->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
		       "RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/* Copy max message size, caller is expected to allocate that big */
	memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	priv->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
	/* Advance the unit past this entry via the MI bit. */
	setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI);

out2:
	return buf;
}

/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. 
Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t fsl_rio_dbell_handler(int irq, void *dev_instance)
{
	int dsr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;

	/* Read the doorbell unit status. */
	dsr = in_be32(&priv->msg_regs->dsr);

	if (dsr & DOORBELL_DSR_TE) {
		pr_info("RIO: doorbell reception error\n");
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE);
		goto out;
	}

	if (dsr & DOORBELL_DSR_QFI) {
		pr_info("RIO: doorbell queue full\n");
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
	}

	/* XXX Need to check/dispatch until queue empty */
	if (dsr & DOORBELL_DSR_DIQI) {
		/*
		 * Locate the doorbell message at the current dequeue
		 * pointer offset within the inbound doorbell ring.
		 */
		u32 dmsg = (u32) priv->dbell_ring.virt +
			   (in_be32(&priv->msg_regs->dqdpar) & 0xfff);
		struct rio_dbell *dbell;
		int found = 0;

		pr_debug
		    ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
		     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));

		/* Find a registered handler whose info range covers dmsg. */
		list_for_each_entry(dbell, &port->dbells, node) {
			if ((dbell->res->start <= DBELL_INF(dmsg)) &&
			    (dbell->res->end >= DBELL_INF(dmsg))) {
				found = 1;
				break;
			}
		}
		if (found) {
			dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg),
				    DBELL_TID(dmsg), DBELL_INF(dmsg));
		} else {
			pr_debug
			    ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
			     DBELL_SID(dmsg), DBELL_TID(dmsg),
			     DBELL_INF(dmsg));
		}
		/* Advance the queue (DI bit) and ack the queue interrupt. */
		setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI);
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @mport: Master port implementing the inbound doorbell unit
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
*/ static int fsl_rio_doorbell_init(struct rio_mport *mport) { struct rio_priv *priv = mport->priv; int rc = 0; /* Map outbound doorbell window immediately after maintenance window */ priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE, RIO_DBELL_WIN_SIZE); if (!priv->dbell_win) { printk(KERN_ERR "RIO: unable to map outbound doorbell window\n"); rc = -ENOMEM; goto out; } /* Initialize inbound doorbells */ priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL); if (!priv->dbell_ring.virt) { printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n"); rc = -ENOMEM; iounmap(priv->dbell_win); goto out; } /* Point dequeue/enqueue pointers at first entry in ring */ out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys); out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys); /* Clear interrupt status */ out_be32(&priv->msg_regs->dsr, 0x00000091); /* Hook up doorbell handler */ rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0, "dbell_rx", (void *)mport); if (rc < 0) { iounmap(priv->dbell_win); dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE, priv->dbell_ring.virt, priv->dbell_ring.phys); printk(KERN_ERR "MPC85xx RIO: unable to request inbound doorbell irq"); goto out; } /* Configure doorbells for snooping, 512 entries, and enable */ out_be32(&priv->msg_regs->dmr, 0x00108161); out: return rc; } static void port_error_handler(struct rio_mport *port, int offset) { /*XXX: Error recovery is not implemented, we just clear errors */ out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); if (offset == 0) { out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0); out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), IECSR_CLEAR); out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR); } else { out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0); out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), IECSR_CLEAR); out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), 
ESCSR_CLEAR); } } static void msg_unit_error_handler(struct rio_mport *port) { struct rio_priv *priv = port->priv; /*XXX: Error recovery is not implemented, we just clear errors */ out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR); out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR); out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR); out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR); out_be32(&priv->msg_regs->odsr, ODSR_CLEAR); out_be32(&priv->msg_regs->dsr, IDSR_CLEAR); out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR); } /** * fsl_rio_port_write_handler - MPC85xx port write interrupt handler * @irq: Linux interrupt number * @dev_instance: Pointer to interrupt-specific data * * Handles port write interrupts. Parses a list of registered * port write event handlers and executes a matching event handler. */ static irqreturn_t fsl_rio_port_write_handler(int irq, void *dev_instance) { u32 ipwmr, ipwsr; struct rio_mport *port = (struct rio_mport *)dev_instance; struct rio_priv *priv = port->priv; u32 epwisr, tmp; epwisr = in_be32(priv->regs_win + RIO_EPWISR); if (!(epwisr & RIO_EPWISR_PW)) goto pw_done; ipwmr = in_be32(&priv->msg_regs->pwmr); ipwsr = in_be32(&priv->msg_regs->pwsr); #ifdef DEBUG_PW pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); if (ipwsr & RIO_IPWSR_QF) pr_debug(" QF"); if (ipwsr & RIO_IPWSR_TE) pr_debug(" TE"); if (ipwsr & RIO_IPWSR_QFI) pr_debug(" QFI"); if (ipwsr & RIO_IPWSR_PWD) pr_debug(" PWD"); if (ipwsr & RIO_IPWSR_PWB) pr_debug(" PWB"); pr_debug(" )\n"); #endif /* Schedule deferred processing if PW was received */ if (ipwsr & RIO_IPWSR_QFI) { /* Save PW message (if there is room in FIFO), * otherwise discard it. 
*/ if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) { priv->port_write_msg.msg_count++; kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt, RIO_PW_MSG_SIZE); } else { priv->port_write_msg.discard_count++; pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", priv->port_write_msg.discard_count); } /* Clear interrupt and issue Clear Queue command. This allows * another port-write to be received. */ out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI); out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ); schedule_work(&priv->pw_work); } if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { priv->port_write_msg.err_count++; pr_debug("RIO: Port-Write Transaction Err (%d)\n", priv->port_write_msg.err_count); /* Clear Transaction Error: port-write controller should be * disabled when clearing this error */ out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE); out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE); out_be32(&priv->msg_regs->pwmr, ipwmr); } if (ipwsr & RIO_IPWSR_PWD) { priv->port_write_msg.discard_count++; pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n", priv->port_write_msg.discard_count); out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD); } pw_done: if (epwisr & RIO_EPWISR_PINT1) { tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); port_error_handler(port, 0); } if (epwisr & RIO_EPWISR_PINT2) { tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); port_error_handler(port, 1); } if (epwisr & RIO_EPWISR_MU) { tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); msg_unit_error_handler(port); } return IRQ_HANDLED; } static void fsl_pw_dpc(struct work_struct *work) { struct rio_priv *priv = container_of(work, struct rio_priv, pw_work); unsigned long flags; u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* * Process port-write messages */ spin_lock_irqsave(&priv->pw_fifo_lock, flags); while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer, 
RIO_PW_MSG_SIZE)) { /* Process one message */ spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); #ifdef DEBUG_PW { u32 i; pr_debug("%s : Port-Write Message:", __func__); for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) { if ((i%4) == 0) pr_debug("\n0x%02x: 0x%08x", i*4, msg_buffer[i]); else pr_debug(" 0x%08x", msg_buffer[i]); } pr_debug("\n"); } #endif /* Pass the port-write message to RIO core for processing */ rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); spin_lock_irqsave(&priv->pw_fifo_lock, flags); } spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); } /** * fsl_rio_pw_enable - enable/disable port-write interface init * @mport: Master port implementing the port write unit * @enable: 1=enable; 0=disable port-write message handling */ static int fsl_rio_pw_enable(struct rio_mport *mport, int enable) { struct rio_priv *priv = mport->priv; u32 rval; rval = in_be32(&priv->msg_regs->pwmr); if (enable) rval |= RIO_IPWMR_PWE; else rval &= ~RIO_IPWMR_PWE; out_be32(&priv->msg_regs->pwmr, rval); return 0; } /** * fsl_rio_port_write_init - MPC85xx port write interface init * @mport: Master port implementing the port write unit * * Initializes port write unit hardware and DMA buffer * ring. Called from fsl_rio_setup(). Returns %0 on success * or %-ENOMEM on failure. 
*/ static int fsl_rio_port_write_init(struct rio_mport *mport) { struct rio_priv *priv = mport->priv; int rc = 0; /* Following configurations require a disabled port write controller */ out_be32(&priv->msg_regs->pwmr, in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE); /* Initialize port write */ priv->port_write_msg.virt = dma_alloc_coherent(priv->dev, RIO_PW_MSG_SIZE, &priv->port_write_msg.phys, GFP_KERNEL); if (!priv->port_write_msg.virt) { pr_err("RIO: unable allocate port write queue\n"); return -ENOMEM; } priv->port_write_msg.err_count = 0; priv->port_write_msg.discard_count = 0; /* Point dequeue/enqueue pointers at first entry */ out_be32(&priv->msg_regs->epwqbar, 0); out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys); pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n", in_be32(&priv->msg_regs->epwqbar), in_be32(&priv->msg_regs->pwqbar)); /* Clear interrupt status IPWSR */ out_be32(&priv->msg_regs->pwsr, (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); /* Configure port write contoller for snooping enable all reporting, clear queue full */ out_be32(&priv->msg_regs->pwmr, RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ); /* Hook up port-write handler */ rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, IRQF_SHARED, "port-write", (void *)mport); if (rc < 0) { pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); goto err_out; } /* Enable Error Interrupt */ out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL); INIT_WORK(&priv->pw_work, fsl_pw_dpc); spin_lock_init(&priv->pw_fifo_lock); if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { pr_err("FIFO allocation failed\n"); rc = -ENOMEM; goto err_out_irq; } pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n", in_be32(&priv->msg_regs->pwmr), in_be32(&priv->msg_regs->pwsr)); return rc; err_out_irq: free_irq(IRQ_RIO_PW(mport), (void *)mport); err_out: dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE, priv->port_write_msg.virt, priv->port_write_msg.phys); 
return rc; } static inline void fsl_rio_info(struct device *dev, u32 ccsr) { const char *str; if (ccsr & 1) { /* Serial phy */ switch (ccsr >> 30) { case 0: str = "1"; break; case 1: str = "4"; break; default: str = "Unknown"; break; } dev_info(dev, "Hardware port width: %s\n", str); switch ((ccsr >> 27) & 7) { case 0: str = "Single-lane 0"; break; case 1: str = "Single-lane 2"; break; case 2: str = "Four-lane"; break; default: str = "Unknown"; break; } dev_info(dev, "Training connection status: %s\n", str); } else { /* Parallel phy */ if (!(ccsr & 0x80000000)) dev_info(dev, "Output port operating in 8-bit mode\n"); if (!(ccsr & 0x08000000)) dev_info(dev, "Input port operating in 8-bit mode\n"); } } /** * fsl_rio_setup - Setup Freescale PowerPC RapidIO interface * @dev: platform_device pointer * * Initializes MPC85xx RapidIO hardware interface, configures * master port with system-specific info, and registers the * master port with the RapidIO subsystem. */ int fsl_rio_setup(struct platform_device *dev) { struct rio_ops *ops; struct rio_mport *port; struct rio_priv *priv; int rc = 0; const u32 *dt_range, *cell; struct resource regs; int rlen; u32 ccsr; u64 law_start, law_size; int paw, aw, sw; if (!dev->dev.of_node) { dev_err(&dev->dev, "Device OF-Node is NULL"); return -EFAULT; } rc = of_address_to_resource(dev->dev.of_node, 0, &regs); if (rc) { dev_err(&dev->dev, "Can't get %s property 'reg'\n", dev->dev.of_node->full_name); return -EFAULT; } dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name); dev_info(&dev->dev, "Regs: %pR\n", &regs); dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen); if (!dt_range) { dev_err(&dev->dev, "Can't get %s property 'ranges'\n", dev->dev.of_node->full_name); return -EFAULT; } /* Get node address wide */ cell = of_get_property(dev->dev.of_node, "#address-cells", NULL); if (cell) aw = *cell; else aw = of_n_addr_cells(dev->dev.of_node); /* Get node size wide */ cell = of_get_property(dev->dev.of_node, 
"#size-cells", NULL); if (cell) sw = *cell; else sw = of_n_size_cells(dev->dev.of_node); /* Get parent address wide wide */ paw = of_n_addr_cells(dev->dev.of_node); law_start = of_read_number(dt_range + aw, paw); law_size = of_read_number(dt_range + aw + paw, sw); dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", law_start, law_size); ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); if (!ops) { rc = -ENOMEM; goto err_ops; } ops->lcread = fsl_local_config_read; ops->lcwrite = fsl_local_config_write; ops->cread = fsl_rio_config_read; ops->cwrite = fsl_rio_config_write; ops->dsend = fsl_rio_doorbell_send; ops->pwenable = fsl_rio_pw_enable; ops->open_outb_mbox = fsl_open_outb_mbox; ops->open_inb_mbox = fsl_open_inb_mbox; ops->close_outb_mbox = fsl_close_outb_mbox; ops->close_inb_mbox = fsl_close_inb_mbox; ops->add_outb_message = fsl_add_outb_message; ops->add_inb_buffer = fsl_add_inb_buffer; ops->get_inb_message = fsl_get_inb_message; port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); if (!port) { rc = -ENOMEM; goto err_port; } port->index = 0; priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); if (!priv) { printk(KERN_ERR "Can't alloc memory for 'priv'\n"); rc = -ENOMEM; goto err_priv; } INIT_LIST_HEAD(&port->dbells); port->iores.start = law_start; port->iores.end = law_start + law_size - 1; port->iores.flags = IORESOURCE_MEM; port->iores.name = "rio_io_win"; if (request_resource(&iomem_resource, &port->iores) < 0) { dev_err(&dev->dev, "RIO: Error requesting master port region" " 0x%016llx-0x%016llx\n", (u64)port->iores.start, (u64)port->iores.end); rc = -ENOMEM; goto err_res; } priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n", priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq); 
rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); strcpy(port->name, "RIO0 mport"); priv->dev = &dev->dev; port->ops = ops; port->priv = priv; port->phys_efptr = 0x100; priv->regs_win = ioremap(regs.start, resource_size(&regs)); rio_regs_win = priv->regs_win; /* Probe the master port phy type */ ccsr = in_be32(priv->regs_win + RIO_CCSR); port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL; dev_info(&dev->dev, "RapidIO PHY type: %s\n", (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" : ((port->phy_type == RIO_PHY_SERIAL) ? "serial" : "unknown")); /* Checking the port training status */ if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { dev_err(&dev->dev, "Port is not ready. " "Try to restart connection...\n"); switch (port->phy_type) { case RIO_PHY_SERIAL: /* Disable ports */ out_be32(priv->regs_win + RIO_CCSR, 0); /* Set 1x lane */ setbits32(priv->regs_win + RIO_CCSR, 0x02000000); /* Enable ports */ setbits32(priv->regs_win + RIO_CCSR, 0x00600000); break; case RIO_PHY_PARALLEL: /* Disable ports */ out_be32(priv->regs_win + RIO_CCSR, 0x22000000); /* Enable ports */ out_be32(priv->regs_win + RIO_CCSR, 0x44000000); break; } msleep(100); if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { dev_err(&dev->dev, "Port restart failed.\n"); rc = -ENOLINK; goto err; } dev_info(&dev->dev, "Port restart success!\n"); } fsl_rio_info(&dev->dev, ccsr); port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR)) & RIO_PEF_CTLS) >> 4; dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", port->sys_size ? 
65536 : 256); if (rio_register_mport(port)) goto err; if (port->host_deviceid >= 0) out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); else out_be32(priv->regs_win + RIO_GCCSR, 0x00000000); priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win + RIO_ATMU_REGS_OFFSET); priv->maint_atmu_regs = priv->atmu_regs + 1; priv->dbell_atmu_regs = priv->atmu_regs + 2; priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win + ((port->phy_type == RIO_PHY_SERIAL) ? RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET)); /* Set to receive any dist ID for serial RapidIO controller. */ if (port->phy_type == RIO_PHY_SERIAL) out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA); /* Configure maintenance transaction window */ out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); out_be32(&priv->maint_atmu_regs->rowar, 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1)); priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); /* Configure outbound doorbell window */ out_be32(&priv->dbell_atmu_regs->rowbar, (law_start + RIO_MAINT_WIN_SIZE) >> 12); out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ fsl_rio_doorbell_init(port); fsl_rio_port_write_init(port); return 0; err: iounmap(priv->regs_win); err_res: kfree(priv); err_priv: kfree(port); err_port: kfree(ops); err_ops: return rc; } /* The probe function for RapidIO peer-to-peer network. 
*/ static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev) { printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", dev->dev.of_node->full_name); return fsl_rio_setup(dev); }; static const struct of_device_id fsl_of_rio_rpn_ids[] = { { .compatible = "fsl,rapidio-delta", }, {}, }; static struct platform_driver fsl_of_rio_rpn_driver = { .driver = { .name = "fsl-of-rio", .owner = THIS_MODULE, .of_match_table = fsl_of_rio_rpn_ids, }, .probe = fsl_of_rio_rpn_probe, }; static __init int fsl_of_rio_rpn_init(void) { return platform_driver_register(&fsl_of_rio_rpn_driver); } subsys_initcall(fsl_of_rio_rpn_init);
gpl-2.0
SimpleAOSP-Kernel/kernel_grouper
arch/sh/kernel/cpu/sh4a/clock-sh7757.c
396
3860
/*
 * arch/sh/kernel/cpu/sh4/clock-sh7757.c
 *
 * SH7757 support for the clock framework
 *
 * Copyright (C) 2009-2010 Renesas Solutions Corp.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>

/*
 * Default rate for the root input clock, reset this with clk_set_rate()
 * from the platform code.
 */
static struct clk extal_clk = {
	.rate		= 48000000,
};

/* PLL rate: parent rate x24 if mode pin 0 is set, otherwise x16. */
static unsigned long pll_recalc(struct clk *clk)
{
	int multiplier;

	multiplier = test_mode_pin(MODE_PIN0) ? 24 : 16;

	return clk->parent->rate * multiplier;
}

static struct clk_ops pll_clk_ops = {
	.recalc		= pll_recalc,
};

static struct clk pll_clk = {
	.ops		= &pll_clk_ops,
	.parent		= &extal_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/* Root clocks registered unconditionally by arch_clk_init(). */
static struct clk *clks[] = {
	&extal_clk,
	&pll_clk,
};

/* DIV4 divisor table indexed by FRQCR field value; 1 marks unused codes. */
static unsigned int div2[] = { 1, 1, 2, 1, 1, 4, 1, 6,
			       1, 1, 1, 16, 1, 24, 1, 1 };

static struct clk_div_mult_table div4_div_mult_table = {
	.divisors = div2,
	.nr_divisors = ARRAY_SIZE(div2),
};

static struct clk_div4_table div4_table = {
	.div_mult_table = &div4_div_mult_table,
};

enum { DIV4_I, DIV4_SH, DIV4_P, DIV4_NR };

#define DIV4(_bit, _mask, _flags) \
	SH_CLK_DIV4(&pll_clk, FRQCR, _bit, _mask, _flags)

struct clk div4_clks[DIV4_NR] = {
	/*
	 * P clock is always enable, because some P clock modules is used
	 * by Host PC.
	 */
	[DIV4_P] = DIV4(0, 0x2800, CLK_ENABLE_ON_INIT),
	[DIV4_SH] = DIV4(12, 0x00a0, CLK_ENABLE_ON_INIT),
	[DIV4_I] = DIV4(20, 0x0004, CLK_ENABLE_ON_INIT),
};

/* Module stop control register addresses. */
#define MSTPCR0		0xffc80030
#define MSTPCR1		0xffc80034
#define MSTPCR2		0xffc10028

enum { MSTP004, MSTP000, MSTP114, MSTP113, MSTP112,
       MSTP111, MSTP110, MSTP103, MSTP102, MSTP220,
       MSTP_NR };

/* Gate clocks: one per module-stop bit, all children of the P clock. */
static struct clk mstp_clks[MSTP_NR] = {
	/* MSTPCR0 */
	[MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
	[MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),

	/* MSTPCR1 */
	[MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0),
	[MSTP113] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 13, 0),
	[MSTP112] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 12, 0),
	[MSTP111] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 11, 0),
	[MSTP110] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 10, 0),
	[MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0),
	[MSTP102] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 2, 0),

	/* MSTPCR2 */
	[MSTP220] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 20, 0),
};

/* clkdev lookup table mapping connection ids / devices to clocks. */
static struct clk_lookup lookups[] = {
	/* main clocks */
	CLKDEV_CON_ID("extal", &extal_clk),
	CLKDEV_CON_ID("pll_clk", &pll_clk),

	/* DIV4 clocks */
	CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
	CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
	CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),

	/* MSTP32 clocks */
	CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP004]),
	CLKDEV_CON_ID("riic", &mstp_clks[MSTP000]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP113]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP114]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP112]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP111]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP110]),
	CLKDEV_CON_ID("usb0", &mstp_clks[MSTP102]),
	CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP220]),
};

/*
 * Register the root clocks, clkdev lookups, DIV4 dividers and MSTP
 * gates with the SH clock framework.  Returns 0 on success or the
 * first non-zero error from the framework registration calls.
 */
int __init arch_clk_init(void)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(clks); i++)
		ret |= clk_register(clks[i]);
	for (i = 0; i < ARRAY_SIZE(lookups); i++)
		clkdev_add(&lookups[i]);

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
					   &div4_table);
	if (!ret)
		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);

	return ret;
}
gpl-2.0
linuxmake/kernel_softwinner_fiber
drivers/watchdog/pc87413_wdt.c
652
14681
/* * NS pc87413-wdt Watchdog Timer driver for Linux 2.6.x.x * * This code is based on wdt.c with original copyright. * * (C) Copyright 2006 Sven Anders, <anders@anduras.de> * and Marcus Junker, <junker@anduras.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Sven Anders, Marcus Junker nor ANDURAS AG * admit liability nor provide warranty for any of this software. * This material is provided "AS-IS" and at no charge. * * Release 1.1 */ #include <linux/module.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/notifier.h> #include <linux/fs.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <linux/io.h> #include <linux/uaccess.h> #include <asm/system.h> /* #define DEBUG 1 */ #define DEFAULT_TIMEOUT 1 /* 1 minute */ #define MAX_TIMEOUT 255 #define VERSION "1.1" #define MODNAME "pc87413 WDT" #define PFX MODNAME ": " #define DPFX MODNAME " - DEBUG: " #define WDT_INDEX_IO_PORT (io+0) /* I/O port base (index register) */ #define WDT_DATA_IO_PORT (WDT_INDEX_IO_PORT+1) #define SWC_LDN 0x04 #define SIOCFG2 0x22 /* Serial IO register */ #define WDCTL 0x10 /* Watchdog-Timer-Control-Register */ #define WDTO 0x11 /* Watchdog timeout register */ #define WDCFG 0x12 /* Watchdog config register */ #define IO_DEFAULT 0x2E /* Address used on Portwell Boards */ static int io = IO_DEFAULT; static int swc_base_addr = -1; static int timeout = DEFAULT_TIMEOUT; /* timeout value */ static unsigned long timer_enabled; /* is the timer enabled? */ static char expect_close; /* is the close expected? 
*/ static DEFINE_SPINLOCK(io_lock); /* to guard us from io races */ static int nowayout = WATCHDOG_NOWAYOUT; /* -- Low level function ----------------------------------------*/ /* Select pins for Watchdog output */ static inline void pc87413_select_wdt_out(void) { unsigned int cr_data = 0; /* Step 1: Select multiple pin,pin55,as WDT output */ outb_p(SIOCFG2, WDT_INDEX_IO_PORT); cr_data = inb(WDT_DATA_IO_PORT); cr_data |= 0x80; /* Set Bit7 to 1*/ outb_p(SIOCFG2, WDT_INDEX_IO_PORT); outb_p(cr_data, WDT_DATA_IO_PORT); #ifdef DEBUG printk(KERN_INFO DPFX "Select multiple pin,pin55,as WDT output: Bit7 to 1: %d\n", cr_data); #endif } /* Enable SWC functions */ static inline void pc87413_enable_swc(void) { unsigned int cr_data = 0; /* Step 2: Enable SWC functions */ outb_p(0x07, WDT_INDEX_IO_PORT); /* Point SWC_LDN (LDN=4) */ outb_p(SWC_LDN, WDT_DATA_IO_PORT); outb_p(0x30, WDT_INDEX_IO_PORT); /* Read Index 0x30 First */ cr_data = inb(WDT_DATA_IO_PORT); cr_data |= 0x01; /* Set Bit0 to 1 */ outb_p(0x30, WDT_INDEX_IO_PORT); outb_p(cr_data, WDT_DATA_IO_PORT); /* Index0x30_bit0P1 */ #ifdef DEBUG printk(KERN_INFO DPFX "pc87413 - Enable SWC functions\n"); #endif } /* Read SWC I/O base address */ static void pc87413_get_swc_base_addr(void) { unsigned char addr_l, addr_h = 0; /* Step 3: Read SWC I/O Base Address */ outb_p(0x60, WDT_INDEX_IO_PORT); /* Read Index 0x60 */ addr_h = inb(WDT_DATA_IO_PORT); outb_p(0x61, WDT_INDEX_IO_PORT); /* Read Index 0x61 */ addr_l = inb(WDT_DATA_IO_PORT); swc_base_addr = (addr_h << 8) + addr_l; #ifdef DEBUG printk(KERN_INFO DPFX "Read SWC I/O Base Address: low %d, high %d, res %d\n", addr_l, addr_h, swc_base_addr); #endif } /* Select Bank 3 of SWC */ static inline void pc87413_swc_bank3(void) { /* Step 4: Select Bank3 of SWC */ outb_p(inb(swc_base_addr + 0x0f) | 0x03, swc_base_addr + 0x0f); #ifdef DEBUG printk(KERN_INFO DPFX "Select Bank3 of SWC\n"); #endif } /* Set watchdog timeout to x minutes */ static inline void pc87413_programm_wdto(char 
pc87413_time) { /* Step 5: Programm WDTO, Twd. */ outb_p(pc87413_time, swc_base_addr + WDTO); #ifdef DEBUG printk(KERN_INFO DPFX "Set WDTO to %d minutes\n", pc87413_time); #endif } /* Enable WDEN */ static inline void pc87413_enable_wden(void) { /* Step 6: Enable WDEN */ outb_p(inb(swc_base_addr + WDCTL) | 0x01, swc_base_addr + WDCTL); #ifdef DEBUG printk(KERN_INFO DPFX "Enable WDEN\n"); #endif } /* Enable SW_WD_TREN */ static inline void pc87413_enable_sw_wd_tren(void) { /* Enable SW_WD_TREN */ outb_p(inb(swc_base_addr + WDCFG) | 0x80, swc_base_addr + WDCFG); #ifdef DEBUG printk(KERN_INFO DPFX "Enable SW_WD_TREN\n"); #endif } /* Disable SW_WD_TREN */ static inline void pc87413_disable_sw_wd_tren(void) { /* Disable SW_WD_TREN */ outb_p(inb(swc_base_addr + WDCFG) & 0x7f, swc_base_addr + WDCFG); #ifdef DEBUG printk(KERN_INFO DPFX "pc87413 - Disable SW_WD_TREN\n"); #endif } /* Enable SW_WD_TRG */ static inline void pc87413_enable_sw_wd_trg(void) { /* Enable SW_WD_TRG */ outb_p(inb(swc_base_addr + WDCTL) | 0x80, swc_base_addr + WDCTL); #ifdef DEBUG printk(KERN_INFO DPFX "pc87413 - Enable SW_WD_TRG\n"); #endif } /* Disable SW_WD_TRG */ static inline void pc87413_disable_sw_wd_trg(void) { /* Disable SW_WD_TRG */ outb_p(inb(swc_base_addr + WDCTL) & 0x7f, swc_base_addr + WDCTL); #ifdef DEBUG printk(KERN_INFO DPFX "Disable SW_WD_TRG\n"); #endif } /* -- Higher level functions ------------------------------------*/ /* Enable the watchdog */ static void pc87413_enable(void) { spin_lock(&io_lock); pc87413_swc_bank3(); pc87413_programm_wdto(timeout); pc87413_enable_wden(); pc87413_enable_sw_wd_tren(); pc87413_enable_sw_wd_trg(); spin_unlock(&io_lock); } /* Disable the watchdog */ static void pc87413_disable(void) { spin_lock(&io_lock); pc87413_swc_bank3(); pc87413_disable_sw_wd_tren(); pc87413_disable_sw_wd_trg(); pc87413_programm_wdto(0); spin_unlock(&io_lock); } /* Refresh the watchdog */ static void pc87413_refresh(void) { spin_lock(&io_lock); pc87413_swc_bank3(); 
pc87413_disable_sw_wd_tren(); pc87413_disable_sw_wd_trg(); pc87413_programm_wdto(timeout); pc87413_enable_wden(); pc87413_enable_sw_wd_tren(); pc87413_enable_sw_wd_trg(); spin_unlock(&io_lock); } /* -- File operations -------------------------------------------*/ /** * pc87413_open: * @inode: inode of device * @file: file handle to device * */ static int pc87413_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &timer_enabled)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Reload and activate timer */ pc87413_refresh(); printk(KERN_INFO MODNAME "Watchdog enabled. Timeout set to %d minute(s).\n", timeout); return nonseekable_open(inode, file); } /** * pc87413_release: * @inode: inode to board * @file: file handle to board * * The watchdog has a configurable API. There is a religious dispute * between people who want their watchdog to be able to shut down and * those who want to be sure if the watchdog manager dies the machine * reboots. In the former case we disable the counters, in the latter * case you have to open it again very soon. */ static int pc87413_release(struct inode *inode, struct file *file) { /* Shut off the timer. */ if (expect_close == 42) { pc87413_disable(); printk(KERN_INFO MODNAME "Watchdog disabled, sleeping again...\n"); } else { printk(KERN_CRIT MODNAME "Unexpected close, not stopping watchdog!\n"); pc87413_refresh(); } clear_bit(0, &timer_enabled); expect_close = 0; return 0; } /** * pc87413_status: * * return, if the watchdog is enabled (timeout is set...) */ static int pc87413_status(void) { return 0; /* currently not supported */ } /** * pc87413_write: * @file: file handle to the watchdog * @data: data buffer to write * @len: length in bytes * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any * write of data will do, as we we don't define content meaning. 
*/ static ssize_t pc87413_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (len) { if (!nowayout) { size_t i; /* reset expect flag */ expect_close = 0; /* scan to see whether or not we got the magic character */ for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } /* someone wrote to us, we should reload the timer */ pc87413_refresh(); } return len; } /** * pc87413_ioctl: * @file: file handle to the device * @cmd: watchdog command * @arg: argument pointer * * The watchdog API defines a common set of functions for all watchdogs * according to their available features. We only actually usefully support * querying capabilities and current status. */ static long pc87413_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int new_timeout; union { struct watchdog_info __user *ident; int __user *i; } uarg; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "PC87413(HF/F) watchdog", }; uarg.i = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: return put_user(pc87413_status(), uarg.i); case WDIOC_GETBOOTSTATUS: return put_user(0, uarg.i); case WDIOC_SETOPTIONS: { int options, retval = -EINVAL; if (get_user(options, uarg.i)) return -EFAULT; if (options & WDIOS_DISABLECARD) { pc87413_disable(); retval = 0; } if (options & WDIOS_ENABLECARD) { pc87413_enable(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: pc87413_refresh(); #ifdef DEBUG printk(KERN_INFO DPFX "keepalive\n"); #endif return 0; case WDIOC_SETTIMEOUT: if (get_user(new_timeout, uarg.i)) return -EFAULT; /* the API states this is given in secs */ new_timeout /= 60; if (new_timeout < 0 || new_timeout > MAX_TIMEOUT) return -EINVAL; timeout = new_timeout; pc87413_refresh(); /* fall through and return the new timeout... */ case WDIOC_GETTIMEOUT: new_timeout = timeout * 60; return put_user(new_timeout, uarg.i); default: return -ENOTTY; } } /* -- Notifier funtions -----------------------------------------*/ /** * notify_sys: * @this: our notifier block * @code: the event being reported * @unused: unused * * Our notifier is called on system shutdowns. We want to turn the card * off at reboot otherwise the machine will reboot again during memory * test or worse yet during the following fsck. This would suck, in fact * trust me - if it happens it does suck. 
*/ static int pc87413_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) /* Turn the card off */ pc87413_disable(); return NOTIFY_DONE; } /* -- Module's structures ---------------------------------------*/ static const struct file_operations pc87413_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = pc87413_write, .unlocked_ioctl = pc87413_ioctl, .open = pc87413_open, .release = pc87413_release, }; static struct notifier_block pc87413_notifier = { .notifier_call = pc87413_notify_sys, }; static struct miscdevice pc87413_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &pc87413_fops, }; /* -- Module init functions -------------------------------------*/ /** * pc87413_init: module's "constructor" * * Set up the WDT watchdog board. All we have to do is grab the * resources we require and bitch if anyone beat us to them. * The open() function will actually kick the board off. */ static int __init pc87413_init(void) { int ret; printk(KERN_INFO PFX "Version " VERSION " at io 0x%X\n", WDT_INDEX_IO_PORT); if (!request_muxed_region(io, 2, MODNAME)) return -EBUSY; ret = register_reboot_notifier(&pc87413_notifier); if (ret != 0) { printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", ret); } ret = misc_register(&pc87413_miscdev); if (ret != 0) { printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto reboot_unreg; } printk(KERN_INFO PFX "initialized. 
timeout=%d min \n", timeout); pc87413_select_wdt_out(); pc87413_enable_swc(); pc87413_get_swc_base_addr(); if (!request_region(swc_base_addr, 0x20, MODNAME)) { printk(KERN_ERR PFX "cannot request SWC region at 0x%x\n", swc_base_addr); ret = -EBUSY; goto misc_unreg; } pc87413_enable(); release_region(io, 2); return 0; misc_unreg: misc_deregister(&pc87413_miscdev); reboot_unreg: unregister_reboot_notifier(&pc87413_notifier); release_region(io, 2); return ret; } /** * pc87413_exit: module's "destructor" * * Unload the watchdog. You cannot do this with any file handles open. * If your watchdog is set to continue ticking on close and you unload * it, well it keeps ticking. We won't get the interrupt but the board * will not touch PC memory so all is fine. You just have to load a new * module in 60 seconds or reboot. */ static void __exit pc87413_exit(void) { /* Stop the timer before we leave */ if (!nowayout) { pc87413_disable(); printk(KERN_INFO MODNAME "Watchdog disabled.\n"); } misc_deregister(&pc87413_miscdev); unregister_reboot_notifier(&pc87413_notifier); release_region(swc_base_addr, 0x20); printk(KERN_INFO MODNAME " watchdog component driver removed.\n"); } module_init(pc87413_init); module_exit(pc87413_exit); MODULE_AUTHOR("Sven Anders <anders@anduras.de>, " "Marcus Junker <junker@anduras.de>,"); MODULE_DESCRIPTION("PC87413 WDT driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(io, int, 0); MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(IO_DEFAULT) ")."); module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in minutes (default=" __MODULE_STRING(DEFAULT_TIMEOUT) ")."); module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
gpl-2.0
insofter/linux
arch/arm/mach-s3c64xx/mach-smartq5.c
908
3425
/* * linux/arch/arm/mach-s3c64xx/mach-smartq5.c * * Copyright (C) 2010 Maurus Cuelenaere * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/fb.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/init.h> #include <linux/input.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/map.h> #include <mach/regs-gpio.h> #include <mach/s3c6410.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/fb.h> #include <plat/gpio-cfg.h> #include <plat/regs-fb-v4.h> #include "mach-smartq.h" static struct gpio_led smartq5_leds[] = { { .name = "smartq5:green", .active_low = 1, .gpio = S3C64XX_GPN(8), }, { .name = "smartq5:red", .active_low = 1, .gpio = S3C64XX_GPN(9), }, }; static struct gpio_led_platform_data smartq5_led_data = { .num_leds = ARRAY_SIZE(smartq5_leds), .leds = smartq5_leds, }; static struct platform_device smartq5_leds_device = { .name = "leds-gpio", .id = -1, .dev.platform_data = &smartq5_led_data, }; /* Labels according to the SmartQ manual */ static struct gpio_keys_button smartq5_buttons[] = { { .gpio = S3C64XX_GPL(14), .code = KEY_POWER, .desc = "Power", .active_low = 1, .debounce_interval = 5, .type = EV_KEY, }, { .gpio = S3C64XX_GPN(2), .code = KEY_KPMINUS, .desc = "Minus", .active_low = 1, .debounce_interval = 5, .type = EV_KEY, }, { .gpio = S3C64XX_GPN(12), .code = KEY_KPPLUS, .desc = "Plus", .active_low = 1, .debounce_interval = 5, .type = EV_KEY, }, { .gpio = S3C64XX_GPN(15), .code = KEY_ENTER, .desc = "Move", .active_low = 1, .debounce_interval = 5, .type = EV_KEY, }, }; static struct gpio_keys_platform_data smartq5_buttons_data = { .buttons = smartq5_buttons, .nbuttons = ARRAY_SIZE(smartq5_buttons), }; static struct platform_device smartq5_buttons_device = { .name = "gpio-keys", .id = 0, 
.num_resources = 0, .dev = { .platform_data = &smartq5_buttons_data, } }; static struct s3c_fb_pd_win smartq5_fb_win0 = { .win_mode = { .left_margin = 216, .right_margin = 40, .upper_margin = 35, .lower_margin = 10, .hsync_len = 1, .vsync_len = 1, .xres = 800, .yres = 480, .refresh = 80, }, .max_bpp = 32, .default_bpp = 16, }; static struct s3c_fb_platdata smartq5_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, .win[0] = &smartq5_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC | VIDCON1_INV_VDEN, }; static struct platform_device *smartq5_devices[] __initdata = { &smartq5_leds_device, &smartq5_buttons_device, }; static void __init smartq5_machine_init(void) { s3c_fb_set_platdata(&smartq5_lcd_pdata); smartq_machine_init(); platform_add_devices(smartq5_devices, ARRAY_SIZE(smartq5_devices)); } MACHINE_START(SMARTQ5, "SmartQ 5") /* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */ .boot_params = S3C64XX_PA_SDRAM + 0x100, .init_irq = s3c6410_init_irq, .map_io = smartq_map_io, .init_machine = smartq5_machine_init, .timer = &s3c24xx_timer, MACHINE_END
gpl-2.0
MaxiCM/android_kernel_google_msm
arch/arm/mach-msm/qdsp6/dsp_debug.c
1420
3917
/* arch/arm/mach-msm/qdsp6/dsp_dump.c * * Copyright (C) 2009 Google, Inc. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/io.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <asm/atomic.h> #include <mach/proc_comm.h> #include <mach/debug_mm.h> static wait_queue_head_t dsp_wait; static int dsp_has_crashed; static int dsp_wait_count; static atomic_t dsp_crash_count = ATOMIC_INIT(0); void q6audio_dsp_not_responding(void) { if (atomic_add_return(1, &dsp_crash_count) != 1) { pr_err("q6audio_dsp_not_responding() - parking additional crasher...\n"); for (;;) msleep(1000); } if (dsp_wait_count) { dsp_has_crashed = 1; wake_up(&dsp_wait); while (dsp_has_crashed != 2) wait_event(dsp_wait, dsp_has_crashed == 2); } else { pr_err("q6audio_dsp_not_responding() - no waiter?\n"); } BUG(); } static int dsp_open(struct inode *inode, struct file *file) { return 0; } static ssize_t dsp_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { char cmd[32]; if (count >= sizeof(cmd)) return -EINVAL; if (copy_from_user(cmd, buf, count)) return -EFAULT; cmd[count] = 0; if ((count > 1) && (cmd[count-1] == '\n')) cmd[count-1] = 0; if (!strcmp(cmd, "wait-for-crash")) { while (!dsp_has_crashed) { int res; dsp_wait_count++; res = wait_event_interruptible(dsp_wait, dsp_has_crashed); if (res < 0) { dsp_wait_count--; return res; } } #if defined(CONFIG_MACH_MAHIMAHI) /* assert DSP 
NMI */ msm_proc_comm(PCOM_CUSTOMER_CMD1, 0, 0); msleep(250); #endif } else if (!strcmp(cmd, "boom")) { q6audio_dsp_not_responding(); } else if (!strcmp(cmd, "continue-crash")) { dsp_has_crashed = 2; wake_up(&dsp_wait); } else { pr_err("[%s:%s] unknown dsp_debug command: %s\n", __MM_FILE__, __func__, cmd); } return count; } #define DSP_RAM_BASE 0x2E800000 #define DSP_RAM_SIZE 0x01800000 static unsigned copy_ok_count; static ssize_t dsp_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { size_t actual = 0; size_t mapsize = PAGE_SIZE; unsigned addr; void __iomem *ptr; if (*pos >= DSP_RAM_SIZE) return 0; if (*pos & (PAGE_SIZE - 1)) return -EINVAL; addr = (*pos + DSP_RAM_BASE); /* don't blow up if we're unaligned */ if (addr & (PAGE_SIZE - 1)) mapsize *= 2; while (count >= PAGE_SIZE) { ptr = ioremap(addr, mapsize); if (!ptr) { pr_err("[%s:%s] map error @ %x\n", __MM_FILE__, __func__, addr); return -EFAULT; } if (copy_to_user(buf, ptr, PAGE_SIZE)) { iounmap(ptr); pr_err("[%s:%s] copy error @ %p\n", __MM_FILE__, __func__, buf); return -EFAULT; } copy_ok_count += PAGE_SIZE; iounmap(ptr); addr += PAGE_SIZE; buf += PAGE_SIZE; actual += PAGE_SIZE; count -= PAGE_SIZE; } *pos += actual; return actual; } static int dsp_release(struct inode *inode, struct file *file) { return 0; } static const struct file_operations dsp_fops = { .owner = THIS_MODULE, .open = dsp_open, .read = dsp_read, .write = dsp_write, .release = dsp_release, }; static struct miscdevice dsp_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "dsp_debug", .fops = &dsp_fops, }; static int __init dsp_init(void) { init_waitqueue_head(&dsp_wait); return misc_register(&dsp_misc); } device_initcall(dsp_init);
gpl-2.0
Altaf-Mahdi/i9100
arch/m68k/mm/init_no.c
2188
4149
/* * linux/arch/m68knommu/mm/init.c * * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, * Kenneth Albanowski <kjahds@kjahds.com>, * Copyright (C) 2000 Lineo, Inc. (www.lineo.com) * * Based on: * * linux/arch/m68k/mm/init.c * * Copyright (C) 1995 Hamish Macdonald * * JAN/1999 -- hacked to support ColdFire (gerg@snapgear.com) * DEC/2000 -- linux 2.4 support <davidm@snapgear.com> */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/bootmem.h> #include <linux/gfp.h> #include <asm/setup.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/machdep.h> /* * ZERO_PAGE is a special page that is used for zero-initialized * data and COW. */ unsigned long empty_zero_page; extern unsigned long memory_start; extern unsigned long memory_end; /* * paging_init() continues the virtual memory environment setup which * was begun by the code in arch/head.S. * The parameters are pointers to where to stick the starting and ending * addresses of available kernel virtual memory. */ void __init paging_init(void) { /* * Make sure start_mem is page aligned, otherwise bootmem and * page_alloc get different views of the world. */ unsigned long end_mem = memory_end & PAGE_MASK; unsigned long zones_size[MAX_NR_ZONES] = {0, }; empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); memset((void *)empty_zero_page, 0, PAGE_SIZE); /* * Set up SFC/DFC registers (user data space). 
*/ set_fs (USER_DS); zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT; free_area_init(zones_size); } void __init mem_init(void) { int codek = 0, datak = 0, initk = 0; unsigned long tmp; extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end; extern unsigned int _ramend, _rambase; unsigned long len = _ramend - _rambase; unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */ unsigned long end_mem = memory_end; /* DAVIDM - this must not include kernel stack at top */ pr_debug("Mem_init: start=%lx, end=%lx\n", start_mem, end_mem); end_mem &= PAGE_MASK; high_memory = (void *) end_mem; start_mem = PAGE_ALIGN(start_mem); max_mapnr = num_physpages = (((unsigned long) high_memory) - PAGE_OFFSET) >> PAGE_SHIFT; /* this will put all memory onto the freelists */ totalram_pages = free_all_bootmem(); codek = (&_etext - &_stext) >> 10; datak = (&_ebss - &_sdata) >> 10; initk = (&__init_begin - &__init_end) >> 10; tmp = nr_free_pages() << PAGE_SHIFT; printk(KERN_INFO "Memory available: %luk/%luk RAM, (%dk kernel code, %dk data)\n", tmp >> 10, len >> 10, codek, datak ); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { int pages = 0; for (; start < end; start += PAGE_SIZE) { ClearPageReserved(virt_to_page(start)); init_page_count(virt_to_page(start)); free_page(start); totalram_pages++; pages++; } printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024)); } #endif void free_initmem(void) { #ifdef CONFIG_RAMKERNEL unsigned long addr; extern char __init_begin, __init_end; /* * The following code should be cool even if these sections * are not page aligned. 
*/ addr = PAGE_ALIGN((unsigned long)(&__init_begin)); /* next to check that the page we free is not a partial page */ for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); init_page_count(virt_to_page(addr)); free_page(addr); totalram_pages++; } printk(KERN_NOTICE "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n", (addr - PAGE_ALIGN((long) &__init_begin)) >> 10, (int)(PAGE_ALIGN((unsigned long)(&__init_begin))), (int)(addr - PAGE_SIZE)); #endif }
gpl-2.0
dwander/linaro-base
arch/sparc/crypto/md5_glue.c
2188
4879
/* Glue code for MD5 hashing optimized for sparc64 crypto opcodes. * * This is based largely upon arch/x86/crypto/sha1_ssse3_glue.c * and crypto/md5.c which are: * * Copyright (c) Alan Smithee. * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> * Copyright (c) Mathias Krause <minipli@googlemail.com> * Copyright (c) Cryptoapi developers. * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/cryptohash.h> #include <linux/types.h> #include <crypto/md5.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" asmlinkage void md5_sparc64_transform(u32 *digest, const char *data, unsigned int rounds); static int md5_sparc64_init(struct shash_desc *desc) { struct md5_state *mctx = shash_desc_ctx(desc); mctx->hash[0] = cpu_to_le32(0x67452301); mctx->hash[1] = cpu_to_le32(0xefcdab89); mctx->hash[2] = cpu_to_le32(0x98badcfe); mctx->hash[3] = cpu_to_le32(0x10325476); mctx->byte_count = 0; return 0; } static void __md5_sparc64_update(struct md5_state *sctx, const u8 *data, unsigned int len, unsigned int partial) { unsigned int done = 0; sctx->byte_count += len; if (partial) { done = MD5_HMAC_BLOCK_SIZE - partial; memcpy((u8 *)sctx->block + partial, data, done); md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1); } if (len - done >= MD5_HMAC_BLOCK_SIZE) { const unsigned int rounds = (len - done) / MD5_HMAC_BLOCK_SIZE; md5_sparc64_transform(sctx->hash, data + done, rounds); done += rounds * MD5_HMAC_BLOCK_SIZE; } memcpy(sctx->block, data + done, len - done); } static int md5_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct md5_state *sctx = shash_desc_ctx(desc); unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; /* Handle the fast case right here */ if (partial + len < 
MD5_HMAC_BLOCK_SIZE) { sctx->byte_count += len; memcpy((u8 *)sctx->block + partial, data, len); } else __md5_sparc64_update(sctx, data, len, partial); return 0; } /* Add padding and return the message digest. */ static int md5_sparc64_final(struct shash_desc *desc, u8 *out) { struct md5_state *sctx = shash_desc_ctx(desc); unsigned int i, index, padlen; u32 *dst = (u32 *)out; __le64 bits; static const u8 padding[MD5_HMAC_BLOCK_SIZE] = { 0x80, }; bits = cpu_to_le64(sctx->byte_count << 3); /* Pad out to 56 mod 64 and append length */ index = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; padlen = (index < 56) ? (56 - index) : ((MD5_HMAC_BLOCK_SIZE+56) - index); /* We need to fill a whole block for __md5_sparc64_update() */ if (padlen <= 56) { sctx->byte_count += padlen; memcpy((u8 *)sctx->block + index, padding, padlen); } else { __md5_sparc64_update(sctx, padding, padlen, index); } __md5_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); /* Store state in digest */ for (i = 0; i < MD5_HASH_WORDS; i++) dst[i] = sctx->hash[i]; /* Wipe context */ memset(sctx, 0, sizeof(*sctx)); return 0; } static int md5_sparc64_export(struct shash_desc *desc, void *out) { struct md5_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int md5_sparc64_import(struct shash_desc *desc, const void *in) { struct md5_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = md5_sparc64_init, .update = md5_sparc64_update, .final = md5_sparc64_final, .export = md5_sparc64_export, .import = md5_sparc64_import, .descsize = sizeof(struct md5_state), .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name= "md5-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static bool __init sparc64_has_md5_opcode(void) { unsigned long cfr; if 
(!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_MD5)) return false; return true; } static int __init md5_sparc64_mod_init(void) { if (sparc64_has_md5_opcode()) { pr_info("Using sparc64 md5 opcode optimized MD5 implementation\n"); return crypto_register_shash(&alg); } pr_info("sparc64 md5 opcode not available.\n"); return -ENODEV; } static void __exit md5_sparc64_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(md5_sparc64_mod_init); module_exit(md5_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated"); MODULE_ALIAS("md5"); #include "crop_devid.c"
gpl-2.0
1N4148/android_kernel_samsung_golden
arch/blackfin/mach-bf537/boards/stamp.c
2188
78675
/* * Copyright 2004-2009 Analog Devices Inc. * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/mtd/plat-ram.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) #include <linux/usb/isp1362.h> #endif #include <linux/i2c.h> #include <linux/i2c/adp5588.h> #include <linux/etherdevice.h> #include <linux/ata_platform.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/usb/sl811.h> #include <linux/spi/mmc_spi.h> #include <linux/leds.h> #include <linux/input.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/reboot.h> #include <asm/portmux.h> #include <asm/dpmc.h> #include <asm/bfin_sport.h> #ifdef CONFIG_REGULATOR_FIXED_VOLTAGE #include <linux/regulator/fixed.h> #endif #include <linux/regulator/machine.h> #include <linux/regulator/consumer.h> #include <linux/regulator/userspace-consumer.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "ADI BF537-STAMP"; /* * Driver needs to know address, irq and flag pin. 
*/ #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) #include <linux/usb/isp1760.h> static struct resource bfin_isp1760_resources[] = { [0] = { .start = 0x203C0000, .end = 0x203C0000 + 0x000fffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, }; static struct isp1760_platform_data isp1760_priv = { .is_isp1761 = 0, .bus_width_16 = 1, .port1_otg = 0, .analog_oc = 0, .dack_polarity_high = 0, .dreq_polarity_high = 0, }; static struct platform_device bfin_isp1760_device = { .name = "isp1760", .id = 0, .dev = { .platform_data = &isp1760_priv, }, .num_resources = ARRAY_SIZE(bfin_isp1760_resources), .resource = bfin_isp1760_resources, }; #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include <linux/gpio_keys.h> static struct gpio_keys_button bfin_gpio_keys_table[] = { {BTN_0, GPIO_PF2, 1, "gpio-keys: BTN0"}, {BTN_1, GPIO_PF3, 1, "gpio-keys: BTN1"}, {BTN_2, GPIO_PF4, 1, "gpio-keys: BTN2"}, {BTN_3, GPIO_PF5, 1, "gpio-keys: BTN3"}, }; static struct gpio_keys_platform_data bfin_gpio_keys_data = { .buttons = bfin_gpio_keys_table, .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table), }; static struct platform_device bfin_device_gpiokeys = { .name = "gpio-keys", .dev = { .platform_data = &bfin_gpio_keys_data, }, }; #endif #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) static struct resource bfin_pcmcia_cf_resources[] = { { .start = 0x20310000, /* IO PORT */ .end = 0x20312000, .flags = IORESOURCE_MEM, }, { .start = 0x20311000, /* Attribute Memory */ .end = 0x20311FFF, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF4, .end = IRQ_PF4, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, { .start = 6, /* Card Detect PF6 */ .end = 6, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_pcmcia_cf_device = { .name = "bfin_cf_pcmcia", .id = -1, .num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources), .resource = 
bfin_pcmcia_cf_resources, }; #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) #include <linux/smc91x.h> static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { { .name = "smc91x-regs", .start = 0x20300300, .end = 0x20300300 + 16, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &smc91x_info, }, }; #endif #if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) static struct resource dm9000_resources[] = { [0] = { .start = 0x203FB800, .end = 0x203FB800 + 1, .flags = IORESOURCE_MEM, }, [1] = { .start = 0x203FB804, .end = 0x203FB804 + 1, .flags = IORESOURCE_MEM, }, [2] = { .start = IRQ_PF9, .end = IRQ_PF9, .flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE), }, }; static struct platform_device dm9000_device = { .name = "dm9000", .id = -1, .num_resources = ARRAY_SIZE(dm9000_resources), .resource = dm9000_resources, }; #endif #if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) static struct resource sl811_hcd_resources[] = { { .start = 0x20340000, .end = 0x20340000, .flags = IORESOURCE_MEM, }, { .start = 0x20340004, .end = 0x20340004, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF4, .end = IRQ_PF4, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; #if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) void sl811_port_power(struct device *dev, int is_on) { gpio_request(CONFIG_USB_SL811_BFIN_GPIO_VBUS, "usb:SL811_VBUS"); gpio_direction_output(CONFIG_USB_SL811_BFIN_GPIO_VBUS, is_on); } #endif static struct 
sl811_platform_data sl811_priv = { .potpg = 10, .power = 250, /* == 500mA */ #if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) .port_power = &sl811_port_power, #endif }; static struct platform_device sl811_hcd_device = { .name = "sl811-hcd", .id = 0, .dev = { .platform_data = &sl811_priv, }, .num_resources = ARRAY_SIZE(sl811_hcd_resources), .resource = sl811_hcd_resources, }; #endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) static struct resource isp1362_hcd_resources[] = { { .start = 0x20360000, .end = 0x20360000, .flags = IORESOURCE_MEM, }, { .start = 0x20360004, .end = 0x20360004, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF3, .end = IRQ_PF3, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, }, }; static struct isp1362_platform_data isp1362_priv = { .sel15Kres = 1, .clknotstop = 0, .oc_enable = 0, .int_act_high = 0, .int_edge_triggered = 0, .remote_wakeup_connected = 0, .no_power_switching = 1, .power_switching_mode = 0, }; static struct platform_device isp1362_hcd_device = { .name = "isp1362-hcd", .id = 0, .dev = { .platform_data = &isp1362_priv, }, .num_resources = ARRAY_SIZE(isp1362_hcd_resources), .resource = isp1362_hcd_resources, }; #endif #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) static unsigned short bfin_can_peripherals[] = { P_CAN0_RX, P_CAN0_TX, 0 }; static struct resource bfin_can_resources[] = { { .start = 0xFFC02A00, .end = 0xFFC02FFF, .flags = IORESOURCE_MEM, }, { .start = IRQ_CAN_RX, .end = IRQ_CAN_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_CAN_TX, .end = IRQ_CAN_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_CAN_ERROR, .end = IRQ_CAN_ERROR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_can_device = { .name = "bfin_can", .num_resources = ARRAY_SIZE(bfin_can_resources), .resource = bfin_can_resources, .dev = { .platform_data = &bfin_can_peripherals, /* Passed to driver */ }, }; #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) #include 
<linux/bfin_mac.h> static const unsigned short bfin_mac_peripherals[] = P_MII0; static struct bfin_phydev_platform_data bfin_phydev_data[] = { { .addr = 1, .irq = PHY_POLL, /* IRQ_MAC_PHYINT */ }, }; static struct bfin_mii_bus_platform_data bfin_mii_bus_data = { .phydev_number = 1, .phydev_data = bfin_phydev_data, .phy_mode = PHY_INTERFACE_MODE_MII, .mac_peripherals = bfin_mac_peripherals, }; static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", .dev = { .platform_data = &bfin_mii_bus_data, } }; static struct platform_device bfin_mac_device = { .name = "bfin_mac", .dev = { .platform_data = &bfin_mii_bus, } }; #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) static struct resource net2272_bfin_resources[] = { { .start = 0x20300000, .end = 0x20300000 + 0x100, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device net2272_bfin_device = { .name = "net2272", .id = -1, .num_resources = ARRAY_SIZE(net2272_bfin_resources), .resource = net2272_bfin_resources, }; #endif #if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; static struct mtd_partition bfin_plat_nand_partitions[] = { { .name = "linux kernel(nand)", .size = 0x400000, .offset = 0, }, { .name = "file system(nand)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, }, }; #define BFIN_NAND_PLAT_CLE 2 #define BFIN_NAND_PLAT_ALE 1 static void bfin_plat_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *this = mtd->priv; if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writeb(cmd, this->IO_ADDR_W + (1 << BFIN_NAND_PLAT_CLE)); else writeb(cmd, this->IO_ADDR_W + (1 << BFIN_NAND_PLAT_ALE)); } #define BFIN_NAND_PLAT_READY GPIO_PF3 static int bfin_plat_nand_dev_ready(struct mtd_info *mtd) { return gpio_get_value(BFIN_NAND_PLAT_READY); } static 
struct platform_nand_data bfin_plat_nand_data = { .chip = { .nr_chips = 1, .chip_delay = 30, .part_probe_types = part_probes, .partitions = bfin_plat_nand_partitions, .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions), }, .ctrl = { .cmd_ctrl = bfin_plat_nand_cmd_ctrl, .dev_ready = bfin_plat_nand_dev_ready, }, }; #define MAX(x, y) (x > y ? x : y) static struct resource bfin_plat_nand_resources = { .start = 0x20212000, .end = 0x20212000 + (1 << MAX(BFIN_NAND_PLAT_CLE, BFIN_NAND_PLAT_ALE)), .flags = IORESOURCE_MEM, }; static struct platform_device bfin_async_nand_device = { .name = "gen_nand", .id = -1, .num_resources = 1, .resource = &bfin_plat_nand_resources, .dev = { .platform_data = &bfin_plat_nand_data, }, }; static void bfin_plat_nand_init(void) { gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat"); } #else static void bfin_plat_nand_init(void) {} #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition stamp_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux kernel(nor)", .size = 0x180000, .offset = MTDPART_OFS_APPEND, }, { .name = "file system(nor)", .size = 0x400000 - 0x40000 - 0x180000 - 0x10000, .offset = MTDPART_OFS_APPEND, }, { .name = "MAC Address(nor)", .size = MTDPART_SIZ_FULL, .offset = 0x3F0000, .mask_flags = MTD_WRITEABLE, } }; static struct physmap_flash_data stamp_flash_data = { .width = 2, .parts = stamp_partitions, .nr_parts = ARRAY_SIZE(stamp_partitions), #ifdef CONFIG_ROMKERNEL .probe_type = "map_rom", #endif }; static struct resource stamp_flash_resource = { .start = 0x20000000, .end = 0x203fffff, .flags = IORESOURCE_MEM, }; static struct platform_device stamp_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &stamp_flash_data, }, .num_resources = 1, .resource = &stamp_flash_resource, }; #endif #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) static struct mtd_partition bfin_spi_flash_partitions[] = 
{
	{
		.name = "bootloader(spi)",
		.size = 0x00040000,
		.offset = 0,
		.mask_flags = MTD_CAP_ROM
	}, {
		.name = "linux kernel(spi)",
		.size = 0x180000,
		.offset = MTDPART_OFS_APPEND,
	}, {
		.name = "file system(spi)",
		.size = MTDPART_SIZ_FULL,
		.offset = MTDPART_OFS_APPEND,
	}
};

static struct flash_platform_data bfin_spi_flash_data = {
	.name = "m25p80",
	.parts = bfin_spi_flash_partitions,
	.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
	/* .type = "m25p64", */
};

/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
	.enable_dma = 0,         /* use dma transfer with this chip*/
	.bits_per_word = 8,
};
#endif

#if defined(CONFIG_BFIN_SPI_ADC) \
	|| defined(CONFIG_BFIN_SPI_ADC_MODULE)
/* SPI ADC chip */
static struct bfin5xx_spi_chip spi_adc_chip_info = {
	.enable_dma = 1,         /* use dma transfer with this chip*/
	.bits_per_word = 16,
};
#endif

#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
	|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 16,
};
#endif

#if defined(CONFIG_SND_BF5XX_SOC_AD193X) \
	|| defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE)
static struct bfin5xx_spi_chip ad1938_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 8,
};
#endif

#if defined(CONFIG_SND_BF5XX_SOC_ADAV80X) \
	|| defined(CONFIG_SND_BF5XX_SOC_ADAV80X_MODULE)
static struct bfin5xx_spi_chip adav801_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 8,
};
#endif

#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE)
#include <linux/input/ad714x.h>
static struct bfin5xx_spi_chip ad7147_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 16,
};

/* one 8-stage slider on the AD7147 captouch controller */
static struct ad714x_slider_plat ad7147_spi_slider_plat[] = {
	{
		.start_stage = 0,
		.end_stage = 7,
		.max_coord = 128,
	},
};

static struct ad714x_button_plat ad7147_spi_button_plat[] = {
	{
		.keycode = BTN_FORWARD,
		.l_mask = 0,
		.h_mask = 0x600,
	}, {
		.keycode = BTN_LEFT,
		.l_mask = 0,
		.h_mask = 0x500,
	}, {
		.keycode = BTN_MIDDLE,
		.l_mask = 0,
		.h_mask = 0x800,
	}, {
		.keycode = BTN_RIGHT,
		.l_mask = 0x100,
		.h_mask = 0x400,
	}, {
		.keycode = BTN_BACK,
		.l_mask = 0x200,
		.h_mask = 0x400,
	},
};

/* board-calibrated stage/system register values for the AD7147 */
static struct ad714x_platform_data ad7147_spi_platform_data = {
	.slider_num = 1,
	.button_num = 5,
	.slider = ad7147_spi_slider_plat,
	.button = ad7147_spi_button_plat,
	.stage_cfg_reg =  {
		{0xFBFF, 0x1FFF, 0, 0x2626, 1600, 1600, 1600, 1600},
		{0xEFFF, 0x1FFF, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1FFE, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1FFB, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1FEF, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1FBF, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1EFF, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1BFF, 0, 0x2626, 1600, 1600, 1600, 1600},
		{0xFF7B, 0x3FFF, 0x506,  0x2626, 1100, 1100, 1150, 1150},
		{0xFDFE, 0x3FFF, 0x606,  0x2626, 1100, 1100, 1150, 1150},
		{0xFEBA, 0x1FFF, 0x1400, 0x2626, 1200, 1200, 1300, 1300},
		{0xFFEF, 0x1FFF, 0x0,    0x2626, 1100, 1100, 1150, 1150},
	},
	.sys_cfg_reg = {0x2B2, 0x0, 0x3233, 0x819, 0x832, 0xCFF, 0xCFF, 0x0},
};
#endif

#if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE)
#include <linux/input/ad714x.h>
static struct ad714x_button_plat ad7142_i2c_button_plat[] = {
	{
		.keycode = BTN_1,
		.l_mask = 0,
		.h_mask = 0x1,
	}, {
		.keycode = BTN_2,
		.l_mask = 0,
		.h_mask = 0x2,
	}, {
		.keycode = BTN_3,
		.l_mask = 0,
		.h_mask = 0x4,
	}, {
		.keycode = BTN_4,
		.l_mask = 0x0,
		.h_mask = 0x8,
	},
};

static struct ad714x_platform_data ad7142_i2c_platform_data = {
	.button_num = 4,
	.button = ad7142_i2c_button_plat,
	.stage_cfg_reg =  {
		/* fixme: figure out right setting for all components according
		 * to hardware feature of EVAL-AD7142EB board */
		{0xE7FF, 0x3FFF, 0x0005, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
		{0xFDBF, 0x3FFF, 0x0001, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
		{0xFFFF, 0x2DFF, 0x0001, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
		{0xFFFF, 0x37BF, 0x0001, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
	},
	.sys_cfg_reg = {0x0B2, 0x0, 0x690, 0x664, 0x290F, 0xF, 0xF, 0x0},
};
#endif

#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE)
static struct bfin5xx_spi_chip ad2s90_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 16,
};
#endif

#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
static unsigned short ad2s120x_platform_data[] = {
	/* used as SAMPLE and RDVEL */
	GPIO_PF5, GPIO_PF6, 0
};

static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 16,
};
#endif

#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
static unsigned short ad2s1210_platform_data[] = {
	/* use as SAMPLE, A0, A1 */
	GPIO_PF7, GPIO_PF8, GPIO_PF9,
# if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
	/* the RES0 and RES1 pins */
	GPIO_PF4, GPIO_PF5,
# endif
	0,
};

static struct bfin5xx_spi_chip ad2s1210_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 8,
};
#endif

#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE)
static struct bfin5xx_spi_chip ad7314_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 16,
};
#endif

#if defined(CONFIG_AD7816) || defined(CONFIG_AD7816_MODULE)
static unsigned short ad7816_platform_data[] = {
	GPIO_PF4, /* rdwr_pin */
	GPIO_PF5, /* convert_pin */
	GPIO_PF7, /* busy_pin */
	0,
};

static struct bfin5xx_spi_chip ad7816_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 8,
};
#endif

#if defined(CONFIG_ADT7310) || defined(CONFIG_ADT7310_MODULE)
static unsigned long adt7310_platform_data[3] = {
	/* INT bound temperature alarm event. line 1 */
	IRQ_PG4, IRQF_TRIGGER_LOW,
	/* CT bound temperature alarm event irq_flags. line 0 */
	IRQF_TRIGGER_LOW,
};

static struct bfin5xx_spi_chip adt7310_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 8,
};
#endif

#if defined(CONFIG_AD7298) || defined(CONFIG_AD7298_MODULE)
static unsigned short ad7298_platform_data[] = {
	GPIO_PF7, /* busy_pin */
	0,
};

static struct bfin5xx_spi_chip ad7298_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 16,
};
#endif

#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE)
static unsigned long adt7316_spi_data[2] = {
	IRQF_TRIGGER_LOW, /* interrupt flags */
	GPIO_PF7, /* ldac_pin, 0 means DAC/LDAC registers control DAC update */
};

static struct bfin5xx_spi_chip adt7316_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 8,
};
#endif

#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
#define MMC_SPI_CARD_DETECT_INT IRQ_PF5

/* hook the card-detect GPIO IRQ up to the mmc_spi core's handler */
static int bfin_mmc_spi_init(struct device *dev,
	irqreturn_t (*detect_int)(int, void *), void *data)
{
	return request_irq(MMC_SPI_CARD_DETECT_INT, detect_int,
		IRQF_TRIGGER_FALLING, "mmc-spi-detect", data);
}

static void bfin_mmc_spi_exit(struct device *dev, void *data)
{
	free_irq(MMC_SPI_CARD_DETECT_INT, data);
}

static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
	.init = bfin_mmc_spi_init,
	.exit = bfin_mmc_spi_exit,
	.detect_delay = 100, /* msecs */
};

static struct bfin5xx_spi_chip  mmc_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 8,
	.pio_interrupt = 0,
};
#endif

#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
#include <linux/spi/ad7877.h>
static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 16,
};

static const struct ad7877_platform_data bfin_ad7877_ts_info = {
	.model			= 7877,
	.vref_delay_usecs	= 50,	/* internal, no capacitor */
	.x_plate_ohms		= 419,
	.y_plate_ohms		= 486,
	.pressure_max		= 1000,
	.pressure_min		= 0,
	.stopacq_polarity	= 1,
	.first_conversion_delay	= 3,
	.acquisition_time	= 1,
	.averaging		= 1,
	.pen_down_acc_interval	= 1,
};
#endif

#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
#include <linux/spi/ad7879.h>
static const struct ad7879_platform_data bfin_ad7879_ts_info = {
	.model			= 7879,	/* Model = AD7879 */
	.x_plate_ohms		= 620,	/* 620 Ohm from the touch datasheet */
	.pressure_max		= 10000,
	.pressure_min		= 0,
	.first_conversion_delay	= 3,	/* wait 512us before do a first conversion */
	.acquisition_time	= 1,	/* 4us acquisition time per sample */
	.median			= 2,	/* do 8 measurements */
	.averaging		= 1,	/* take the average of 4 middle samples */
	.pen_down_acc_interval	= 255,	/* 9.4 ms */
	.gpio_export		= 1,	/* Export GPIO to gpiolib */
	.gpio_base		= -1,	/* Dynamic allocation */
};
#endif

#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE)
#include <linux/input/adxl34x.h>
/* ADXL34x accelerometer tuning; tap/free-fall thresholds are raw counts */
static const struct adxl34x_platform_data adxl34x_info = {
	.x_axis_offset = 0,
	.y_axis_offset = 0,
	.z_axis_offset = 0,
	.tap_threshold = 0x31,
	.tap_duration = 0x10,
	.tap_latency = 0x60,
	.tap_window = 0xF0,
	.tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN,
	.act_axis_control = 0xFF,
	.activity_threshold = 5,
	.inactivity_threshold = 3,
	.inactivity_time = 4,
	.free_fall_threshold = 0x7,
	.free_fall_time = 0x20,
	.data_rate = 0x8,
	.data_range = ADXL_FULL_RES,

	.ev_type = EV_ABS,
	.ev_code_x = ABS_X,	/* EV_REL */
	.ev_code_y = ABS_Y,	/* EV_REL */
	.ev_code_z = ABS_Z,	/* EV_REL */

	.ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY x,y,z */

/*	.ev_code_ff = KEY_F,*/		/* EV_KEY */
/*	.ev_code_act_inactivity = KEY_A,*/	/* EV_KEY */
	.power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
	.fifo_mode = ADXL_FIFO_STREAM,
	.orientation_enable = ADXL_EN_ORIENTATION_3D,
	.deadzone_angle = ADXL_DEADZONE_ANGLE_10p8,
	.divisor_length = ADXL_LP_FILTER_DIVISOR_16,
	/* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */
	.ev_codes_orient_3d = {BTN_Z, BTN_Y, BTN_X, BTN_A, BTN_B, BTN_C},
};
#endif

#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 16,
};
#endif

#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
static struct bfin5xx_spi_chip spidev_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 8,
};
#endif

#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
	.enable_dma	= 0,
	.bits_per_word	= 8,
};
#endif

#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE)
static struct bfin5xx_spi_chip enc28j60_spi_chip_info = {
	.enable_dma	= 1,
	.bits_per_word	= 8,
};
#endif

#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE)
static struct bfin5xx_spi_chip adf7021_spi_chip_info = {
	.bits_per_word = 16,
};

#include <linux/spi/adf702x.h>
#define TXREG 0x0160A470
/* register bring-up sequence for the ADF7021 transceiver */
static const u32 adf7021_regs[] = {
	0x09608FA0,
	0x00575011,
	0x00A7F092,
	0x2B141563,
	0x81F29E94,
	0x00003155,
	0x050A4F66,
	0x00000007,
	0x00000008,
	0x000231E9,
	0x3296354A,
	0x891A2B3B,
	0x00000D9C,
	0x0000000D,
	0x0000000E,
	0x0000000F,
};

/* radio is clocked off SPORT1; pin list is zero-terminated */
static struct adf702x_platform_data adf7021_platform_data = {
	.regs_base = (void *)SPORT1_TCR1,
	.dma_ch_rx = CH_SPORT1_RX,
	.dma_ch_tx = CH_SPORT1_TX,
	.irq_sport_err = IRQ_SPORT1_ERROR,
	.gpio_int_rfs = GPIO_PF8,
	.pin_req = {P_SPORT1_DTPRI, P_SPORT1_RFS, P_SPORT1_DRPRI,
			P_SPORT1_RSCLK, P_SPORT1_TSCLK, 0},
	.adf702x_model = MODEL_ADF7021,
	.adf702x_regs = adf7021_regs,
	.tx_reg = TXREG,
};
/* give the radio a random MAC at boot */
static inline void adf702x_mac_init(void)
{
	random_ether_addr(adf7021_platform_data.mac_addr);
}
#else
static inline void adf702x_mac_init(void) {}
#endif

#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
#include <linux/spi/ads7846.h>
static struct bfin5xx_spi_chip ad7873_spi_chip_info = {
	.bits_per_word	= 8,
};

/* pen-down is signalled on PF6 */
static int ads7873_get_pendown_state(void)
{
	return gpio_get_value(GPIO_PF6);
}

static struct ads7846_platform_data __initdata ad7873_pdata = {
	.model		= 7873,		/* AD7873 */
	.x_max		= 0xfff,
	.y_max		= 0xfff,
	.x_plate_ohms	= 620,
	.debounce_max	= 1,
	.debounce_rep	= 0,
	.debounce_tol	= (~0),
	.get_pendown_state = ads7873_get_pendown_state,
};
#endif

#if defined(CONFIG_MTD_DATAFLASH) \
	|| defined(CONFIG_MTD_DATAFLASH_MODULE)
static struct mtd_partition bfin_spi_dataflash_partitions[] = {
	{
		.name = "bootloader(spi)",
		.size = 0x00040000,
		.offset = 0,
		.mask_flags = MTD_CAP_ROM
	}, {
		.name = "linux kernel(spi)",
		.size = 0x180000,
		.offset = MTDPART_OFS_APPEND,
	}, {
		.name = "file system(spi)",
		.size = MTDPART_SIZ_FULL,
		.offset = MTDPART_OFS_APPEND,
	}
};

static struct flash_platform_data bfin_spi_dataflash_data = {
	.name = "SPI Dataflash",
	.parts = bfin_spi_dataflash_partitions,
	.nr_parts = ARRAY_SIZE(bfin_spi_dataflash_partitions),
};

/* DataFlash chip */
static struct bfin5xx_spi_chip data_flash_chip_info = {
	.enable_dma = 0,         /* use dma transfer with this chip*/
	.bits_per_word = 8,
};
#endif

#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
static struct bfin5xx_spi_chip spi_adxl34x_chip_info = {
	.enable_dma = 0,         /* use dma transfer with this chip*/
	.bits_per_word = 8,
};
#endif

#if defined(CONFIG_AD7476) || defined(CONFIG_AD7476_MODULE)
static struct bfin5xx_spi_chip spi_ad7476_chip_info = {
	.enable_dma = 0,         /* use dma transfer with this chip*/
	.bits_per_word = 8,
};
#endif

/* all SPI slaves registered at boot; entries are Kconfig-gated */
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
	|| defined(CONFIG_MTD_M25P80_MODULE)
	{
		/* the modalias must be the same as spi device driver name */
		.modalias = "m25p80", /* Name of spi_driver for this device */
		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0, /* Framework bus number */
		.chip_select = 1, /* Framework chip select.
On STAMP537 it is SPISSEL1*/
		.platform_data = &bfin_spi_flash_data,
		.controller_data = &spi_flash_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_MTD_DATAFLASH) \
	|| defined(CONFIG_MTD_DATAFLASH_MODULE)
	{	/* DataFlash chip */
		.modalias = "mtd_dataflash",
		.max_speed_hz = 33250000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0, /* Framework bus number */
		.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
		.platform_data = &bfin_spi_dataflash_data,
		.controller_data = &data_flash_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_BFIN_SPI_ADC) \
	|| defined(CONFIG_BFIN_SPI_ADC_MODULE)
	{
		.modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
		.max_speed_hz = 6250000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0, /* Framework bus number */
		.chip_select = 1, /* Framework chip select. */
		.platform_data = NULL, /* No spi_driver specific config */
		.controller_data = &spi_adc_chip_info,
	},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
	|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
	{
		.modalias = "ad183x",
		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 4,
		.platform_data = "ad1836", /* only includes chip name for the moment */
		.controller_data = &ad1836_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD193X) || defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE)
	{
		.modalias = "ad193x",
		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,
		.controller_data = &ad1938_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_ADAV80X) || defined(CONFIG_SND_BF5XX_SOC_ADAV80X_MODULE)
	{
		.modalias = "adav80x",
		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,
		.controller_data = &adav801_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE)
	{
		.modalias = "ad714x_captouch",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.irq = IRQ_PF4,
		.bus_num = 0,
		.chip_select = 5,
		.mode = SPI_MODE_3,
		.platform_data = &ad7147_spi_platform_data,
		.controller_data = &ad7147_spi_chip_info,
	},
#endif
#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE)
	{
		.modalias = "ad2s90",
		.bus_num = 0,
		.chip_select = 3,            /* change it for your board */
		.mode = SPI_MODE_3,
		.platform_data = NULL,
		.controller_data = &ad2s90_spi_chip_info,
	},
#endif
#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
	{
		.modalias = "ad2s120x",
		.bus_num = 0,
		.chip_select = 4,            /* CS, change it for your board */
		.platform_data = ad2s120x_platform_data,
		.controller_data = &ad2s120x_spi_chip_info,
	},
#endif
#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
	{
		.modalias = "ad2s1210",
		.max_speed_hz = 8192000,
		.bus_num = 0,
		.chip_select = 4,            /* CS, change it for your board */
		.platform_data = ad2s1210_platform_data,
		.controller_data = &ad2s1210_spi_chip_info,
	},
#endif
#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE)
	{
		.modalias = "ad7314",
		.max_speed_hz = 1000000,
		.bus_num = 0,
		.chip_select = 4,            /* CS, change it for your board */
		.controller_data = &ad7314_spi_chip_info,
		.mode = SPI_MODE_1,
	},
#endif
#if defined(CONFIG_AD7816) || defined(CONFIG_AD7816_MODULE)
	{
		.modalias = "ad7818",
		.max_speed_hz = 1000000,
		.bus_num = 0,
		.chip_select = 4,            /* CS, change it for your board */
		.platform_data = ad7816_platform_data,
		.controller_data = &ad7816_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_ADT7310) || defined(CONFIG_ADT7310_MODULE)
	{
		.modalias = "adt7310",
		.max_speed_hz = 1000000,
		.irq = IRQ_PG5,		/* CT alarm event. Line 0 */
		.bus_num = 0,
		.chip_select = 4,	/* CS, change it for your board */
		.platform_data = adt7310_platform_data,
		.controller_data = &adt7310_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_AD7298) || defined(CONFIG_AD7298_MODULE)
	{
		.modalias = "ad7298",
		.max_speed_hz = 1000000,
		.bus_num = 0,
		.chip_select = 4,            /* CS, change it for your board */
		.platform_data = ad7298_platform_data,
		.controller_data = &ad7298_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE)
	{
		.modalias = "adt7316",
		.max_speed_hz = 1000000,
		.irq = IRQ_PG5,		/* interrupt line */
		.bus_num = 0,
		.chip_select = 4,	/* CS, change it for your board */
		.platform_data = adt7316_spi_data,
		.controller_data = &adt7316_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
	{
		.modalias = "mmc_spi",
		.max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 4,
		.platform_data = &bfin_mmc_spi_pdata,
		.controller_data = &mmc_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
	{
		.modalias		= "ad7877",
		.platform_data		= &bfin_ad7877_ts_info,
		.irq			= IRQ_PF6,
		.max_speed_hz	= 12500000,     /* max spi clock (SCK) speed in HZ */
		.bus_num	= 0,
		.chip_select  = 1,
		.controller_data = &spi_ad7877_chip_info,
	},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
	{
		.modalias = "ad7879",
		.platform_data = &bfin_ad7879_ts_info,
		.irq = IRQ_PF7,
		.max_speed_hz = 5000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,
		.controller_data = &spi_ad7879_chip_info,
		.mode = SPI_CPHA | SPI_CPOL,
	},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
	{
		.modalias = "spidev",
		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,
		.controller_data = &spidev_chip_info,
	},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
	{
		.modalias = "bfin-lq035q1-spi",
		.max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 2,
		.controller_data = &lq035q1_spi_chip_info,
		.mode = SPI_CPHA | SPI_CPOL,
	},
#endif
#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE)
	{
		.modalias = "enc28j60",
		.max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
		.irq = IRQ_PF6,
		.bus_num = 0,
		.chip_select = GPIO_PF10 + MAX_CTRL_CS,	/* GPIO controlled SSEL */
		.controller_data = &enc28j60_spi_chip_info,
		.mode = SPI_MODE_0,
	},
#endif
#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
	{
		.modalias	= "adxl34x",
		.platform_data	= &adxl34x_info,
		.irq		= IRQ_PF6,
		.max_speed_hz	= 5000000,    /* max spi clock (SCK) speed in HZ */
		.bus_num	= 0,
		.chip_select	= 2,
		.controller_data = &spi_adxl34x_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE)
	{
		.modalias = "adf702x",
		.max_speed_hz = 16000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = GPIO_PF10 + MAX_CTRL_CS,	/* GPIO controlled SSEL */
		.controller_data = &adf7021_spi_chip_info,
		.platform_data = &adf7021_platform_data,
		.mode = SPI_MODE_0,
	},
#endif
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
	{
		.modalias = "ads7846",
		.max_speed_hz = 2000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.irq = IRQ_PF6,
		.chip_select = GPIO_PF10 + MAX_CTRL_CS,	/* GPIO controlled SSEL */
		.controller_data = &ad7873_spi_chip_info,
		.platform_data = &ad7873_pdata,
		.mode = SPI_MODE_0,
	},
#endif
#if defined(CONFIG_AD7476) \
	|| defined(CONFIG_AD7476_MODULE)
	{
		.modalias = "ad7476", /* Name of spi_driver for this device */
		.max_speed_hz = 6250000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0, /* Framework bus number */
		.chip_select = 1, /* Framework chip select. */
		.platform_data = NULL, /* No spi_driver specific config */
		.controller_data = &spi_ad7476_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_ADE7753) \
	|| defined(CONFIG_ADE7753_MODULE)
	{
		.modalias = "ade7753",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_1,
	},
#endif
#if defined(CONFIG_ADE7754) \
	|| defined(CONFIG_ADE7754_MODULE)
	{
		.modalias = "ade7754",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_1,
	},
#endif
#if defined(CONFIG_ADE7758) \
	|| defined(CONFIG_ADE7758_MODULE)
	{
		.modalias = "ade7758",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_1,
	},
#endif
#if defined(CONFIG_ADE7759) \
	|| defined(CONFIG_ADE7759_MODULE)
	{
		.modalias = "ade7759",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_1,
	},
#endif
#if defined(CONFIG_ADE7854_SPI) \
	|| defined(CONFIG_ADE7854_SPI_MODULE)
	{
		.modalias = "ade7854",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_ADIS16060) \
	|| defined(CONFIG_ADIS16060_MODULE)
	{
		.modalias = "adis16060_r",
		.max_speed_hz = 2900000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = MAX_CTRL_CS + 1, /* CS for read, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode =
SPI_MODE_0,
	}, {
		.modalias = "adis16060_w",
		.max_speed_hz = 2900000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 2,            /* CS for write, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_1,
	},
#endif
#if defined(CONFIG_ADIS16130) \
	|| defined(CONFIG_ADIS16130_MODULE)
	{
		.modalias = "adis16130",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,            /* CS for read, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_ADIS16201) \
	|| defined(CONFIG_ADIS16201_MODULE)
	{
		.modalias = "adis16201",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
		.irq = IRQ_PF4,
	},
#endif
#if defined(CONFIG_ADIS16203) \
	|| defined(CONFIG_ADIS16203_MODULE)
	{
		.modalias = "adis16203",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
		.irq = IRQ_PF4,
	},
#endif
#if defined(CONFIG_ADIS16204) \
	|| defined(CONFIG_ADIS16204_MODULE)
	{
		.modalias = "adis16204",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
		.irq = IRQ_PF4,
	},
#endif
#if defined(CONFIG_ADIS16209) \
	|| defined(CONFIG_ADIS16209_MODULE)
	{
		.modalias = "adis16209",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
		.irq = IRQ_PF4,
	},
#endif
#if defined(CONFIG_ADIS16220) \
	|| defined(CONFIG_ADIS16220_MODULE)
	{
		.modalias = "adis16220",
		.max_speed_hz = 2000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
		.irq = IRQ_PF4,
	},
#endif
#if defined(CONFIG_ADIS16240) \
	|| defined(CONFIG_ADIS16240_MODULE)
	{
		.modalias = "adis16240",
		.max_speed_hz = 1500000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
		.irq = IRQ_PF4,
	},
#endif
#if defined(CONFIG_ADIS16260) \
	|| defined(CONFIG_ADIS16260_MODULE)
	{
		.modalias = "adis16260",
		.max_speed_hz = 1500000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
		.irq = IRQ_PF4,
	},
#endif
#if defined(CONFIG_ADIS16261) \
	|| defined(CONFIG_ADIS16261_MODULE)
	{
		.modalias = "adis16261",
		.max_speed_hz = 2500000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_ADIS16300) \
	|| defined(CONFIG_ADIS16300_MODULE)
	{
		.modalias = "adis16300",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
		.irq = IRQ_PF4,
	},
#endif
#if defined(CONFIG_ADIS16350) \
	|| defined(CONFIG_ADIS16350_MODULE)
	{
		.modalias = "adis16364",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
		.irq = IRQ_PF4,
	},
#endif
#if defined(CONFIG_ADIS16400) \
	|| defined(CONFIG_ADIS16400_MODULE)
	{
		.modalias = "adis16400",
		.max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,            /* CS, change it for your board */
		.platform_data = NULL,       /* No spi_driver specific config */
		.mode = SPI_MODE_3,
	},
#endif
};

#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
	/* hardware chip selects plus every GPIO-driven SSEL */
	.num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
	.enable_dma = 1,  /* master has the ability to do dma transfer */
	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};

/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
	[0] = {
		.start = SPI0_REGBASE,
		.end   = SPI0_REGBASE + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = CH_SPI,
		.end   = CH_SPI,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = IRQ_SPI,
		.end   = IRQ_SPI,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_spi0_device = {
	.name = "bfin-spi",
	.id = 0, /* Bus number */
	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
	.resource = bfin_spi0_resource,
	.dev = {
		.platform_data = &bfin_spi0_info, /* Passed to driver */
	},
};
#endif  /* spi master and devices */

#if defined(CONFIG_SPI_BFIN_SPORT) || defined(CONFIG_SPI_BFIN_SPORT_MODULE)
/* SPORT SPI controller data */
static struct bfin5xx_spi_master bfin_sport_spi0_info = {
	.num_chipselect = 1, /* master only supports one device */
	.enable_dma = 0,  /* master don't support DMA */
	.pin_req = {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_DRPRI,
		P_SPORT0_RSCLK, P_SPORT0_TFS, P_SPORT0_RFS, 0},
};

static struct resource bfin_sport_spi0_resource[] = {
	[0] = {
		.start = SPORT0_TCR1,
		.end   = SPORT0_TCR1 + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_SPORT0_ERROR,
		.end   = IRQ_SPORT0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_sport_spi0_device = {
	.name = "bfin-sport-spi",
	.id = 1, /* Bus number */
	.num_resources = ARRAY_SIZE(bfin_sport_spi0_resource),
	.resource = bfin_sport_spi0_resource,
	.dev = {
		.platform_data = &bfin_sport_spi0_info, /* Passed to driver */
	},
};

static
struct bfin5xx_spi_master bfin_sport_spi1_info = { .num_chipselect = 1, /* master only supports one device */ .enable_dma = 0, /* master don't support DMA */ .pin_req = {P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_TFS, P_SPORT1_RFS, 0}, }; static struct resource bfin_sport_spi1_resource[] = { [0] = { .start = SPORT1_TCR1, .end = SPORT1_TCR1 + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_sport_spi1_device = { .name = "bfin-sport-spi", .id = 2, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_sport_spi1_resource), .resource = bfin_sport_spi1_resource, .dev = { .platform_data = &bfin_sport_spi1_info, /* Passed to driver */ }, }; #endif /* sport spi master and devices */ #if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) static struct platform_device bfin_fb_device = { .name = "bf537-lq035", }; #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) #include <asm/bfin-lq035q1.h> static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, .ppi_mode = USE_RGB565_16_BIT_PPI, .use_bl = 0, /* let something else control the LCD Blacklight */ .gpio_bl = GPIO_PF7, }; static struct resource bfin_lq035q1_resources[] = { { .start = IRQ_PPI_ERROR, .end = IRQ_PPI_ERROR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_lq035q1_device = { .name = "bfin-lq035q1", .id = -1, .num_resources = ARRAY_SIZE(bfin_lq035q1_resources), .resource = bfin_lq035q1_resources, .dev = { .platform_data = &bfin_lq035q1_data, }, }; #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { 
.start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, #ifdef CONFIG_BFIN_UART0_CTSRTS { /* CTS pin */ .start = GPIO_PG7, .end = GPIO_PG7, .flags = IORESOURCE_IO, }, { /* RTS pin */ .start = GPIO_PG6, .end = GPIO_PG6, .flags = IORESOURCE_IO, }, #endif }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = CH_UART1_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, 
.num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static struct resource bfin_twi0_resource[] = { [0] = { .start = TWI0_REGBASE, .end = TWI0_REGBASE, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TWI, .end = IRQ_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device i2c_bfin_twi_device = { .name = "i2c-bfin-twi", .id = 0, .num_resources = ARRAY_SIZE(bfin_twi0_resource), .resource = bfin_twi0_resource, }; #endif #if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE) static const unsigned short adp5588_keymap[ADP5588_KEYMAPSIZE] = { [0] = KEY_GRAVE, [1] = KEY_1, [2] = KEY_2, [3] = KEY_3, [4] = KEY_4, [5] = KEY_5, [6] = KEY_6, [7] = KEY_7, [8] = KEY_8, [9] = KEY_9, [10] = KEY_0, [11] = KEY_MINUS, [12] = KEY_EQUAL, [13] = KEY_BACKSLASH, [15] = KEY_KP0, [16] = KEY_Q, [17] = KEY_W, [18] = KEY_E, [19] = KEY_R, [20] = KEY_T, [21] = KEY_Y, [22] = KEY_U, [23] = KEY_I, [24] = KEY_O, [25] = KEY_P, [26] = KEY_LEFTBRACE, [27] = KEY_RIGHTBRACE, [29] = KEY_KP1, [30] = KEY_KP2, [31] = KEY_KP3, [32] = KEY_A, [33] = KEY_S, [34] = KEY_D, [35] = KEY_F, [36] = KEY_G, [37] = KEY_H, [38] = KEY_J, [39] = KEY_K, [40] = KEY_L, [41] = KEY_SEMICOLON, [42] = KEY_APOSTROPHE, [43] = KEY_BACKSLASH, [45] = KEY_KP4, [46] = KEY_KP5, [47] = KEY_KP6, [48] = KEY_102ND, [49] = KEY_Z, [50] = KEY_X, [51] = KEY_C, [52] = KEY_V, [53] = KEY_B, [54] = KEY_N, [55] = KEY_M, [56] = 
KEY_COMMA, [57] = KEY_DOT, [58] = KEY_SLASH, [60] = KEY_KPDOT, [61] = KEY_KP7, [62] = KEY_KP8, [63] = KEY_KP9, [64] = KEY_SPACE, [65] = KEY_BACKSPACE, [66] = KEY_TAB, [67] = KEY_KPENTER, [68] = KEY_ENTER, [69] = KEY_ESC, [70] = KEY_DELETE, [74] = KEY_KPMINUS, [76] = KEY_UP, [77] = KEY_DOWN, [78] = KEY_RIGHT, [79] = KEY_LEFT, }; static struct adp5588_kpad_platform_data adp5588_kpad_data = { .rows = 8, .cols = 10, .keymap = adp5588_keymap, .keymapsize = ARRAY_SIZE(adp5588_keymap), .repeat = 0, }; #endif #if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) #include <linux/mfd/adp5520.h> /* * ADP5520/5501 Backlight Data */ static struct adp5520_backlight_platform_data adp5520_backlight_data = { .fade_in = ADP5520_FADE_T_1200ms, .fade_out = ADP5520_FADE_T_1200ms, .fade_led_law = ADP5520_BL_LAW_LINEAR, .en_ambl_sens = 1, .abml_filt = ADP5520_BL_AMBL_FILT_640ms, .l1_daylight_max = ADP5520_BL_CUR_mA(15), .l1_daylight_dim = ADP5520_BL_CUR_mA(0), .l2_office_max = ADP5520_BL_CUR_mA(7), .l2_office_dim = ADP5520_BL_CUR_mA(0), .l3_dark_max = ADP5520_BL_CUR_mA(3), .l3_dark_dim = ADP5520_BL_CUR_mA(0), .l2_trip = ADP5520_L2_COMP_CURR_uA(700), .l2_hyst = ADP5520_L2_COMP_CURR_uA(50), .l3_trip = ADP5520_L3_COMP_CURR_uA(80), .l3_hyst = ADP5520_L3_COMP_CURR_uA(20), }; /* * ADP5520/5501 LEDs Data */ static struct led_info adp5520_leds[] = { { .name = "adp5520-led1", .default_trigger = "none", .flags = FLAG_ID_ADP5520_LED1_ADP5501_LED0 | ADP5520_LED_OFFT_600ms, }, #ifdef ADP5520_EN_ALL_LEDS { .name = "adp5520-led2", .default_trigger = "none", .flags = FLAG_ID_ADP5520_LED2_ADP5501_LED1, }, { .name = "adp5520-led3", .default_trigger = "none", .flags = FLAG_ID_ADP5520_LED3_ADP5501_LED2, }, #endif }; static struct adp5520_leds_platform_data adp5520_leds_data = { .num_leds = ARRAY_SIZE(adp5520_leds), .leds = adp5520_leds, .fade_in = ADP5520_FADE_T_600ms, .fade_out = ADP5520_FADE_T_600ms, .led_on_time = ADP5520_LED_ONT_600ms, }; /* * ADP5520 GPIO Data */ static struct 
adp5520_gpio_platform_data adp5520_gpio_data = { .gpio_start = 50, .gpio_en_mask = ADP5520_GPIO_C1 | ADP5520_GPIO_C2 | ADP5520_GPIO_R2, .gpio_pullup_mask = ADP5520_GPIO_C1 | ADP5520_GPIO_C2 | ADP5520_GPIO_R2, }; /* * ADP5520 Keypad Data */ static const unsigned short adp5520_keymap[ADP5520_KEYMAPSIZE] = { [ADP5520_KEY(0, 0)] = KEY_GRAVE, [ADP5520_KEY(0, 1)] = KEY_1, [ADP5520_KEY(0, 2)] = KEY_2, [ADP5520_KEY(0, 3)] = KEY_3, [ADP5520_KEY(1, 0)] = KEY_4, [ADP5520_KEY(1, 1)] = KEY_5, [ADP5520_KEY(1, 2)] = KEY_6, [ADP5520_KEY(1, 3)] = KEY_7, [ADP5520_KEY(2, 0)] = KEY_8, [ADP5520_KEY(2, 1)] = KEY_9, [ADP5520_KEY(2, 2)] = KEY_0, [ADP5520_KEY(2, 3)] = KEY_MINUS, [ADP5520_KEY(3, 0)] = KEY_EQUAL, [ADP5520_KEY(3, 1)] = KEY_BACKSLASH, [ADP5520_KEY(3, 2)] = KEY_BACKSPACE, [ADP5520_KEY(3, 3)] = KEY_ENTER, }; static struct adp5520_keys_platform_data adp5520_keys_data = { .rows_en_mask = ADP5520_ROW_R3 | ADP5520_ROW_R2 | ADP5520_ROW_R1 | ADP5520_ROW_R0, .cols_en_mask = ADP5520_COL_C3 | ADP5520_COL_C2 | ADP5520_COL_C1 | ADP5520_COL_C0, .keymap = adp5520_keymap, .keymapsize = ARRAY_SIZE(adp5520_keymap), .repeat = 0, }; /* * ADP5520/5501 Multifunction Device Init Data */ static struct adp5520_platform_data adp5520_pdev_data = { .backlight = &adp5520_backlight_data, .leds = &adp5520_leds_data, .gpio = &adp5520_gpio_data, .keys = &adp5520_keys_data, }; #endif #if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE) static struct adp5588_gpio_platform_data adp5588_gpio_data = { .gpio_start = 50, .pullup_dis_mask = 0, }; #endif #if defined(CONFIG_BACKLIGHT_ADP8870) || defined(CONFIG_BACKLIGHT_ADP8870_MODULE) #include <linux/i2c/adp8870.h> static struct led_info adp8870_leds[] = { { .name = "adp8870-led7", .default_trigger = "none", .flags = ADP8870_LED_D7 | ADP8870_LED_OFFT_600ms, }, }; static struct adp8870_backlight_platform_data adp8870_pdata = { .bl_led_assign = ADP8870_BL_D1 | ADP8870_BL_D2 | ADP8870_BL_D3 | ADP8870_BL_D4 | ADP8870_BL_D5 | ADP8870_BL_D6, /* 1 = 
Backlight 0 = Individual LED */ .pwm_assign = 0, /* 1 = Enables PWM mode */ .bl_fade_in = ADP8870_FADE_T_1200ms, /* Backlight Fade-In Timer */ .bl_fade_out = ADP8870_FADE_T_1200ms, /* Backlight Fade-Out Timer */ .bl_fade_law = ADP8870_FADE_LAW_CUBIC1, /* fade-on/fade-off transfer characteristic */ .en_ambl_sens = 1, /* 1 = enable ambient light sensor */ .abml_filt = ADP8870_BL_AMBL_FILT_320ms, /* Light sensor filter time */ .l1_daylight_max = ADP8870_BL_CUR_mA(20), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l1_daylight_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l2_bright_max = ADP8870_BL_CUR_mA(14), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l2_bright_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l3_office_max = ADP8870_BL_CUR_mA(6), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l3_office_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l4_indoor_max = ADP8870_BL_CUR_mA(3), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l4_indor_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l5_dark_max = ADP8870_BL_CUR_mA(2), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l5_dark_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l2_trip = ADP8870_L2_COMP_CURR_uA(710), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ .l2_hyst = ADP8870_L2_COMP_CURR_uA(73), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ .l3_trip = ADP8870_L3_COMP_CURR_uA(389), /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ .l3_hyst = ADP8870_L3_COMP_CURR_uA(54), /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ .l4_trip = ADP8870_L4_COMP_CURR_uA(167), /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ .l4_hyst = ADP8870_L4_COMP_CURR_uA(16), /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ .l5_trip = ADP8870_L5_COMP_CURR_uA(43), /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */ .l5_hyst = ADP8870_L5_COMP_CURR_uA(11), /* use L6_COMP_CURR_uA(I) 0 <= I <= 138 uA */ .leds = adp8870_leds, .num_leds = 
ARRAY_SIZE(adp8870_leds), .led_fade_law = ADP8870_FADE_LAW_SQUARE, /* fade-on/fade-off transfer characteristic */ .led_fade_in = ADP8870_FADE_T_600ms, .led_fade_out = ADP8870_FADE_T_600ms, .led_on_time = ADP8870_LED_ONT_200ms, }; #endif #if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE) #include <linux/i2c/adp8860.h> static struct led_info adp8860_leds[] = { { .name = "adp8860-led7", .default_trigger = "none", .flags = ADP8860_LED_D7 | ADP8860_LED_OFFT_600ms, }, }; static struct adp8860_backlight_platform_data adp8860_pdata = { .bl_led_assign = ADP8860_BL_D1 | ADP8860_BL_D2 | ADP8860_BL_D3 | ADP8860_BL_D4 | ADP8860_BL_D5 | ADP8860_BL_D6, /* 1 = Backlight 0 = Individual LED */ .bl_fade_in = ADP8860_FADE_T_1200ms, /* Backlight Fade-In Timer */ .bl_fade_out = ADP8860_FADE_T_1200ms, /* Backlight Fade-Out Timer */ .bl_fade_law = ADP8860_FADE_LAW_CUBIC1, /* fade-on/fade-off transfer characteristic */ .en_ambl_sens = 1, /* 1 = enable ambient light sensor */ .abml_filt = ADP8860_BL_AMBL_FILT_320ms, /* Light sensor filter time */ .l1_daylight_max = ADP8860_BL_CUR_mA(20), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l1_daylight_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l2_office_max = ADP8860_BL_CUR_mA(6), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l2_office_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l3_dark_max = ADP8860_BL_CUR_mA(2), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l3_dark_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ .l2_trip = ADP8860_L2_COMP_CURR_uA(710), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ .l2_hyst = ADP8860_L2_COMP_CURR_uA(73), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ .l3_trip = ADP8860_L3_COMP_CURR_uA(43), /* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */ .l3_hyst = ADP8860_L3_COMP_CURR_uA(11), /* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */ .leds = adp8860_leds, .num_leds = ARRAY_SIZE(adp8860_leds), .led_fade_law = 
ADP8860_FADE_LAW_SQUARE, /* fade-on/fade-off transfer characteristic */ .led_fade_in = ADP8860_FADE_T_600ms, .led_fade_out = ADP8860_FADE_T_600ms, .led_on_time = ADP8860_LED_ONT_200ms, }; #endif #if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) static struct regulator_consumer_supply ad5398_consumer = { .supply = "current", }; static struct regulator_init_data ad5398_regulator_data = { .constraints = { .name = "current range", .max_uA = 120000, .valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 1, .consumer_supplies = &ad5398_consumer, }; #if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \ defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE) static struct platform_device ad5398_virt_consumer_device = { .name = "reg-virt-consumer", .id = 0, .dev = { .platform_data = "current", /* Passed to driver */ }, }; #endif #if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE) static struct regulator_bulk_data ad5398_bulk_data = { .supply = "current", }; static struct regulator_userspace_consumer_data ad5398_userspace_comsumer_data = { .name = "ad5398", .num_supplies = 1, .supplies = &ad5398_bulk_data, }; static struct platform_device ad5398_userspace_consumer_device = { .name = "reg-userspace-consumer", .id = 0, .dev = { .platform_data = &ad5398_userspace_comsumer_data, }, }; #endif #endif #if defined(CONFIG_ADT7410) || defined(CONFIG_ADT7410_MODULE) /* INT bound temperature alarm event. line 1 */ static unsigned long adt7410_platform_data[2] = { IRQ_PG4, IRQF_TRIGGER_LOW, }; #endif #if defined(CONFIG_ADT7316_I2C) || defined(CONFIG_ADT7316_I2C_MODULE) /* INT bound temperature alarm event. 
line 1 */ static unsigned long adt7316_i2c_data[2] = { IRQF_TRIGGER_LOW, /* interrupt flags */ GPIO_PF4, /* ldac_pin, 0 means DAC/LDAC registers control DAC update */ }; #endif static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #if defined(CONFIG_SND_BF5XX_SOC_AD193X) || defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE) { I2C_BOARD_INFO("ad1937", 0x04), }, #endif #if defined(CONFIG_SND_BF5XX_SOC_ADAV80X) || defined(CONFIG_SND_BF5XX_SOC_ADAV80X_MODULE) { I2C_BOARD_INFO("adav803", 0x10), }, #endif #if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE) { I2C_BOARD_INFO("ad7142_captouch", 0x2C), .irq = IRQ_PG5, .platform_data = (void *)&ad7142_i2c_platform_data, }, #endif #if defined(CONFIG_AD7150) || defined(CONFIG_AD7150_MODULE) { I2C_BOARD_INFO("ad7150", 0x48), .irq = IRQ_PG5, /* fixme: use real interrupt number */ }, #endif #if defined(CONFIG_AD7152) || defined(CONFIG_AD7152_MODULE) { I2C_BOARD_INFO("ad7152", 0x48), }, #endif #if defined(CONFIG_AD774X) || defined(CONFIG_AD774X_MODULE) { I2C_BOARD_INFO("ad774x", 0x48), }, #endif #if defined(CONFIG_AD7414) || defined(CONFIG_AD7414_MODULE) { I2C_BOARD_INFO("ad7414", 0x9), .irq = IRQ_PG5, .irq_flags = IRQF_TRIGGER_LOW, }, #endif #if defined(CONFIG_AD7416) || defined(CONFIG_AD7416_MODULE) { I2C_BOARD_INFO("ad7417", 0xb), .irq = IRQ_PG5, .irq_flags = IRQF_TRIGGER_LOW, .platform_data = (void *)GPIO_PF4, }, #endif #if defined(CONFIG_ADE7854_I2C) || defined(CONFIG_ADE7854_I2C_MODULE) { I2C_BOARD_INFO("ade7854", 0x38), }, #endif #if defined(CONFIG_ADT75) || defined(CONFIG_ADT75_MODULE) { I2C_BOARD_INFO("adt75", 0x9), .irq = IRQ_PG5, .irq_flags = IRQF_TRIGGER_LOW, }, #endif #if defined(CONFIG_ADT7408) || defined(CONFIG_ADT7408_MODULE) { I2C_BOARD_INFO("adt7408", 0x18), .irq = IRQ_PG5, .irq_flags = IRQF_TRIGGER_LOW, }, #endif #if defined(CONFIG_ADT7410) || defined(CONFIG_ADT7410_MODULE) { I2C_BOARD_INFO("adt7410", 0x48), /* CT critical temperature event. 
line 0 */ .irq = IRQ_PG5, .irq_flags = IRQF_TRIGGER_LOW, .platform_data = (void *)&adt7410_platform_data, }, #endif #if defined(CONFIG_AD7291) || defined(CONFIG_AD7291_MODULE) { I2C_BOARD_INFO("ad7291", 0x20), .irq = IRQ_PG5, .irq_flags = IRQF_TRIGGER_LOW, }, #endif #if defined(CONFIG_ADT7316_I2C) || defined(CONFIG_ADT7316_I2C_MODULE) { I2C_BOARD_INFO("adt7316", 0x48), .irq = IRQ_PG6, .platform_data = (void *)&adt7316_i2c_data, }, #endif #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { I2C_BOARD_INFO("pcf8574_lcd", 0x22), }, #endif #if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) { I2C_BOARD_INFO("pcf8574_keypad", 0x27), .irq = IRQ_PG6, }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE) { I2C_BOARD_INFO("ad7879", 0x2F), .irq = IRQ_PG5, .platform_data = (void *)&bfin_ad7879_ts_info, }, #endif #if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE) { I2C_BOARD_INFO("adp5588-keys", 0x34), .irq = IRQ_PG0, .platform_data = (void *)&adp5588_kpad_data, }, #endif #if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) { I2C_BOARD_INFO("pmic-adp5520", 0x32), .irq = IRQ_PG0, .platform_data = (void *)&adp5520_pdev_data, }, #endif #if defined(CONFIG_INPUT_ADXL34X_I2C) || defined(CONFIG_INPUT_ADXL34X_I2C_MODULE) { I2C_BOARD_INFO("adxl34x", 0x53), .irq = IRQ_PG3, .platform_data = (void *)&adxl34x_info, }, #endif #if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE) { I2C_BOARD_INFO("adp5588-gpio", 0x34), .platform_data = (void *)&adp5588_gpio_data, }, #endif #if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE) { I2C_BOARD_INFO("bfin-adv7393", 0x2B), }, #endif #if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) { I2C_BOARD_INFO("bf537-lq035-ad5280", 0x2F), }, #endif #if defined(CONFIG_BACKLIGHT_ADP8870) || defined(CONFIG_BACKLIGHT_ADP8870_MODULE) { I2C_BOARD_INFO("adp8870", 0x2B), 
.platform_data = (void *)&adp8870_pdata, }, #endif #if defined(CONFIG_SND_SOC_ADAU1371) || defined(CONFIG_SND_SOC_ADAU1371_MODULE) { I2C_BOARD_INFO("adau1371", 0x1A), }, #endif #if defined(CONFIG_SND_SOC_ADAU1761) || defined(CONFIG_SND_SOC_ADAU1761_MODULE) { I2C_BOARD_INFO("adau1761", 0x38), }, #endif #if defined(CONFIG_SND_SOC_ADAU1361) || defined(CONFIG_SND_SOC_ADAU1361_MODULE) { I2C_BOARD_INFO("adau1361", 0x38), }, #endif #if defined(CONFIG_AD525X_DPOT) || defined(CONFIG_AD525X_DPOT_MODULE) { I2C_BOARD_INFO("ad5258", 0x18), }, #endif #if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE) { I2C_BOARD_INFO("ssm2602", 0x1b), }, #endif #if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) { I2C_BOARD_INFO("ad5398", 0xC), .platform_data = (void *)&ad5398_regulator_data, }, #endif #if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE) { I2C_BOARD_INFO("adp8860", 0x2A), .platform_data = (void *)&adp8860_pdata, }, #endif #if defined(CONFIG_SND_SOC_ADAU1373) || defined(CONFIG_SND_SOC_ADAU1373_MODULE) { I2C_BOARD_INFO("adau1373", 0x1A), }, #endif #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { I2C_BOARD_INFO("ad5252", 0x2e), }, #endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART static struct resource bfin_sport0_uart_resources[] = { { .start = SPORT0_TCR1, .end = SPORT0_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT0_RX, .end = IRQ_SPORT0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport0_peripherals[] = { P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 }; static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), .resource = 
bfin_sport0_uart_resources, .dev = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { .start = SPORT1_TCR1, .end = SPORT1_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT1_RX, .end = IRQ_SPORT1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport1_peripherals[] = { P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), .resource = bfin_sport1_uart_resources, .dev = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) #define CF_IDE_NAND_CARD_USE_HDD_INTERFACE /* #define CF_IDE_NAND_CARD_USE_CF_IN_COMMON_MEMORY_MODE */ #ifdef CF_IDE_NAND_CARD_USE_HDD_INTERFACE #define PATA_INT IRQ_PF5 static struct pata_platform_info bfin_pata_platform_data = { .ioport_shift = 1, .irq_flags = IRQF_TRIGGER_HIGH | IRQF_DISABLED, }; static struct resource bfin_pata_resources[] = { { .start = 0x20314020, .end = 0x2031403F, .flags = IORESOURCE_MEM, }, { .start = 0x2031401C, .end = 0x2031401F, .flags = IORESOURCE_MEM, }, { .start = PATA_INT, .end = PATA_INT, .flags = IORESOURCE_IRQ, }, }; #elif defined(CF_IDE_NAND_CARD_USE_CF_IN_COMMON_MEMORY_MODE) static struct pata_platform_info bfin_pata_platform_data = { .ioport_shift = 0, }; /* CompactFlash Storage Card Memory Mapped Addressing * /REG = A11 = 1 */ static struct resource bfin_pata_resources[] = { { .start = 0x20211800, .end = 0x20211807, .flags = IORESOURCE_MEM, }, { .start = 0x2021180E, /* Device Ctl */ .end = 0x2021180E, .flags = IORESOURCE_MEM, }, }; #endif static struct platform_device bfin_pata_device = 
{ .name = "pata_platform", .id = -1, .num_resources = ARRAY_SIZE(bfin_pata_resources), .resource = bfin_pata_resources, .dev = { .platform_data = &bfin_pata_platform_data, } }; #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_085, 250000000), VRPAIR(VLEV_090, 376000000), VRPAIR(VLEV_095, 426000000), VRPAIR(VLEV_100, 426000000), VRPAIR(VLEV_105, 476000000), VRPAIR(VLEV_110, 476000000), VRPAIR(VLEV_115, 476000000), VRPAIR(VLEV_120, 500000000), VRPAIR(VLEV_125, 533000000), VRPAIR(VLEV_130, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \ defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \ defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) #define SPORT_REQ(x) \ [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \ P_SPORT##x##_RFS, P_SPORT##x##_DRPRI, P_SPORT##x##_RSCLK, 0} static const u16 bfin_snd_pin[][7] = { SPORT_REQ(0), SPORT_REQ(1), }; static struct bfin_snd_platform_data bfin_snd_data[] = { { .pin_req = &bfin_snd_pin[0][0], }, { .pin_req = &bfin_snd_pin[1][0], }, }; #define BFIN_SND_RES(x) \ [x] = { \ { \ .start = SPORT##x##_TCR1, \ .end = SPORT##x##_TCR1, \ .flags = IORESOURCE_MEM \ }, \ { \ .start = CH_SPORT##x##_RX, \ .end = CH_SPORT##x##_RX, \ .flags = IORESOURCE_DMA, \ }, \ { \ .start = CH_SPORT##x##_TX, \ .end = CH_SPORT##x##_TX, \ .flags = IORESOURCE_DMA, \ }, \ { \ .start = IRQ_SPORT##x##_ERROR, \ .end = IRQ_SPORT##x##_ERROR, \ .flags = IORESOURCE_IRQ, \ } \ } static struct resource bfin_snd_resources[][4] = { BFIN_SND_RES(0), BFIN_SND_RES(1), }; static struct platform_device bfin_pcm = { .name = "bfin-pcm-audio", .id = -1, }; #endif #if 
defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE) static struct platform_device bfin_ad73311_codec_device = { .name = "ad73311", .id = -1, }; #endif #if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE) static struct platform_device bfin_i2s = { .name = "bfin-i2s", .id = CONFIG_SND_BF5XX_SPORT_NUM, .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]), .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM], .dev = { .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM], }, }; #endif #if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE) static struct platform_device bfin_tdm = { .name = "bfin-tdm", .id = CONFIG_SND_BF5XX_SPORT_NUM, .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]), .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM], .dev = { .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM], }, }; #endif #if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE) static struct platform_device bfin_ac97 = { .name = "bfin-ac97", .id = CONFIG_SND_BF5XX_SPORT_NUM, .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]), .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM], .dev = { .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM], }, }; #endif #if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE) #define REGULATOR_ADP122 "adp122" #define REGULATOR_ADP122_UV 2500000 static struct regulator_consumer_supply adp122_consumers = { .supply = REGULATOR_ADP122, }; static struct regulator_init_data adp_switch_regulator_data = { .constraints = { .name = REGULATOR_ADP122, .valid_ops_mask = REGULATOR_CHANGE_STATUS, .min_uV = REGULATOR_ADP122_UV, .max_uV = REGULATOR_ADP122_UV, .min_uA = 0, .max_uA = 300000, }, .num_consumer_supplies = 1, /* only 1 */ .consumer_supplies = &adp122_consumers, }; static struct 
fixed_voltage_config adp_switch_pdata = { .supply_name = REGULATOR_ADP122, .microvolts = REGULATOR_ADP122_UV, .gpio = GPIO_PF2, .enable_high = 1, .enabled_at_boot = 0, .init_data = &adp_switch_regulator_data, }; static struct platform_device adp_switch_device = { .name = "reg-fixed-voltage", .id = 0, .dev = { .platform_data = &adp_switch_pdata, }, }; #if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE) static struct regulator_bulk_data adp122_bulk_data = { .supply = REGULATOR_ADP122, }; static struct regulator_userspace_consumer_data adp122_userspace_comsumer_data = { .name = REGULATOR_ADP122, .num_supplies = 1, .supplies = &adp122_bulk_data, }; static struct platform_device adp122_userspace_consumer_device = { .name = "reg-userspace-consumer", .id = 0, .dev = { .platform_data = &adp122_userspace_comsumer_data, }, }; #endif #endif #if defined(CONFIG_IIO_GPIO_TRIGGER) || \ defined(CONFIG_IIO_GPIO_TRIGGER_MODULE) static struct resource iio_gpio_trigger_resources[] = { [0] = { .start = IRQ_PF5, .end = IRQ_PF5, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, }, }; static struct platform_device iio_gpio_trigger = { .name = "iio_gpio_trigger", .num_resources = ARRAY_SIZE(iio_gpio_trigger_resources), .resource = iio_gpio_trigger_resources, }; #endif static struct platform_device *stamp_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) &bfin_pcmcia_cf_device, #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) &sl811_hcd_device, #endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) &isp1362_hcd_device, #endif #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) &bfin_isp1760_device, #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) &smc91x_device, #endif #if 
defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) &dm9000_device, #endif #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) &bfin_can_device, #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) &bfin_mii_bus, &bfin_mac_device, #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) &net2272_bfin_device, #endif #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_SPI_BFIN_SPORT) || defined(CONFIG_SPI_BFIN_SPORT_MODULE) &bfin_sport_spi0_device, &bfin_sport_spi1_device, #endif #if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) &bfin_fb_device, #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) &bfin_lq035q1_device, #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 &bfin_sir1_device, #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) &i2c_bfin_twi_device, #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) &bfin_pata_device, #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, #endif #if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) &bfin_async_nand_device, #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) &stamp_flash_device, #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || 
\ defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \ defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) &bfin_pcm, #endif #if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE) &bfin_ad73311_codec_device, #endif #if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE) &bfin_i2s, #endif #if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE) &bfin_tdm, #endif #if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE) &bfin_ac97, #endif #if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) #if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \ defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE) &ad5398_virt_consumer_device, #endif #if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE) &ad5398_userspace_consumer_device, #endif #endif #if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE) &adp_switch_device, #if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE) &adp122_userspace_consumer_device, #endif #endif #if defined(CONFIG_IIO_GPIO_TRIGGER) || \ defined(CONFIG_IIO_GPIO_TRIGGER_MODULE) &iio_gpio_trigger, #endif }; static int __init stamp_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); bfin_plat_nand_init(); adf702x_mac_init(); platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); i2c_register_board_info(0, bfin_i2c_board_info, ARRAY_SIZE(bfin_i2c_board_info)); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); return 0; } arch_initcall(stamp_init); static struct platform_device *stamp_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 
&bfin_uart1_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(stamp_early_devices, ARRAY_SIZE(stamp_early_devices)); } void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ if ((bfin_read_SYSCR() & 0x7) == 0x3) bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); } /* * Currently the MAC address is saved in Flash by U-Boot */ #define FLASH_MAC 0x203f0000 void bfin_get_ether_addr(char *addr) { *(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC); *(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4); } EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
ReconInstruments/jet_kernel
arch/arm/mach-s3c64xx/cpufreq.c
2444
6481
/* linux/arch/arm/plat-s3c64xx/cpufreq.c * * Copyright 2009 Wolfson Microelectronics plc * * S3C64xx CPUfreq Support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/regulator/consumer.h> static struct clk *armclk; static struct regulator *vddarm; static unsigned long regulator_latency; #ifdef CONFIG_CPU_S3C6410 struct s3c64xx_dvfs { unsigned int vddarm_min; unsigned int vddarm_max; }; static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = { [0] = { 1000000, 1150000 }, [1] = { 1050000, 1150000 }, [2] = { 1100000, 1150000 }, [3] = { 1200000, 1350000 }, }; static struct cpufreq_frequency_table s3c64xx_freq_table[] = { { 0, 66000 }, { 0, 133000 }, { 1, 222000 }, { 1, 266000 }, { 2, 333000 }, { 2, 400000 }, { 2, 532000 }, { 2, 533000 }, { 3, 667000 }, { 0, CPUFREQ_TABLE_END }, }; #endif static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy) { if (policy->cpu != 0) return -EINVAL; return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table); } static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu) { if (cpu != 0) return 0; return clk_get_rate(armclk) / 1000; } static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { int ret; unsigned int i; struct cpufreq_freqs freqs; struct s3c64xx_dvfs *dvfs; ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table, target_freq, relation, &i); if (ret != 0) return ret; freqs.cpu = 0; freqs.old = clk_get_rate(armclk) / 1000; freqs.new = s3c64xx_freq_table[i].frequency; freqs.flags = 0; dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index]; if (freqs.old == freqs.new) return 0; pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new); 
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); #ifdef CONFIG_REGULATOR if (vddarm && freqs.new > freqs.old) { ret = regulator_set_voltage(vddarm, dvfs->vddarm_min, dvfs->vddarm_max); if (ret != 0) { pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n", freqs.new, ret); goto err; } } #endif ret = clk_set_rate(armclk, freqs.new * 1000); if (ret < 0) { pr_err("cpufreq: Failed to set rate %dkHz: %d\n", freqs.new, ret); goto err; } #ifdef CONFIG_REGULATOR if (vddarm && freqs.new < freqs.old) { ret = regulator_set_voltage(vddarm, dvfs->vddarm_min, dvfs->vddarm_max); if (ret != 0) { pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n", freqs.new, ret); goto err_clk; } } #endif cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); pr_debug("cpufreq: Set actual frequency %lukHz\n", clk_get_rate(armclk) / 1000); return 0; err_clk: if (clk_set_rate(armclk, freqs.old * 1000) < 0) pr_err("Failed to restore original clock rate\n"); err: cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); return ret; } #ifdef CONFIG_REGULATOR static void __init s3c64xx_cpufreq_config_regulator(void) { int count, v, i, found; struct cpufreq_frequency_table *freq; struct s3c64xx_dvfs *dvfs; count = regulator_count_voltages(vddarm); if (count < 0) { pr_err("cpufreq: Unable to check supported voltages\n"); } freq = s3c64xx_freq_table; while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) { if (freq->frequency == CPUFREQ_ENTRY_INVALID) continue; dvfs = &s3c64xx_dvfs_table[freq->index]; found = 0; for (i = 0; i < count; i++) { v = regulator_list_voltage(vddarm, i); if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max) found = 1; } if (!found) { pr_debug("cpufreq: %dkHz unsupported by regulator\n", freq->frequency); freq->frequency = CPUFREQ_ENTRY_INVALID; } freq++; } /* Guess based on having to do an I2C/SPI write; in future we * will be able to query the regulator performance here. 
*/ regulator_latency = 1 * 1000 * 1000; } #endif static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) { int ret; struct cpufreq_frequency_table *freq; if (policy->cpu != 0) return -EINVAL; if (s3c64xx_freq_table == NULL) { pr_err("cpufreq: No frequency information for this CPU\n"); return -ENODEV; } armclk = clk_get(NULL, "armclk"); if (IS_ERR(armclk)) { pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n", PTR_ERR(armclk)); return PTR_ERR(armclk); } #ifdef CONFIG_REGULATOR vddarm = regulator_get(NULL, "vddarm"); if (IS_ERR(vddarm)) { ret = PTR_ERR(vddarm); pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret); pr_err("cpufreq: Only frequency scaling available\n"); vddarm = NULL; } else { s3c64xx_cpufreq_config_regulator(); } #endif freq = s3c64xx_freq_table; while (freq->frequency != CPUFREQ_TABLE_END) { unsigned long r; /* Check for frequencies we can generate */ r = clk_round_rate(armclk, freq->frequency * 1000); r /= 1000; if (r != freq->frequency) { pr_debug("cpufreq: %dkHz unsupported by clock\n", freq->frequency); freq->frequency = CPUFREQ_ENTRY_INVALID; } /* If we have no regulator then assume startup * frequency is the maximum we can support. */ if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0)) freq->frequency = CPUFREQ_ENTRY_INVALID; freq++; } policy->cur = clk_get_rate(armclk) / 1000; /* Datasheet says PLL stabalisation time (if we were to use * the PLLs, which we don't currently) is ~300us worst case, * but add some fudge. 
*/ policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency; ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table); if (ret != 0) { pr_err("cpufreq: Failed to configure frequency table: %d\n", ret); regulator_put(vddarm); clk_put(armclk); } return ret; } static struct cpufreq_driver s3c64xx_cpufreq_driver = { .owner = THIS_MODULE, .flags = 0, .verify = s3c64xx_cpufreq_verify_speed, .target = s3c64xx_cpufreq_set_target, .get = s3c64xx_cpufreq_get_speed, .init = s3c64xx_cpufreq_driver_init, .name = "s3c", }; static int __init s3c64xx_cpufreq_init(void) { return cpufreq_register_driver(&s3c64xx_cpufreq_driver); } module_init(s3c64xx_cpufreq_init);
gpl-2.0
SMAICP/kernel_amazon_otter-common
arch/sh/kernel/cpu/sh4a/clock-sh7724.c
2444
12529
/*
 * arch/sh/kernel/cpu/sh4a/clock-sh7724.c
 *
 * SH7724 clock framework support
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/hwblk.h>
#include <cpu/sh7724.h>

/* SH7724 registers */
#define FRQCRA		0xa4150000
#define FRQCRB		0xa4150004
#define VCLKCR		0xa4150048
#define FCLKACR		0xa4150008
#define FCLKBCR		0xa415000c
#define IRDACLKCR	0xa4150018
#define PLLCR		0xa4150024
#define SPUCLKCR	0xa415003c
#define FLLFRQ		0xa4150050
#define LSTATS		0xa4150060

/* Fixed 32 KHz root clock for RTC and Power Management purposes */
static struct clk r_clk = {
	.rate           = 32768,
};

/*
 * Default rate for the root input clock, reset this with clk_set_rate()
 * from the platform code.
 */
static struct clk extal_clk = {
	.rate		= 33333333,
};

/* The fll multiplies the 32khz r_clk, may be used instead of extal */
static unsigned long fll_recalc(struct clk *clk)
{
	unsigned long mult = 0;
	unsigned long div = 1;

	if (__raw_readl(PLLCR) & 0x1000)
		mult = __raw_readl(FLLFRQ) & 0x3ff;

	if (__raw_readl(FLLFRQ) & 0x4000)
		div = 2;

	return (clk->parent->rate * mult) / div;
}

static struct clk_ops fll_clk_ops = {
	.recalc		= fll_recalc,
};

static struct clk fll_clk = {
	.ops		= &fll_clk_ops,
	.parent		= &r_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/* PLL rate from FRQCRA multiplier bits; x1 when the PLL is bypassed */
static unsigned long pll_recalc(struct clk *clk)
{
	unsigned long mult = 1;

	if (__raw_readl(PLLCR) & 0x4000)
		mult = (((__raw_readl(FRQCRA) >> 24) & 0x3f) + 1) * 2;

	return clk->parent->rate * mult;
}

static struct clk_ops pll_clk_ops = {
	.recalc		= pll_recalc,
};

static struct clk pll_clk = {
	.ops		= &pll_clk_ops,
	.flags		= CLK_ENABLE_ON_INIT,
};

/* A fixed divide-by-3 block use by the div6 clocks */
static unsigned long div3_recalc(struct clk *clk)
{
	return clk->parent->rate / 3;
}

static struct clk_ops div3_clk_ops = {
	.recalc		= div3_recalc,
};

static struct clk div3_clk = {
	.ops		= &div3_clk_ops,
	.parent		= &pll_clk,
};

/* External input clock (pin name: FSIMCKA/FSIMCKB ) */
struct clk sh7724_fsimcka_clk = {
};

struct clk sh7724_fsimckb_clk = {
};

static struct clk *main_clks[] = {
	&r_clk,
	&extal_clk,
	&fll_clk,
	&pll_clk,
	&div3_clk,
	&sh7724_fsimcka_clk,
	&sh7724_fsimckb_clk,
};

static void div4_kick(struct clk *clk)
{
	unsigned long value;

	/* set KICK bit in FRQCRA to update hardware setting */
	value = __raw_readl(FRQCRA);
	/* Bug fix: "1 << 31" shifts into the sign bit of a 32-bit int,
	 * which is undefined behaviour in C; shift an unsigned long. */
	value |= (1UL << 31);
	__raw_writel(value, FRQCRA);
}

static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 };

static struct clk_div_mult_table div4_div_mult_table = {
	.divisors = divisors,
	.nr_divisors = ARRAY_SIZE(divisors),
};

static struct clk_div4_table div4_table = {
	.div_mult_table = &div4_div_mult_table,
	.kick = div4_kick,
};

enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR };

#define DIV4(_reg, _bit, _mask, _flags) \
  SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)

struct clk div4_clks[DIV4_NR] = {
	[DIV4_I] = DIV4(FRQCRA, 20, 0x2f7d, CLK_ENABLE_ON_INIT),
	[DIV4_SH] = DIV4(FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT),
	[DIV4_B] = DIV4(FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT),
	[DIV4_P] = DIV4(FRQCRA, 0, 0x2f7c, 0),
	[DIV4_M1] = DIV4(FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
};

enum { DIV6_V, DIV6_I, DIV6_S, DIV6_NR };

static struct clk div6_clks[DIV6_NR] = {
	[DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
	[DIV6_I] = SH_CLK_DIV6(&div3_clk, IRDACLKCR, 0),
	[DIV6_S] = SH_CLK_DIV6(&div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT),
};

enum { DIV6_FA, DIV6_FB, DIV6_REPARENT_NR };

/* Indices are important - they are the actual src selecting values */
static struct clk *fclkacr_parent[] = {
	[0] = &div3_clk,
	[1] = NULL,
	[2] = &sh7724_fsimcka_clk,
	[3] = NULL,
};

static struct clk *fclkbcr_parent[] = {
	[0] = &div3_clk,
	[1] = NULL,
	[2] = &sh7724_fsimckb_clk,
	[3] = NULL,
};

static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
	[DIV6_FA] = SH_CLK_DIV6_EXT(&div3_clk, FCLKACR, 0,
				    fclkacr_parent, ARRAY_SIZE(fclkacr_parent), 6, 2),
	[DIV6_FB] = SH_CLK_DIV6_EXT(&div3_clk, FCLKBCR, 0,
				    fclkbcr_parent, ARRAY_SIZE(fclkbcr_parent), 6, 2),
};

/* Module stop (MSTP) gate clocks, indexed by hardware block number */
static struct clk mstp_clks[HWBLK_NR] = {
	SH_HWBLK_CLK(HWBLK_TLB, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
	SH_HWBLK_CLK(HWBLK_IC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
	SH_HWBLK_CLK(HWBLK_OC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
	SH_HWBLK_CLK(HWBLK_RSMEM, &div4_clks[DIV4_B], CLK_ENABLE_ON_INIT),
	SH_HWBLK_CLK(HWBLK_ILMEM, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
	SH_HWBLK_CLK(HWBLK_L2C, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
	SH_HWBLK_CLK(HWBLK_FPU, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
	SH_HWBLK_CLK(HWBLK_INTC, &div4_clks[DIV4_P], CLK_ENABLE_ON_INIT),
	SH_HWBLK_CLK(HWBLK_DMAC0, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_SHYWAY, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
	SH_HWBLK_CLK(HWBLK_HUDI, &div4_clks[DIV4_P], 0),
	SH_HWBLK_CLK(HWBLK_UBC, &div4_clks[DIV4_I], 0),
	SH_HWBLK_CLK(HWBLK_TMU0, &div4_clks[DIV4_P], 0),
	SH_HWBLK_CLK(HWBLK_CMT, &r_clk, 0),
	SH_HWBLK_CLK(HWBLK_RWDT, &r_clk, 0),
	SH_HWBLK_CLK(HWBLK_DMAC1, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_TMU1, &div4_clks[DIV4_P], 0),
	SH_HWBLK_CLK(HWBLK_SCIF0, &div4_clks[DIV4_P], 0),
	SH_HWBLK_CLK(HWBLK_SCIF1, &div4_clks[DIV4_P], 0),
	SH_HWBLK_CLK(HWBLK_SCIF2, &div4_clks[DIV4_P], 0),
	SH_HWBLK_CLK(HWBLK_SCIF3, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_SCIF4, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_SCIF5, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_MSIOF0, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_MSIOF1, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_KEYSC, &r_clk, 0),
	SH_HWBLK_CLK(HWBLK_RTC, &r_clk, 0),
	SH_HWBLK_CLK(HWBLK_IIC0, &div4_clks[DIV4_P], 0),
	SH_HWBLK_CLK(HWBLK_IIC1, &div4_clks[DIV4_P], 0),
	SH_HWBLK_CLK(HWBLK_MMC, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_ETHER, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_ATAPI, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_TPU, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_IRDA, &div4_clks[DIV4_P], 0),
	SH_HWBLK_CLK(HWBLK_TSIF, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_USB1, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_USB0, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_2DG, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_SDHI0, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_SDHI1, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_VEU1, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_CEU1, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_BEU1, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_2DDMAC, &div4_clks[DIV4_SH], 0),
	SH_HWBLK_CLK(HWBLK_SPU, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_JPU, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_VOU, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_BEU0, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_CEU0, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_VEU0, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_VPU, &div4_clks[DIV4_B], 0),
	SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_B], 0),
};

#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }

static struct clk_lookup lookups[] = {
	/* main clocks */
	CLKDEV_CON_ID("rclk", &r_clk),
	CLKDEV_CON_ID("extal", &extal_clk),
	CLKDEV_CON_ID("fll_clk", &fll_clk),
	CLKDEV_CON_ID("pll_clk", &pll_clk),
	CLKDEV_CON_ID("div3_clk", &div3_clk),

	/* DIV4 clocks */
	CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
	CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
	CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
	CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
	CLKDEV_CON_ID("vpu_clk", &div4_clks[DIV4_M1]),

	/* DIV6 clocks */
	CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
	CLKDEV_CON_ID("fsia_clk", &div6_reparent_clks[DIV6_FA]),
	CLKDEV_CON_ID("fsib_clk", &div6_reparent_clks[DIV6_FB]),
	CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]),
	CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]),

	/* MSTP clocks */
	CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
	CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
	CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
	CLKDEV_CON_ID("rs0", &mstp_clks[HWBLK_RSMEM]),
	CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
	CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
	CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
	CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
	CLKDEV_CON_ID("dmac0", &mstp_clks[HWBLK_DMAC0]),
	CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
	CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
	CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
	{
		/* TMU0 */
		.dev_id		= "sh_tmu.0",
		.con_id		= "tmu_fck",
		.clk		= &mstp_clks[HWBLK_TMU0],
	}, {
		/* TMU1 */
		.dev_id		= "sh_tmu.1",
		.con_id		= "tmu_fck",
		.clk		= &mstp_clks[HWBLK_TMU0],
	}, {
		/* TMU2 */
		.dev_id		= "sh_tmu.2",
		.con_id		= "tmu_fck",
		.clk		= &mstp_clks[HWBLK_TMU0],
	}, {
		/* TMU3 */
		.dev_id		= "sh_tmu.3",
		.con_id		= "tmu_fck",
		.clk		= &mstp_clks[HWBLK_TMU1],
	},
	CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
	CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
	CLKDEV_CON_ID("dmac1", &mstp_clks[HWBLK_DMAC1]),
	{
		/* TMU4 */
		.dev_id		= "sh_tmu.4",
		.con_id		= "tmu_fck",
		.clk		= &mstp_clks[HWBLK_TMU1],
	}, {
		/* TMU5 */
		.dev_id		= "sh_tmu.5",
		.con_id		= "tmu_fck",
		.clk		= &mstp_clks[HWBLK_TMU1],
	}, {
		/* SCIF0 */
		.dev_id		= "sh-sci.0",
		.con_id		= "sci_fck",
		.clk		= &mstp_clks[HWBLK_SCIF0],
	}, {
		/* SCIF1 */
		.dev_id		= "sh-sci.1",
		.con_id		= "sci_fck",
		.clk		= &mstp_clks[HWBLK_SCIF1],
	}, {
		/* SCIF2 */
		.dev_id		= "sh-sci.2",
		.con_id		= "sci_fck",
		.clk		= &mstp_clks[HWBLK_SCIF2],
	}, {
		/* SCIF3 */
		.dev_id		= "sh-sci.3",
		.con_id		= "sci_fck",
		.clk		= &mstp_clks[HWBLK_SCIF3],
	}, {
		/* SCIF4 */
		.dev_id		= "sh-sci.4",
		.con_id		= "sci_fck",
		.clk		= &mstp_clks[HWBLK_SCIF4],
	}, {
		/* SCIF5 */
		.dev_id		= "sh-sci.5",
		.con_id		= "sci_fck",
		.clk		= &mstp_clks[HWBLK_SCIF5],
	},
	CLKDEV_CON_ID("msiof0", &mstp_clks[HWBLK_MSIOF0]),
	CLKDEV_CON_ID("msiof1", &mstp_clks[HWBLK_MSIOF1]),
	CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
	CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
	CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC0]),
	CLKDEV_CON_ID("i2c1", &mstp_clks[HWBLK_IIC1]),
	CLKDEV_CON_ID("mmc0", &mstp_clks[HWBLK_MMC]),
	CLKDEV_CON_ID("eth0", &mstp_clks[HWBLK_ETHER]),
	CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
	CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
	CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
	CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
	CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]),
	CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]),
	CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
	CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI0]),
	CLKDEV_CON_ID("sdhi1", &mstp_clks[HWBLK_SDHI1]),
	CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU1]),
	CLKDEV_CON_ID("ceu1", &mstp_clks[HWBLK_CEU1]),
	CLKDEV_CON_ID("beu1", &mstp_clks[HWBLK_BEU1]),
	CLKDEV_CON_ID("2ddmac0", &mstp_clks[HWBLK_2DDMAC]),
	CLKDEV_CON_ID("spu0", &mstp_clks[HWBLK_SPU]),
	CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
	CLKDEV_CON_ID("vou0", &mstp_clks[HWBLK_VOU]),
	CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU0]),
	CLKDEV_CON_ID("ceu0", &mstp_clks[HWBLK_CEU0]),
	CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU0]),
	CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
	CLKDEV_CON_ID("lcdc0", &mstp_clks[HWBLK_LCDC]),
};

/* Register all clocks with the SH clock framework and clkdev. */
int __init arch_clk_init(void)
{
	int k, ret = 0;

	/* autodetect extal or fll configuration */
	if (__raw_readl(PLLCR) & 0x1000)
		pll_clk.parent = &fll_clk;
	else
		pll_clk.parent = &extal_clk;

	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
		ret = clk_register(main_clks[k]);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);

	if (!ret)
		ret = sh_clk_div6_register(div6_clks, DIV6_NR);

	if (!ret)
		ret = sh_clk_div6_reparent_register(div6_reparent_clks,
						    DIV6_REPARENT_NR);

	if (!ret)
		ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);

	return ret;
}
gpl-2.0
shakalaca/ASUS_ZenFone_ZE601KL
kernel/drivers/net/wireless/mwifiex/ethtool.c
2700
2361
/*
 * Marvell Wireless LAN device driver: ethtool
 *
 * Copyright (C) 2013, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */

#include "main.h"

/*
 * Report the device's Wake-on-LAN capabilities and the sources that
 * are currently enabled, translated from the firmware host-sleep
 * condition word.
 */
static void mwifiex_ethtool_get_wol(struct net_device *dev,
				    struct ethtool_wolinfo *wol)
{
	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
	u32 cond = le32_to_cpu(priv->adapter->hs_cfg.conditions);

	wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_PHY;

	/* The default condition word means no wake source is armed. */
	if (cond == HS_CFG_COND_DEF)
		return;

	wol->wolopts |= (cond & HS_CFG_COND_UNICAST_DATA) ? WAKE_UCAST : 0;
	wol->wolopts |= (cond & HS_CFG_COND_MULTICAST_DATA) ? WAKE_MCAST : 0;
	wol->wolopts |= (cond & HS_CFG_COND_BROADCAST_DATA) ? WAKE_BCAST : 0;
	wol->wolopts |= (cond & HS_CFG_COND_MAC_EVENT) ? WAKE_PHY : 0;
}

/*
 * Program the requested Wake-on-LAN sources into the host-sleep
 * configuration, translating ethtool WAKE_* flags to the firmware's
 * HS_CFG_COND_* bits.  Returns -EOPNOTSUPP for unsupported sources.
 */
static int mwifiex_ethtool_set_wol(struct net_device *dev,
				   struct ethtool_wolinfo *wol)
{
	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
	u32 cond = 0;

	/* Reject any wake source we cannot arm in firmware. */
	if (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_PHY))
		return -EOPNOTSUPP;

	cond |= (wol->wolopts & WAKE_UCAST) ? HS_CFG_COND_UNICAST_DATA : 0;
	cond |= (wol->wolopts & WAKE_MCAST) ? HS_CFG_COND_MULTICAST_DATA : 0;
	cond |= (wol->wolopts & WAKE_BCAST) ? HS_CFG_COND_BROADCAST_DATA : 0;
	cond |= (wol->wolopts & WAKE_PHY) ? HS_CFG_COND_MAC_EVENT : 0;

	/* No sources selected: fall back to the firmware default. */
	if (wol->wolopts == 0)
		cond |= HS_CFG_COND_DEF;

	priv->adapter->hs_cfg.conditions = cpu_to_le32(cond);

	return 0;
}

const struct ethtool_ops mwifiex_ethtool_ops = {
	.get_wol = mwifiex_ethtool_get_wol,
	.set_wol = mwifiex_ethtool_set_wol,
};
gpl-2.0
MikeC84/mac_kernel_lge_hammerhead
fs/nfsd/vfs.c
3212
55598
/* * File operations used by nfsd. Some of these have been ripped from * other parts of the kernel because they weren't exported, others * are partial duplicates with added or changed functionality. * * Note that several functions dget() the dentry upon which they want * to act, most notably those that create directory entries. Response * dentry's are dput()'d if necessary in the release callback. * So if you notice code paths that apparently fail to dput() the * dentry, don't worry--they have been taken care of. * * Copyright (C) 1995-1999 Olaf Kirch <okir@monad.swb.de> * Zerocpy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp> */ #include <linux/fs.h> #include <linux/file.h> #include <linux/splice.h> #include <linux/fcntl.h> #include <linux/namei.h> #include <linux/delay.h> #include <linux/fsnotify.h> #include <linux/posix_acl_xattr.h> #include <linux/xattr.h> #include <linux/jhash.h> #include <linux/ima.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/exportfs.h> #include <linux/writeback.h> #ifdef CONFIG_NFSD_V3 #include "xdr3.h" #endif /* CONFIG_NFSD_V3 */ #ifdef CONFIG_NFSD_V4 #include "acl.h" #include "idmap.h" #endif /* CONFIG_NFSD_V4 */ #include "nfsd.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_FILEOP /* * This is a cache of readahead params that help us choose the proper * readahead strategy. Initially, we set all readahead parameters to 0 * and let the VFS handle things. * If you increase the number of cached files very much, you'll need to * add a hash table here. 
*/ struct raparms { struct raparms *p_next; unsigned int p_count; ino_t p_ino; dev_t p_dev; int p_set; struct file_ra_state p_ra; unsigned int p_hindex; }; struct raparm_hbucket { struct raparms *pb_head; spinlock_t pb_lock; } ____cacheline_aligned_in_smp; #define RAPARM_HASH_BITS 4 #define RAPARM_HASH_SIZE (1<<RAPARM_HASH_BITS) #define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1) static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE]; /* * Called from nfsd_lookup and encode_dirent. Check if we have crossed * a mount point. * Returns -EAGAIN or -ETIMEDOUT leaving *dpp and *expp unchanged, * or nfs_ok having possibly changed *dpp and *expp */ int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, struct svc_export **expp) { struct svc_export *exp = *expp, *exp2 = NULL; struct dentry *dentry = *dpp; struct path path = {.mnt = mntget(exp->ex_path.mnt), .dentry = dget(dentry)}; int err = 0; err = follow_down(&path); if (err < 0) goto out; exp2 = rqst_exp_get_by_name(rqstp, &path); if (IS_ERR(exp2)) { err = PTR_ERR(exp2); /* * We normally allow NFS clients to continue * "underneath" a mountpoint that is not exported. * The exception is V4ROOT, where no traversal is ever * allowed without an explicit export of the new * directory. */ if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT)) err = 0; path_put(&path); goto out; } if (nfsd_v4client(rqstp) || (exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) { /* successfully crossed mount point */ /* * This is subtle: path.dentry is *not* on path.mnt * at this point. 
The only reason we are safe is that * original mnt is pinned down by exp, so we should * put path *before* putting exp */ *dpp = path.dentry; path.dentry = dentry; *expp = exp2; exp2 = exp; } path_put(&path); exp_put(exp2); out: return err; } static void follow_to_parent(struct path *path) { struct dentry *dp; while (path->dentry == path->mnt->mnt_root && follow_up(path)) ; dp = dget_parent(path->dentry); dput(path->dentry); path->dentry = dp; } static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, struct svc_export **exp, struct dentry **dentryp) { struct svc_export *exp2; struct path path = {.mnt = mntget((*exp)->ex_path.mnt), .dentry = dget(dparent)}; follow_to_parent(&path); exp2 = rqst_exp_parent(rqstp, &path); if (PTR_ERR(exp2) == -ENOENT) { *dentryp = dget(dparent); } else if (IS_ERR(exp2)) { path_put(&path); return PTR_ERR(exp2); } else { *dentryp = dget(path.dentry); exp_put(*exp); *exp = exp2; } path_put(&path); return 0; } /* * For nfsd purposes, we treat V4ROOT exports as though there was an * export at *every* directory. */ int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp) { if (d_mountpoint(dentry)) return 1; if (nfsd4_is_junction(dentry)) return 1; if (!(exp->ex_flags & NFSEXP_V4ROOT)) return 0; return dentry->d_inode != NULL; } __be32 nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name, unsigned int len, struct svc_export **exp_ret, struct dentry **dentry_ret) { struct svc_export *exp; struct dentry *dparent; struct dentry *dentry; int host_err; dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name); dparent = fhp->fh_dentry; exp = fhp->fh_export; exp_get(exp); /* Lookup the name, but don't follow links */ if (isdotent(name, len)) { if (len==1) dentry = dget(dparent); else if (dparent != exp->ex_path.dentry) dentry = dget_parent(dparent); else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp)) dentry = dget(dparent); /* .. == . 
just like at / */ else { /* checking mountpoint crossing is very different when stepping up */ host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry); if (host_err) goto out_nfserr; } } else { fh_lock(fhp); dentry = lookup_one_len(name, dparent, len); host_err = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out_nfserr; /* * check if we have crossed a mount point ... */ if (nfsd_mountpoint(dentry, exp)) { if ((host_err = nfsd_cross_mnt(rqstp, &dentry, &exp))) { dput(dentry); goto out_nfserr; } } } *dentry_ret = dentry; *exp_ret = exp; return 0; out_nfserr: exp_put(exp); return nfserrno(host_err); } /* * Look up one component of a pathname. * N.B. After this call _both_ fhp and resfh need an fh_put * * If the lookup would cross a mountpoint, and the mounted filesystem * is exported to the client with NFSEXP_NOHIDE, then the lookup is * accepted as it stands and the mounted directory is * returned. Otherwise the covered directory is returned. * NOTE: this mountpoint crossing is not supported properly by all * clients and is explicitly disallowed for NFSv3 * NeilBrown <neilb@cse.unsw.edu.au> */ __be32 nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name, unsigned int len, struct svc_fh *resfh) { struct svc_export *exp; struct dentry *dentry; __be32 err; err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC); if (err) return err; err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry); if (err) return err; err = check_nfsd_access(exp, rqstp); if (err) goto out; /* * Note: we compose the file handle now, but as the * dentry may be negative, it may need to be updated. */ err = fh_compose(resfh, exp, dentry, fhp); if (!err && !dentry->d_inode) err = nfserr_noent; out: dput(dentry); exp_put(exp); return err; } static int nfsd_break_lease(struct inode *inode) { if (!S_ISREG(inode->i_mode)) return 0; return break_lease(inode, O_WRONLY | O_NONBLOCK); } /* * Commit metadata changes to stable storage. 
 */
/* Flush dirty metadata for fhp's inode — a no-op unless the export is sync. */
static int commit_metadata(struct svc_fh *fhp)
{
	struct inode *inode = fhp->fh_dentry->d_inode;
	const struct export_operations *export_ops = inode->i_sb->s_export_op;

	/* Async exports defer flushing to the client's COMMIT. */
	if (!EX_ISSYNC(fhp->fh_export))
		return 0;

	/* Prefer the filesystem's own commit hook when it provides one. */
	if (export_ops->commit_metadata)
		return export_ops->commit_metadata(inode);
	return sync_inode_metadata(inode, 1);
}

/*
 * Set various file attributes.
 * N.B. After this call fhp needs an fh_put
 */
__be32
nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
	     int check_guard, time_t guardtime)
{
	struct dentry	*dentry;
	struct inode	*inode;
	int		accmode = NFSD_MAY_SATTR;
	umode_t		ftype = 0;
	__be32		err;
	int		host_err;
	int		size_change = 0;

	/* Touching times or size implies a write; allow owner override. */
	if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
		accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
	/* Size changes only make sense on regular files. */
	if (iap->ia_valid & ATTR_SIZE)
		ftype = S_IFREG;

	/* Get inode */
	err = fh_verify(rqstp, fhp, ftype, accmode);
	if (err)
		goto out;

	dentry = fhp->fh_dentry;
	inode = dentry->d_inode;

	/* Ignore any mode updates on symlinks */
	if (S_ISLNK(inode->i_mode))
		iap->ia_valid &= ~ATTR_MODE;

	if (!iap->ia_valid)
		goto out;

	/*
	 * NFSv2 does not differentiate between "set-[ac]time-to-now"
	 * which only requires access, and "set-[ac]time-to-X" which
	 * requires ownership.
	 * So if it looks like it might be "set both to the same time which
	 * is close to now", and if inode_change_ok fails, then we
	 * convert to "set to now" instead of "set to explicit time"
	 *
	 * We only call inode_change_ok as the last test as technically
	 * it is not an interface that we should be using.  It is only
	 * valid if the filesystem does not define it's own i_op->setattr.
	 */
#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
#define	MAX_TOUCH_TIME_ERROR (30*60)
	if ((iap->ia_valid & BOTH_TIME_SET) == BOTH_TIME_SET &&
	    iap->ia_mtime.tv_sec == iap->ia_atime.tv_sec) {
		/*
		 * Looks probable.
		 *
		 * Now just make sure time is in the right ballpark.
		 * Solaris, at least, doesn't seem to care what the time
		 * request is.  We require it be within 30 minutes of now.
		 */
		time_t delta = iap->ia_atime.tv_sec - get_seconds();
		if (delta < 0)
			delta = -delta;
		if (delta < MAX_TOUCH_TIME_ERROR &&
		    inode_change_ok(inode, iap) != 0) {
			/*
			 * Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
			 * This will cause notify_change to set these times
			 * to "now"
			 */
			iap->ia_valid &= ~BOTH_TIME_SET;
		}
	}

	/*
	 * The size case is special.
	 * It changes the file as well as the attributes.
	 */
	if (iap->ia_valid & ATTR_SIZE) {
		/* Shrinking requires truncate permission in addition. */
		if (iap->ia_size < inode->i_size) {
			err = nfsd_permission(rqstp, fhp->fh_export, dentry,
					NFSD_MAY_TRUNC|NFSD_MAY_OWNER_OVERRIDE);
			if (err)
				goto out;
		}

		host_err = get_write_access(inode);
		if (host_err)
			goto out_nfserr;

		/* Remember to drop write access on every later exit path. */
		size_change = 1;
		host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
		if (host_err) {
			put_write_access(inode);
			goto out_nfserr;
		}
	}

	/* sanitize the mode change */
	if (iap->ia_valid & ATTR_MODE) {
		iap->ia_mode &= S_IALLUGO;
		iap->ia_mode |= (inode->i_mode & ~S_IALLUGO);
	}

	/* Revoke setuid/setgid on chown */
	if (!S_ISDIR(inode->i_mode) &&
	    (((iap->ia_valid & ATTR_UID) && iap->ia_uid != inode->i_uid) ||
	     ((iap->ia_valid & ATTR_GID) && iap->ia_gid != inode->i_gid))) {
		iap->ia_valid |= ATTR_KILL_PRIV;
		if (iap->ia_valid & ATTR_MODE) {
			/* we're setting mode too, just clear the s*id bits */
			iap->ia_mode &= ~S_ISUID;
			if (iap->ia_mode & S_IXGRP)
				iap->ia_mode &= ~S_ISGID;
		} else {
			/* set ATTR_KILL_* bits and let VFS handle it */
			iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
		}
	}

	/* Change the attributes.
	 */
	iap->ia_valid |= ATTR_CTIME;

	/* NFSv3 guard: only apply if the client's ctime guess matches. */
	err = nfserr_notsync;
	if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
		host_err = nfsd_break_lease(inode);
		if (host_err)
			goto out_nfserr;
		fh_lock(fhp);

		host_err = notify_change(dentry, iap);
		err = nfserrno(host_err);
		fh_unlock(fhp);
	}
	if (size_change)
		put_write_access(inode);
	if (!err)
		commit_metadata(fhp);
out:
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out;
}

#if defined(CONFIG_NFSD_V2_ACL) || \
    defined(CONFIG_NFSD_V3_ACL) || \
    defined(CONFIG_NFSD_V4)
/*
 * Fetch xattr "key" into a freshly kmalloc'd buffer (*buf).
 * Caller owns and must kfree *buf on success; on error it is
 * already freed here.  Returns the value length or a -errno.
 */
static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
{
	ssize_t buflen;
	ssize_t ret;

	/* First call with NULL just queries the needed size. */
	buflen = vfs_getxattr(dentry, key, NULL, 0);
	if (buflen <= 0)
		return buflen;

	*buf = kmalloc(buflen, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;

	ret = vfs_getxattr(dentry, key, *buf, buflen);
	if (ret < 0)
		kfree(*buf);
	return ret;
}
#endif

#if defined(CONFIG_NFSD_V4)
/* Serialize one POSIX ACL into the named xattr on dentry. */
static int
set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
{
	int len;
	size_t buflen;
	char *buf = NULL;
	int error = 0;

	buflen = posix_acl_xattr_size(pacl->a_count);
	buf = kmalloc(buflen, GFP_KERNEL);
	error = -ENOMEM;
	if (buf == NULL)
		goto out;

	len = posix_acl_to_xattr(pacl, buf, buflen);
	if (len < 0) {
		error = len;
		goto out;
	}

	error = vfs_setxattr(dentry, key, buf, len, 0);
out:
	kfree(buf);
	return error;
}

/*
 * Store an NFSv4 ACL by converting it to POSIX access (and, for
 * directories, default) ACLs and writing those as xattrs.
 */
__be32
nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
		struct nfs4_acl *acl)
{
	__be32 error;
	int host_error;
	struct dentry *dentry;
	struct inode *inode;
	struct posix_acl *pacl = NULL, *dpacl = NULL;
	unsigned int flags = 0;

	/* Get inode */
	error = fh_verify(rqstp, fhp, 0, NFSD_MAY_SATTR);
	if (error)
		return error;

	dentry = fhp->fh_dentry;
	inode = dentry->d_inode;
	if (S_ISDIR(inode->i_mode))
		flags = NFS4_ACL_DIR;

	host_error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags);
	if (host_error == -EINVAL) {
		/* ACL cannot be mapped to POSIX form at all. */
		return nfserr_attrnotsupp;
	} else if (host_error < 0)
		goto out_nfserr;

	host_error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS);
	if (host_error < 0)
		goto out_release;

	if (S_ISDIR(inode->i_mode))
		host_error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT);

out_release:
	posix_acl_release(pacl);
	posix_acl_release(dpacl);
out_nfserr:
	if (host_error == -EOPNOTSUPP)
		return nfserr_attrnotsupp;
	else
		return nfserrno(host_error);
}

/* Read the named ACL xattr and decode it; ERR_PTR(-ENODATA) if absent. */
static struct posix_acl *
_get_posix_acl(struct dentry *dentry, char *key)
{
	void *buf = NULL;
	struct posix_acl *pacl = NULL;
	int buflen;

	buflen = nfsd_getxattr(dentry, key, &buf);
	if (!buflen)
		buflen = -ENODATA;
	if (buflen <= 0)
		return ERR_PTR(buflen);

	pacl = posix_acl_from_xattr(buf, buflen);
	kfree(buf);
	return pacl;
}

/*
 * Build an NFSv4 ACL for the object from its POSIX access ACL (falling
 * back to the mode bits) plus, for directories, its default ACL.
 * On success *acl is a freshly allocated nfs4_acl owned by the caller.
 */
int nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_acl **acl)
{
	struct inode *inode = dentry->d_inode;
	int error = 0;
	struct posix_acl *pacl = NULL, *dpacl = NULL;
	unsigned int flags = 0;

	pacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_ACCESS);
	if (IS_ERR(pacl) && PTR_ERR(pacl) == -ENODATA)
		/* No ACL stored: synthesize one from the mode bits. */
		pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
	if (IS_ERR(pacl)) {
		error = PTR_ERR(pacl);
		pacl = NULL;
		goto out;
	}

	if (S_ISDIR(inode->i_mode)) {
		dpacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_DEFAULT);
		if (IS_ERR(dpacl) && PTR_ERR(dpacl) == -ENODATA)
			dpacl = NULL;	/* missing default ACL is fine */
		else if (IS_ERR(dpacl)) {
			error = PTR_ERR(dpacl);
			dpacl = NULL;
			goto out;
		}
		flags = NFS4_ACL_DIR;
	}

	*acl = nfs4_acl_posix_to_nfsv4(pacl, dpacl, flags);
	if (IS_ERR(*acl)) {
		error = PTR_ERR(*acl);
		*acl = NULL;
	}
out:
	posix_acl_release(pacl);
	posix_acl_release(dpacl);
	return error;
}

/*
 * NFS junction information is stored in an extended attribute.
 */
#define NFSD_JUNCTION_XATTR_NAME	XATTR_TRUSTED_PREFIX "junction.nfs"

/**
 * nfsd4_is_junction - Test if an object could be an NFS junction
 *
 * @dentry: object to test
 *
 * Returns 1 if "dentry" appears to contain NFS junction information.
 * Otherwise 0 is returned.
 */
int nfsd4_is_junction(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode == NULL)
		return 0;
	/* Junction heuristic: sticky bit set, no execute bits, and the
	 * junction xattr present. */
	if (inode->i_mode & S_IXUGO)
		return 0;
	if (!(inode->i_mode & S_ISVTX))
		return 0;
	if (vfs_getxattr(dentry, NFSD_JUNCTION_XATTR_NAME, NULL, 0) <= 0)
		return 0;
	return 1;
}
#endif /* defined(CONFIG_NFSD_V4) */

#ifdef CONFIG_NFSD_V3
/*
 * Check server access rights to a file system object.
 * Each table maps an NFS3_ACCESS_* query bit to the NFSD_MAY_*
 * permission(s) that must hold for it to be granted.
 */
struct accessmap {
	u32		access;	/* NFS3_ACCESS_* bit being queried */
	int		how;	/* NFSD_MAY_* flags to check */
};

static struct accessmap	nfs3_regaccess[] = {
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_EXECUTE,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_WRITE|NFSD_MAY_TRUNC	},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_WRITE			},

    {	0,			0				}
};

static struct accessmap	nfs3_diraccess[] = {
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_LOOKUP,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_EXEC|NFSD_MAY_WRITE	},
    {	NFS3_ACCESS_DELETE,	NFSD_MAY_REMOVE			},

    {	0,			0				}
};

static struct accessmap	nfs3_anyaccess[] = {
	/* Some clients - Solaris 2.6 at least, make an access call
	 * to the server to check for access for things like /dev/null
	 * (which really, the server doesn't care about).  So
	 * We provide simple access checking for them, looking
	 * mainly at mode bits, and we make sure to ignore read-only
	 * filesystem checks
	 */
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_EXECUTE,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS	},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS	},

    {	0,			0				}
};

/*
 * Answer an NFSv3 ACCESS query: *access is the bitmask asked about;
 * on return it holds the granted subset, and *supported (if non-NULL)
 * the subset of bits this object type supports at all.
 */
__be32
nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *supported)
{
	struct accessmap	*map;
	struct svc_export	*export;
	struct dentry		*dentry;
	u32			query, result = 0, sresult = 0;
	__be32			error;

	error = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
	if (error)
		goto out;

	export = fhp->fh_export;
	dentry = fhp->fh_dentry;

	/* Pick the map matching the object type. */
	if (S_ISREG(dentry->d_inode->i_mode))
		map = nfs3_regaccess;
	else if (S_ISDIR(dentry->d_inode->i_mode))
		map = nfs3_diraccess;
	else
		map = nfs3_anyaccess;

	query = *access;
	for (; map->access; map++) {
		if (map->access & query) {
			__be32 err2;

			sresult |= map->access;

			err2 = nfsd_permission(rqstp, export, dentry, map->how);
			switch (err2) {
			case nfs_ok:
				result |= map->access;
				break;

			/* the following error codes just mean the access was not allowed,
			 * rather than an error occurred */
			case nfserr_rofs:
			case nfserr_acces:
			case nfserr_perm:
				/* simply don't "or" in the access bit. */
				break;
			default:
				error = err2;
				goto out;
			}
		}
	}
	*access = result;
	if (supported)
		*supported = sresult;

 out:
	return error;
}
#endif /* CONFIG_NFSD_V3 */

/* Break any lease conflicting with the requested access, non-blocking. */
static int nfsd_open_break_lease(struct inode *inode, int access)
{
	unsigned int mode;

	if (access & NFSD_MAY_NOT_BREAK_LEASE)
		return 0;
	mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
	return break_lease(inode, mode | O_NONBLOCK);
}

/*
 * Open an existing file or directory.
 * The may_flags argument indicates the type of open (read/write/lock)
 * and additional flags.
 * N.B.
 * After this call fhp needs an fh_put
 */
__be32
nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
			int may_flags, struct file **filp)
{
	struct dentry	*dentry;
	struct inode	*inode;
	int		flags = O_RDONLY|O_LARGEFILE;
	__be32		err;
	int		host_err = 0;

	validate_process_creds();

	/*
	 * If we get here, then the client has already done an "open",
	 * and (hopefully) checked permission - so allow OWNER_OVERRIDE
	 * in case a chmod has now revoked permission.
	 */
	err = fh_verify(rqstp, fhp, type, may_flags | NFSD_MAY_OWNER_OVERRIDE);
	if (err)
		goto out;

	dentry = fhp->fh_dentry;
	inode = dentry->d_inode;

	/* Disallow write access to files with the append-only bit set
	 * or any access when mandatory locking enabled
	 */
	err = nfserr_perm;
	if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
		goto out;
	/*
	 * We must ignore files (but only files) which might have mandatory
	 * locks on them because there is no way to know if the accesser has
	 * the lock.
	 */
	if (S_ISREG((inode)->i_mode) && mandatory_lock(inode))
		goto out;

	/* No file operations means the object cannot be opened. */
	if (!inode->i_fop)
		goto out;

	host_err = nfsd_open_break_lease(inode, may_flags);
	if (host_err) /* NOMEM or WOULDBLOCK */
		goto out_nfserr;

	if (may_flags & NFSD_MAY_WRITE) {
		if (may_flags & NFSD_MAY_READ)
			flags = O_RDWR|O_LARGEFILE;
		else
			flags = O_WRONLY|O_LARGEFILE;
	}
	/* dentry_open consumes the dget/mntget references. */
	*filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
			    flags, current_cred());
	if (IS_ERR(*filp))
		host_err = PTR_ERR(*filp);
	else {
		host_err = ima_file_check(*filp, may_flags);

		/* Tag the struct file with the directory-cookie width the
		 * protocol version can carry (see nfsd_readdir). */
		if (may_flags & NFSD_MAY_64BIT_COOKIE)
			(*filp)->f_mode |= FMODE_64BITHASH;
		else
			(*filp)->f_mode |= FMODE_32BITHASH;
	}

out_nfserr:
	err = nfserrno(host_err);
out:
	validate_process_creds();
	return err;
}

/*
 * Close a file.
 */
void
nfsd_close(struct file *filp)
{
	fput(filp);
}

/*
 * Obtain the readahead parameters for the file
 * specified by (dev, ino).
 */
static inline struct raparms *
nfsd_get_raparms(dev_t dev, ino_t ino)
{
	struct raparms	*ra, **rap, **frap = NULL;
	int depth = 0;
	unsigned int hash;
	struct raparm_hbucket *rab;

	hash = jhash_2words(dev, ino, 0xfeedbeef) & RAPARM_HASH_MASK;
	rab = &raparm_hash[hash];

	spin_lock(&rab->pb_lock);
	for (rap = &rab->pb_head; (ra = *rap); rap = &ra->p_next) {
		if (ra->p_ino == ino && ra->p_dev == dev)
			goto found;
		depth++;
		/* Remember the last unused entry as a recycling candidate. */
		if (ra->p_count == 0)
			frap = rap;
	}
	depth = nfsdstats.ra_size;
	if (!frap) {
		spin_unlock(&rab->pb_lock);
		return NULL;
	}
	/* Recycle the free slot for this (dev, ino). */
	rap = frap;
	ra = *frap;
	ra->p_dev = dev;
	ra->p_ino = ino;
	ra->p_set = 0;
	ra->p_hindex = hash;
found:
	/* Move-to-front so hot entries stay cheap to find. */
	if (rap != &rab->pb_head) {
		*rap = ra->p_next;
		ra->p_next   = rab->pb_head;
		rab->pb_head = ra;
	}
	ra->p_count++;
	nfsdstats.ra_depth[depth*10/nfsdstats.ra_size]++;
	spin_unlock(&rab->pb_lock);
	return ra;
}

/*
 * Grab and keep cached pages associated with a file in the svc_rqst
 * so that they can be passed to the network sendmsg/sendpage routines
 * directly. They will be released after the sending has completed.
 */
static int
nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		  struct splice_desc *sd)
{
	struct svc_rqst *rqstp = sd->u.data;
	struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
	struct page *page = buf->page;
	size_t size;

	size = sd->len;

	if (rqstp->rq_res.page_len == 0) {
		/* First page of the reply payload. */
		get_page(page);
		put_page(*pp);
		*pp = page;
		rqstp->rq_resused++;
		rqstp->rq_res.page_base = buf->offset;
		rqstp->rq_res.page_len = size;
	} else if (page != pp[-1]) {
		/* New page: take a reference and append it. */
		get_page(page);
		if (*pp)
			put_page(*pp);
		*pp = page;
		rqstp->rq_resused++;
		rqstp->rq_res.page_len += size;
	} else
		/* Same page as last time; just extend the length. */
		rqstp->rq_res.page_len += size;

	return size;
}

static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
				    struct splice_desc *sd)
{
	return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
}

/*
 * Read *count bytes at offset into vec, preferring zero-copy splice
 * into the RPC reply pages when the fs and transport allow it.
 * On success *count is updated to the bytes actually read.
 */
static __be32
nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
              loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
{
	mm_segment_t	oldfs;
	__be32		err;
	int		host_err;

	err = nfserr_perm;

	if (file->f_op->splice_read && rqstp->rq_splice_ok) {
		struct splice_desc sd = {
			.len		= 0,
			.total_len	= *count,
			.pos		= offset,
			.u.data		= rqstp,
		};

		rqstp->rq_resused = 1;
		host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
	} else {
		/* Fall back to vfs_readv into kernel buffers; the
		 * set_fs dance lets it accept kernel-space iovecs. */
		oldfs = get_fs();
		set_fs(KERNEL_DS);
		host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
		set_fs(oldfs);
	}

	if (host_err >= 0) {
		nfsdstats.io_read += host_err;
		*count = host_err;
		err = 0;
		fsnotify_access(file);
	} else
		err = nfserrno(host_err);
	return err;
}

/* Ask the VFS to strip setuid/setgid/privileges after a write. */
static void kill_suid(struct dentry *dentry)
{
	struct iattr	ia;
	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;

	mutex_lock(&dentry->d_inode->i_mutex);
	notify_change(dentry, &ia);
	mutex_unlock(&dentry->d_inode->i_mutex);
}

/*
 * Gathered writes: If another process is currently writing to the file,
 * there's a high chance this is another nfsd (triggered by a bulk write
 * from a client's biod). Rather than syncing the file with each write
 * request, we sleep for 10 msec.
 *
 * I don't know if this roughly approximates C. Juszak's idea of
 * gathered writes, but it's a nice and simple solution (IMHO), and it
 * seems to work:-)
 *
 * Note: we do this only in the NFSv2 case, since v3 and higher have a
 * better tool (separate unstable writes and commits) for solving this
 * problem.
 */
static int wait_for_concurrent_writes(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	/* static: shared across nfsd threads, tracks the last file synced */
	static ino_t last_ino;
	static dev_t last_dev;
	int err = 0;

	if (atomic_read(&inode->i_writecount) > 1
	    || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
		dprintk("nfsd: write defer %d\n", task_pid_nr(current));
		msleep(10);
		dprintk("nfsd: write resume %d\n", task_pid_nr(current));
	}

	if (inode->i_state & I_DIRTY) {
		dprintk("nfsd: write sync %d\n", task_pid_nr(current));
		err = vfs_fsync(file, 0);
	}
	last_ino = inode->i_ino;
	last_dev = inode->i_sb->s_dev;
	return err;
}

/*
 * Write *cnt bytes from vec at offset.  *stablep is the client's
 * requested stability level and may be upgraded to FILE_SYNC when
 * the fs cannot support COMMIT.  On success *cnt holds bytes written.
 */
static __be32
nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
				loff_t offset, struct kvec *vec, int vlen,
				unsigned long *cnt, int *stablep)
{
	struct svc_export	*exp;
	struct dentry		*dentry;
	struct inode		*inode;
	mm_segment_t		oldfs;
	__be32			err = 0;
	int			host_err;
	int			stable = *stablep;
	int			use_wgather;

	dentry = file->f_path.dentry;
	inode = dentry->d_inode;
	exp   = fhp->fh_export;

	/*
	 * Request sync writes if
	 *  -	the sync export option has been set, or
	 *  -	the client requested O_SYNC behavior (NFSv3 feature).
	 *  -	The file system doesn't support fsync().
	 * When NFSv2 gathered writes have been configured for this volume,
	 * flushing the data to disk is handled separately below.
	 */
	use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp);

	if (!file->f_op->fsync) {/* COMMIT3 cannot work */
	       stable = 2;
	       *stablep = 2; /* FILE_SYNC */
	}

	if (!EX_ISSYNC(exp))
		stable = 0;
	if (stable && !use_wgather) {
		spin_lock(&file->f_lock);
		file->f_flags |= O_SYNC;
		spin_unlock(&file->f_lock);
	}

	/* Write the data. */
	oldfs = get_fs(); set_fs(KERNEL_DS);
	host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
	set_fs(oldfs);
	if (host_err < 0)
		goto out_nfserr;
	*cnt = host_err;
	nfsdstats.io_write += host_err;
	fsnotify_modify(file);

	/* clear setuid/setgid flag after write */
	if (inode->i_mode & (S_ISUID | S_ISGID))
		kill_suid(dentry);

	if (stable && use_wgather)
		host_err = wait_for_concurrent_writes(file);

out_nfserr:
	dprintk("nfsd: write complete host_err=%d\n", host_err);
	if (host_err >= 0)
		err = 0;
	else
		err = nfserrno(host_err);
	return err;
}

/*
 * Read data from a file. count must contain the requested read count
 * on entry. On return, *count contains the number of bytes actually read.
 * N.B. After this call fhp needs an fh_put
 */
__be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
	loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
{
	struct file *file;
	struct inode *inode;
	struct raparms	*ra;
	__be32 err;

	err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (err)
		return err;

	inode = file->f_path.dentry->d_inode;

	/* Get readahead parameters */
	ra = nfsd_get_raparms(inode->i_sb->s_dev, inode->i_ino);

	if (ra && ra->p_set)
		file->f_ra = ra->p_ra;

	err = nfsd_vfs_read(rqstp, fhp, file, offset, vec, vlen, count);

	/* Write back readahead params */
	if (ra) {
		struct raparm_hbucket *rab = &raparm_hash[ra->p_hindex];
		spin_lock(&rab->pb_lock);
		ra->p_ra = file->f_ra;
		ra->p_set = 1;
		ra->p_count--;
		spin_unlock(&rab->pb_lock);
	}

	nfsd_close(file);
	return err;
}

/* As above, but use the provided file descriptor.
 */
__be32 nfsd_read_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
		struct file *file, loff_t offset,
		struct kvec *vec, int vlen, unsigned long *count)
{
	__be32 err;

	if (file) {
		/* Caller-supplied file: re-check read permission here. */
		err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
				NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE);
		if (err)
			goto out;
		err = nfsd_vfs_read(rqstp, fhp, file, offset, vec, vlen, count);
	} else /* Note file may still be NULL in NFSv4 special stateid case: */
		err = nfsd_read(rqstp, fhp, offset, vec, vlen, count);
out:
	return err;
}

/*
 * Write data to a file.
 * The stable flag requests synchronous writes.
 * N.B. After this call fhp needs an fh_put
 */
__be32
nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
		loff_t offset, struct kvec *vec, int vlen, unsigned long *cnt,
		int *stablep)
{
	__be32			err = 0;

	if (file) {
		/* Caller-supplied file: re-check write permission here. */
		err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
				NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE);
		if (err)
			goto out;
		err = nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen, cnt,
				stablep);
	} else {
		err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file);
		if (err)
			goto out;

		if (cnt)
			err = nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen,
					     cnt, stablep);
		nfsd_close(file);
	}
out:
	return err;
}

#ifdef CONFIG_NFSD_V3
/*
 * Commit all pending writes to stable storage.
 *
 * Note: we only guarantee that data that lies within the range specified
 * by the 'offset' and 'count' parameters will be synced.
 *
 * Unfortunately we cannot lock the file to make sure we return full WCC
 * data to the client, as locking happens lower down in the filesystem.
 */
__be32
nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
               loff_t offset, unsigned long count)
{
	struct file	*file;
	loff_t		end = LLONG_MAX;
	__be32		err = nfserr_inval;

	if (offset < 0)
		goto out;
	if (count != 0) {
		/* count == 0 means "to end of file"; otherwise guard
		 * against offset+count overflow. */
		end = offset + (loff_t)count - 1;
		if (end < offset)
			goto out;
	}

	err = nfsd_open(rqstp, fhp, S_IFREG,
			NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &file);
	if (err)
		goto out;
	if (EX_ISSYNC(fhp->fh_export)) {
		int err2 = vfs_fsync_range(file, offset, end, 0);

		if (err2 != -EINVAL)
			err = nfserrno(err2);
		else
			err = nfserr_notsupp;
	}

	nfsd_close(file);
out:
	return err;
}
#endif /* CONFIG_NFSD_V3 */

/* Apply the post-create attribute changes requested by the client. */
static __be32
nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *resfhp,
			struct iattr *iap)
{
	/*
	 * Mode has already been set earlier in create:
	 */
	iap->ia_valid &= ~ATTR_MODE;
	/*
	 * Setting uid/gid works only for root.  Irix appears to
	 * send along the gid on create when it tries to implement
	 * setgid directories via NFS:
	 */
	if (current_fsuid() != 0)
		iap->ia_valid &= ~(ATTR_UID|ATTR_GID);
	if (iap->ia_valid)
		return nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
	return 0;
}

/* HPUX client sometimes creates a file in mode 000, and sets size to 0.
 * setting size to 0 may fail for some specific file systems by the permission
 * checking which requires WRITE permission but the mode is 000.
 * we ignore the resizing(to 0) on the just new created file, since the size is
 * 0 after file created.
 *
 * call this only after vfs_create() is called.
 * */
static void
nfsd_check_ignore_resizing(struct iattr *iap)
{
	if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
		iap->ia_valid &= ~ATTR_SIZE;
}

/*
 * Create a file (regular, directory, device, fifo); UNIX sockets
 * not yet implemented.
 * If the response fh has been verified, the parent directory should
 * already be locked. Note that the parent directory is left locked.
 *
 * N.B.
 * Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
 */
__be32
nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
		char *fname, int flen, struct iattr *iap,
		int type, dev_t rdev, struct svc_fh *resfhp)
{
	struct dentry	*dentry, *dchild = NULL;
	struct inode	*dirp;
	__be32		err;
	__be32		err2;
	int		host_err;

	err = nfserr_perm;
	if (!flen)
		goto out;
	err = nfserr_exist;
	if (isdotent(fname, flen))
		goto out;

	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;

	dentry = fhp->fh_dentry;
	dirp = dentry->d_inode;

	err = nfserr_notdir;
	if (!dirp->i_op->lookup)
		goto out;
	/*
	 * Check whether the response file handle has been verified yet.
	 * If it has, the parent directory should already be locked.
	 */
	if (!resfhp->fh_dentry) {
		/* called from nfsd_proc_mkdir, or possibly nfsd3_proc_create */
		fh_lock_nested(fhp, I_MUTEX_PARENT);
		dchild = lookup_one_len(fname, dentry, flen);
		host_err = PTR_ERR(dchild);
		if (IS_ERR(dchild))
			goto out_nfserr;
		err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
		if (err)
			goto out;
	} else {
		/* called from nfsd_proc_create */
		dchild = dget(resfhp->fh_dentry);
		if (!fhp->fh_locked) {
			/* not actually possible */
			printk(KERN_ERR
				"nfsd_create: parent %s/%s not locked!\n",
				dentry->d_parent->d_name.name,
				dentry->d_name.name);
			err = nfserr_io;
			goto out;
		}
	}
	/*
	 * Make sure the child dentry is still negative ...
	 */
	err = nfserr_exist;
	if (dchild->d_inode) {
		dprintk("nfsd_create: dentry %s/%s not negative!\n",
			dentry->d_name.name, dchild->d_name.name);
		goto out;
	}

	if (!(iap->ia_valid & ATTR_MODE))
		iap->ia_mode = 0;
	/* Fold the requested file type into the mode. */
	iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;

	err = nfserr_inval;
	if (!S_ISREG(type) && !S_ISDIR(type) && !special_file(type)) {
		printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
		       type);
		goto out;
	}

	host_err = fh_want_write(fhp);
	if (host_err)
		goto out_nfserr;

	/*
	 * Get the dir op function pointer.
	 */
	err = 0;
	switch (type) {
	case S_IFREG:
		host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
		if (!host_err)
			nfsd_check_ignore_resizing(iap);
		break;
	case S_IFDIR:
		host_err = vfs_mkdir(dirp, dchild, iap->ia_mode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
		break;
	}
	if (host_err < 0) {
		fh_drop_write(fhp);
		goto out_nfserr;
	}

	err = nfsd_create_setattr(rqstp, resfhp, iap);

	/*
	 * nfsd_setattr already committed the child.  Transactional filesystems
	 * had a chance to commit changes for both parent and child
	 * simultaneously making the following commit_metadata a noop.
	 */
	err2 = nfserrno(commit_metadata(fhp));
	if (err2)
		err = err2;
	fh_drop_write(fhp);
	/*
	 * Update the file handle to get the new inode info.
	 */
	if (!err)
		err = fh_update(resfhp);
out:
	if (dchild && !IS_ERR(dchild))
		dput(dchild);
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out;
}

#ifdef CONFIG_NFSD_V3

static inline int nfsd_create_is_exclusive(int createmode)
{
	return createmode == NFS3_CREATE_EXCLUSIVE
	       || createmode == NFS4_CREATE_EXCLUSIVE4_1;
}

/*
 * NFSv3 and NFSv4 version of nfsd_create
 */
__be32
do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
		char *fname, int flen, struct iattr *iap,
		struct svc_fh *resfhp, int createmode, u32 *verifier,
	        bool *truncp, bool *created)
{
	struct dentry	*dentry, *dchild = NULL;
	struct inode	*dirp;
	__be32		err;
	int		host_err;
	__u32		v_mtime=0, v_atime=0;

	err = nfserr_perm;
	if (!flen)
		goto out;
	err = nfserr_exist;
	if (isdotent(fname, flen))
		goto out;
	if (!(iap->ia_valid & ATTR_MODE))
		iap->ia_mode = 0;
	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
	if (err)
		goto out;

	dentry = fhp->fh_dentry;
	dirp = dentry->d_inode;

	/* Get all the sanity checks out of the way before
	 * we lock the parent. */
	err = nfserr_notdir;
	if (!dirp->i_op->lookup)
		goto out;
	fh_lock_nested(fhp, I_MUTEX_PARENT);

	/*
	 * Compose the response file handle.
	 */
	dchild = lookup_one_len(fname, dentry, flen);
	host_err = PTR_ERR(dchild);
	if (IS_ERR(dchild))
		goto out_nfserr;

	/* If file doesn't exist, check for permissions to create one */
	if (!dchild->d_inode) {
		err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
		if (err)
			goto out;
	}

	err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
	if (err)
		goto out;

	if (nfsd_create_is_exclusive(createmode)) {
		/* solaris7 gets confused (bugid 4218508) if these have
		 * the high bit set, so just clear the high bits. If this is
		 * ever changed to use different attrs for storing the
		 * verifier, then do_open_lookup() will also need to be fixed
		 * accordingly.
		 */
		v_mtime = verifier[0]&0x7fffffff;
		v_atime = verifier[1]&0x7fffffff;
	}

	host_err = fh_want_write(fhp);
	if (host_err)
		goto out_nfserr;

	if (dchild->d_inode) {
		/* Target already exists: outcome depends on createmode. */
		err = 0;

		switch (createmode) {
		case NFS3_CREATE_UNCHECKED:
			if (! S_ISREG(dchild->d_inode->i_mode))
				goto out;
			else if (truncp) {
				/* in nfsv4, we need to treat this case a little
				 * differently.  we don't want to truncate the
				 * file now; this would be wrong if the OPEN
				 * fails for some other reason.  furthermore,
				 * if the size is nonzero, we should ignore it
				 * according to spec!
				 */
				*truncp = (iap->ia_valid & ATTR_SIZE) && !iap->ia_size;
			}
			else {
				iap->ia_valid &= ATTR_SIZE;
				goto set_attr;
			}
			break;
		case NFS3_CREATE_EXCLUSIVE:
			/* Verifier match means this is a retransmission of
			 * our own earlier create — report success. */
			if (   dchild->d_inode->i_mtime.tv_sec == v_mtime
			    && dchild->d_inode->i_atime.tv_sec == v_atime
			    && dchild->d_inode->i_size  == 0 )
				break;
		case NFS4_CREATE_EXCLUSIVE4_1:
			if (   dchild->d_inode->i_mtime.tv_sec == v_mtime
			    && dchild->d_inode->i_atime.tv_sec == v_atime
			    && dchild->d_inode->i_size  == 0 )
				goto set_attr;
			 /* fallthru */
		case NFS3_CREATE_GUARDED:
			err = nfserr_exist;
		}
		fh_drop_write(fhp);
		goto out;
	}

	host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
	if (host_err < 0) {
		fh_drop_write(fhp);
		goto out_nfserr;
	}
	if (created)
		*created = 1;

	nfsd_check_ignore_resizing(iap);

	if (nfsd_create_is_exclusive(createmode)) {
		/* Cram the verifier into atime/mtime */
		iap->ia_valid = ATTR_MTIME|ATTR_ATIME
			| ATTR_MTIME_SET|ATTR_ATIME_SET;
		/* XXX someone who knows this better please fix it for nsec */
		iap->ia_mtime.tv_sec = v_mtime;
		iap->ia_atime.tv_sec = v_atime;
		iap->ia_mtime.tv_nsec = 0;
		iap->ia_atime.tv_nsec = 0;
	}

 set_attr:
	err = nfsd_create_setattr(rqstp, resfhp, iap);

	/*
	 * nfsd_setattr already committed the child (and possibly also the parent).
	 */
	if (!err)
		err = nfserrno(commit_metadata(fhp));

	fh_drop_write(fhp);
	/*
	 * Update the filehandle to get the new inode info.
	 */
	if (!err)
		err = fh_update(resfhp);

 out:
	fh_unlock(fhp);
	if (dchild && !IS_ERR(dchild))
		dput(dchild);
	return err;

 out_nfserr:
	err = nfserrno(host_err);
	goto out;
}
#endif /* CONFIG_NFSD_V3 */

/*
 * Read a symlink. On entry, *lenp must contain the maximum path length that
 * fits into the buffer. On return, it contains the true length.
 * N.B.
 * After this call fhp needs an fh_put
 */
__be32
nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
{
	struct inode	*inode;
	mm_segment_t	oldfs;
	__be32		err;
	int		host_err;
	struct path path;

	err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP);
	if (err)
		goto out;

	path.mnt = fhp->fh_export->ex_path.mnt;
	path.dentry = fhp->fh_dentry;
	inode = path.dentry->d_inode;

	err = nfserr_inval;
	if (!inode->i_op->readlink)
		goto out;

	touch_atime(&path);
	/* N.B. Why does this call need a get_fs()??
	 * Remove the set_fs and watch the fireworks:-) --okir
	 */

	oldfs = get_fs(); set_fs(KERNEL_DS);
	host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
	set_fs(oldfs);

	if (host_err < 0)
		goto out_nfserr;
	*lenp = host_err;
	err = 0;
out:
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out;
}

/*
 * Create a symlink and look up its inode
 * N.B. After this call _both_ fhp and resfhp need an fh_put
 */
__be32
nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
				char *fname, int flen,
				char *path,  int plen,
				struct svc_fh *resfhp,
				struct iattr *iap)
{
	struct dentry	*dentry, *dnew;
	__be32		err, cerr;
	int		host_err;

	err = nfserr_noent;
	if (!flen || !plen)
		goto out;
	err = nfserr_exist;
	if (isdotent(fname, flen))
		goto out;

	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;
	fh_lock(fhp);
	dentry = fhp->fh_dentry;
	dnew = lookup_one_len(fname, dentry, flen);
	host_err = PTR_ERR(dnew);
	if (IS_ERR(dnew))
		goto out_nfserr;

	host_err = fh_want_write(fhp);
	if (host_err)
		goto out_nfserr;

	if (unlikely(path[plen] != 0)) {
		/* Target string from the wire isn't NUL-terminated:
		 * make a terminated copy for vfs_symlink. */
		char *path_alloced = kmalloc(plen+1, GFP_KERNEL);
		if (path_alloced == NULL)
			host_err = -ENOMEM;
		else {
			strncpy(path_alloced, path, plen);
			path_alloced[plen] = 0;
			host_err = vfs_symlink(dentry->d_inode, dnew, path_alloced);
			kfree(path_alloced);
		}
	} else
		host_err = vfs_symlink(dentry->d_inode, dnew, path);
	err = nfserrno(host_err);
	if (!err)
		err = nfserrno(commit_metadata(fhp));
	fh_unlock(fhp);

	fh_drop_write(fhp);

	cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
	dput(dnew);
	if (err==0) err = cerr;
out:
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out;
}

/*
 * Create a hardlink
 * N.B. After this call _both_ ffhp and tfhp need an fh_put
 */
__be32
nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
				char *name, int len, struct svc_fh *tfhp)
{
	struct dentry	*ddir, *dnew, *dold;
	struct inode	*dirp;
	__be32		err;
	int		host_err;

	err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;
	err = fh_verify(rqstp, tfhp, 0, NFSD_MAY_NOP);
	if (err)
		goto out;
	err = nfserr_isdir;
	if (S_ISDIR(tfhp->fh_dentry->d_inode->i_mode))
		goto out;
	err = nfserr_perm;
	if (!len)
		goto out;
	err = nfserr_exist;
	if (isdotent(name, len))
		goto out;

	fh_lock_nested(ffhp, I_MUTEX_PARENT);
	ddir = ffhp->fh_dentry;
	dirp = ddir->d_inode;

	dnew = lookup_one_len(name, ddir, len);
	host_err = PTR_ERR(dnew);
	if (IS_ERR(dnew))
		goto out_nfserr;

	dold = tfhp->fh_dentry;

	host_err = fh_want_write(tfhp);
	if (host_err) {
		err = nfserrno(host_err);
		goto out_dput;
	}
	err = nfserr_noent;
	if (!dold->d_inode)
		goto out_drop_write;
	host_err = nfsd_break_lease(dold->d_inode);
	if (host_err) {
		err = nfserrno(host_err);
		goto out_drop_write;
	}
	host_err = vfs_link(dold, dirp, dnew);
	if (!host_err) {
		err = nfserrno(commit_metadata(ffhp));
		if (!err)
			err = nfserrno(commit_metadata(tfhp));
	} else {
		/* NFSv2 has no nfserr_xdev; map cross-device to EACCES. */
		if (host_err == -EXDEV && rqstp->rq_vers == 2)
			err = nfserr_acces;
		else
			err = nfserrno(host_err);
	}
out_drop_write:
	fh_drop_write(tfhp);
out_dput:
	dput(dnew);
out_unlock:
	fh_unlock(ffhp);
out:
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out_unlock;
}

/*
 * Rename a file
 * N.B.
 * After this call _both_ ffhp and tfhp need an fh_put
 */
__be32
nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
			    struct svc_fh *tfhp, char *tname, int tlen)
{
	struct dentry	*fdentry, *tdentry, *odentry, *ndentry, *trap;
	struct inode	*fdir, *tdir;
	__be32		err;
	int		host_err;

	err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE);
	if (err)
		goto out;
	err = fh_verify(rqstp, tfhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;

	fdentry = ffhp->fh_dentry;
	fdir = fdentry->d_inode;

	tdentry = tfhp->fh_dentry;
	tdir = tdentry->d_inode;

	/* Cross-export rename is not allowed (v2 lacks nfserr_xdev). */
	err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
	if (ffhp->fh_export != tfhp->fh_export)
		goto out;

	err = nfserr_perm;
	if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
		goto out;

	/* cannot use fh_lock as we need deadlock protective ordering
	 * so do it by hand */
	trap = lock_rename(tdentry, fdentry);
	ffhp->fh_locked = tfhp->fh_locked = 1;
	fill_pre_wcc(ffhp);
	fill_pre_wcc(tfhp);

	odentry = lookup_one_len(fname, fdentry, flen);
	host_err = PTR_ERR(odentry);
	if (IS_ERR(odentry))
		goto out_nfserr;

	host_err = -ENOENT;
	if (!odentry->d_inode)
		goto out_dput_old;
	host_err = -EINVAL;
	if (odentry == trap)
		goto out_dput_old;

	ndentry = lookup_one_len(tname, tdentry, tlen);
	host_err = PTR_ERR(ndentry);
	if (IS_ERR(ndentry))
		goto out_dput_old;
	host_err = -ENOTEMPTY;
	if (ndentry == trap)
		goto out_dput_new;

	host_err = -EXDEV;
	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
		goto out_dput_new;
	host_err = fh_want_write(ffhp);
	if (host_err)
		goto out_dput_new;

	host_err = nfsd_break_lease(odentry->d_inode);
	if (host_err)
		goto out_drop_write;
	if (ndentry->d_inode) {
		host_err = nfsd_break_lease(ndentry->d_inode);
		if (host_err)
			goto out_drop_write;
	}
	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
	if (!host_err) {
		host_err = commit_metadata(tfhp);
		if (!host_err)
			host_err = commit_metadata(ffhp);
	}
out_drop_write:
	fh_drop_write(ffhp);
 out_dput_new:
	dput(ndentry);
 out_dput_old:
	dput(odentry);
 out_nfserr:
	err = nfserrno(host_err);

	/* we cannot reply on fh_unlock on the two filehandles,
	 * as that would do the wrong thing if the two directories
	 * were the same, so again we do it by hand
	 */
	fill_post_wcc(ffhp);
	fill_post_wcc(tfhp);
	unlock_rename(tdentry, fdentry);
	ffhp->fh_locked = tfhp->fh_locked = 0;

out:
	return err;
}

/*
 * Unlink a file or directory
 * N.B. After this call fhp needs an fh_put
 */
__be32
nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
				char *fname, int flen)
{
	struct dentry	*dentry, *rdentry;
	struct inode	*dirp;
	__be32		err;
	int		host_err;

	err = nfserr_acces;
	if (!flen || isdotent(fname, flen))
		goto out;
	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_REMOVE);
	if (err)
		goto out;

	fh_lock_nested(fhp, I_MUTEX_PARENT);
	dentry = fhp->fh_dentry;
	dirp = dentry->d_inode;

	rdentry = lookup_one_len(fname, dentry, flen);
	host_err = PTR_ERR(rdentry);
	if (IS_ERR(rdentry))
		goto out_nfserr;

	if (!rdentry->d_inode) {
		dput(rdentry);
		err = nfserr_noent;
		goto out;
	}

	/* type == 0: infer unlink-vs-rmdir from the object itself. */
	if (!type)
		type = rdentry->d_inode->i_mode & S_IFMT;

	host_err = fh_want_write(fhp);
	if (host_err)
		goto out_put;

	host_err = nfsd_break_lease(rdentry->d_inode);
	if (host_err)
		goto out_drop_write;
	if (type != S_IFDIR)
		host_err = vfs_unlink(dirp, rdentry);
	else
		host_err = vfs_rmdir(dirp, rdentry);
	if (!host_err)
		host_err = commit_metadata(fhp);
out_drop_write:
	fh_drop_write(fhp);
out_put:
	dput(rdentry);

out_nfserr:
	err = nfserrno(host_err);
out:
	return err;
}

/*
 * We do this buffering because we must not call back into the file
 * system's ->lookup() method from the filldir callback. That may well
 * deadlock a number of file systems.
 *
 * This is based heavily on the implementation of same in XFS.
*/ struct buffered_dirent { u64 ino; loff_t offset; int namlen; unsigned int d_type; char name[]; }; struct readdir_data { char *dirent; size_t used; int full; }; static int nfsd_buffered_filldir(void *__buf, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct readdir_data *buf = __buf; struct buffered_dirent *de = (void *)(buf->dirent + buf->used); unsigned int reclen; reclen = ALIGN(sizeof(struct buffered_dirent) + namlen, sizeof(u64)); if (buf->used + reclen > PAGE_SIZE) { buf->full = 1; return -EINVAL; } de->namlen = namlen; de->offset = offset; de->ino = ino; de->d_type = d_type; memcpy(de->name, name, namlen); buf->used += reclen; return 0; } static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func, struct readdir_cd *cdp, loff_t *offsetp) { struct readdir_data buf; struct buffered_dirent *de; int host_err; int size; loff_t offset; buf.dirent = (void *)__get_free_page(GFP_KERNEL); if (!buf.dirent) return nfserrno(-ENOMEM); offset = *offsetp; while (1) { struct inode *dir_inode = file->f_path.dentry->d_inode; unsigned int reclen; cdp->err = nfserr_eof; /* will be cleared on successful read */ buf.used = 0; buf.full = 0; host_err = vfs_readdir(file, nfsd_buffered_filldir, &buf); if (buf.full) host_err = 0; if (host_err < 0) break; size = buf.used; if (!size) break; /* * Various filldir functions may end up calling back into * lookup_one_len() and the file system's ->lookup() method. * These expect i_mutex to be held, as it would within readdir. 
*/ host_err = mutex_lock_killable(&dir_inode->i_mutex); if (host_err) break; de = (struct buffered_dirent *)buf.dirent; while (size > 0) { offset = de->offset; if (func(cdp, de->name, de->namlen, de->offset, de->ino, de->d_type)) break; if (cdp->err != nfs_ok) break; reclen = ALIGN(sizeof(*de) + de->namlen, sizeof(u64)); size -= reclen; de = (struct buffered_dirent *)((char *)de + reclen); } mutex_unlock(&dir_inode->i_mutex); if (size > 0) /* We bailed out early */ break; offset = vfs_llseek(file, 0, SEEK_CUR); } free_page((unsigned long)(buf.dirent)); if (host_err) return nfserrno(host_err); *offsetp = offset; return cdp->err; } /* * Read entries from a directory. * The NFSv3/4 verifier we ignore for now. */ __be32 nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp, struct readdir_cd *cdp, filldir_t func) { __be32 err; struct file *file; loff_t offset = *offsetp; int may_flags = NFSD_MAY_READ; /* NFSv2 only supports 32 bit cookies */ if (rqstp->rq_vers > 2) may_flags |= NFSD_MAY_64BIT_COOKIE; err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file); if (err) goto out; offset = vfs_llseek(file, offset, 0); if (offset < 0) { err = nfserrno((int)offset); goto out_close; } err = nfsd_buffered_readdir(file, func, cdp, offsetp); if (err == nfserr_eof || err == nfserr_toosmall) err = nfs_ok; /* can still be found in ->err */ out_close: nfsd_close(file); out: return err; } /* * Get file system stats * N.B. After this call fhp needs an fh_put */ __be32 nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access) { __be32 err; err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access); if (!err) { struct path path = { .mnt = fhp->fh_export->ex_path.mnt, .dentry = fhp->fh_dentry, }; if (vfs_statfs(&path, stat)) err = nfserr_io; } return err; } static int exp_rdonly(struct svc_rqst *rqstp, struct svc_export *exp) { return nfsexp_flags(rqstp, exp) & NFSEXP_READONLY; } /* * Check for a user's access permissions to this inode. 
*/ __be32 nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp, struct dentry *dentry, int acc) { struct inode *inode = dentry->d_inode; int err; if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP) return 0; #if 0 dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n", acc, (acc & NFSD_MAY_READ)? " read" : "", (acc & NFSD_MAY_WRITE)? " write" : "", (acc & NFSD_MAY_EXEC)? " exec" : "", (acc & NFSD_MAY_SATTR)? " sattr" : "", (acc & NFSD_MAY_TRUNC)? " trunc" : "", (acc & NFSD_MAY_LOCK)? " lock" : "", (acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "", inode->i_mode, IS_IMMUTABLE(inode)? " immut" : "", IS_APPEND(inode)? " append" : "", __mnt_is_readonly(exp->ex_path.mnt)? " ro" : ""); dprintk(" owner %d/%d user %d/%d\n", inode->i_uid, inode->i_gid, current_fsuid(), current_fsgid()); #endif /* Normally we reject any write/sattr etc access on a read-only file * system. But if it is IRIX doing check on write-access for a * device special file, we ignore rofs. */ if (!(acc & NFSD_MAY_LOCAL_ACCESS)) if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) { if (exp_rdonly(rqstp, exp) || __mnt_is_readonly(exp->ex_path.mnt)) return nfserr_rofs; if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode)) return nfserr_perm; } if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode)) return nfserr_perm; if (acc & NFSD_MAY_LOCK) { /* If we cannot rely on authentication in NLM requests, * just allow locks, otherwise require read permission, or * ownership */ if (exp->ex_flags & NFSEXP_NOAUTHNLM) return 0; else acc = NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE; } /* * The file owner always gets access permission for accesses that * would normally be checked at open time. This is to make * file access work even when the client has done a fchmod(fd, 0). * * However, `cp foo bar' should fail nevertheless when bar is * readonly. A sensible way to do this might be to reject all * attempts to truncate a read-only file, because a creat() call * always implies file truncation. * ... 
but this isn't really fair. A process may reasonably call * ftruncate on an open file descriptor on a file with perm 000. * We must trust the client to do permission checking - using "ACCESS" * with NFSv3. */ if ((acc & NFSD_MAY_OWNER_OVERRIDE) && inode->i_uid == current_fsuid()) return 0; /* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */ err = inode_permission(inode, acc & (MAY_READ|MAY_WRITE|MAY_EXEC)); /* Allow read access to binaries even when mode 111 */ if (err == -EACCES && S_ISREG(inode->i_mode) && (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) || acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC))) err = inode_permission(inode, MAY_EXEC); return err? nfserrno(err) : 0; } void nfsd_racache_shutdown(void) { struct raparms *raparm, *last_raparm; unsigned int i; dprintk("nfsd: freeing readahead buffers.\n"); for (i = 0; i < RAPARM_HASH_SIZE; i++) { raparm = raparm_hash[i].pb_head; while(raparm) { last_raparm = raparm; raparm = raparm->p_next; kfree(last_raparm); } raparm_hash[i].pb_head = NULL; } } /* * Initialize readahead param cache */ int nfsd_racache_init(int cache_size) { int i; int j = 0; int nperbucket; struct raparms **raparm = NULL; if (raparm_hash[0].pb_head) return 0; nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE); if (nperbucket < 2) nperbucket = 2; cache_size = nperbucket * RAPARM_HASH_SIZE; dprintk("nfsd: allocating %d readahead buffers.\n", cache_size); for (i = 0; i < RAPARM_HASH_SIZE; i++) { spin_lock_init(&raparm_hash[i].pb_lock); raparm = &raparm_hash[i].pb_head; for (j = 0; j < nperbucket; j++) { *raparm = kzalloc(sizeof(struct raparms), GFP_KERNEL); if (!*raparm) goto out_nomem; raparm = &(*raparm)->p_next; } *raparm = NULL; } nfsdstats.ra_size = cache_size; return 0; out_nomem: dprintk("nfsd: kmalloc failed, freeing readahead buffers\n"); nfsd_racache_shutdown(); return -ENOMEM; } #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) struct posix_acl * nfsd_get_posix_acl(struct svc_fh *fhp, int type) 
{ struct inode *inode = fhp->fh_dentry->d_inode; char *name; void *value = NULL; ssize_t size; struct posix_acl *acl; if (!IS_POSIXACL(inode)) return ERR_PTR(-EOPNOTSUPP); switch (type) { case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; break; case ACL_TYPE_DEFAULT: name = POSIX_ACL_XATTR_DEFAULT; break; default: return ERR_PTR(-EOPNOTSUPP); } size = nfsd_getxattr(fhp->fh_dentry, name, &value); if (size < 0) return ERR_PTR(size); acl = posix_acl_from_xattr(value, size); kfree(value); return acl; } int nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl) { struct inode *inode = fhp->fh_dentry->d_inode; char *name; void *value = NULL; size_t size; int error; if (!IS_POSIXACL(inode) || !inode->i_op->setxattr || !inode->i_op->removexattr) return -EOPNOTSUPP; switch(type) { case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; break; case ACL_TYPE_DEFAULT: name = POSIX_ACL_XATTR_DEFAULT; break; default: return -EOPNOTSUPP; } if (acl && acl->a_count) { size = posix_acl_xattr_size(acl->a_count); value = kmalloc(size, GFP_KERNEL); if (!value) return -ENOMEM; error = posix_acl_to_xattr(acl, value, size); if (error < 0) goto getout; size = error; } else size = 0; error = fh_want_write(fhp); if (error) goto getout; if (size) error = vfs_setxattr(fhp->fh_dentry, name, value, size, 0); else { if (!S_ISDIR(inode->i_mode) && type == ACL_TYPE_DEFAULT) error = 0; else { error = vfs_removexattr(fhp->fh_dentry, name); if (error == -ENODATA) error = 0; } } fh_drop_write(fhp); getout: kfree(value); return error; } #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
gpl-2.0
GuneetAtwal/kernel_a210
drivers/net/ethernet/sfc/efx.c
3212
76238
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2005-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/notifier.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/in.h> #include <linux/crc32.h> #include <linux/ethtool.h> #include <linux/topology.h> #include <linux/gfp.h> #include <linux/cpu_rmap.h> #include "net_driver.h" #include "efx.h" #include "nic.h" #include "selftest.h" #include "mcdi.h" #include "workarounds.h" /************************************************************************** * * Type name strings * ************************************************************************** */ /* Loopback mode names (see LOOPBACK_MODE()) */ const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; const char *const efx_loopback_mode_names[] = { [LOOPBACK_NONE] = "NONE", [LOOPBACK_DATA] = "DATAPATH", [LOOPBACK_GMAC] = "GMAC", [LOOPBACK_XGMII] = "XGMII", [LOOPBACK_XGXS] = "XGXS", [LOOPBACK_XAUI] = "XAUI", [LOOPBACK_GMII] = "GMII", [LOOPBACK_SGMII] = "SGMII", [LOOPBACK_XGBR] = "XGBR", [LOOPBACK_XFI] = "XFI", [LOOPBACK_XAUI_FAR] = "XAUI_FAR", [LOOPBACK_GMII_FAR] = "GMII_FAR", [LOOPBACK_SGMII_FAR] = "SGMII_FAR", [LOOPBACK_XFI_FAR] = "XFI_FAR", [LOOPBACK_GPHY] = "GPHY", [LOOPBACK_PHYXS] = "PHYXS", [LOOPBACK_PCS] = "PCS", [LOOPBACK_PMAPMD] = "PMA/PMD", [LOOPBACK_XPORT] = "XPORT", [LOOPBACK_XGMII_WS] = "XGMII_WS", [LOOPBACK_XAUI_WS] = "XAUI_WS", [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", [LOOPBACK_GMII_WS] = "GMII_WS", 
[LOOPBACK_XFI_WS] = "XFI_WS", [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", [LOOPBACK_PHYXS_WS] = "PHYXS_WS", }; const unsigned int efx_reset_type_max = RESET_TYPE_MAX; const char *const efx_reset_type_names[] = { [RESET_TYPE_INVISIBLE] = "INVISIBLE", [RESET_TYPE_ALL] = "ALL", [RESET_TYPE_WORLD] = "WORLD", [RESET_TYPE_DISABLE] = "DISABLE", [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", [RESET_TYPE_INT_ERROR] = "INT_ERROR", [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", [RESET_TYPE_TX_SKIP] = "TX_SKIP", [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", }; #define EFX_MAX_MTU (9 * 1024) /* Reset workqueue. If any NIC has a hardware failure then a reset will be * queued onto this work queue. This is not a per-nic work queue, because * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. */ static struct workqueue_struct *reset_workqueue; /************************************************************************** * * Configurable values * *************************************************************************/ /* * Use separate channels for TX and RX events * * Set this to 1 to use separate channels for TX and RX. It allows us * to control interrupt affinity separately for TX and RX. * * This is only used in MSI-X interrupt mode */ static unsigned int separate_tx_channels; module_param(separate_tx_channels, uint, 0444); MODULE_PARM_DESC(separate_tx_channels, "Use separate channels for TX and RX"); /* This is the weight assigned to each of the (per-channel) virtual * NAPI devices. */ static int napi_weight = 64; /* This is the time (in jiffies) between invocations of the hardware * monitor. On Falcon-based NICs, this will: * - Check the on-board hardware monitor; * - Poll the link state and reconfigure the hardware as necessary. */ static unsigned int efx_monitor_interval = 1 * HZ; /* Initial interrupt moderation settings. They can be modified after * module load with ethtool. 
* * The default for RX should strike a balance between increasing the * round-trip latency and reducing overhead. */ static unsigned int rx_irq_mod_usec = 60; /* Initial interrupt moderation settings. They can be modified after * module load with ethtool. * * This default is chosen to ensure that a 10G link does not go idle * while a TX queue is stopped after it has become full. A queue is * restarted when it drops below half full. The time this takes (assuming * worst case 3 descriptors per packet and 1024 descriptors) is * 512 / 3 * 1.2 = 205 usec. */ static unsigned int tx_irq_mod_usec = 150; /* This is the first interrupt mode to try out of: * 0 => MSI-X * 1 => MSI * 2 => legacy */ static unsigned int interrupt_mode; /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), * i.e. the number of CPUs among which we may distribute simultaneous * interrupt handling. * * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. * The default (0) means to assign an interrupt to each core. 
*/ static unsigned int rss_cpus; module_param(rss_cpus, uint, 0444); MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); static int phy_flash_cfg; module_param(phy_flash_cfg, int, 0644); MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); static unsigned irq_adapt_low_thresh = 8000; module_param(irq_adapt_low_thresh, uint, 0644); MODULE_PARM_DESC(irq_adapt_low_thresh, "Threshold score for reducing IRQ moderation"); static unsigned irq_adapt_high_thresh = 16000; module_param(irq_adapt_high_thresh, uint, 0644); MODULE_PARM_DESC(irq_adapt_high_thresh, "Threshold score for increasing IRQ moderation"); static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR | NETIF_MSG_HW); module_param(debug, uint, 0); MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); /************************************************************************** * * Utility functions and prototypes * *************************************************************************/ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq); static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq); static void efx_remove_channel(struct efx_channel *channel); static void efx_remove_channels(struct efx_nic *efx); static const struct efx_channel_type efx_default_channel_type; static void efx_remove_port(struct efx_nic *efx); static void efx_init_napi_channel(struct efx_channel *channel); static void efx_fini_napi(struct efx_nic *efx); static void efx_fini_napi_channel(struct efx_channel *channel); static void efx_fini_struct(struct efx_nic *efx); static void efx_start_all(struct efx_nic *efx); static void efx_stop_all(struct efx_nic *efx); #define EFX_ASSERT_RESET_SERIALISED(efx) \ do { \ if ((efx->state == STATE_RUNNING) || \ (efx->state == STATE_DISABLED)) \ ASSERT_RTNL(); \ } while (0) 
/************************************************************************** * * Event queue processing * *************************************************************************/ /* Process channel's event queue * * This function is responsible for processing the event queue of a * single channel. The caller must guarantee that this function will * never be concurrently called more than once on the same channel, * though different channels may be being processed concurrently. */ static int efx_process_channel(struct efx_channel *channel, int budget) { int spent; if (unlikely(!channel->enabled)) return 0; spent = efx_nic_process_eventq(channel, budget); if (spent && efx_channel_has_rx_queue(channel)) { struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); /* Deliver last RX packet. */ if (channel->rx_pkt) { __efx_rx_packet(channel, channel->rx_pkt); channel->rx_pkt = NULL; } if (rx_queue->enabled) { efx_rx_strategy(channel); efx_fast_push_rx_descriptors(rx_queue); } } return spent; } /* Mark channel as finished processing * * Note that since we will not receive further interrupts for this * channel before we finish processing and call the eventq_read_ack() * method, there is no need to use the interrupt hold-off timers. */ static inline void efx_channel_processed(struct efx_channel *channel) { /* The interrupt handler for this channel may set work_pending * as soon as we acknowledge the events we've seen. Make sure * it's cleared before then. */ channel->work_pending = false; smp_wmb(); efx_nic_eventq_read_ack(channel); } /* NAPI poll handler * * NAPI guarantees serialisation of polls of the same device, which * provides the guarantee required by efx_process_channel(). 
*/ static int efx_poll(struct napi_struct *napi, int budget) { struct efx_channel *channel = container_of(napi, struct efx_channel, napi_str); struct efx_nic *efx = channel->efx; int spent; netif_vdbg(efx, intr, efx->net_dev, "channel %d NAPI poll executing on CPU %d\n", channel->channel, raw_smp_processor_id()); spent = efx_process_channel(channel, budget); if (spent < budget) { if (efx_channel_has_rx_queue(channel) && efx->irq_rx_adaptive && unlikely(++channel->irq_count == 1000)) { if (unlikely(channel->irq_mod_score < irq_adapt_low_thresh)) { if (channel->irq_moderation > 1) { channel->irq_moderation -= 1; efx->type->push_irq_moderation(channel); } } else if (unlikely(channel->irq_mod_score > irq_adapt_high_thresh)) { if (channel->irq_moderation < efx->irq_rx_moderation) { channel->irq_moderation += 1; efx->type->push_irq_moderation(channel); } } channel->irq_count = 0; channel->irq_mod_score = 0; } efx_filter_rfs_expire(channel); /* There is no race here; although napi_disable() will * only wait for napi_complete(), this isn't a problem * since efx_channel_processed() will have no effect if * interrupts have already been disabled. */ napi_complete(napi); efx_channel_processed(channel); } return spent; } /* Process the eventq of the specified channel immediately on this CPU * * Disable hardware generated interrupts, wait for any existing * processing to finish, then directly poll (and ack ) the eventq. * Finally reenable NAPI and interrupts. * * This is for use only during a loopback self-test. It must not * deliver any packets up the stack as this can result in deadlock. 
*/ void efx_process_channel_now(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; BUG_ON(channel->channel >= efx->n_channels); BUG_ON(!channel->enabled); BUG_ON(!efx->loopback_selftest); /* Disable interrupts and wait for ISRs to complete */ efx_nic_disable_interrupts(efx); if (efx->legacy_irq) { synchronize_irq(efx->legacy_irq); efx->legacy_irq_enabled = false; } if (channel->irq) synchronize_irq(channel->irq); /* Wait for any NAPI processing to complete */ napi_disable(&channel->napi_str); /* Poll the channel */ efx_process_channel(channel, channel->eventq_mask + 1); /* Ack the eventq. This may cause an interrupt to be generated * when they are reenabled */ efx_channel_processed(channel); napi_enable(&channel->napi_str); if (efx->legacy_irq) efx->legacy_irq_enabled = true; efx_nic_enable_interrupts(efx); } /* Create event queue * Event queue memory allocations are done only once. If the channel * is reset, the memory buffer will be reused; this guards against * errors during channel reset and also simplifies interrupt handling. */ static int efx_probe_eventq(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; unsigned long entries; netif_dbg(efx, probe, efx->net_dev, "chan %d create event queue\n", channel->channel); /* Build an event queue with room for one event per tx and rx buffer, * plus some extra for link state events and MCDI completions. 
*/ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE); channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; return efx_nic_probe_eventq(channel); } /* Prepare channel's event queue */ static void efx_init_eventq(struct efx_channel *channel) { netif_dbg(channel->efx, drv, channel->efx->net_dev, "chan %d init event queue\n", channel->channel); channel->eventq_read_ptr = 0; efx_nic_init_eventq(channel); } /* Enable event queue processing and NAPI */ static void efx_start_eventq(struct efx_channel *channel) { netif_dbg(channel->efx, ifup, channel->efx->net_dev, "chan %d start event queue\n", channel->channel); /* The interrupt handler for this channel may set work_pending * as soon as we enable it. Make sure it's cleared before * then. Similarly, make sure it sees the enabled flag set. */ channel->work_pending = false; channel->enabled = true; smp_wmb(); napi_enable(&channel->napi_str); efx_nic_eventq_read_ack(channel); } /* Disable event queue processing and NAPI */ static void efx_stop_eventq(struct efx_channel *channel) { if (!channel->enabled) return; napi_disable(&channel->napi_str); channel->enabled = false; } static void efx_fini_eventq(struct efx_channel *channel) { netif_dbg(channel->efx, drv, channel->efx->net_dev, "chan %d fini event queue\n", channel->channel); efx_nic_fini_eventq(channel); } static void efx_remove_eventq(struct efx_channel *channel) { netif_dbg(channel->efx, drv, channel->efx->net_dev, "chan %d remove event queue\n", channel->channel); efx_nic_remove_eventq(channel); } /************************************************************************** * * Channel handling * *************************************************************************/ /* Allocate and initialise a channel structure. 
*/ static struct efx_channel * efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) { struct efx_channel *channel; struct efx_rx_queue *rx_queue; struct efx_tx_queue *tx_queue; int j; channel = kzalloc(sizeof(*channel), GFP_KERNEL); if (!channel) return NULL; channel->efx = efx; channel->channel = i; channel->type = &efx_default_channel_type; for (j = 0; j < EFX_TXQ_TYPES; j++) { tx_queue = &channel->tx_queue[j]; tx_queue->efx = efx; tx_queue->queue = i * EFX_TXQ_TYPES + j; tx_queue->channel = channel; } rx_queue = &channel->rx_queue; rx_queue->efx = efx; setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, (unsigned long)rx_queue); return channel; } /* Allocate and initialise a channel structure, copying parameters * (but not resources) from an old channel structure. */ static struct efx_channel * efx_copy_channel(const struct efx_channel *old_channel) { struct efx_channel *channel; struct efx_rx_queue *rx_queue; struct efx_tx_queue *tx_queue; int j; channel = kmalloc(sizeof(*channel), GFP_KERNEL); if (!channel) return NULL; *channel = *old_channel; channel->napi_dev = NULL; memset(&channel->eventq, 0, sizeof(channel->eventq)); for (j = 0; j < EFX_TXQ_TYPES; j++) { tx_queue = &channel->tx_queue[j]; if (tx_queue->channel) tx_queue->channel = channel; tx_queue->buffer = NULL; memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); } rx_queue = &channel->rx_queue; rx_queue->buffer = NULL; memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, (unsigned long)rx_queue); return channel; } static int efx_probe_channel(struct efx_channel *channel) { struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; int rc; netif_dbg(channel->efx, probe, channel->efx->net_dev, "creating channel %d\n", channel->channel); rc = channel->type->pre_probe(channel); if (rc) goto fail; rc = efx_probe_eventq(channel); if (rc) goto fail; efx_for_each_channel_tx_queue(tx_queue, channel) { rc = efx_probe_tx_queue(tx_queue); 
if (rc) goto fail; } efx_for_each_channel_rx_queue(rx_queue, channel) { rc = efx_probe_rx_queue(rx_queue); if (rc) goto fail; } channel->n_rx_frm_trunc = 0; return 0; fail: efx_remove_channel(channel); return rc; } static void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len) { struct efx_nic *efx = channel->efx; const char *type; int number; number = channel->channel; if (efx->tx_channel_offset == 0) { type = ""; } else if (channel->channel < efx->tx_channel_offset) { type = "-rx"; } else { type = "-tx"; number -= efx->tx_channel_offset; } snprintf(buf, len, "%s%s-%d", efx->name, type, number); } static void efx_set_channel_names(struct efx_nic *efx) { struct efx_channel *channel; efx_for_each_channel(channel, efx) channel->type->get_name(channel, efx->channel_name[channel->channel], sizeof(efx->channel_name[0])); } static int efx_probe_channels(struct efx_nic *efx) { struct efx_channel *channel; int rc; /* Restart special buffer allocation */ efx->next_buffer_table = 0; /* Probe channels in reverse, so that any 'extra' channels * use the start of the buffer table. This allows the traffic * channels to be resized without moving them or wasting the * entries before them. */ efx_for_each_channel_rev(channel, efx) { rc = efx_probe_channel(channel); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to create channel %d\n", channel->channel); goto fail; } } efx_set_channel_names(efx); return 0; fail: efx_remove_channels(efx); return rc; } /* Channels are shutdown and reinitialised whilst the NIC is running * to propagate configuration changes (mtu, checksum offload), or * to clear hardware error conditions */ static void efx_start_datapath(struct efx_nic *efx) { struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; struct efx_channel *channel; /* Calculate the rx buffer allocation parameters required to * support the current MTU, including padding for header * alignment and overruns. 
*/ efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + efx->type->rx_buffer_hash_size + efx->type->rx_buffer_padding); efx->rx_buffer_order = get_order(efx->rx_buffer_len + sizeof(struct efx_rx_page_state)); /* Initialise the channels */ efx_for_each_channel(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) efx_init_tx_queue(tx_queue); /* The rx buffer allocation strategy is MTU dependent */ efx_rx_strategy(channel); efx_for_each_channel_rx_queue(rx_queue, channel) { efx_init_rx_queue(rx_queue); efx_nic_generate_fill_event(rx_queue); } WARN_ON(channel->rx_pkt != NULL); efx_rx_strategy(channel); } if (netif_device_present(efx->net_dev)) netif_tx_wake_all_queues(efx->net_dev); } static void efx_stop_datapath(struct efx_nic *efx) { struct efx_channel *channel; struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; int rc; EFX_ASSERT_RESET_SERIALISED(efx); BUG_ON(efx->port_enabled); rc = efx_nic_flush_queues(efx); if (rc && EFX_WORKAROUND_7803(efx)) { /* Schedule a reset to recover from the flush failure. The * descriptor caches reference memory we're about to free, * but falcon_reconfigure_mac_wrapper() won't reconnect * the MACs because of the pending reset. */ netif_err(efx, drv, efx->net_dev, "Resetting to recover from flush failure\n"); efx_schedule_reset(efx, RESET_TYPE_ALL); } else if (rc) { netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); } else { netif_dbg(efx, drv, efx->net_dev, "successfully flushed all queues\n"); } efx_for_each_channel(channel, efx) { /* RX packet processing is pipelined, so wait for the * NAPI handler to complete. At least event queue 0 * might be kept active by non-data events, so don't * use napi_synchronize() but actually disable NAPI * temporarily. 
*/ if (efx_channel_has_rx_queue(channel)) { efx_stop_eventq(channel); efx_start_eventq(channel); } efx_for_each_channel_rx_queue(rx_queue, channel) efx_fini_rx_queue(rx_queue); efx_for_each_possible_channel_tx_queue(tx_queue, channel) efx_fini_tx_queue(tx_queue); } } static void efx_remove_channel(struct efx_channel *channel) { struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; netif_dbg(channel->efx, drv, channel->efx->net_dev, "destroy chan %d\n", channel->channel); efx_for_each_channel_rx_queue(rx_queue, channel) efx_remove_rx_queue(rx_queue); efx_for_each_possible_channel_tx_queue(tx_queue, channel) efx_remove_tx_queue(tx_queue); efx_remove_eventq(channel); } static void efx_remove_channels(struct efx_nic *efx) { struct efx_channel *channel; efx_for_each_channel(channel, efx) efx_remove_channel(channel); } int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) { struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; u32 old_rxq_entries, old_txq_entries; unsigned i, next_buffer_table = 0; int rc = 0; /* Not all channels should be reallocated. We must avoid * reallocating their buffer table entries. 
*/ efx_for_each_channel(channel, efx) { struct efx_rx_queue *rx_queue; struct efx_tx_queue *tx_queue; if (channel->type->copy) continue; next_buffer_table = max(next_buffer_table, channel->eventq.index + channel->eventq.entries); efx_for_each_channel_rx_queue(rx_queue, channel) next_buffer_table = max(next_buffer_table, rx_queue->rxd.index + rx_queue->rxd.entries); efx_for_each_channel_tx_queue(tx_queue, channel) next_buffer_table = max(next_buffer_table, tx_queue->txd.index + tx_queue->txd.entries); } efx_stop_all(efx); efx_stop_interrupts(efx, true); /* Clone channels (where possible) */ memset(other_channel, 0, sizeof(other_channel)); for (i = 0; i < efx->n_channels; i++) { channel = efx->channel[i]; if (channel->type->copy) channel = channel->type->copy(channel); if (!channel) { rc = -ENOMEM; goto out; } other_channel[i] = channel; } /* Swap entry counts and channel pointers */ old_rxq_entries = efx->rxq_entries; old_txq_entries = efx->txq_entries; efx->rxq_entries = rxq_entries; efx->txq_entries = txq_entries; for (i = 0; i < efx->n_channels; i++) { channel = efx->channel[i]; efx->channel[i] = other_channel[i]; other_channel[i] = channel; } /* Restart buffer table allocation */ efx->next_buffer_table = next_buffer_table; for (i = 0; i < efx->n_channels; i++) { channel = efx->channel[i]; if (!channel->type->copy) continue; rc = efx_probe_channel(channel); if (rc) goto rollback; efx_init_napi_channel(efx->channel[i]); } out: /* Destroy unused channel structures */ for (i = 0; i < efx->n_channels; i++) { channel = other_channel[i]; if (channel && channel->type->copy) { efx_fini_napi_channel(channel); efx_remove_channel(channel); kfree(channel); } } efx_start_interrupts(efx, true); efx_start_all(efx); return rc; rollback: /* Swap back */ efx->rxq_entries = old_rxq_entries; efx->txq_entries = old_txq_entries; for (i = 0; i < efx->n_channels; i++) { channel = efx->channel[i]; efx->channel[i] = other_channel[i]; other_channel[i] = channel; } goto out; } void 
efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	/* Retry the RX descriptor ring refill after a 100ms back-off */
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

/* Default operations for channels that need no special handling */
static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.keep_eventq		= false,
};

/* No-op channel operation that always reports success */
int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN
	 */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu,
			   (efx->promiscuous ? " [PROMISC]" : ""));
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

/* Record the advertised link modes and derive the wanted flow control
 * settings from the advertised pause capabilities.
 */
void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
		else
			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EFX_FC_TX;
	}
}

/* Record the wanted flow control mode and update the pause bits in the
 * advertised link modes to match.
 */
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}

static void efx_fini_port(struct efx_nic *efx);

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Serialise the promiscuous flag with efx_set_rx_mode. */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	/* Restore the original PHY mode if reconfiguration failed */
	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled.
 */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly.
 */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Create the port: hook up the hardware-type port operations and set the
 * initial MAC address.
 */
static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);

	return 0;
}

/* Bring up the PHY and push the initial MAC/flow-control configuration */
static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx->type->reconfigure_mac(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* efx_mac_work() might have been scheduled after efx_stop_port(),
	 * and then cancelled by efx_flush_all() */
	efx->type->reconfigure_mac(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_mac_work() and efx_monitor() from
working */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);
}

/* Shut down the PHY and report the link as down */
static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask.  Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks event though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask)) {
			rc = pci_set_dma_mask(pci_dev, dma_mask);
			if (rc == 0)
				break;
		}
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		netif_err(efx, probe, efx->net_dev,
			  "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys,
			  efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys,
		  efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

/* Undo efx_init_io(): unmap the BAR, release the region and disable the
 * PCI device.
 */
static void efx_fini_io(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

/* Work out how many RSS channels we would like: one per physical core
 * (counting hyperthread siblings once), unless overridden by rss_cpus,
 * and capped so RSS table entries remain usable by VFs.
 */
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_thread_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
	if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
	    count > efx_vf_size(efx)) {
		netif_warn(efx, probe, efx->net_dev,
			   "Reducing number of RSS channels from %u to %u for "
			   "VF support. Increase vf-msix-limit to use more "
			   "channels on the PF.\n",
			   count, efx_vf_size(efx));
		count = efx_vf_size(efx);
	}

	return count;
}

/* Populate the accelerated-RFS CPU reverse map from the MSI-X vectors of
 * the RX channels.  A no-op when CONFIG_RFS_ACCEL is not set.
 */
static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
	unsigned int i;
	int rc;

	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
	if (!efx->net_dev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < efx->n_rx_channels; i++) {
		rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
				      xentries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
			efx->net_dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif
	return 0;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int max_channels =
		min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	/* Preferred mode: one MSI-X vector per channel */
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = efx_wanted_parallelism(efx);
		if (separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
		if (rc > 0) {
			/* Positive rc means only rc vectors are available;
			 * retry with the reduced count */
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= n_channels);
			n_channels = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     n_channels);
		}

		if (rc == 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (separate_tx_channels) {
				efx->n_tx_channels = max(n_channels / 2, 1U);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = n_channels;
				efx->n_rx_channels = n_channels;
			}
			rc = efx_init_rx_cpu_rmap(efx, xentries);
			if (rc) {
				pci_disable_msix(efx->pci_dev);
				return rc;
			}
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	j = efx->n_channels;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
		}
	}

	/* RSS might be usable on VFs even if it is disabled on the PF */
	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
			   efx->n_rx_channels : efx_vf_size(efx));

	return 0;
}

/* Enable interrupts, then probe and start the event queues */
static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
	struct efx_channel *channel;

	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq || !may_keep_eventq)
			efx_init_eventq(channel);
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);
}

/* Disable interrupts and stop (optionally tearing down) the event queues */
static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
	struct efx_channel *channel;

	efx_mcdi_mode_poll(efx);

	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq || !may_keep_eventq)
			efx_fini_eventq(channel);
	}
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static
void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;

	/* We need to adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}

/* Probe the NIC: hardware-specific init, interrupt/channel discovery, RSS
 * setup and initial interrupt moderation.
 */
static int efx_probe_nic(struct efx_nic *efx)
{
	size_t i;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and queues by trying to hook
	 * in MSI-X interrupts.
	 */
	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	efx->type->dimension_resources(efx);

	if (efx->n_channels > 1)
		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);

	efx_set_channels(efx);
	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

/* Probe NIC, port, filter tables and channels, unwinding in reverse
 * order on failure.
 */
static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail3;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail4;

	return 0;

 fail4:
	efx_remove_filters(efx);
 fail3:
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* Called after previous invocation(s) of efx_stop_all, restarts the port,
 * kernel transmit queues and NAPI processing, and ensures that the port is
 * scheduled to be reconfigured. This function is safe to call multiple
 * times when the NIC is in any state.
 */
static void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (!netif_running(efx->net_dev))
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	/* Start the hardware monitor if there is one. Otherwise (we're link
	 * event driven), we have to poll the PHY because after an event queue
	 * flush, we could have a missed a link state change */
	if (efx->type->monitor != NULL) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
	} else {
		mutex_lock(&efx->mac_lock);
		if (efx->phy_op->poll(efx))
			efx_link_status_changed(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx->type->start_stats(efx);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point.
 */
static void efx_flush_all(struct efx_nic *efx)
{
	/* Make sure the hardware monitor and event self-test are stopped */
	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);
	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
}

/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks.
 */
static void efx_stop_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	efx->type->stop_stats(efx);
	efx_stop_port(efx);

	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	netif_tx_disable(efx->net_dev);

	efx_stop_datapath(efx);
}

static void efx_remove_all(struct efx_nic *efx)
{
	efx_remove_channels(efx);
	efx_remove_filters(efx);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

/* Convert a moderation interval in microseconds to timer ticks */
static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < quantum_ns)
		return 1; /* never round down to 0 */
	return usecs * 1000 / quantum_ns;
}

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct efx_channel *channel;
	unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
						efx->timer_quantum_ns,
						1000);
	unsigned int tx_ticks;
	unsigned int rx_ticks;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
		return -EINVAL;

	tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
	rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);

	/* With shared RX/TX channels the two settings cannot differ unless
	 * the caller allows RX to win */
	if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation = rx_ticks;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation = tx_ticks;
	}

	return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	/* We must round up when converting ticks to microseconds
	 * because we round down when converting the other way.
	 */
	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
				 efx->timer_quantum_ns,
				 1000);

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation. Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0)
		*tx_usecs = *rx_usecs;
	else
		*tx_usecs = DIV_ROUND_UP(
			efx->channel[efx->tx_channel_offset]->irq_moderation *
			efx->timer_quantum_ns,
			1000);
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of monitor() anyway.
	 */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	/* Reschedule ourselves for the next monitoring interval */
	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

/* Register this channel's NAPI context with the net device */
static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);
	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed.
However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	if (efx->state == STATE_DISABLED)
		return -EIO;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	efx_selftest_async_start(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	if (efx->state != STATE_DISABLED) {
		/* Stop the device and flush all the channels */
		efx_stop_all(efx);
	}

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking.
 */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
					       struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	spin_lock_bh(&efx->stats_lock);

	efx->type->update_stats(efx);

	/* Translate the MAC statistics into the kernel's rtnl stats */
	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	spin_unlock_bh(&efx->stats_lock);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}


/* Context: process, rtnl_lock() held.
 */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	mutex_lock(&efx->mac_lock);
	/* Reconfigure the MAC before enabling the dma queues so that
	 * the RX buffers don't overflow */
	net_dev->mtu = new_mtu;
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);
	return 0;
}

/* Validate and install a new MAC address, then push it to the MAC and
 * notify any VFs.
 */
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
	efx_sriov_mac_address_changed(efx);

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_rx_mode(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
*/ set_bit_le(0xff, mc_hash->byte); } if (efx->port_enabled) queue_work(efx->workqueue, &efx->mac_work); /* Otherwise efx_start_port() will do this */ } static int efx_set_features(struct net_device *net_dev, netdev_features_t data) { struct efx_nic *efx = netdev_priv(net_dev); /* If disabling RX n-tuple filtering, clear existing filters */ if (net_dev->features & ~data & NETIF_F_NTUPLE) efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); return 0; } static const struct net_device_ops efx_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, .ndo_get_stats64 = efx_net_stats, .ndo_tx_timeout = efx_watchdog, .ndo_start_xmit = efx_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = efx_ioctl, .ndo_change_mtu = efx_change_mtu, .ndo_set_mac_address = efx_set_mac_address, .ndo_set_rx_mode = efx_set_rx_mode, .ndo_set_features = efx_set_features, #ifdef CONFIG_SFC_SRIOV .ndo_set_vf_mac = efx_sriov_set_vf_mac, .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, .ndo_get_vf_config = efx_sriov_get_vf_config, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, #endif .ndo_setup_tc = efx_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = efx_filter_rfs, #endif }; static void efx_update_name(struct efx_nic *efx) { strcpy(efx->name, efx->net_dev->name); efx_mtd_rename(efx); efx_set_channel_names(efx); } static int efx_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *net_dev = ptr; if (net_dev->netdev_ops == &efx_netdev_ops && event == NETDEV_CHANGENAME) efx_update_name(netdev_priv(net_dev)); return NOTIFY_DONE; } static struct notifier_block efx_netdev_notifier = { .notifier_call = efx_netdev_event, }; static ssize_t show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); return sprintf(buf, "%d\n", efx->phy_type); } static DEVICE_ATTR(phy_type, 0644, 
show_phy_type, NULL); static int efx_register_netdev(struct efx_nic *efx) { struct net_device *net_dev = efx->net_dev; struct efx_channel *channel; int rc; net_dev->watchdog_timeo = 5 * HZ; net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); rtnl_lock(); rc = dev_alloc_name(net_dev, net_dev->name); if (rc < 0) goto fail_locked; efx_update_name(efx); rc = register_netdevice(net_dev); if (rc) goto fail_locked; efx_for_each_channel(channel, efx) { struct efx_tx_queue *tx_queue; efx_for_each_channel_tx_queue(tx_queue, channel) efx_init_tx_queue_core_txq(tx_queue); } /* Always start with carrier off; PHY events will detect the link */ netif_carrier_off(net_dev); rtnl_unlock(); rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to init net dev attributes\n"); goto fail_registered; } return 0; fail_locked: rtnl_unlock(); netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); return rc; fail_registered: unregister_netdev(net_dev); return rc; } static void efx_unregister_netdev(struct efx_nic *efx) { struct efx_channel *channel; struct efx_tx_queue *tx_queue; if (!efx->net_dev) return; BUG_ON(netdev_priv(efx->net_dev) != efx); /* Free up any skbs still remaining. This has to happen before * we try to unregister the netdev as running their destructors * may be needed to get the device ref. count to 0. 
 */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_release_tx_buffers(tx_queue);
	}

	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	unregister_netdev(efx->net_dev);
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset.
 */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);

	efx_stop_interrupts(efx, false);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE.
 */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		if (efx->phy_op->reconfigure(efx))
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	efx->type->reconfigure_mac(efx);

	efx_start_interrupts(efx, false);
	efx_restore_filters(efx);
	efx_sriov_reset(efx);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Reset the NIC using the specified method.  Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	netif_device_detach(efx->net_dev);
	efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered. We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
	efx->reset_pending &= -(1 << (method + 1));

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests.
	 */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc || method == RESET_TYPE_DISABLE;
	rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	unsigned long pending = ACCESS_ONCE(efx->reset_pending);

	if (!pending)
		return;

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flags set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		netif_info(efx, drv, efx->net_dev,
			   "scheduled reset quenched. NIC not RUNNING\n");
		return;
	}

	rtnl_lock();
	/* Process the highest-priority (highest-numbered) pending reset */
	(void)efx_reset(efx, fls(pending) - 1);
	rtnl_unlock();
}

/* Map a reset reason to a reset method and queue it for efx_reset_work() */
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);

	/* efx_process_channel() will no longer read events once a
	 * reset is scheduled. So switch back to poll'd MCDI completions.
	 */
	efx_mcdi_mode_poll(efx);

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct
efx_nic *efx) {} static bool efx_port_dummy_op_poll(struct efx_nic *efx) { return false; } static const struct efx_phy_operations efx_dummy_phy_operations = { .init = efx_port_dummy_op_int, .reconfigure = efx_port_dummy_op_int, .poll = efx_port_dummy_op_poll, .fini = efx_port_dummy_op_void, }; /************************************************************************** * * Data housekeeping * **************************************************************************/ /* This zeroes out and then fills in the invariants in a struct * efx_nic (including all sub-structures). */ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, struct pci_dev *pci_dev, struct net_device *net_dev) { int i; /* Initialise common structures */ memset(efx, 0, sizeof(*efx)); spin_lock_init(&efx->biu_lock); #ifdef CONFIG_SFC_MTD INIT_LIST_HEAD(&efx->mtd_list); #endif INIT_WORK(&efx->reset_work, efx_reset_work); INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); efx->pci_dev = pci_dev; efx->msg_enable = debug; efx->state = STATE_INIT; strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); efx->net_dev = net_dev; spin_lock_init(&efx->stats_lock); mutex_init(&efx->mac_lock); efx->phy_op = &efx_dummy_phy_operations; efx->mdio.dev = net_dev; INIT_WORK(&efx->mac_work, efx_mac_work); init_waitqueue_head(&efx->flush_wq); for (i = 0; i < EFX_MAX_CHANNELS; i++) { efx->channel[i] = efx_alloc_channel(efx, i, NULL); if (!efx->channel[i]) goto fail; } efx->type = type; EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); /* Higher numbered interrupt modes are less capable! 
*/ efx->interrupt_mode = max(efx->type->max_interrupt_mode, interrupt_mode); /* Would be good to use the net_dev name, but we're too early */ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", pci_name(pci_dev)); efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); if (!efx->workqueue) goto fail; return 0; fail: efx_fini_struct(efx); return -ENOMEM; } static void efx_fini_struct(struct efx_nic *efx) { int i; for (i = 0; i < EFX_MAX_CHANNELS; i++) kfree(efx->channel[i]); if (efx->workqueue) { destroy_workqueue(efx->workqueue); efx->workqueue = NULL; } } /************************************************************************** * * PCI interface * **************************************************************************/ /* Main body of final NIC shutdown code * This is called only at module unload (or hotplug removal). */ static void efx_pci_remove_main(struct efx_nic *efx) { #ifdef CONFIG_RFS_ACCEL free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); efx->net_dev->rx_cpu_rmap = NULL; #endif efx_stop_interrupts(efx, false); efx_nic_fini_interrupt(efx); efx_fini_port(efx); efx->type->fini(efx); efx_fini_napi(efx); efx_remove_all(efx); } /* Final NIC shutdown * This is called only at module unload (or hotplug removal). */ static void efx_pci_remove(struct pci_dev *pci_dev) { struct efx_nic *efx; efx = pci_get_drvdata(pci_dev); if (!efx) return; /* Mark the NIC as fini, then stop the interface */ rtnl_lock(); efx->state = STATE_FINI; dev_close(efx->net_dev); /* Allow any queued efx_resets() to complete */ rtnl_unlock(); efx_stop_interrupts(efx, false); efx_sriov_fini(efx); efx_unregister_netdev(efx); efx_mtd_remove(efx); /* Wait for any scheduled resets to complete. No more will be * scheduled from this point because efx_stop_all() has been * called, we are no longer registered with driverlink, and * the net_device's have been removed. 
*/ cancel_work_sync(&efx->reset_work); efx_pci_remove_main(efx); efx_fini_io(efx); netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); pci_set_drvdata(pci_dev, NULL); efx_fini_struct(efx); free_netdev(efx->net_dev); }; /* NIC VPD information * Called during probe to display the part number of the * installed NIC. VPD is potentially very large but this should * always appear within the first 512 bytes. */ #define SFC_VPD_LEN 512 static void efx_print_product_vpd(struct efx_nic *efx) { struct pci_dev *dev = efx->pci_dev; char vpd_data[SFC_VPD_LEN]; ssize_t vpd_size; int i, j; /* Get the vpd data from the device */ vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); if (vpd_size <= 0) { netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); return; } /* Get the Read only section */ i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); if (i < 0) { netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); return; } j = pci_vpd_lrdt_size(&vpd_data[i]); i += PCI_VPD_LRDT_TAG_SIZE; if (i + j > vpd_size) j = vpd_size - i; /* Get the Part number */ i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN"); if (i < 0) { netif_err(efx, drv, efx->net_dev, "Part number not found\n"); return; } j = pci_vpd_info_field_size(&vpd_data[i]); i += PCI_VPD_INFO_FLD_HDR_SIZE; if (i + j > vpd_size) { netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); return; } netif_info(efx, drv, efx->net_dev, "Part Number : %.*s\n", j, &vpd_data[i]); } /* Main body of NIC initialisation * This is called at module load (or hotplug insertion, theoretically). 
*/ static int efx_pci_probe_main(struct efx_nic *efx) { int rc; /* Do start-of-day initialisation */ rc = efx_probe_all(efx); if (rc) goto fail1; efx_init_napi(efx); rc = efx->type->init(efx); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to initialise NIC\n"); goto fail3; } rc = efx_init_port(efx); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to initialise port\n"); goto fail4; } rc = efx_nic_init_interrupt(efx); if (rc) goto fail5; efx_start_interrupts(efx, false); return 0; fail5: efx_fini_port(efx); fail4: efx->type->fini(efx); fail3: efx_fini_napi(efx); efx_remove_all(efx); fail1: return rc; } /* NIC initialisation * * This is called at module load (or hotplug insertion, * theoretically). It sets up PCI mappings, resets the NIC, * sets up and registers the network devices with the kernel and hooks * the interrupt service routine. It does not prepare the device for * transmission; this is left to the first time one of the network * interfaces is brought up (i.e. efx_net_open). 
*/ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *entry) { const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data; struct net_device *net_dev; struct efx_nic *efx; int rc; /* Allocate and initialise a struct net_device and struct efx_nic */ net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, EFX_MAX_RX_QUEUES); if (!net_dev) return -ENOMEM; net_dev->features |= (type->offload_features | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_RXCSUM); if (type->offload_features & NETIF_F_V6_CSUM) net_dev->features |= NETIF_F_TSO6; /* Mask for features that also apply to VLAN devices */ net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | NETIF_F_RXCSUM); /* All offloads can be toggled */ net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; efx = netdev_priv(net_dev); pci_set_drvdata(pci_dev, efx); SET_NETDEV_DEV(net_dev, &pci_dev->dev); rc = efx_init_struct(efx, type, pci_dev, net_dev); if (rc) goto fail1; netif_info(efx, probe, efx->net_dev, "Solarflare NIC detected\n"); efx_print_product_vpd(efx); /* Set up basic I/O (BAR mappings etc) */ rc = efx_init_io(efx); if (rc) goto fail2; rc = efx_pci_probe_main(efx); /* Serialise against efx_reset(). No more resets will be * scheduled since efx_stop_all() has been called, and we have * not and never have been registered. */ cancel_work_sync(&efx->reset_work); if (rc) goto fail3; /* If there was a scheduled reset during probe, the NIC is * probably hosed anyway. 
*/ if (efx->reset_pending) { rc = -EIO; goto fail4; } /* Switch to the running state before we expose the device to the OS, * so that dev_open()|efx_start_all() will actually start the device */ efx->state = STATE_RUNNING; rc = efx_register_netdev(efx); if (rc) goto fail4; rc = efx_sriov_init(efx); if (rc) netif_err(efx, probe, efx->net_dev, "SR-IOV can't be enabled rc %d\n", rc); netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); /* Try to create MTDs, but allow this to fail */ rtnl_lock(); rc = efx_mtd_probe(efx); rtnl_unlock(); if (rc) netif_warn(efx, probe, efx->net_dev, "failed to create MTDs (%d)\n", rc); return 0; fail4: efx_pci_remove_main(efx); fail3: efx_fini_io(efx); fail2: efx_fini_struct(efx); fail1: WARN_ON(rc > 0); netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); free_netdev(net_dev); return rc; } static int efx_pm_freeze(struct device *dev) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); efx->state = STATE_FINI; netif_device_detach(efx->net_dev); efx_stop_all(efx); efx_stop_interrupts(efx, false); return 0; } static int efx_pm_thaw(struct device *dev) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); efx->state = STATE_INIT; efx_start_interrupts(efx, false); mutex_lock(&efx->mac_lock); efx->phy_op->reconfigure(efx); mutex_unlock(&efx->mac_lock); efx_start_all(efx); netif_device_attach(efx->net_dev); efx->state = STATE_RUNNING; efx->type->resume_wol(efx); /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ queue_work(reset_workqueue, &efx->reset_work); return 0; } static int efx_pm_poweroff(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct efx_nic *efx = pci_get_drvdata(pci_dev); efx->type->fini(efx); efx->reset_pending = 0; pci_save_state(pci_dev); return pci_set_power_state(pci_dev, PCI_D3hot); } /* Used for both resume and restore */ static int efx_pm_resume(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct efx_nic *efx = 
pci_get_drvdata(pci_dev); int rc; rc = pci_set_power_state(pci_dev, PCI_D0); if (rc) return rc; pci_restore_state(pci_dev); rc = pci_enable_device(pci_dev); if (rc) return rc; pci_set_master(efx->pci_dev); rc = efx->type->reset(efx, RESET_TYPE_ALL); if (rc) return rc; rc = efx->type->init(efx); if (rc) return rc; efx_pm_thaw(dev); return 0; } static int efx_pm_suspend(struct device *dev) { int rc; efx_pm_freeze(dev); rc = efx_pm_poweroff(dev); if (rc) efx_pm_resume(dev); return rc; } static const struct dev_pm_ops efx_pm_ops = { .suspend = efx_pm_suspend, .resume = efx_pm_resume, .freeze = efx_pm_freeze, .thaw = efx_pm_thaw, .poweroff = efx_pm_poweroff, .restore = efx_pm_resume, }; static struct pci_driver efx_pci_driver = { .name = KBUILD_MODNAME, .id_table = efx_pci_table, .probe = efx_pci_probe, .remove = efx_pci_remove, .driver.pm = &efx_pm_ops, }; /************************************************************************** * * Kernel module interface * *************************************************************************/ module_param(interrupt_mode, uint, 0444); MODULE_PARM_DESC(interrupt_mode, "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); static int __init efx_init_module(void) { int rc; printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); rc = register_netdevice_notifier(&efx_netdev_notifier); if (rc) goto err_notifier; rc = efx_init_sriov(); if (rc) goto err_sriov; reset_workqueue = create_singlethread_workqueue("sfc_reset"); if (!reset_workqueue) { rc = -ENOMEM; goto err_reset; } rc = pci_register_driver(&efx_pci_driver); if (rc < 0) goto err_pci; return 0; err_pci: destroy_workqueue(reset_workqueue); err_reset: efx_fini_sriov(); err_sriov: unregister_netdevice_notifier(&efx_netdev_notifier); err_notifier: return rc; } static void __exit efx_exit_module(void) { printk(KERN_INFO "Solarflare NET driver unloading\n"); pci_unregister_driver(&efx_pci_driver); destroy_workqueue(reset_workqueue); efx_fini_sriov(); 
unregister_netdevice_notifier(&efx_netdev_notifier); } module_init(efx_init_module); module_exit(efx_exit_module); MODULE_AUTHOR("Solarflare Communications and " "Michael Brown <mbrown@fensystems.co.uk>"); MODULE_DESCRIPTION("Solarflare Communications network driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, efx_pci_table);
gpl-2.0
SciAps/android-dm3730-kernel
drivers/uwb/rsv.c
3980
27535
/* * UWB reservation management. * * Copyright (C) 2008 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/uwb.h> #include <linux/slab.h> #include <linux/random.h> #include "uwb-internal.h" static void uwb_rsv_timer(unsigned long arg); static const char *rsv_states[] = { [UWB_RSV_STATE_NONE] = "none ", [UWB_RSV_STATE_O_INITIATED] = "o initiated ", [UWB_RSV_STATE_O_PENDING] = "o pending ", [UWB_RSV_STATE_O_MODIFIED] = "o modified ", [UWB_RSV_STATE_O_ESTABLISHED] = "o established ", [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ", [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding", [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining", [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ", [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ", [UWB_RSV_STATE_T_CONFLICT] = "t conflict ", [UWB_RSV_STATE_T_PENDING] = "t pending ", [UWB_RSV_STATE_T_DENIED] = "t denied ", [UWB_RSV_STATE_T_RESIZED] = "t resized ", [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ", [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf", [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend", [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ", }; static const char *rsv_types[] = { [UWB_DRP_TYPE_ALIEN_BP] = "alien-bp", [UWB_DRP_TYPE_HARD] = "hard", [UWB_DRP_TYPE_SOFT] = "soft", [UWB_DRP_TYPE_PRIVATE] = "private", [UWB_DRP_TYPE_PCA] = "pca", }; bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv) { static 
const bool has_two_drp_ies[] = { [UWB_RSV_STATE_O_INITIATED] = false, [UWB_RSV_STATE_O_PENDING] = false, [UWB_RSV_STATE_O_MODIFIED] = false, [UWB_RSV_STATE_O_ESTABLISHED] = false, [UWB_RSV_STATE_O_TO_BE_MOVED] = false, [UWB_RSV_STATE_O_MOVE_COMBINING] = false, [UWB_RSV_STATE_O_MOVE_REDUCING] = false, [UWB_RSV_STATE_O_MOVE_EXPANDING] = true, [UWB_RSV_STATE_T_ACCEPTED] = false, [UWB_RSV_STATE_T_CONFLICT] = false, [UWB_RSV_STATE_T_PENDING] = false, [UWB_RSV_STATE_T_DENIED] = false, [UWB_RSV_STATE_T_RESIZED] = false, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true, [UWB_RSV_STATE_T_EXPANDING_PENDING] = true, [UWB_RSV_STATE_T_EXPANDING_DENIED] = true, }; return has_two_drp_ies[rsv->state]; } /** * uwb_rsv_state_str - return a string for a reservation state * @state: the reservation state. */ const char *uwb_rsv_state_str(enum uwb_rsv_state state) { if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST) return "unknown"; return rsv_states[state]; } EXPORT_SYMBOL_GPL(uwb_rsv_state_str); /** * uwb_rsv_type_str - return a string for a reservation type * @type: the reservation type */ const char *uwb_rsv_type_str(enum uwb_drp_type type) { if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA) return "invalid"; return rsv_types[type]; } EXPORT_SYMBOL_GPL(uwb_rsv_type_str); void uwb_rsv_dump(char *text, struct uwb_rsv *rsv) { struct device *dev = &rsv->rc->uwb_dev.dev; struct uwb_dev_addr devaddr; char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); if (rsv->target.type == UWB_RSV_TARGET_DEV) devaddr = rsv->target.dev->dev_addr; else devaddr = rsv->target.devaddr; uwb_dev_addr_print(target, sizeof(target), &devaddr); dev_dbg(dev, "rsv %s %s -> %s: %s\n", text, owner, target, uwb_rsv_state_str(rsv->state)); } static void uwb_rsv_release(struct kref *kref) { struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref); kfree(rsv); } void uwb_rsv_get(struct 
uwb_rsv *rsv) { kref_get(&rsv->kref); } void uwb_rsv_put(struct uwb_rsv *rsv) { kref_put(&rsv->kref, uwb_rsv_release); } /* * Get a free stream index for a reservation. * * If the target is a DevAddr (e.g., a WUSB cluster reservation) then * the stream is allocated from a pool of per-RC stream indexes, * otherwise a unique stream index for the target is selected. */ static int uwb_rsv_get_stream(struct uwb_rsv *rsv) { struct uwb_rc *rc = rsv->rc; struct device *dev = &rc->uwb_dev.dev; unsigned long *streams_bm; int stream; switch (rsv->target.type) { case UWB_RSV_TARGET_DEV: streams_bm = rsv->target.dev->streams; break; case UWB_RSV_TARGET_DEVADDR: streams_bm = rc->uwb_dev.streams; break; default: return -EINVAL; } stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS); if (stream >= UWB_NUM_STREAMS) return -EBUSY; rsv->stream = stream; set_bit(stream, streams_bm); dev_dbg(dev, "get stream %d\n", rsv->stream); return 0; } static void uwb_rsv_put_stream(struct uwb_rsv *rsv) { struct uwb_rc *rc = rsv->rc; struct device *dev = &rc->uwb_dev.dev; unsigned long *streams_bm; switch (rsv->target.type) { case UWB_RSV_TARGET_DEV: streams_bm = rsv->target.dev->streams; break; case UWB_RSV_TARGET_DEVADDR: streams_bm = rc->uwb_dev.streams; break; default: return; } clear_bit(rsv->stream, streams_bm); dev_dbg(dev, "put stream %d\n", rsv->stream); } void uwb_rsv_backoff_win_timer(unsigned long arg) { struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg; struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow); struct device *dev = &rc->uwb_dev.dev; bow->can_reserve_extra_mases = true; if (bow->total_expired <= 4) { bow->total_expired++; } else { /* after 4 backoff window has expired we can exit from * the backoff procedure */ bow->total_expired = 0; bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1; } dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n); /* try to relocate all the "to be moved" relocations */ 
uwb_rsv_handle_drp_avail_change(rc); } void uwb_rsv_backoff_win_increment(struct uwb_rc *rc) { struct uwb_drp_backoff_win *bow = &rc->bow; struct device *dev = &rc->uwb_dev.dev; unsigned timeout_us; dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window); bow->can_reserve_extra_mases = false; if((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX) return; bow->window <<= 1; bow->n = random32() & (bow->window - 1); dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n); /* reset the timer associated variables */ timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US; bow->total_expired = 0; mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us)); } static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) { int sframes = UWB_MAX_LOST_BEACONS; /* * Multicast reservations can become established within 1 * super frame and should not be terminated if no response is * received. */ if (rsv->is_multicast) { if (rsv->state == UWB_RSV_STATE_O_INITIATED || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) sframes = 1; if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) sframes = 0; } if (sframes > 0) { /* * Add an additional 2 superframes to account for the * time to send the SET DRP IE command. */ unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US; mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us)); } else del_timer(&rsv->timer); } /* * Update a reservations state, and schedule an update of the * transmitted DRP IEs. 
*/ static void uwb_rsv_state_update(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) { rsv->state = new_state; rsv->ie_valid = false; uwb_rsv_dump("SU", rsv); uwb_rsv_stroke_timer(rsv); uwb_rsv_sched_update(rsv->rc); } static void uwb_rsv_callback(struct uwb_rsv *rsv) { if (rsv->callback) rsv->callback(rsv); } void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) { struct uwb_rsv_move *mv = &rsv->mv; if (rsv->state == new_state) { switch (rsv->state) { case UWB_RSV_STATE_O_ESTABLISHED: case UWB_RSV_STATE_O_MOVE_EXPANDING: case UWB_RSV_STATE_O_MOVE_COMBINING: case UWB_RSV_STATE_O_MOVE_REDUCING: case UWB_RSV_STATE_T_ACCEPTED: case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: case UWB_RSV_STATE_T_RESIZED: case UWB_RSV_STATE_NONE: uwb_rsv_stroke_timer(rsv); break; default: /* Expecting a state transition so leave timer as-is. */ break; } return; } uwb_rsv_dump("SC", rsv); switch (new_state) { case UWB_RSV_STATE_NONE: uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); uwb_rsv_callback(rsv); break; case UWB_RSV_STATE_O_INITIATED: uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED); break; case UWB_RSV_STATE_O_PENDING: uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); break; case UWB_RSV_STATE_O_MODIFIED: /* in the companion there are the MASes to drop */ bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED); break; case UWB_RSV_STATE_O_ESTABLISHED: if (rsv->state == UWB_RSV_STATE_O_MODIFIED || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) { uwb_drp_avail_release(rsv->rc, &mv->companion_mas); rsv->needs_release_companion_mas = false; } uwb_drp_avail_reserve(rsv->rc, &rsv->mas); uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); uwb_rsv_callback(rsv); break; case UWB_RSV_STATE_O_MOVE_EXPANDING: rsv->needs_release_companion_mas = true; uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); break; case UWB_RSV_STATE_O_MOVE_COMBINING: rsv->needs_release_companion_mas = false; 
uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); rsv->mas.safe += mv->companion_mas.safe; rsv->mas.unsafe += mv->companion_mas.unsafe; uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); break; case UWB_RSV_STATE_O_MOVE_REDUCING: bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); rsv->needs_release_companion_mas = true; rsv->mas.safe = mv->final_mas.safe; rsv->mas.unsafe = mv->final_mas.unsafe; bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS); uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); break; case UWB_RSV_STATE_T_ACCEPTED: case UWB_RSV_STATE_T_RESIZED: rsv->needs_release_companion_mas = false; uwb_drp_avail_reserve(rsv->rc, &rsv->mas); uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); uwb_rsv_callback(rsv); break; case UWB_RSV_STATE_T_DENIED: uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); break; case UWB_RSV_STATE_T_CONFLICT: uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT); break; case UWB_RSV_STATE_T_PENDING: uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING); break; case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: rsv->needs_release_companion_mas = true; uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); break; default: dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", uwb_rsv_state_str(new_state), new_state); } } static void uwb_rsv_handle_timeout_work(struct work_struct *work) { struct uwb_rsv *rsv = container_of(work, struct uwb_rsv, handle_timeout_work); struct uwb_rc *rc = rsv->rc; mutex_lock(&rc->rsvs_mutex); uwb_rsv_dump("TO", rsv); switch (rsv->state) { case UWB_RSV_STATE_O_INITIATED: if (rsv->is_multicast) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); goto unlock; } break; case UWB_RSV_STATE_O_MOVE_EXPANDING: if (rsv->is_multicast) { uwb_rsv_set_state(rsv, 
UWB_RSV_STATE_O_MOVE_COMBINING); goto unlock; } break; case UWB_RSV_STATE_O_MOVE_COMBINING: if (rsv->is_multicast) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); goto unlock; } break; case UWB_RSV_STATE_O_MOVE_REDUCING: if (rsv->is_multicast) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); goto unlock; } break; case UWB_RSV_STATE_O_ESTABLISHED: if (rsv->is_multicast) goto unlock; break; case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: /* * The time out could be for the main or of the * companion DRP, assume it's for the companion and * drop that first. A further time out is required to * drop the main. */ uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); goto unlock; default: break; } uwb_rsv_remove(rsv); unlock: mutex_unlock(&rc->rsvs_mutex); } static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) { struct uwb_rsv *rsv; rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL); if (!rsv) return NULL; INIT_LIST_HEAD(&rsv->rc_node); INIT_LIST_HEAD(&rsv->pal_node); kref_init(&rsv->kref); init_timer(&rsv->timer); rsv->timer.function = uwb_rsv_timer; rsv->timer.data = (unsigned long)rsv; rsv->rc = rc; INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work); return rsv; } /** * uwb_rsv_create - allocate and initialize a UWB reservation structure * @rc: the radio controller * @cb: callback to use when the reservation completes or terminates * @pal_priv: data private to the PAL to be passed in the callback * * The callback is called when the state of the reservation changes from: * * - pending to accepted * - pending to denined * - accepted to terminated * - pending to terminated */ struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv) { struct uwb_rsv *rsv; rsv = uwb_rsv_alloc(rc); if (!rsv) return NULL; rsv->callback = cb; rsv->pal_priv = pal_priv; return rsv; } EXPORT_SYMBOL_GPL(uwb_rsv_create); void uwb_rsv_remove(struct uwb_rsv *rsv) { uwb_rsv_dump("RM", rsv); if 
(rsv->state != UWB_RSV_STATE_NONE) uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); if (rsv->needs_release_companion_mas) uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); uwb_drp_avail_release(rsv->rc, &rsv->mas); if (uwb_rsv_is_owner(rsv)) uwb_rsv_put_stream(rsv); uwb_dev_put(rsv->owner); if (rsv->target.type == UWB_RSV_TARGET_DEV) uwb_dev_put(rsv->target.dev); list_del_init(&rsv->rc_node); uwb_rsv_put(rsv); } /** * uwb_rsv_destroy - free a UWB reservation structure * @rsv: the reservation to free * * The reservation must already be terminated. */ void uwb_rsv_destroy(struct uwb_rsv *rsv) { uwb_rsv_put(rsv); } EXPORT_SYMBOL_GPL(uwb_rsv_destroy); /** * usb_rsv_establish - start a reservation establishment * @rsv: the reservation * * The PAL should fill in @rsv's owner, target, type, max_mas, * min_mas, max_interval and is_multicast fields. If the target is a * uwb_dev it must be referenced. * * The reservation's callback will be called when the reservation is * accepted, denied or times out. 
*/ int uwb_rsv_establish(struct uwb_rsv *rsv) { struct uwb_rc *rc = rsv->rc; struct uwb_mas_bm available; int ret; mutex_lock(&rc->rsvs_mutex); ret = uwb_rsv_get_stream(rsv); if (ret) goto out; rsv->tiebreaker = random32() & 1; /* get available mas bitmap */ uwb_drp_available(rc, &available); ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas); if (ret == UWB_RSV_ALLOC_NOT_FOUND) { ret = -EBUSY; uwb_rsv_put_stream(rsv); goto out; } ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas); if (ret != 0) { uwb_rsv_put_stream(rsv); goto out; } uwb_rsv_get(rsv); list_add_tail(&rsv->rc_node, &rc->reservations); rsv->owner = &rc->uwb_dev; uwb_dev_get(rsv->owner); uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED); out: mutex_unlock(&rc->rsvs_mutex); return ret; } EXPORT_SYMBOL_GPL(uwb_rsv_establish); /** * uwb_rsv_modify - modify an already established reservation * @rsv: the reservation to modify * @max_mas: new maximum MAS to reserve * @min_mas: new minimum MAS to reserve * @max_interval: new max_interval to use * * FIXME: implement this once there are PALs that use it. 
*/ int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval) { return -ENOSYS; } EXPORT_SYMBOL_GPL(uwb_rsv_modify); /* * move an already established reservation (rc->rsvs_mutex must to be * taken when tis function is called) */ int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available) { struct uwb_rc *rc = rsv->rc; struct uwb_drp_backoff_win *bow = &rc->bow; struct device *dev = &rc->uwb_dev.dev; struct uwb_rsv_move *mv; int ret = 0; if (bow->can_reserve_extra_mases == false) return -EBUSY; mv = &rsv->mv; if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) { if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) { /* We want to move the reservation */ bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS); uwb_drp_avail_reserve_pending(rc, &mv->companion_mas); uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); } } else { dev_dbg(dev, "new allocation not found\n"); } return ret; } /* It will try to move every reservation in state O_ESTABLISHED giving * to the MAS allocator algorithm an availability that is the real one * plus the allocation already established from the reservation. */ void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc) { struct uwb_drp_backoff_win *bow = &rc->bow; struct uwb_rsv *rsv; struct uwb_mas_bm mas; if (bow->can_reserve_extra_mases == false) return; list_for_each_entry(rsv, &rc->reservations, rc_node) { if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED || rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) { uwb_drp_available(rc, &mas); bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS); uwb_rsv_try_move(rsv, &mas); } } } /** * uwb_rsv_terminate - terminate an established reservation * @rsv: the reservation to terminate * * A reservation is terminated by removing the DRP IE from the beacon, * the other end will consider the reservation to be terminated when * it does not see the DRP IE for at least mMaxLostBeacons. 
* * If applicable, the reference to the target uwb_dev will be released. */ void uwb_rsv_terminate(struct uwb_rsv *rsv) { struct uwb_rc *rc = rsv->rc; mutex_lock(&rc->rsvs_mutex); if (rsv->state != UWB_RSV_STATE_NONE) uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); mutex_unlock(&rc->rsvs_mutex); } EXPORT_SYMBOL_GPL(uwb_rsv_terminate); /** * uwb_rsv_accept - accept a new reservation from a peer * @rsv: the reservation * @cb: call back for reservation changes * @pal_priv: data to be passed in the above call back * * Reservation requests from peers are denied unless a PAL accepts it * by calling this function. * * The PAL call uwb_rsv_destroy() for all accepted reservations before * calling uwb_pal_unregister(). */ void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) { uwb_rsv_get(rsv); rsv->callback = cb; rsv->pal_priv = pal_priv; rsv->state = UWB_RSV_STATE_T_ACCEPTED; } EXPORT_SYMBOL_GPL(uwb_rsv_accept); /* * Is a received DRP IE for this reservation? */ static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src, struct uwb_ie_drp *drp_ie) { struct uwb_dev_addr *rsv_src; int stream; stream = uwb_ie_drp_stream_index(drp_ie); if (rsv->stream != stream) return false; switch (rsv->target.type) { case UWB_RSV_TARGET_DEVADDR: return rsv->stream == stream; case UWB_RSV_TARGET_DEV: if (uwb_ie_drp_owner(drp_ie)) rsv_src = &rsv->owner->dev_addr; else rsv_src = &rsv->target.dev->dev_addr; return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0; } return false; } static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, struct uwb_dev *src, struct uwb_ie_drp *drp_ie) { struct uwb_rsv *rsv; struct uwb_pal *pal; enum uwb_rsv_state state; rsv = uwb_rsv_alloc(rc); if (!rsv) return NULL; rsv->rc = rc; rsv->owner = src; uwb_dev_get(rsv->owner); rsv->target.type = UWB_RSV_TARGET_DEV; rsv->target.dev = &rc->uwb_dev; uwb_dev_get(&rc->uwb_dev); rsv->type = uwb_ie_drp_type(drp_ie); rsv->stream = uwb_ie_drp_stream_index(drp_ie); uwb_drp_ie_to_bm(&rsv->mas, 
drp_ie); /* * See if any PALs are interested in this reservation. If not, * deny the request. */ rsv->state = UWB_RSV_STATE_T_DENIED; mutex_lock(&rc->uwb_dev.mutex); list_for_each_entry(pal, &rc->pals, node) { if (pal->new_rsv) pal->new_rsv(pal, rsv); if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) break; } mutex_unlock(&rc->uwb_dev.mutex); list_add_tail(&rsv->rc_node, &rc->reservations); state = rsv->state; rsv->state = UWB_RSV_STATE_NONE; /* FIXME: do something sensible here */ if (state == UWB_RSV_STATE_T_ACCEPTED && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) { /* FIXME: do something sensible here */ } else { uwb_rsv_set_state(rsv, state); } return rsv; } /** * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservations * @rsv: the reservation. * @mas: returns the available MAS. * * The usable MAS of a reservation may be less than the negotiated MAS * if alien BPs are present. */ void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas) { bitmap_zero(mas->bm, UWB_NUM_MAS); bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); } EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas); /** * uwb_rsv_find - find a reservation for a received DRP IE. * @rc: the radio controller * @src: source of the DRP IE * @drp_ie: the DRP IE * * If the reservation cannot be found and the DRP IE is from a peer * attempting to establish a new reservation, create a new reservation * and add it to the list. */ struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, struct uwb_ie_drp *drp_ie) { struct uwb_rsv *rsv; list_for_each_entry(rsv, &rc->reservations, rc_node) { if (uwb_rsv_match(rsv, src, drp_ie)) return rsv; } if (uwb_ie_drp_owner(drp_ie)) return uwb_rsv_new_target(rc, src, drp_ie); return NULL; } /* * Go through all the reservations and check for timeouts and (if * necessary) update their DRP IEs. 
* * FIXME: look at building the SET_DRP_IE command here rather than * having to rescan the list in uwb_rc_send_all_drp_ie(). */ static bool uwb_rsv_update_all(struct uwb_rc *rc) { struct uwb_rsv *rsv, *t; bool ie_updated = false; list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { if (!rsv->ie_valid) { uwb_drp_ie_update(rsv); ie_updated = true; } } return ie_updated; } void uwb_rsv_queue_update(struct uwb_rc *rc) { unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us)); } /** * uwb_rsv_sched_update - schedule an update of the DRP IEs * @rc: the radio controller. * * To improve performance and ensure correctness with [ECMA-368] the * number of SET-DRP-IE commands that are done are limited. * * DRP IEs update come from two sources: DRP events from the hardware * which all occur at the beginning of the superframe ('syncronous' * events) and reservation establishment/termination requests from * PALs or timers ('asynchronous' events). * * A delayed work ensures that all the synchronous events result in * one SET-DRP-IE command. * * Additional logic (the set_drp_ie_pending and rsv_updated_postponed * flags) will prevent an asynchrous event starting a SET-DRP-IE * command if one is currently awaiting a response. * * FIXME: this does leave a window where an asynchrous event can delay * the SET-DRP-IE for a synchronous event by one superframe. */ void uwb_rsv_sched_update(struct uwb_rc *rc) { spin_lock_bh(&rc->rsvs_lock); if (!delayed_work_pending(&rc->rsv_update_work)) { if (rc->set_drp_ie_pending > 0) { rc->set_drp_ie_pending++; goto unlock; } uwb_rsv_queue_update(rc); } unlock: spin_unlock_bh(&rc->rsvs_lock); } /* * Update DRP IEs and, if necessary, the DRP Availability IE and send * the updated IEs to the radio controller. 
*/ static void uwb_rsv_update_work(struct work_struct *work) { struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work.work); bool ie_updated; mutex_lock(&rc->rsvs_mutex); ie_updated = uwb_rsv_update_all(rc); if (!rc->drp_avail.ie_valid) { uwb_drp_avail_ie_update(rc); ie_updated = true; } if (ie_updated && (rc->set_drp_ie_pending == 0)) uwb_rc_send_all_drp_ie(rc); mutex_unlock(&rc->rsvs_mutex); } static void uwb_rsv_alien_bp_work(struct work_struct *work) { struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_alien_bp_work.work); struct uwb_rsv *rsv; mutex_lock(&rc->rsvs_mutex); list_for_each_entry(rsv, &rc->reservations, rc_node) { if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) { rsv->callback(rsv); } } mutex_unlock(&rc->rsvs_mutex); } static void uwb_rsv_timer(unsigned long arg) { struct uwb_rsv *rsv = (struct uwb_rsv *)arg; queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work); } /** * uwb_rsv_remove_all - remove all reservations * @rc: the radio controller * * A DRP IE update is not done. */ void uwb_rsv_remove_all(struct uwb_rc *rc) { struct uwb_rsv *rsv, *t; mutex_lock(&rc->rsvs_mutex); list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { if (rsv->state != UWB_RSV_STATE_NONE) uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); del_timer_sync(&rsv->timer); } /* Cancel any postponed update. 
*/ rc->set_drp_ie_pending = 0; mutex_unlock(&rc->rsvs_mutex); cancel_delayed_work_sync(&rc->rsv_update_work); flush_workqueue(rc->rsv_workq); mutex_lock(&rc->rsvs_mutex); list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { uwb_rsv_remove(rsv); } mutex_unlock(&rc->rsvs_mutex); } void uwb_rsv_init(struct uwb_rc *rc) { INIT_LIST_HEAD(&rc->reservations); INIT_LIST_HEAD(&rc->cnflt_alien_list); mutex_init(&rc->rsvs_mutex); spin_lock_init(&rc->rsvs_lock); INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work); INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work); rc->bow.can_reserve_extra_mases = true; rc->bow.total_expired = 0; rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1; init_timer(&rc->bow.timer); rc->bow.timer.function = uwb_rsv_backoff_win_timer; rc->bow.timer.data = (unsigned long)&rc->bow; bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); } int uwb_rsv_setup(struct uwb_rc *rc) { char name[16]; snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev)); rc->rsv_workq = create_singlethread_workqueue(name); if (rc->rsv_workq == NULL) return -ENOMEM; return 0; } void uwb_rsv_cleanup(struct uwb_rc *rc) { uwb_rsv_remove_all(rc); destroy_workqueue(rc->rsv_workq); }
gpl-2.0
lani11/Potsy_Kernel
drivers/isdn/hisax/telespci.c
4236
9147
/* $Id: telespci.c,v 2.23.2.3 2004/01/13 14:31:26 keil Exp $
 *
 * low level stuff for Teles PCI isdn cards
 *
 * Author       Ton van Rosmalen
 *              Karsten Keil
 * Copyright    by Ton van Rosmalen
 *              by Karsten Keil      <keil@isdn4linux.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/init.h>
#include "hisax.h"
#include "isac.h"
#include "hscx.h"
#include "isdnl1.h"
#include <linux/pci.h>

static const char *telespci_revision = "$Revision: 2.23.2.3 $";

/* Zoran PostOffice register bits */
#define ZORAN_PO_RQ_PEN	0x02000000
#define ZORAN_PO_WR	0x00800000
#define ZORAN_PO_GID0	0x00000000
#define ZORAN_PO_GID1	0x00100000
#define ZORAN_PO_GREG0	0x00000000
#define ZORAN_PO_GREG1	0x00010000
#define ZORAN_PO_DMASK	0xFF

#define WRITE_ADDR_ISAC	(ZORAN_PO_WR | ZORAN_PO_GID0 | ZORAN_PO_GREG0)
#define READ_DATA_ISAC	(ZORAN_PO_GID0 | ZORAN_PO_GREG1)
#define WRITE_DATA_ISAC	(ZORAN_PO_WR | ZORAN_PO_GID0 | ZORAN_PO_GREG1)
#define WRITE_ADDR_HSCX	(ZORAN_PO_WR | ZORAN_PO_GID1 | ZORAN_PO_GREG0)
#define READ_DATA_HSCX	(ZORAN_PO_GID1 | ZORAN_PO_GREG1)
#define WRITE_DATA_HSCX	(ZORAN_PO_WR | ZORAN_PO_GID1 | ZORAN_PO_GREG1)

/* Spin until the PostOffice request-pending bit clears; leaves the last
 * register value in the local "portdata". */
#define ZORAN_WAIT_NOBUSY	do {				\
		portdata = readl(adr + 0x200);			\
	} while (portdata & ZORAN_PO_RQ_PEN)

static inline u_char
readisac(void __iomem *adr, u_char off)
{
	unsigned int portdata;

	ZORAN_WAIT_NOBUSY;
	/* set address for ISAC */
	writel(WRITE_ADDR_ISAC | off, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	/* read data from ISAC */
	writel(READ_DATA_ISAC, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	return (u_char)(portdata & ZORAN_PO_DMASK);
}

static inline void
writeisac(void __iomem *adr, u_char off, u_char data)
{
	unsigned int portdata;

	ZORAN_WAIT_NOBUSY;
	/* set address for ISAC */
	writel(WRITE_ADDR_ISAC | off, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	/* write data to ISAC */
	writel(WRITE_DATA_ISAC | data, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
}

static inline u_char
readhscx(void __iomem *adr, int hscx, u_char off)
{
	unsigned int portdata;

	ZORAN_WAIT_NOBUSY;
	/* set address for HSCX */
	writel(WRITE_ADDR_HSCX | ((hscx ? 0x40 : 0) + off), adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	/* read data from HSCX */
	writel(READ_DATA_HSCX, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	return (u_char)(portdata & ZORAN_PO_DMASK);
}

static inline void
writehscx(void __iomem *adr, int hscx, u_char off, u_char data)
{
	unsigned int portdata;

	ZORAN_WAIT_NOBUSY;
	/* set address for HSCX */
	writel(WRITE_ADDR_HSCX | ((hscx ? 0x40 : 0) + off), adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	/* write data to HSCX */
	writel(WRITE_DATA_HSCX | data, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
}

static inline void
read_fifo_isac(void __iomem *adr, u_char *data, int size)
{
	unsigned int portdata;
	int i;

	ZORAN_WAIT_NOBUSY;
	/* read data from ISAC */
	for (i = 0; i < size; i++) {
		/* set address for ISAC fifo */
		writel(WRITE_ADDR_ISAC | 0x1E, adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		writel(READ_DATA_ISAC, adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		data[i] = (u_char)(portdata & ZORAN_PO_DMASK);
	}
}

static void
write_fifo_isac(void __iomem *adr, u_char *data, int size)
{
	unsigned int portdata;
	int i;

	ZORAN_WAIT_NOBUSY;
	/* write data to ISAC */
	for (i = 0; i < size; i++) {
		/* set address for ISAC fifo */
		writel(WRITE_ADDR_ISAC | 0x1E, adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		writel(WRITE_DATA_ISAC | data[i], adr + 0x200);
		ZORAN_WAIT_NOBUSY;
	}
}

static inline void
read_fifo_hscx(void __iomem *adr, int hscx, u_char *data, int size)
{
	unsigned int portdata;
	int i;

	ZORAN_WAIT_NOBUSY;
	/* read data from HSCX */
	for (i = 0; i < size; i++) {
		/* set address for HSCX fifo */
		writel(WRITE_ADDR_HSCX | (hscx ? 0x5F : 0x1F), adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		writel(READ_DATA_HSCX, adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		data[i] = (u_char)(portdata & ZORAN_PO_DMASK);
	}
}

static inline void
write_fifo_hscx(void __iomem *adr, int hscx, u_char *data, int size)
{
	unsigned int portdata;
	int i;

	ZORAN_WAIT_NOBUSY;
	/* write data to HSCX */
	for (i = 0; i < size; i++) {
		/* set address for HSCX fifo */
		writel(WRITE_ADDR_HSCX | (hscx ? 0x5F : 0x1F), adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		writel(WRITE_DATA_HSCX | data[i], adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		udelay(10);
	}
}

/* Interface functions */

static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
	return readisac(cs->hw.teles0.membase, offset);
}

static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writeisac(cs->hw.teles0.membase, offset, value);
}

static void
ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
	read_fifo_isac(cs->hw.teles0.membase, data, size);
}

static void
WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
	write_fifo_isac(cs->hw.teles0.membase, data, size);
}

static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
	return readhscx(cs->hw.teles0.membase, hscx, offset);
}

static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
	writehscx(cs->hw.teles0.membase, hscx, offset, value);
}

/*
 * fast interrupt HSCX stuff goes here
 */

#define READHSCX(cs, nr, reg) readhscx(cs->hw.teles0.membase, nr, reg)
#define WRITEHSCX(cs, nr, reg, data) writehscx(cs->hw.teles0.membase, nr, reg, data)
#define READHSCXFIFO(cs, nr, ptr, cnt) read_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) write_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt)

#include "hscx_irq.c"

static irqreturn_t
telespci_interrupt(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_char hval, ival;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	hval = readhscx(cs->hw.teles0.membase, 1, HSCX_ISTA);
	if (hval)
		hscx_int_main(cs, hval);
	ival = readisac(cs->hw.teles0.membase, ISAC_ISTA);
	if ((hval | ival) == 0) {
		/* Neither chip raised anything: not our interrupt. */
		spin_unlock_irqrestore(&cs->lock, flags);
		return IRQ_NONE;
	}
	if (ival)
		isac_interrupt(cs, ival);
	/* Clear interrupt register for Zoran PCI controller */
	writel(0x70000000, cs->hw.teles0.membase + 0x3C);

	/* Mask then unmask both chips to re-arm their interrupt lines. */
	writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0xFF);
	writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0xFF);
	writeisac(cs->hw.teles0.membase, ISAC_MASK, 0xFF);
	writeisac(cs->hw.teles0.membase, ISAC_MASK, 0x0);
	writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0x0);
	writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0x0);
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}

static void
release_io_telespci(struct IsdnCardState *cs)
{
	iounmap(cs->hw.teles0.membase);
}

static int
TelesPCI_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	switch (mt) {
	case CARD_RESET:
		return 0;
	case CARD_RELEASE:
		release_io_telespci(cs);
		return 0;
	case CARD_INIT:
		spin_lock_irqsave(&cs->lock, flags);
		inithscxisac(cs, 3);
		spin_unlock_irqrestore(&cs->lock, flags);
		return 0;
	case CARD_TEST:
		return 0;
	}
	return 0;
}

static struct pci_dev *dev_tel __devinitdata = NULL;

int __devinit
setup_telespci(struct IsdnCard *card)
{
	struct IsdnCardState *cs = card->cs;
	char tmp[64];

#ifdef __BIG_ENDIAN
#error "not running on big endian machines now"
#endif
	strcpy(tmp, telespci_revision);
	printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ != ISDN_CTYPE_TELESPCI)
		return 0;

	if ((dev_tel = hisax_find_pci_device(PCI_VENDOR_ID_ZORAN,
					     PCI_DEVICE_ID_ZORAN_36120,
					     dev_tel))) {
		if (pci_enable_device(dev_tel))
			return 0;
		cs->irq = dev_tel->irq;
		if (!cs->irq) {
			printk(KERN_WARNING "Teles: No IRQ for PCI card found\n");
			return 0;
		}
		/* NOTE(review): the ioremap() result is not checked for
		 * NULL before use below — confirm whether that is
		 * acceptable here. */
		cs->hw.teles0.membase = ioremap(pci_resource_start(dev_tel, 0),
						PAGE_SIZE);
		printk(KERN_INFO "Found: Zoran, base-address: 0x%llx, irq: 0x%x\n",
		       (unsigned long long)pci_resource_start(dev_tel, 0),
		       dev_tel->irq);
	} else {
		printk(KERN_WARNING "TelesPCI: No PCI card found\n");
		return 0;
	}

	/* Initialize Zoran PCI controller */
	writel(0x00000000, cs->hw.teles0.membase + 0x28);
	writel(0x01000000, cs->hw.teles0.membase + 0x28);
	writel(0x01000000, cs->hw.teles0.membase + 0x28);
	writel(0x7BFFFFFF, cs->hw.teles0.membase + 0x2C);
	writel(0x70000000, cs->hw.teles0.membase + 0x3C);
	writel(0x61000000, cs->hw.teles0.membase + 0x40);
	/* writel(0x00800000, cs->hw.teles0.membase + 0x200); */

	printk(KERN_INFO "HiSax: Teles PCI config irq:%d mem:%p\n",
	       cs->irq, cs->hw.teles0.membase);

	setup_isac(cs);
	cs->readisac = &ReadISAC;
	cs->writeisac = &WriteISAC;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;
	cs->BC_Read_Reg = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &TelesPCI_card_msg;
	cs->irq_func = &telespci_interrupt;
	cs->irq_flags |= IRQF_SHARED;
	ISACVersion(cs, "TelesPCI:");
	if (HscxVersion(cs, "TelesPCI:")) {
		printk(KERN_WARNING "TelesPCI: wrong HSCX versions check IO/MEM addresses\n");
		release_io_telespci(cs);
		return 0;
	}
	return 1;
}
gpl-2.0
Tommy-Geenexus/android_kernel_sony_msm8974_togari_5.x
arch/unicore32/kernel/signal.c
4492
15127
/* * linux/arch/unicore32/kernel/signal.c * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/errno.h> #include <linux/signal.h> #include <linux/personality.h> #include <linux/freezer.h> #include <linux/uaccess.h> #include <linux/tracehook.h> #include <linux/elf.h> #include <linux/unistd.h> #include <asm/cacheflush.h> #include <asm/ucontext.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) /* * For UniCore syscalls, we encode the syscall number into the instruction. */ #define SWI_SYS_SIGRETURN (0xff000000) /* error number for new abi */ #define SWI_SYS_RT_SIGRETURN (0xff000000 | (__NR_rt_sigreturn)) #define SWI_SYS_RESTART (0xff000000 | (__NR_restart_syscall)) #define KERN_SIGRETURN_CODE (KUSER_VECPAGE_BASE + 0x00000500) #define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes)) const unsigned long sigreturn_codes[3] = { SWI_SYS_SIGRETURN, SWI_SYS_RT_SIGRETURN, }; const unsigned long syscall_restart_code[2] = { SWI_SYS_RESTART, /* swi __NR_restart_syscall */ 0x69efc004, /* ldr pc, [sp], #4 */ }; /* * Do a signal return; undo the signal stack. These are aligned to 64-bit. 
*/ struct sigframe { struct ucontext uc; unsigned long retcode[2]; }; struct rt_sigframe { struct siginfo info; struct sigframe sig; }; static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) { sigset_t set; int err; err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); if (err == 0) { sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); } err |= __get_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00); err |= __get_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01); err |= __get_user(regs->UCreg_02, &sf->uc.uc_mcontext.regs.UCreg_02); err |= __get_user(regs->UCreg_03, &sf->uc.uc_mcontext.regs.UCreg_03); err |= __get_user(regs->UCreg_04, &sf->uc.uc_mcontext.regs.UCreg_04); err |= __get_user(regs->UCreg_05, &sf->uc.uc_mcontext.regs.UCreg_05); err |= __get_user(regs->UCreg_06, &sf->uc.uc_mcontext.regs.UCreg_06); err |= __get_user(regs->UCreg_07, &sf->uc.uc_mcontext.regs.UCreg_07); err |= __get_user(regs->UCreg_08, &sf->uc.uc_mcontext.regs.UCreg_08); err |= __get_user(regs->UCreg_09, &sf->uc.uc_mcontext.regs.UCreg_09); err |= __get_user(regs->UCreg_10, &sf->uc.uc_mcontext.regs.UCreg_10); err |= __get_user(regs->UCreg_11, &sf->uc.uc_mcontext.regs.UCreg_11); err |= __get_user(regs->UCreg_12, &sf->uc.uc_mcontext.regs.UCreg_12); err |= __get_user(regs->UCreg_13, &sf->uc.uc_mcontext.regs.UCreg_13); err |= __get_user(regs->UCreg_14, &sf->uc.uc_mcontext.regs.UCreg_14); err |= __get_user(regs->UCreg_15, &sf->uc.uc_mcontext.regs.UCreg_15); err |= __get_user(regs->UCreg_16, &sf->uc.uc_mcontext.regs.UCreg_16); err |= __get_user(regs->UCreg_17, &sf->uc.uc_mcontext.regs.UCreg_17); err |= __get_user(regs->UCreg_18, &sf->uc.uc_mcontext.regs.UCreg_18); err |= __get_user(regs->UCreg_19, &sf->uc.uc_mcontext.regs.UCreg_19); err |= __get_user(regs->UCreg_20, &sf->uc.uc_mcontext.regs.UCreg_20); err |= __get_user(regs->UCreg_21, &sf->uc.uc_mcontext.regs.UCreg_21); err |= __get_user(regs->UCreg_22, &sf->uc.uc_mcontext.regs.UCreg_22); err |= 
__get_user(regs->UCreg_23, &sf->uc.uc_mcontext.regs.UCreg_23); err |= __get_user(regs->UCreg_24, &sf->uc.uc_mcontext.regs.UCreg_24); err |= __get_user(regs->UCreg_25, &sf->uc.uc_mcontext.regs.UCreg_25); err |= __get_user(regs->UCreg_26, &sf->uc.uc_mcontext.regs.UCreg_26); err |= __get_user(regs->UCreg_fp, &sf->uc.uc_mcontext.regs.UCreg_fp); err |= __get_user(regs->UCreg_ip, &sf->uc.uc_mcontext.regs.UCreg_ip); err |= __get_user(regs->UCreg_sp, &sf->uc.uc_mcontext.regs.UCreg_sp); err |= __get_user(regs->UCreg_lr, &sf->uc.uc_mcontext.regs.UCreg_lr); err |= __get_user(regs->UCreg_pc, &sf->uc.uc_mcontext.regs.UCreg_pc); err |= __get_user(regs->UCreg_asr, &sf->uc.uc_mcontext.regs.UCreg_asr); err |= !valid_user_regs(regs); return err; } asmlinkage int __sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 64-bit boundary, * then 'sp' should be word aligned here. If it's * not, then the user is trying to mess with us. 
*/ if (regs->UCreg_sp & 7) goto badframe; frame = (struct rt_sigframe __user *)regs->UCreg_sp; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (restore_sigframe(regs, &frame->sig)) goto badframe; if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->UCreg_sp) == -EFAULT) goto badframe; return regs->UCreg_00; badframe: force_sig(SIGSEGV, current); return 0; } static int setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { int err = 0; err |= __put_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00); err |= __put_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01); err |= __put_user(regs->UCreg_02, &sf->uc.uc_mcontext.regs.UCreg_02); err |= __put_user(regs->UCreg_03, &sf->uc.uc_mcontext.regs.UCreg_03); err |= __put_user(regs->UCreg_04, &sf->uc.uc_mcontext.regs.UCreg_04); err |= __put_user(regs->UCreg_05, &sf->uc.uc_mcontext.regs.UCreg_05); err |= __put_user(regs->UCreg_06, &sf->uc.uc_mcontext.regs.UCreg_06); err |= __put_user(regs->UCreg_07, &sf->uc.uc_mcontext.regs.UCreg_07); err |= __put_user(regs->UCreg_08, &sf->uc.uc_mcontext.regs.UCreg_08); err |= __put_user(regs->UCreg_09, &sf->uc.uc_mcontext.regs.UCreg_09); err |= __put_user(regs->UCreg_10, &sf->uc.uc_mcontext.regs.UCreg_10); err |= __put_user(regs->UCreg_11, &sf->uc.uc_mcontext.regs.UCreg_11); err |= __put_user(regs->UCreg_12, &sf->uc.uc_mcontext.regs.UCreg_12); err |= __put_user(regs->UCreg_13, &sf->uc.uc_mcontext.regs.UCreg_13); err |= __put_user(regs->UCreg_14, &sf->uc.uc_mcontext.regs.UCreg_14); err |= __put_user(regs->UCreg_15, &sf->uc.uc_mcontext.regs.UCreg_15); err |= __put_user(regs->UCreg_16, &sf->uc.uc_mcontext.regs.UCreg_16); err |= __put_user(regs->UCreg_17, &sf->uc.uc_mcontext.regs.UCreg_17); err |= __put_user(regs->UCreg_18, &sf->uc.uc_mcontext.regs.UCreg_18); err |= __put_user(regs->UCreg_19, &sf->uc.uc_mcontext.regs.UCreg_19); err |= __put_user(regs->UCreg_20, &sf->uc.uc_mcontext.regs.UCreg_20); err |= __put_user(regs->UCreg_21, 
&sf->uc.uc_mcontext.regs.UCreg_21); err |= __put_user(regs->UCreg_22, &sf->uc.uc_mcontext.regs.UCreg_22); err |= __put_user(regs->UCreg_23, &sf->uc.uc_mcontext.regs.UCreg_23); err |= __put_user(regs->UCreg_24, &sf->uc.uc_mcontext.regs.UCreg_24); err |= __put_user(regs->UCreg_25, &sf->uc.uc_mcontext.regs.UCreg_25); err |= __put_user(regs->UCreg_26, &sf->uc.uc_mcontext.regs.UCreg_26); err |= __put_user(regs->UCreg_fp, &sf->uc.uc_mcontext.regs.UCreg_fp); err |= __put_user(regs->UCreg_ip, &sf->uc.uc_mcontext.regs.UCreg_ip); err |= __put_user(regs->UCreg_sp, &sf->uc.uc_mcontext.regs.UCreg_sp); err |= __put_user(regs->UCreg_lr, &sf->uc.uc_mcontext.regs.UCreg_lr); err |= __put_user(regs->UCreg_pc, &sf->uc.uc_mcontext.regs.UCreg_pc); err |= __put_user(regs->UCreg_asr, &sf->uc.uc_mcontext.regs.UCreg_asr); err |= __put_user(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no); err |= __put_user(current->thread.error_code, &sf->uc.uc_mcontext.error_code); err |= __put_user(current->thread.address, &sf->uc.uc_mcontext.fault_address); err |= __put_user(set->sig[0], &sf->uc.uc_mcontext.oldmask); err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); return err; } static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize) { unsigned long sp = regs->UCreg_sp; void __user *frame; /* * This is the X/Open sanctioned signal stack switching. */ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) sp = current->sas_ss_sp + current->sas_ss_size; /* * ATPCS B01 mandates 8-byte alignment */ frame = (void __user *)((sp - framesize) & ~7); /* * Check that we can actually write to the signal frame. 
*/ if (!access_ok(VERIFY_WRITE, frame, framesize)) frame = NULL; return frame; } static int setup_return(struct pt_regs *regs, struct k_sigaction *ka, unsigned long __user *rc, void __user *frame, int usig) { unsigned long handler = (unsigned long)ka->sa.sa_handler; unsigned long retcode; unsigned long asr = regs->UCreg_asr & ~PSR_f; unsigned int idx = 0; if (ka->sa.sa_flags & SA_SIGINFO) idx += 1; if (__put_user(sigreturn_codes[idx], rc) || __put_user(sigreturn_codes[idx+1], rc+1)) return 1; retcode = KERN_SIGRETURN_CODE + (idx << 2); regs->UCreg_00 = usig; regs->UCreg_sp = (unsigned long)frame; regs->UCreg_lr = retcode; regs->UCreg_pc = handler; regs->UCreg_asr = asr; return 0; } static int setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame)); int err = 0; if (!frame) return 1; /* * Set uc.uc_flags to a value which sc.trap_no would never have. */ err |= __put_user(0x5ac3c35a, &frame->uc.uc_flags); err |= setup_sigframe(frame, regs, set); if (err == 0) err |= setup_return(regs, ka, frame->retcode, frame, usig); return err; } static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame)); stack_t stack; int err = 0; if (!frame) return 1; err |= copy_siginfo_to_user(&frame->info, info); err |= __put_user(0, &frame->sig.uc.uc_flags); err |= __put_user(NULL, &frame->sig.uc.uc_link); memset(&stack, 0, sizeof(stack)); stack.ss_sp = (void __user *)current->sas_ss_sp; stack.ss_flags = sas_ss_flags(regs->UCreg_sp); stack.ss_size = current->sas_ss_size; err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack)); err |= setup_sigframe(&frame->sig, regs, set); if (err == 0) err |= setup_return(regs, ka, frame->sig.retcode, frame, usig); if (err == 0) { /* * For realtime signals we must also set the second and third * arguments for the 
signal handler. */ regs->UCreg_01 = (unsigned long)&frame->info; regs->UCreg_02 = (unsigned long)&frame->sig.uc; } return err; } static inline void setup_syscall_restart(struct pt_regs *regs) { regs->UCreg_00 = regs->UCreg_ORIG_00; regs->UCreg_pc -= 4; } /* * OK, we're invoking a handler */ static int handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs, int syscall) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; sigset_t blocked; int usig = sig; int ret; /* * If we were from a system call, check for system call restarting... */ if (syscall) { switch (regs->UCreg_00) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->UCreg_00 = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { regs->UCreg_00 = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: setup_syscall_restart(regs); } } /* * translate the signal */ if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) usig = thread->exec_domain->signal_invmap[usig]; /* * Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(usig, ka, info, oldset, regs); else ret = setup_frame(usig, ka, oldset, regs); /* * Check that the resulting registers are actually sane. */ ret |= !valid_user_regs(regs); if (ret != 0) { force_sigsegv(sig, tsk); return ret; } /* * Block the signal if we were successful. */ sigorsets(&blocked, &tsk->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&blocked, sig); set_current_blocked(&blocked); return 0; } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. * * Note that we go through the signals twice: once to check the signals that * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. 
*/ static void do_signal(struct pt_regs *regs, int syscall) { struct k_sigaction ka; siginfo_t info; int signr; /* * We want the common case to go fast, which * is why we may in certain cases get here from * kernel mode. Just return without doing anything * if so. */ if (!user_mode(regs)) return; if (try_to_freeze()) goto no_signal; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { sigset_t *oldset; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TIF_RESTORE_SIGMASK flag. */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); } return; } no_signal: /* * No signal to deliver to the process - restart the syscall. */ if (syscall) { if (regs->UCreg_00 == -ERESTART_RESTARTBLOCK) { u32 __user *usp; regs->UCreg_sp -= 4; usp = (u32 __user *)regs->UCreg_sp; if (put_user(regs->UCreg_pc, usp) == 0) { regs->UCreg_pc = KERN_RESTART_CODE; } else { regs->UCreg_sp += 4; force_sigsegv(0, current); } } if (regs->UCreg_00 == -ERESTARTNOHAND || regs->UCreg_00 == -ERESTARTSYS || regs->UCreg_00 == -ERESTARTNOINTR) { setup_syscall_restart(regs); } /* If there's no signal to deliver, we just put the saved * sigmask back. 
*/ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } } } asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall) { if (thread_flags & _TIF_SIGPENDING) do_signal(regs, syscall); if (thread_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); if (current->replacement_session_keyring) key_replace_session_keyring(); } } /* * Copy signal return handlers into the vector page, and * set sigreturn to be a pointer to these. */ void __init early_signal_init(void) { memcpy((void *)kuser_vecpage_to_vectors(KERN_SIGRETURN_CODE), sigreturn_codes, sizeof(sigreturn_codes)); memcpy((void *)kuser_vecpage_to_vectors(KERN_RESTART_CODE), syscall_restart_code, sizeof(syscall_restart_code)); /* Need not to flush icache, since early_trap_init will do it last. */ }
gpl-2.0
kennethlyn/parallella-lcd-linux
arch/arm/mach-footbridge/ebsa285-pci.c
4748
1067
/* * linux/arch/arm/mach-footbridge/ebsa285-pci.c * * PCI bios-type initialisation for PCI machines * * Bits taken from various places. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/irq.h> #include <asm/mach/pci.h> #include <asm/mach-types.h> static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI }; static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (dev->vendor == PCI_VENDOR_ID_CONTAQ && dev->device == PCI_DEVICE_ID_CONTAQ_82C693) switch (PCI_FUNC(dev->devfn)) { case 1: return 14; case 2: return 15; case 3: return 12; } return irqmap_ebsa285[(slot + pin) & 3]; } static struct hw_pci ebsa285_pci __initdata = { .map_irq = ebsa285_map_irq, .nr_controllers = 1, .ops = &dc21285_ops, .setup = dc21285_setup, .preinit = dc21285_preinit, .postinit = dc21285_postinit, }; static int __init ebsa285_init_pci(void) { if (machine_is_ebsa285()) pci_common_init(&ebsa285_pci); return 0; } subsys_initcall(ebsa285_init_pci);
gpl-2.0
lodr/codeaurora_kernel_msm
arch/arm/mach-ixp4xx/wg302v2-setup.c
5004
2574
/* * arch/arm/mach-ixp4xx/wg302-setup.c * * Board setup for the Netgear WG302 v2 and WAG302 v2 * * Copyright (C) 2007 Imre Kaloz <Kaloz@openwrt.org> * * based on coyote-setup.c: * Copyright (C) 2003-2005 MontaVista Software, Inc. * * Author: Imre Kaloz <kaloz@openwrt.org> * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/serial.h> #include <linux/tty.h> #include <linux/serial_8250.h> #include <asm/types.h> #include <asm/setup.h> #include <asm/memory.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> static struct flash_platform_data wg302v2_flash_data = { .map_name = "cfi_probe", .width = 2, }; static struct resource wg302v2_flash_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device wg302v2_flash = { .name = "IXP4XX-Flash", .id = 0, .dev = { .platform_data = &wg302v2_flash_data, }, .num_resources = 1, .resource = &wg302v2_flash_resource, }; static struct resource wg302v2_uart_resource = { .start = IXP4XX_UART2_BASE_PHYS, .end = IXP4XX_UART2_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, }; static struct plat_serial8250_port wg302v2_uart_data[] = { { .mapbase = IXP4XX_UART2_BASE_PHYS, .membase = (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET, .irq = IRQ_IXP4XX_UART2, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { }, }; static struct platform_device wg302v2_uart = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = wg302v2_uart_data, }, .num_resources = 1, .resource = &wg302v2_uart_resource, }; static struct platform_device *wg302v2_devices[] __initdata = { &wg302v2_flash, &wg302v2_uart, }; static void __init wg302v2_init(void) { ixp4xx_sys_init(); wg302v2_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); wg302v2_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_32M - 1; *IXP4XX_EXP_CS0 |= IXP4XX_FLASH_WRITABLE; *IXP4XX_EXP_CS1 = 
*IXP4XX_EXP_CS0; platform_add_devices(wg302v2_devices, ARRAY_SIZE(wg302v2_devices)); } #ifdef CONFIG_MACH_WG302V2 MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2") /* Maintainer: Imre Kaloz <kaloz@openwrt.org> */ .map_io = ixp4xx_map_io, .init_early = ixp4xx_init_early, .init_irq = ixp4xx_init_irq, .timer = &ixp4xx_timer, .atag_offset = 0x100, .init_machine = wg302v2_init, #if defined(CONFIG_PCI) .dma_zone_size = SZ_64M, #endif .restart = ixp4xx_restart, MACHINE_END #endif
gpl-2.0
DirtyDroidX/kernel_jactive
arch/um/drivers/vde_user.c
5004
2398
/* * Copyright (C) 2007 Luca Bigliardi (shammash@artha.org). * Licensed under the GPL. */ #include <stddef.h> #include <errno.h> #include <libvdeplug.h> #include "net_user.h" #include "um_malloc.h" #include "vde.h" static int vde_user_init(void *data, void *dev) { struct vde_data *pri = data; VDECONN *conn = NULL; int err = -EINVAL; pri->dev = dev; conn = vde_open(pri->vde_switch, pri->descr, pri->args); if (conn == NULL) { err = -errno; printk(UM_KERN_ERR "vde_user_init: vde_open failed, " "errno = %d\n", errno); return err; } printk(UM_KERN_INFO "vde backend - connection opened\n"); pri->conn = conn; return 0; } static int vde_user_open(void *data) { struct vde_data *pri = data; if (pri->conn != NULL) return vde_datafd(pri->conn); printk(UM_KERN_WARNING "vde_open - we have no VDECONN to open"); return -EINVAL; } static void vde_remove(void *data) { struct vde_data *pri = data; if (pri->conn != NULL) { printk(UM_KERN_INFO "vde backend - closing connection\n"); vde_close(pri->conn); pri->conn = NULL; kfree(pri->args); pri->args = NULL; return; } printk(UM_KERN_WARNING "vde_remove - we have no VDECONN to remove"); } const struct net_user_info vde_user_info = { .init = vde_user_init, .open = vde_user_open, .close = NULL, .remove = vde_remove, .add_address = NULL, .delete_address = NULL, .mtu = ETH_MAX_PACKET, .max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER, }; void vde_init_libstuff(struct vde_data *vpri, struct vde_init *init) { struct vde_open_args *args; vpri->args = uml_kmalloc(sizeof(struct vde_open_args), UM_GFP_KERNEL); if (vpri->args == NULL) { printk(UM_KERN_ERR "vde_init_libstuff - vde_open_args " "allocation failed"); return; } args = vpri->args; args->port = init->port; args->group = init->group; args->mode = init->mode ? init->mode : 0700; args->port ? 
printk("port %d", args->port) : printk("undefined port"); } int vde_user_read(void *conn, void *buf, int len) { VDECONN *vconn = conn; int rv; if (vconn == NULL) return 0; rv = vde_recv(vconn, buf, len, 0); if (rv < 0) { if (errno == EAGAIN) return 0; return -errno; } else if (rv == 0) return -ENOTCONN; return rv; } int vde_user_write(void *conn, void *buf, int len) { VDECONN *vconn = conn; if (vconn == NULL) return 0; return vde_send(vconn, buf, len, 0); }
gpl-2.0
Sinsoftomorrow/android_kernel_lge_g3
drivers/gpu/drm/radeon/r600_blit_kms.c
5260
24257
/* * Copyright 2009 Advanced Micro Devices, Inc. * Copyright 2009 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* */ #include "drmP.h" #include "drm.h" #include "radeon_drm.h" #include "radeon.h" #include "r600d.h" #include "r600_blit_shaders.h" #include "radeon_blit_common.h" /* emits 21 on rv770+, 23 on r600 */ static void set_render_target(struct radeon_device *rdev, int format, int w, int h, u64 gpu_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 cb_color_info; int pitch, slice; h = ALIGN(h, 8); if (h < 8) h = 8; cb_color_info = CB_FORMAT(format) | CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) | CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); pitch = (w / 8) - 1; slice = ((w * h) / 64) - 1; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, gpu_addr >> 8); if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) { radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0)); radeon_ring_write(ring, 2 << 0); } radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, (pitch << 0) | (slice << 10)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, cb_color_info); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); 
radeon_ring_write(ring, 0); } /* emits 5dw */ static void cp_set_surface_sync(struct radeon_device *rdev, u32 sync_type, u32 size, u64 mc_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 cp_coher_size; if (size == 0xffffffff) cp_coher_size = 0xffffffff; else cp_coher_size = ((size + 255) >> 8); radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); radeon_ring_write(ring, sync_type); radeon_ring_write(ring, cp_coher_size); radeon_ring_write(ring, mc_addr >> 8); radeon_ring_write(ring, 10); /* poll interval */ } /* emits 21dw + 1 surface sync = 26dw */ static void set_shaders(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u64 gpu_addr; u32 sq_pgm_resources; /* setup shader regs */ sq_pgm_resources = (1 << 0); /* VS */ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, sq_pgm_resources); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); /* PS */ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, sq_pgm_resources | (1 << 28)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, 
(SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 2); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); } /* emits 9 + 1 sync (5) = 14*/ static void set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 sq_vtx_constant_word2; sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) | SQ_VTXC_STRIDE(16); #ifdef __BIG_ENDIAN sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32); #endif radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7)); radeon_ring_write(ring, 0x460); radeon_ring_write(ring, gpu_addr & 0xffffffff); radeon_ring_write(ring, 48 - 1); radeon_ring_write(ring, sq_vtx_constant_word2); radeon_ring_write(ring, 1 << 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30); if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV620) || (rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880) || (rdev->family == CHIP_RV710)) cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, 48, gpu_addr); else cp_set_surface_sync(rdev, PACKET3_VC_ACTION_ENA, 48, gpu_addr); } /* emits 9 */ static void set_tex_resource(struct radeon_device *rdev, int format, int w, int h, int pitch, u64 gpu_addr, u32 size) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; if (h < 1) h = 1; sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) | S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) | S_038000_TEX_WIDTH(w - 1); sq_tex_resource_word1 = 
S_038004_DATA_FORMAT(format); sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1); sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) | S_038010_DST_SEL_X(SQ_SEL_X) | S_038010_DST_SEL_Y(SQ_SEL_Y) | S_038010_DST_SEL_Z(SQ_SEL_Z) | S_038010_DST_SEL_W(SQ_SEL_W); cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, size, gpu_addr); radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7)); radeon_ring_write(ring, 0); radeon_ring_write(ring, sq_tex_resource_word0); radeon_ring_write(ring, sq_tex_resource_word1); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, sq_tex_resource_word4); radeon_ring_write(ring, 0); radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30); } /* emits 12 */ static void set_scissors(struct radeon_device *rdev, int x1, int y1, int x2, int y2) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, (x1 << 0) | (y1 << 16)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); } /* emits 10 */ static void draw_auto(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); radeon_ring_write(ring, DI_PT_RECTLIST); 
radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0)); radeon_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 2) | #endif DI_INDEX_SIZE_16_BIT); radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0)); radeon_ring_write(ring, 1); radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); radeon_ring_write(ring, 3); radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX); } /* emits 14 */ static void set_default_state(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2; u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2; int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs; int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; u64 gpu_addr; int dwords; switch (rdev->family) { case CHIP_R600: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV630: case CHIP_RV635: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 40; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV610: case CHIP_RV620: case CHIP_RS780: case CHIP_RS880: default: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV670: num_ps_gprs = 144; num_vs_gprs = 40; num_temp_gprs = 4; 
num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV770: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 256; num_vs_stack_entries = 256; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV730: case CHIP_RV740: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV710: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 48; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; } if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV620) || (rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880) || (rdev->family == CHIP_RV710)) sq_config = 0; else sq_config = VC_ENABLE; sq_config |= (DX9_CONSTS | ALU_INST_PREFER_VECTOR | PS_PRIO(0) | VS_PRIO(1) | GS_PRIO(2) | ES_PRIO(3)); sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) | NUM_VS_GPRS(num_vs_gprs) | NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) | NUM_ES_GPRS(num_es_gprs)); sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) | NUM_VS_THREADS(num_vs_threads) | NUM_GS_THREADS(num_gs_threads) | NUM_ES_THREADS(num_es_threads)); sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); sq_stack_resource_mgmt_2 = 
(NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | NUM_ES_STACK_ENTRIES(num_es_stack_entries)); /* emit an IB pointing at default state */ dwords = ALIGN(rdev->r600_blit.state_len, 0x10); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); radeon_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | #endif (gpu_addr & 0xFFFFFFFC)); radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF); radeon_ring_write(ring, dwords); /* SQ config */ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6)); radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); radeon_ring_write(ring, sq_config); radeon_ring_write(ring, sq_gpr_resource_mgmt_1); radeon_ring_write(ring, sq_gpr_resource_mgmt_2); radeon_ring_write(ring, sq_thread_resource_mgmt); radeon_ring_write(ring, sq_stack_resource_mgmt_1); radeon_ring_write(ring, sq_stack_resource_mgmt_2); } #define I2F_MAX_BITS 15 #define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1) #define I2F_SHIFT (24 - I2F_MAX_BITS) /* * Converts unsigned integer into 32-bit IEEE floating point representation. * Conversion is not universal and only works for the range from 0 * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary, * I2F_MAX_BITS can be increased, but that will add to the loop iterations * and slow us down. Conversion is done by shifting the input and counting * down until the first 1 reaches bit position 23. The resulting counter * and the shifted input are, respectively, the exponent and the fraction. * The sign is always zero. 
*/ static uint32_t i2f(uint32_t input) { u32 result, i, exponent, fraction; WARN_ON_ONCE(input > I2F_MAX_INPUT); if ((input & I2F_MAX_INPUT) == 0) result = 0; else { exponent = 126 + I2F_MAX_BITS; fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT; for (i = 0; i < I2F_MAX_BITS; i++) { if (fraction & 0x800000) break; else { fraction = fraction << 1; exponent = exponent - 1; } } result = exponent << 23 | (fraction & 0x7fffff); } return result; } int r600_blit_init(struct radeon_device *rdev) { u32 obj_size; int i, r, dwords; void *ptr; u32 packet2s[16]; int num_packet2s = 0; rdev->r600_blit.primitives.set_render_target = set_render_target; rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync; rdev->r600_blit.primitives.set_shaders = set_shaders; rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource; rdev->r600_blit.primitives.set_tex_resource = set_tex_resource; rdev->r600_blit.primitives.set_scissors = set_scissors; rdev->r600_blit.primitives.draw_auto = draw_auto; rdev->r600_blit.primitives.set_default_state = set_default_state; rdev->r600_blit.ring_size_common = 40; /* shaders + def state */ rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */ rdev->r600_blit.ring_size_common += 5; /* done copy */ rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ rdev->r600_blit.ring_size_per_loop = 76; /* set_render_target emits 2 extra dwords on rv6xx */ if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) rdev->r600_blit.ring_size_per_loop += 2; rdev->r600_blit.max_dim = 8192; /* pin copy shader into vram if already initialized */ if (rdev->r600_blit.shader_obj) goto done; mutex_init(&rdev->r600_blit.mutex); rdev->r600_blit.state_offset = 0; if (rdev->family >= CHIP_RV770) rdev->r600_blit.state_len = r7xx_default_size; else rdev->r600_blit.state_len = r6xx_default_size; dwords = rdev->r600_blit.state_len; while (dwords & 0xf) { packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); dwords++; } obj_size = dwords * 4; 
obj_size = ALIGN(obj_size, 256); rdev->r600_blit.vs_offset = obj_size; obj_size += r6xx_vs_size * 4; obj_size = ALIGN(obj_size, 256); rdev->r600_blit.ps_offset = obj_size; obj_size += r6xx_ps_size * 4; obj_size = ALIGN(obj_size, 256); r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &rdev->r600_blit.shader_obj); if (r) { DRM_ERROR("r600 failed to allocate shader\n"); return r; } DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n", obj_size, rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (unlikely(r != 0)) return r; r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); if (r) { DRM_ERROR("failed to map blit object %d\n", r); return r; } if (rdev->family >= CHIP_RV770) memcpy_toio(ptr + rdev->r600_blit.state_offset, r7xx_default_state, rdev->r600_blit.state_len * 4); else memcpy_toio(ptr + rdev->r600_blit.state_offset, r6xx_default_state, rdev->r600_blit.state_len * 4); if (num_packet2s) memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), packet2s, num_packet2s * 4); for (i = 0; i < r6xx_vs_size; i++) *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]); for (i = 0; i < r6xx_ps_size; i++) *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]); radeon_bo_kunmap(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); done: r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (unlikely(r != 0)) return r; r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, &rdev->r600_blit.shader_gpu_addr); radeon_bo_unreserve(rdev->r600_blit.shader_obj); if (r) { dev_err(rdev->dev, "(%d) pin blit object failed\n", r); return r; } radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); return 0; } void r600_blit_fini(struct radeon_device *rdev) { int r; radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); if 
(rdev->r600_blit.shader_obj == NULL) return; /* If we can't reserve the bo, unref should be enough to destroy * it when it becomes idle. */ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (!r) { radeon_bo_unpin(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); } radeon_bo_unref(&rdev->r600_blit.shader_obj); } static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size) { int r; r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->r600_blit.vb_ib, size); if (r) { DRM_ERROR("failed to get IB for vertex buffer\n"); return r; } rdev->r600_blit.vb_total = size; rdev->r600_blit.vb_used = 0; return 0; } static void r600_vb_ib_put(struct radeon_device *rdev) { radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); } static unsigned r600_blit_create_rect(unsigned num_gpu_pages, int *width, int *height, int max_dim) { unsigned max_pages; unsigned pages = num_gpu_pages; int w, h; if (num_gpu_pages == 0) { /* not supposed to be called with no pages, but just in case */ h = 0; w = 0; pages = 0; WARN_ON(1); } else { int rect_order = 2; h = RECT_UNIT_H; while (num_gpu_pages / rect_order) { h *= 2; rect_order *= 4; if (h >= max_dim) { h = max_dim; break; } } max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H); if (pages > max_pages) pages = max_pages; w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h; w = (w / RECT_UNIT_W) * RECT_UNIT_W; pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H); BUG_ON(pages == 0); } DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages); /* return width and height only of the caller wants it */ if (height) *height = h; if (width) *width = w; return pages; } int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; int r; int ring_size; int num_loops = 0; int dwords_per_loop = rdev->r600_blit.ring_size_per_loop; /* num loops */ while (num_gpu_pages) { 
num_gpu_pages -= r600_blit_create_rect(num_gpu_pages, NULL, NULL, rdev->r600_blit.max_dim); num_loops++; } /* 48 bytes for vertex per loop */ r = r600_vb_ib_get(rdev, (num_loops*48)+256); if (r) return r; /* calculate number of loops correctly */ ring_size = num_loops * dwords_per_loop; ring_size += rdev->r600_blit.ring_size_common; r = radeon_ring_lock(rdev, ring, ring_size); if (r) return r; rdev->r600_blit.primitives.set_default_state(rdev); rdev->r600_blit.primitives.set_shaders(rdev); return 0; } void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) { int r; if (rdev->r600_blit.vb_ib) r600_vb_ib_put(rdev); if (fence) r = radeon_fence_emit(rdev, fence); radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); } void r600_kms_blit_copy(struct radeon_device *rdev, u64 src_gpu_addr, u64 dst_gpu_addr, unsigned num_gpu_pages) { u64 vb_gpu_addr; u32 *vb; DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, num_gpu_pages, rdev->r600_blit.vb_used); vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); while (num_gpu_pages) { int w, h; unsigned size_in_bytes; unsigned pages_per_loop = r600_blit_create_rect(num_gpu_pages, &w, &h, rdev->r600_blit.max_dim); size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE; DRM_DEBUG("rectangle w=%d h=%d\n", w, h); if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { WARN_ON(1); } vb[0] = 0; vb[1] = 0; vb[2] = 0; vb[3] = 0; vb[4] = 0; vb[5] = i2f(h); vb[6] = 0; vb[7] = i2f(h); vb[8] = i2f(w); vb[9] = i2f(h); vb[10] = i2f(w); vb[11] = i2f(h); rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, w, h, w, src_gpu_addr, size_in_bytes); rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8, w, h, dst_gpu_addr); rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h); vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr); 
rdev->r600_blit.primitives.draw_auto(rdev); rdev->r600_blit.primitives.cp_set_surface_sync(rdev, PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, size_in_bytes, dst_gpu_addr); vb += 12; rdev->r600_blit.vb_used += 4*12; src_gpu_addr += size_in_bytes; dst_gpu_addr += size_in_bytes; num_gpu_pages -= pages_per_loop; } }
gpl-2.0
tudorsirb/lge_kernel_p700
drivers/gpu/drm/nouveau/nvc0_pm.c
5260
9847
/* * Copyright 2011 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_bios.h" #include "nouveau_pm.h" static u32 read_div(struct drm_device *, int, u32, u32); static u32 read_pll(struct drm_device *, u32); static u32 read_vco(struct drm_device *dev, u32 dsrc) { u32 ssrc = nv_rd32(dev, dsrc); if (!(ssrc & 0x00000100)) return read_pll(dev, 0x00e800); return read_pll(dev, 0x00e820); } static u32 read_pll(struct drm_device *dev, u32 pll) { u32 ctrl = nv_rd32(dev, pll + 0); u32 coef = nv_rd32(dev, pll + 4); u32 P = (coef & 0x003f0000) >> 16; u32 N = (coef & 0x0000ff00) >> 8; u32 M = (coef & 0x000000ff) >> 0; u32 sclk, doff; if (!(ctrl & 0x00000001)) return 0; switch (pll & 0xfff000) { case 0x00e000: sclk = 27000; P = 1; break; case 0x137000: doff = (pll - 0x137000) / 0x20; sclk = read_div(dev, doff, 0x137120, 0x137140); break; case 0x132000: switch (pll) { case 0x132000: sclk = read_pll(dev, 0x132020); break; case 0x132020: sclk = read_div(dev, 0, 0x137320, 0x137330); break; default: return 0; } break; default: return 0; } return sclk * N / M / P; } static u32 read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl) { u32 ssrc = nv_rd32(dev, dsrc + (doff * 4)); u32 sctl = nv_rd32(dev, dctl + (doff * 4)); switch (ssrc & 0x00000003) { case 0: if ((ssrc & 0x00030000) != 0x00030000) return 27000; return 108000; case 2: return 100000; case 3: if (sctl & 0x80000000) { u32 sclk = read_vco(dev, dsrc + (doff * 4)); u32 sdiv = (sctl & 0x0000003f) + 2; return (sclk * 2) / sdiv; } return read_vco(dev, dsrc + (doff * 4)); default: return 0; } } static u32 read_mem(struct drm_device *dev) { u32 ssel = nv_rd32(dev, 0x1373f0); if (ssel & 0x00000001) return read_div(dev, 0, 0x137300, 0x137310); return read_pll(dev, 0x132000); } static u32 read_clk(struct drm_device *dev, int clk) { u32 sctl = nv_rd32(dev, 0x137250 + (clk * 4)); u32 ssel = nv_rd32(dev, 0x137100); u32 sclk, sdiv; if (ssel & (1 << clk)) { if (clk < 7) sclk = read_pll(dev, 0x137000 + (clk * 0x20)); 
else sclk = read_pll(dev, 0x1370e0); sdiv = ((sctl & 0x00003f00) >> 8) + 2; } else { sclk = read_div(dev, clk, 0x137160, 0x1371d0); sdiv = ((sctl & 0x0000003f) >> 0) + 2; } if (sctl & 0x80000000) return (sclk * 2) / sdiv; return sclk; } int nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) { perflvl->shader = read_clk(dev, 0x00); perflvl->core = perflvl->shader / 2; perflvl->memory = read_mem(dev); perflvl->rop = read_clk(dev, 0x01); perflvl->hub07 = read_clk(dev, 0x02); perflvl->hub06 = read_clk(dev, 0x07); perflvl->hub01 = read_clk(dev, 0x08); perflvl->copy = read_clk(dev, 0x09); perflvl->daemon = read_clk(dev, 0x0c); perflvl->vdec = read_clk(dev, 0x0e); return 0; } struct nvc0_pm_clock { u32 freq; u32 ssel; u32 mdiv; u32 dsrc; u32 ddiv; u32 coef; }; struct nvc0_pm_state { struct nvc0_pm_clock eng[16]; }; static u32 calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv) { u32 div = min((ref * 2) / freq, (u32)65); if (div < 2) div = 2; *ddiv = div - 2; return (ref * 2) / div; } static u32 calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv) { u32 sclk; /* use one of the fixed frequencies if possible */ *ddiv = 0x00000000; switch (freq) { case 27000: case 108000: *dsrc = 0x00000000; if (freq == 108000) *dsrc |= 0x00030000; return freq; case 100000: *dsrc = 0x00000002; return freq; default: *dsrc = 0x00000003; break; } /* otherwise, calculate the closest divider */ sclk = read_vco(dev, clk); if (clk < 7) sclk = calc_div(dev, clk, sclk, freq, ddiv); return sclk; } static u32 calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef) { struct pll_lims limits; int N, M, P, ret; ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits); if (ret) return 0; limits.refclk = read_div(dev, clk, 0x137120, 0x137140); if (!limits.refclk) return 0; ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P); if (ret <= 0) return 0; *coef = (P << 16) | (N << 8) | M; return ret; } /* A (likely rather simplified 
and incomplete) view of the clock tree * * Key: * * S: source select * D: divider * P: pll * F: switch * * Engine clocks: * * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref * (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref * * Not all registers exist for all clocks. For example: clocks >= 8 don't * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do * they have the divider at 1371d0, though the source selection at 137160 * still exists. You must use the divider at 137250 for these instead. * * Memory clock: * * TBD, read_mem() above is likely very wrong... * */ static int calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq) { u32 src0, div0, div1D, div1P = 0; u32 clk0, clk1 = 0; /* invalid clock domain */ if (!freq) return 0; /* first possible path, using only dividers */ clk0 = calc_src(dev, clk, freq, &src0, &div0); clk0 = calc_div(dev, clk, clk0, freq, &div1D); /* see if we can get any closer using PLLs */ if (clk0 != freq && (0x00004387 & (1 << clk))) { if (clk < 7) clk1 = calc_pll(dev, clk, freq, &info->coef); else clk1 = read_pll(dev, 0x1370e0); clk1 = calc_div(dev, clk, clk1, freq, &div1P); } /* select the method which gets closest to target freq */ if (abs((int)freq - clk0) <= abs((int)freq - clk1)) { info->dsrc = src0; if (div0) { info->ddiv |= 0x80000000; info->ddiv |= div0 << 8; info->ddiv |= div0; } if (div1D) { info->mdiv |= 0x80000000; info->mdiv |= div1D; } info->ssel = 0; info->freq = clk0; } else { if (div1P) { info->mdiv |= 0x80000000; info->mdiv |= div1P << 8; } info->ssel = (1 << clk); info->freq = clk1; } return 0; } void * nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nvc0_pm_state *info; int ret; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); /* NFI why this is still in the performance table, the ROPCs appear * to get their clock from 
clock 2 ("hub07", actually hub05 on this * chip, but, anyway...) as well. nvatiming confirms hub05 and ROP * are always the same freq with the binary driver even when the * performance table says they should differ. */ if (dev_priv->chipset == 0xd9) perflvl->rop = 0; if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) || (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) || (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) || (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) || (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) || (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) || (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) || (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) { kfree(info); return ERR_PTR(ret); } return info; } static void prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info) { /* program dividers at 137160/1371d0 first */ if (clk < 7 && !info->ssel) { nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv); nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc); } /* switch clock to non-pll mode */ nv_mask(dev, 0x137100, (1 << clk), 0x00000000); nv_wait(dev, 0x137100, (1 << clk), 0x00000000); /* reprogram pll */ if (clk < 7) { /* make sure it's disabled first... 
*/ u32 base = 0x137000 + (clk * 0x20); u32 ctrl = nv_rd32(dev, base + 0x00); if (ctrl & 0x00000001) { nv_mask(dev, base + 0x00, 0x00000004, 0x00000000); nv_mask(dev, base + 0x00, 0x00000001, 0x00000000); } /* program it to new values, if necessary */ if (info->ssel) { nv_wr32(dev, base + 0x04, info->coef); nv_mask(dev, base + 0x00, 0x00000001, 0x00000001); nv_wait(dev, base + 0x00, 0x00020000, 0x00020000); nv_mask(dev, base + 0x00, 0x00020004, 0x00000004); } } /* select pll/non-pll mode, and program final clock divider */ nv_mask(dev, 0x137100, (1 << clk), info->ssel); nv_wait(dev, 0x137100, (1 << clk), info->ssel); nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv); } int nvc0_pm_clocks_set(struct drm_device *dev, void *data) { struct nvc0_pm_state *info = data; int i; for (i = 0; i < 16; i++) { if (!info->eng[i].freq) continue; prog_clk(dev, i, &info->eng[i]); } kfree(info); return 0; }
gpl-2.0
Phoenix-CJ23/stockkernel
net/netfilter/ipset/ip_set_getport.c
7308
3479
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Get Layer-4 data from the packets */ #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/icmp.h> #include <linux/icmpv6.h> #include <linux/sctp.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/ip.h> #include <net/ipv6.h> #include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/export.h> /* We must handle non-linear skbs */ static bool get_port(const struct sk_buff *skb, int protocol, unsigned int protooff, bool src, __be16 *port, u8 *proto) { switch (protocol) { case IPPROTO_TCP: { struct tcphdr _tcph; const struct tcphdr *th; th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph); if (th == NULL) /* No choice either */ return false; *port = src ? th->source : th->dest; break; } case IPPROTO_SCTP: { sctp_sctphdr_t _sh; const sctp_sctphdr_t *sh; sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh); if (sh == NULL) /* No choice either */ return false; *port = src ? sh->source : sh->dest; break; } case IPPROTO_UDP: case IPPROTO_UDPLITE: { struct udphdr _udph; const struct udphdr *uh; uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph); if (uh == NULL) /* No choice either */ return false; *port = src ? 
uh->source : uh->dest; break; } case IPPROTO_ICMP: { struct icmphdr _ich; const struct icmphdr *ic; ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich); if (ic == NULL) return false; *port = (__force __be16)htons((ic->type << 8) | ic->code); break; } case IPPROTO_ICMPV6: { struct icmp6hdr _ich; const struct icmp6hdr *ic; ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich); if (ic == NULL) return false; *port = (__force __be16) htons((ic->icmp6_type << 8) | ic->icmp6_code); break; } default: break; } *proto = protocol; return true; } bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto) { const struct iphdr *iph = ip_hdr(skb); unsigned int protooff = ip_hdrlen(skb); int protocol = iph->protocol; /* See comments at tcp_match in ip_tables.c */ if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET)) return false; return get_port(skb, protocol, protooff, src, port, proto); } EXPORT_SYMBOL_GPL(ip_set_get_ip4_port); #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto) { int protoff; u8 nexthdr; __be16 frag_off; nexthdr = ipv6_hdr(skb)->nexthdr; protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off); if (protoff < 0) return false; return get_port(skb, nexthdr, protoff, src, port, proto); } EXPORT_SYMBOL_GPL(ip_set_get_ip6_port); #endif bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port) { bool ret; u8 proto; switch (pf) { case NFPROTO_IPV4: ret = ip_set_get_ip4_port(skb, src, port, &proto); break; case NFPROTO_IPV6: ret = ip_set_get_ip6_port(skb, src, port, &proto); break; default: return false; } if (!ret) return ret; switch (proto) { case IPPROTO_TCP: case IPPROTO_UDP: return true; default: return false; } } EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
gpl-2.0
netico-solutions/linux_3.2.0_android_4.2.2
drivers/tty/serial/mfd.c
141
37365
/* * mfd.c: driver for High Speed UART device of Intel Medfield platform * * Refer pxa.c, 8250.c and some other drivers in drivers/serial/ * * (C) Copyright 2010 Intel Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ /* Notes: * 1. DMA channel allocation: 0/1 channel are assigned to port 0, * 2/3 chan to port 1, 4/5 chan to port 3. Even number chans * are used for RX, odd chans for TX * * 2. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always * asserted, only when the HW is reset the DDCD and DDSR will * be triggered */ #include <linux/module.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/slab.h> #include <linux/serial_reg.h> #include <linux/circ_buf.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial_mfd.h> #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/debugfs.h> #include <linux/pm_runtime.h> #define HSU_DMA_BUF_SIZE 2048 #define chan_readl(chan, offset) readl(chan->reg + offset) #define chan_writel(chan, offset, val) writel(val, chan->reg + offset) #define mfd_readl(obj, offset) readl(obj->reg + offset) #define mfd_writel(obj, offset, val) writel(val, obj->reg + offset) static int hsu_dma_enable; module_param(hsu_dma_enable, int, 0); MODULE_PARM_DESC(hsu_dma_enable, "It is a bitmap to set working mode, if bit[x] is 1, then port[x] will work in DMA mode, otherwise in PIO mode."); struct hsu_dma_buffer { u8 *buf; dma_addr_t dma_addr; u32 dma_size; u32 ofs; }; struct hsu_dma_chan { u32 id; enum dma_data_direction dirt; struct uart_hsu_port *uport; void __iomem *reg; }; struct uart_hsu_port { struct uart_port port; unsigned char ier; unsigned char lcr; unsigned char mcr; unsigned int 
lsr_break_flag; char name[12]; int index; struct device *dev; struct hsu_dma_chan *txc; struct hsu_dma_chan *rxc; struct hsu_dma_buffer txbuf; struct hsu_dma_buffer rxbuf; int use_dma; /* flag for DMA/PIO */ int running; int dma_tx_on; }; /* Top level data structure of HSU */ struct hsu_port { void __iomem *reg; unsigned long paddr; unsigned long iolen; u32 irq; struct uart_hsu_port port[3]; struct hsu_dma_chan chans[10]; struct dentry *debugfs; }; static inline unsigned int serial_in(struct uart_hsu_port *up, int offset) { unsigned int val; if (offset > UART_MSR) { offset <<= 2; val = readl(up->port.membase + offset); } else val = (unsigned int)readb(up->port.membase + offset); return val; } static inline void serial_out(struct uart_hsu_port *up, int offset, int value) { if (offset > UART_MSR) { offset <<= 2; writel(value, up->port.membase + offset); } else { unsigned char val = value & 0xff; writeb(val, up->port.membase + offset); } } #ifdef CONFIG_DEBUG_FS #define HSU_REGS_BUFSIZE 1024 static int hsu_show_regs_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t port_show_regs(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct uart_hsu_port *up = file->private_data; char *buf; u32 len = 0; ssize_t ret; buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL); if (!buf) return 0; len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "MFD HSU port[%d] regs:\n", up->index); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "=================================\n"); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "IER: \t\t0x%08x\n", serial_in(up, UART_IER)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "IIR: \t\t0x%08x\n", serial_in(up, UART_IIR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "LCR: \t\t0x%08x\n", serial_in(up, UART_LCR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "MCR: \t\t0x%08x\n", serial_in(up, UART_MCR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - 
len, "LSR: \t\t0x%08x\n", serial_in(up, UART_LSR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "MSR: \t\t0x%08x\n", serial_in(up, UART_MSR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "FOR: \t\t0x%08x\n", serial_in(up, UART_FOR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "PS: \t\t0x%08x\n", serial_in(up, UART_PS)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "MUL: \t\t0x%08x\n", serial_in(up, UART_MUL)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "DIV: \t\t0x%08x\n", serial_in(up, UART_DIV)); if (len > HSU_REGS_BUFSIZE) len = HSU_REGS_BUFSIZE; ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return ret; } static ssize_t dma_show_regs(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct hsu_dma_chan *chan = file->private_data; char *buf; u32 len = 0; ssize_t ret; buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL); if (!buf) return 0; len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "MFD HSU DMA channel [%d] regs:\n", chan->id); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "=================================\n"); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR)); len += snprintf(buf + len, 
HSU_REGS_BUFSIZE - len, "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR)); len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR)); if (len > HSU_REGS_BUFSIZE) len = HSU_REGS_BUFSIZE; ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return ret; } static const struct file_operations port_regs_ops = { .owner = THIS_MODULE, .open = hsu_show_regs_open, .read = port_show_regs, .llseek = default_llseek, }; static const struct file_operations dma_regs_ops = { .owner = THIS_MODULE, .open = hsu_show_regs_open, .read = dma_show_regs, .llseek = default_llseek, }; static int hsu_debugfs_init(struct hsu_port *hsu) { int i; char name[32]; hsu->debugfs = debugfs_create_dir("hsu", NULL); if (!hsu->debugfs) return -ENOMEM; for (i = 0; i < 3; i++) { snprintf(name, sizeof(name), "port_%d_regs", i); debugfs_create_file(name, S_IFREG | S_IRUGO, hsu->debugfs, (void *)(&hsu->port[i]), &port_regs_ops); } for (i = 0; i < 6; i++) { snprintf(name, sizeof(name), "dma_chan_%d_regs", i); debugfs_create_file(name, S_IFREG | S_IRUGO, hsu->debugfs, (void *)&hsu->chans[i], &dma_regs_ops); } return 0; } static void hsu_debugfs_remove(struct hsu_port *hsu) { if (hsu->debugfs) debugfs_remove_recursive(hsu->debugfs); } #else static inline int hsu_debugfs_init(struct hsu_port *hsu) { return 0; } static inline void hsu_debugfs_remove(struct hsu_port *hsu) { } #endif /* CONFIG_DEBUG_FS */ static void serial_hsu_enable_ms(struct uart_port *port) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); up->ier |= UART_IER_MSI; serial_out(up, UART_IER, up->ier); } void hsu_dma_tx(struct uart_hsu_port *up) { struct circ_buf *xmit = &up->port.state->xmit; struct hsu_dma_buffer *dbuf = &up->txbuf; int 
count; /* test_and_set_bit may be better, but anyway it's in lock protected mode */ if (up->dma_tx_on) return; /* Update the circ buf info */ xmit->tail += dbuf->ofs; xmit->tail &= UART_XMIT_SIZE - 1; up->port.icount.tx += dbuf->ofs; dbuf->ofs = 0; /* Disable the channel */ chan_writel(up->txc, HSU_CH_CR, 0x0); if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) { dma_sync_single_for_device(up->port.dev, dbuf->dma_addr, dbuf->dma_size, DMA_TO_DEVICE); count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); dbuf->ofs = count; /* Reprogram the channel */ chan_writel(up->txc, HSU_CH_D0SAR, dbuf->dma_addr + xmit->tail); chan_writel(up->txc, HSU_CH_D0TSR, count); /* Reenable the channel */ chan_writel(up->txc, HSU_CH_DCR, 0x1 | (0x1 << 8) | (0x1 << 16) | (0x1 << 24)); up->dma_tx_on = 1; chan_writel(up->txc, HSU_CH_CR, 0x1); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); } /* The buffer is already cache coherent */ void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf) { dbuf->ofs = 0; chan_writel(rxc, HSU_CH_BSR, 32); chan_writel(rxc, HSU_CH_MOTSR, 4); chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr); chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size); chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8) | (0x1 << 16) | (0x1 << 24) /* timeout bit, see HSU Errata 1 */ ); chan_writel(rxc, HSU_CH_CR, 0x3); } /* Protected by spin_lock_irqsave(port->lock) */ static void serial_hsu_start_tx(struct uart_port *port) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); if (up->use_dma) { hsu_dma_tx(up); } else if (!(up->ier & UART_IER_THRI)) { up->ier |= UART_IER_THRI; serial_out(up, UART_IER, up->ier); } } static void serial_hsu_stop_tx(struct uart_port *port) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); struct hsu_dma_chan *txc = up->txc; if (up->use_dma) chan_writel(txc, HSU_CH_CR, 0x0); else if (up->ier & UART_IER_THRI) { up->ier &= ~UART_IER_THRI; 
serial_out(up, UART_IER, up->ier); } } /* This is always called in spinlock protected mode, so * modify timeout timer is safe here */ void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts) { struct hsu_dma_buffer *dbuf = &up->rxbuf; struct hsu_dma_chan *chan = up->rxc; struct uart_port *port = &up->port; struct tty_struct *tty = port->state->port.tty; int count; if (!tty) return; /* * First need to know how many is already transferred, * then check if its a timeout DMA irq, and return * the trail bytes out, push them up and reenable the * channel */ /* Timeout IRQ, need wait some time, see Errata 2 */ if (int_sts & 0xf00) udelay(2); /* Stop the channel */ chan_writel(chan, HSU_CH_CR, 0x0); count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr; if (!count) { /* Restart the channel before we leave */ chan_writel(chan, HSU_CH_CR, 0x3); return; } dma_sync_single_for_cpu(port->dev, dbuf->dma_addr, dbuf->dma_size, DMA_FROM_DEVICE); /* * Head will only wrap around when we recycle * the DMA buffer, and when that happens, we * explicitly set tail to 0. So head will * always be greater than tail. 
*/ tty_insert_flip_string(tty, dbuf->buf, count); port->icount.rx += count; dma_sync_single_for_device(up->port.dev, dbuf->dma_addr, dbuf->dma_size, DMA_FROM_DEVICE); /* Reprogram the channel */ chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr); chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size); chan_writel(chan, HSU_CH_DCR, 0x1 | (0x1 << 8) | (0x1 << 16) | (0x1 << 24) /* timeout bit, see HSU Errata 1 */ ); tty_flip_buffer_push(tty); chan_writel(chan, HSU_CH_CR, 0x3); } static void serial_hsu_stop_rx(struct uart_port *port) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); struct hsu_dma_chan *chan = up->rxc; if (up->use_dma) chan_writel(chan, HSU_CH_CR, 0x2); else { up->ier &= ~UART_IER_RLSI; up->port.read_status_mask &= ~UART_LSR_DR; serial_out(up, UART_IER, up->ier); } } static inline void receive_chars(struct uart_hsu_port *up, int *status) { struct tty_struct *tty = up->port.state->port.tty; unsigned int ch, flag; unsigned int max_count = 256; if (!tty) return; do { ch = serial_in(up, UART_RX); flag = TTY_NORMAL; up->port.icount.rx++; if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE | UART_LSR_FE | UART_LSR_OE))) { dev_warn(up->dev, "We really rush into ERR/BI case" "status = 0x%02x", *status); /* For statistics only */ if (*status & UART_LSR_BI) { *status &= ~(UART_LSR_FE | UART_LSR_PE); up->port.icount.brk++; /* * We do the SysRQ and SAK checking * here because otherwise the break * may get masked by ignore_status_mask * or read_status_mask. */ if (uart_handle_break(&up->port)) goto ignore_char; } else if (*status & UART_LSR_PE) up->port.icount.parity++; else if (*status & UART_LSR_FE) up->port.icount.frame++; if (*status & UART_LSR_OE) up->port.icount.overrun++; /* Mask off conditions which should be ignored. 
*/ *status &= up->port.read_status_mask; #ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE if (up->port.cons && up->port.cons->index == up->port.line) { /* Recover the break flag from console xmit */ *status |= up->lsr_break_flag; up->lsr_break_flag = 0; } #endif if (*status & UART_LSR_BI) { flag = TTY_BREAK; } else if (*status & UART_LSR_PE) flag = TTY_PARITY; else if (*status & UART_LSR_FE) flag = TTY_FRAME; } if (uart_handle_sysrq_char(&up->port, ch)) goto ignore_char; uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag); ignore_char: *status = serial_in(up, UART_LSR); } while ((*status & UART_LSR_DR) && max_count--); tty_flip_buffer_push(tty); } static void transmit_chars(struct uart_hsu_port *up) { struct circ_buf *xmit = &up->port.state->xmit; int count; if (up->port.x_char) { serial_out(up, UART_TX, up->port.x_char); up->port.icount.tx++; up->port.x_char = 0; return; } if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { serial_hsu_stop_tx(&up->port); return; } /* The IRQ is for TX FIFO half-empty */ count = up->port.fifosize / 2; do { serial_out(up, UART_TX, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); up->port.icount.tx++; if (uart_circ_empty(xmit)) break; } while (--count > 0); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); if (uart_circ_empty(xmit)) serial_hsu_stop_tx(&up->port); } static inline void check_modem_status(struct uart_hsu_port *up) { int status; status = serial_in(up, UART_MSR); if ((status & UART_MSR_ANY_DELTA) == 0) return; if (status & UART_MSR_TERI) up->port.icount.rng++; if (status & UART_MSR_DDSR) up->port.icount.dsr++; /* We may only get DDCD when HW init and reset */ if (status & UART_MSR_DDCD) uart_handle_dcd_change(&up->port, status & UART_MSR_DCD); /* Will start/stop_tx accordingly */ if (status & UART_MSR_DCTS) uart_handle_cts_change(&up->port, status & UART_MSR_CTS); wake_up_interruptible(&up->port.state->port.delta_msr_wait); } /* * This handles the interrupt 
from one port. */ static irqreturn_t port_irq(int irq, void *dev_id) { struct uart_hsu_port *up = dev_id; unsigned int iir, lsr; unsigned long flags; if (unlikely(!up->running)) return IRQ_NONE; spin_lock_irqsave(&up->port.lock, flags); if (up->use_dma) { lsr = serial_in(up, UART_LSR); if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE | UART_LSR_FE | UART_LSR_OE))) dev_warn(up->dev, "Got lsr irq while using DMA, lsr = 0x%2x\n", lsr); check_modem_status(up); spin_unlock_irqrestore(&up->port.lock, flags); return IRQ_HANDLED; } iir = serial_in(up, UART_IIR); if (iir & UART_IIR_NO_INT) { spin_unlock_irqrestore(&up->port.lock, flags); return IRQ_NONE; } lsr = serial_in(up, UART_LSR); if (lsr & UART_LSR_DR) receive_chars(up, &lsr); check_modem_status(up); /* lsr will be renewed during the receive_chars */ if (lsr & UART_LSR_THRE) transmit_chars(up); spin_unlock_irqrestore(&up->port.lock, flags); return IRQ_HANDLED; } static inline void dma_chan_irq(struct hsu_dma_chan *chan) { struct uart_hsu_port *up = chan->uport; unsigned long flags; u32 int_sts; spin_lock_irqsave(&up->port.lock, flags); if (!up->use_dma || !up->running) goto exit; /* * No matter what situation, need read clear the IRQ status * There is a bug, see Errata 5, HSD 2900918 */ int_sts = chan_readl(chan, HSU_CH_SR); /* Rx channel */ if (chan->dirt == DMA_FROM_DEVICE) hsu_dma_rx(up, int_sts); /* Tx channel */ if (chan->dirt == DMA_TO_DEVICE) { chan_writel(chan, HSU_CH_CR, 0x0); up->dma_tx_on = 0; hsu_dma_tx(up); } exit: spin_unlock_irqrestore(&up->port.lock, flags); return; } static irqreturn_t dma_irq(int irq, void *dev_id) { struct hsu_port *hsu = dev_id; u32 int_sts, i; int_sts = mfd_readl(hsu, HSU_GBL_DMAISR); /* Currently we only have 6 channels may be used */ for (i = 0; i < 6; i++) { if (int_sts & 0x1) dma_chan_irq(&hsu->chans[i]); int_sts >>= 1; } return IRQ_HANDLED; } static unsigned int serial_hsu_tx_empty(struct uart_port *port) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, 
port); unsigned long flags; unsigned int ret; spin_lock_irqsave(&up->port.lock, flags); ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0; spin_unlock_irqrestore(&up->port.lock, flags); return ret; } static unsigned int serial_hsu_get_mctrl(struct uart_port *port) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); unsigned char status; unsigned int ret; status = serial_in(up, UART_MSR); ret = 0; if (status & UART_MSR_DCD) ret |= TIOCM_CAR; if (status & UART_MSR_RI) ret |= TIOCM_RNG; if (status & UART_MSR_DSR) ret |= TIOCM_DSR; if (status & UART_MSR_CTS) ret |= TIOCM_CTS; return ret; } static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); unsigned char mcr = 0; if (mctrl & TIOCM_RTS) mcr |= UART_MCR_RTS; if (mctrl & TIOCM_DTR) mcr |= UART_MCR_DTR; if (mctrl & TIOCM_OUT1) mcr |= UART_MCR_OUT1; if (mctrl & TIOCM_OUT2) mcr |= UART_MCR_OUT2; if (mctrl & TIOCM_LOOP) mcr |= UART_MCR_LOOP; mcr |= up->mcr; serial_out(up, UART_MCR, mcr); } static void serial_hsu_break_ctl(struct uart_port *port, int break_state) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); unsigned long flags; spin_lock_irqsave(&up->port.lock, flags); if (break_state == -1) up->lcr |= UART_LCR_SBC; else up->lcr &= ~UART_LCR_SBC; serial_out(up, UART_LCR, up->lcr); spin_unlock_irqrestore(&up->port.lock, flags); } /* * What special to do: * 1. chose the 64B fifo mode * 2. start dma or pio depends on configuration * 3. we only allocate dma memory when needed */ static int serial_hsu_startup(struct uart_port *port) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); unsigned long flags; pm_runtime_get_sync(up->dev); /* * Clear the FIFO buffers and disable them. 
* (they will be reenabled in set_termios()) */ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_out(up, UART_FCR, 0); /* Clear the interrupt registers. */ (void) serial_in(up, UART_LSR); (void) serial_in(up, UART_RX); (void) serial_in(up, UART_IIR); (void) serial_in(up, UART_MSR); /* Now, initialize the UART, default is 8n1 */ serial_out(up, UART_LCR, UART_LCR_WLEN8); spin_lock_irqsave(&up->port.lock, flags); up->port.mctrl |= TIOCM_OUT2; serial_hsu_set_mctrl(&up->port, up->port.mctrl); /* * Finally, enable interrupts. Note: Modem status interrupts * are set via set_termios(), which will be occurring imminently * anyway, so we don't enable them here. */ if (!up->use_dma) up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE; else up->ier = 0; serial_out(up, UART_IER, up->ier); spin_unlock_irqrestore(&up->port.lock, flags); /* DMA init */ if (up->use_dma) { struct hsu_dma_buffer *dbuf; struct circ_buf *xmit = &port->state->xmit; up->dma_tx_on = 0; /* First allocate the RX buffer */ dbuf = &up->rxbuf; dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL); if (!dbuf->buf) { up->use_dma = 0; goto exit; } dbuf->dma_addr = dma_map_single(port->dev, dbuf->buf, HSU_DMA_BUF_SIZE, DMA_FROM_DEVICE); dbuf->dma_size = HSU_DMA_BUF_SIZE; /* Start the RX channel right now */ hsu_dma_start_rx_chan(up->rxc, dbuf); /* Next init the TX DMA */ dbuf = &up->txbuf; dbuf->buf = xmit->buf; dbuf->dma_addr = dma_map_single(port->dev, dbuf->buf, UART_XMIT_SIZE, DMA_TO_DEVICE); dbuf->dma_size = UART_XMIT_SIZE; /* This should not be changed all around */ chan_writel(up->txc, HSU_CH_BSR, 32); chan_writel(up->txc, HSU_CH_MOTSR, 4); dbuf->ofs = 0; } exit: /* And clear the interrupt registers again for luck. 
*/ (void) serial_in(up, UART_LSR); (void) serial_in(up, UART_RX); (void) serial_in(up, UART_IIR); (void) serial_in(up, UART_MSR); up->running = 1; return 0; } static void serial_hsu_shutdown(struct uart_port *port) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); unsigned long flags; /* Disable interrupts from this port */ up->ier = 0; serial_out(up, UART_IER, 0); up->running = 0; spin_lock_irqsave(&up->port.lock, flags); up->port.mctrl &= ~TIOCM_OUT2; serial_hsu_set_mctrl(&up->port, up->port.mctrl); spin_unlock_irqrestore(&up->port.lock, flags); /* Disable break condition and FIFOs */ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC); serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_out(up, UART_FCR, 0); pm_runtime_put(up->dev); } static void serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); unsigned char cval, fcr = 0; unsigned long flags; unsigned int baud, quot; u32 ps, mul; switch (termios->c_cflag & CSIZE) { case CS5: cval = UART_LCR_WLEN5; break; case CS6: cval = UART_LCR_WLEN6; break; case CS7: cval = UART_LCR_WLEN7; break; default: case CS8: cval = UART_LCR_WLEN8; break; } /* CMSPAR isn't supported by this driver */ termios->c_cflag &= ~CMSPAR; if (termios->c_cflag & CSTOPB) cval |= UART_LCR_STOP; if (termios->c_cflag & PARENB) cval |= UART_LCR_PARITY; if (!(termios->c_cflag & PARODD)) cval |= UART_LCR_EPAR; /* * The base clk is 50Mhz, and the baud rate come from: * baud = 50M * MUL / (DIV * PS * DLAB) * * For those basic low baud rate we can get the direct * scalar from 2746800, like 115200 = 2746800/24. 
For those * higher baud rate, we handle them case by case, mainly by * adjusting the MUL/PS registers, and DIV register is kept * as default value 0x3d09 to make things simple */ baud = uart_get_baud_rate(port, termios, old, 0, 4000000); quot = 1; ps = 0x10; mul = 0x3600; switch (baud) { case 3500000: mul = 0x3345; ps = 0xC; break; case 1843200: mul = 0x2400; break; case 3000000: case 2500000: case 2000000: case 1500000: case 1000000: case 500000: /* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */ mul = baud / 500000 * 0x9C4; break; default: /* Use uart_get_divisor to get quot for other baud rates */ quot = 0; } if (!quot) quot = uart_get_divisor(port, baud); if ((up->port.uartclk / quot) < (2400 * 16)) fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B; else if ((up->port.uartclk / quot) < (230400 * 16)) fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B; else fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B; fcr |= UART_FCR_HSU_64B_FIFO; /* * Ok, we're now changing the port state. Do it with * interrupts disabled. */ spin_lock_irqsave(&up->port.lock, flags); /* Update the per-port timeout */ uart_update_timeout(port, termios->c_cflag, baud); up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; if (termios->c_iflag & INPCK) up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; if (termios->c_iflag & (BRKINT | PARMRK)) up->port.read_status_mask |= UART_LSR_BI; /* Characters to ignore */ up->port.ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; if (termios->c_iflag & IGNBRK) { up->port.ignore_status_mask |= UART_LSR_BI; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). 
*/ if (termios->c_iflag & IGNPAR) up->port.ignore_status_mask |= UART_LSR_OE; } /* Ignore all characters if CREAD is not set */ if ((termios->c_cflag & CREAD) == 0) up->port.ignore_status_mask |= UART_LSR_DR; /* * CTS flow control flag and modem status interrupts, disable * MSI by default */ up->ier &= ~UART_IER_MSI; if (UART_ENABLE_MS(&up->port, termios->c_cflag)) up->ier |= UART_IER_MSI; serial_out(up, UART_IER, up->ier); if (termios->c_cflag & CRTSCTS) up->mcr |= UART_MCR_AFE | UART_MCR_RTS; else up->mcr &= ~UART_MCR_AFE; serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */ serial_out(up, UART_LCR, cval); /* reset DLAB */ serial_out(up, UART_MUL, mul); /* set MUL */ serial_out(up, UART_PS, ps); /* set PS */ up->lcr = cval; /* Save LCR */ serial_hsu_set_mctrl(&up->port, up->port.mctrl); serial_out(up, UART_FCR, fcr); spin_unlock_irqrestore(&up->port.lock, flags); } static void serial_hsu_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) { } static void serial_hsu_release_port(struct uart_port *port) { } static int serial_hsu_request_port(struct uart_port *port) { return 0; } static void serial_hsu_config_port(struct uart_port *port, int flags) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); up->port.type = PORT_MFD; } static int serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser) { /* We don't want the core code to modify any port params */ return -EINVAL; } static const char * serial_hsu_type(struct uart_port *port) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); return up->name; } /* Mainly for uart console use */ static struct uart_hsu_port *serial_hsu_ports[3]; static struct uart_driver serial_hsu_reg; #ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) /* Wait for transmitter & holding register to 
empty */ static inline void wait_for_xmitr(struct uart_hsu_port *up) { unsigned int status, tmout = 1000; /* Wait up to 1ms for the character to be sent. */ do { status = serial_in(up, UART_LSR); if (status & UART_LSR_BI) up->lsr_break_flag = UART_LSR_BI; if (--tmout == 0) break; udelay(1); } while (!(status & BOTH_EMPTY)); /* Wait up to 1s for flow control if necessary */ if (up->port.flags & UPF_CONS_FLOW) { tmout = 1000000; while (--tmout && ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0)) udelay(1); } } static void serial_hsu_console_putchar(struct uart_port *port, int ch) { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); wait_for_xmitr(up); serial_out(up, UART_TX, ch); } /* * Print a string to the serial port trying not to disturb * any possible real use of the port... * * The console_lock must be held when we get here. */ static void serial_hsu_console_write(struct console *co, const char *s, unsigned int count) { struct uart_hsu_port *up = serial_hsu_ports[co->index]; unsigned long flags; unsigned int ier; int locked = 1; local_irq_save(flags); if (up->port.sysrq) locked = 0; else if (oops_in_progress) { locked = spin_trylock(&up->port.lock); } else spin_lock(&up->port.lock); /* First save the IER then disable the interrupts */ ier = serial_in(up, UART_IER); serial_out(up, UART_IER, 0); uart_console_write(&up->port, s, count, serial_hsu_console_putchar); /* * Finally, wait for transmitter to become empty * and restore the IER */ wait_for_xmitr(up); serial_out(up, UART_IER, ier); if (locked) spin_unlock(&up->port.lock); local_irq_restore(flags); } static struct console serial_hsu_console; static int __init serial_hsu_console_setup(struct console *co, char *options) { struct uart_hsu_port *up; int baud = 115200; int bits = 8; int parity = 'n'; int flow = 'n'; int ret; if (co->index == -1 || co->index >= serial_hsu_reg.nr) co->index = 0; up = serial_hsu_ports[co->index]; if (!up) return -ENODEV; if (options) 
uart_parse_options(options, &baud, &parity, &bits, &flow); ret = uart_set_options(&up->port, co, baud, parity, bits, flow); return ret; } static struct console serial_hsu_console = { .name = "ttyMFD", .write = serial_hsu_console_write, .device = uart_console_device, .setup = serial_hsu_console_setup, .flags = CON_PRINTBUFFER, .index = 2, .data = &serial_hsu_reg, }; #endif struct uart_ops serial_hsu_pops = { .tx_empty = serial_hsu_tx_empty, .set_mctrl = serial_hsu_set_mctrl, .get_mctrl = serial_hsu_get_mctrl, .stop_tx = serial_hsu_stop_tx, .start_tx = serial_hsu_start_tx, .stop_rx = serial_hsu_stop_rx, .enable_ms = serial_hsu_enable_ms, .break_ctl = serial_hsu_break_ctl, .startup = serial_hsu_startup, .shutdown = serial_hsu_shutdown, .set_termios = serial_hsu_set_termios, .pm = serial_hsu_pm, .type = serial_hsu_type, .release_port = serial_hsu_release_port, .request_port = serial_hsu_request_port, .config_port = serial_hsu_config_port, .verify_port = serial_hsu_verify_port, }; static struct uart_driver serial_hsu_reg = { .owner = THIS_MODULE, .driver_name = "MFD serial", .dev_name = "ttyMFD", .major = TTY_MAJOR, .minor = 128, .nr = 3, }; #ifdef CONFIG_PM static int serial_hsu_suspend(struct pci_dev *pdev, pm_message_t state) { void *priv = pci_get_drvdata(pdev); struct uart_hsu_port *up; /* Make sure this is not the internal dma controller */ if (priv && (pdev->device != 0x081E)) { up = priv; uart_suspend_port(&serial_hsu_reg, &up->port); } pci_save_state(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int serial_hsu_resume(struct pci_dev *pdev) { void *priv = pci_get_drvdata(pdev); struct uart_hsu_port *up; int ret; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) dev_warn(&pdev->dev, "HSU: can't re-enable device, try to continue\n"); if (priv && (pdev->device != 0x081E)) { up = priv; uart_resume_port(&serial_hsu_reg, &up->port); } return 0; } #else #define serial_hsu_suspend 
NULL #define serial_hsu_resume NULL #endif #ifdef CONFIG_PM_RUNTIME static int serial_hsu_runtime_idle(struct device *dev) { int err; err = pm_schedule_suspend(dev, 500); if (err) return -EBUSY; return 0; } static int serial_hsu_runtime_suspend(struct device *dev) { return 0; } static int serial_hsu_runtime_resume(struct device *dev) { return 0; } #else #define serial_hsu_runtime_idle NULL #define serial_hsu_runtime_suspend NULL #define serial_hsu_runtime_resume NULL #endif static const struct dev_pm_ops serial_hsu_pm_ops = { .runtime_suspend = serial_hsu_runtime_suspend, .runtime_resume = serial_hsu_runtime_resume, .runtime_idle = serial_hsu_runtime_idle, }; /* temp global pointer before we settle down on using one or four PCI dev */ static struct hsu_port *phsu; static int serial_hsu_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct uart_hsu_port *uport; int index, ret; printk(KERN_INFO "HSU: found PCI Serial controller(ID: %04x:%04x)\n", pdev->vendor, pdev->device); switch (pdev->device) { case 0x081B: index = 0; break; case 0x081C: index = 1; break; case 0x081D: index = 2; break; case 0x081E: /* internal DMA controller */ index = 3; break; default: dev_err(&pdev->dev, "HSU: out of index!"); return -ENODEV; } ret = pci_enable_device(pdev); if (ret) return ret; if (index == 3) { /* DMA controller */ ret = request_irq(pdev->irq, dma_irq, 0, "hsu_dma", phsu); if (ret) { dev_err(&pdev->dev, "can not get IRQ\n"); goto err_disable; } pci_set_drvdata(pdev, phsu); } else { /* UART port 0~2 */ uport = &phsu->port[index]; uport->port.irq = pdev->irq; uport->port.dev = &pdev->dev; uport->dev = &pdev->dev; ret = request_irq(pdev->irq, port_irq, 0, uport->name, uport); if (ret) { dev_err(&pdev->dev, "can not get IRQ\n"); goto err_disable; } uart_add_one_port(&serial_hsu_reg, &uport->port); #ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE if (index == 2) { register_console(&serial_hsu_console); uport->port.cons = &serial_hsu_console; } #endif pci_set_drvdata(pdev, 
uport); } pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); return 0; err_disable: pci_disable_device(pdev); return ret; } static void hsu_global_init(void) { struct hsu_port *hsu; struct uart_hsu_port *uport; struct hsu_dma_chan *dchan; int i, ret; hsu = kzalloc(sizeof(struct hsu_port), GFP_KERNEL); if (!hsu) return; /* Get basic io resource and map it */ hsu->paddr = 0xffa28000; hsu->iolen = 0x1000; if (!(request_mem_region(hsu->paddr, hsu->iolen, "HSU global"))) pr_warning("HSU: error in request mem region\n"); hsu->reg = ioremap_nocache((unsigned long)hsu->paddr, hsu->iolen); if (!hsu->reg) { pr_err("HSU: error in ioremap\n"); ret = -ENOMEM; goto err_free_region; } /* Initialise the 3 UART ports */ uport = hsu->port; for (i = 0; i < 3; i++) { uport->port.type = PORT_MFD; uport->port.iotype = UPIO_MEM; uport->port.mapbase = (resource_size_t)hsu->paddr + HSU_PORT_REG_OFFSET + i * HSU_PORT_REG_LENGTH; uport->port.membase = hsu->reg + HSU_PORT_REG_OFFSET + i * HSU_PORT_REG_LENGTH; sprintf(uport->name, "hsu_port%d", i); uport->port.fifosize = 64; uport->port.ops = &serial_hsu_pops; uport->port.line = i; uport->port.flags = UPF_IOREMAP; /* set the scalable maxim support rate to 2746800 bps */ uport->port.uartclk = 115200 * 24 * 16; uport->running = 0; uport->txc = &hsu->chans[i * 2]; uport->rxc = &hsu->chans[i * 2 + 1]; serial_hsu_ports[i] = uport; uport->index = i; if (hsu_dma_enable & (1<<i)) uport->use_dma = 1; else uport->use_dma = 0; uport++; } /* Initialise 6 dma channels */ dchan = hsu->chans; for (i = 0; i < 6; i++) { dchan->id = i; dchan->dirt = (i & 0x1) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; dchan->uport = &hsu->port[i/2]; dchan->reg = hsu->reg + HSU_DMA_CHANS_REG_OFFSET + i * HSU_DMA_CHANS_REG_LENGTH; dchan++; } phsu = hsu; hsu_debugfs_init(hsu); return; err_free_region: release_mem_region(hsu->paddr, hsu->iolen); kfree(hsu); return; } static void serial_hsu_remove(struct pci_dev *pdev) { void *priv = pci_get_drvdata(pdev); struct uart_hsu_port *up; if (!priv) return; pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); /* For port 0/1/2, priv is the address of uart_hsu_port */ if (pdev->device != 0x081E) { up = priv; uart_remove_one_port(&serial_hsu_reg, &up->port); } pci_set_drvdata(pdev, NULL); free_irq(pdev->irq, priv); pci_disable_device(pdev); } /* First 3 are UART ports, and the 4th is the DMA */ static const struct pci_device_id pci_ids[] __devinitconst = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081E) }, {}, }; static struct pci_driver hsu_pci_driver = { .name = "HSU serial", .id_table = pci_ids, .probe = serial_hsu_probe, .remove = __devexit_p(serial_hsu_remove), .suspend = serial_hsu_suspend, .resume = serial_hsu_resume, .driver = { .pm = &serial_hsu_pm_ops, }, }; static int __init hsu_pci_init(void) { int ret; hsu_global_init(); ret = uart_register_driver(&serial_hsu_reg); if (ret) return ret; return pci_register_driver(&hsu_pci_driver); } static void __exit hsu_pci_exit(void) { pci_unregister_driver(&hsu_pci_driver); uart_unregister_driver(&serial_hsu_reg); hsu_debugfs_remove(phsu); kfree(phsu); } module_init(hsu_pci_init); module_exit(hsu_pci_exit); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:medfield-hsu");
gpl-2.0
calixtolinux/Calixto-AM335x-Linux3.2-Versa-EVM-V1
drivers/staging/et131x/et131x.c
141
160829
/* * Agere Systems Inc. * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs * * Copyright © 2005 Agere Systems Inc. * All rights reserved. * http://www.agere.com * * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> * *------------------------------------------------------------------------------ * * SOFTWARE LICENSE * * This software is provided subject to the following terms and conditions, * which you should read carefully before using the software. Using this * software indicates your acceptance of these terms and conditions. If you do * not agree with these terms and conditions, do not use the software. * * Copyright © 2005 Agere Systems Inc. * All rights reserved. * * Redistribution and use in source or binary forms, with or without * modifications, are permitted provided that the following conditions are met: * * . Redistributions of source code must retain the above copyright notice, this * list of conditions and the following Disclaimer as comments in the code as * well as in the documentation and/or other materials provided with the * distribution. * * . Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following Disclaimer in the documentation * and/or other materials provided with the distribution. * * . Neither the name of Agere Systems Inc. nor the names of the contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Disclaimer * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. 
OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #include <linux/pci.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/io.h> #include <asm/system.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/crc32.h> #include <linux/random.h> #include <linux/phy.h> #include "et131x.h" MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>"); MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver " "for the ET1310 by Agere Systems"); /* EEPROM defines */ #define MAX_NUM_REGISTER_POLLS 1000 #define MAX_NUM_WRITE_RETRIES 2 /* MAC defines */ #define COUNTER_WRAP_16_BIT 0x10000 #define COUNTER_WRAP_12_BIT 0x1000 /* PCI defines */ #define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */ #define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */ /* ISR defines */ /* * For interrupts, normal running is: * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt, * watchdog_interrupt & txdma_xfer_done * * In both cases, when flow control is enabled for either Tx or bi-direction, * we additional enable rx_fbr0_low and rx_fbr1_low, 
so we know when the * buffer rings are running low. */ #define INT_MASK_DISABLE 0xffffffff /* NOTE: Masking out MAC_STAT Interrupt for now... * #define INT_MASK_ENABLE 0xfff6bf17 * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7 */ #define INT_MASK_ENABLE 0xfffebf17 #define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7 /* General defines */ /* Packet and header sizes */ #define NIC_MIN_PACKET_SIZE 60 /* Multicast list size */ #define NIC_MAX_MCAST_LIST 128 /* Supported Filters */ #define ET131X_PACKET_TYPE_DIRECTED 0x0001 #define ET131X_PACKET_TYPE_MULTICAST 0x0002 #define ET131X_PACKET_TYPE_BROADCAST 0x0004 #define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008 #define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010 /* Tx Timeout */ #define ET131X_TX_TIMEOUT (1 * HZ) #define NIC_SEND_HANG_THRESHOLD 0 /* MP_TCB flags */ #define fMP_DEST_MULTI 0x00000001 #define fMP_DEST_BROAD 0x00000002 /* MP_ADAPTER flags */ #define fMP_ADAPTER_RECV_LOOKASIDE 0x00000004 #define fMP_ADAPTER_INTERRUPT_IN_USE 0x00000008 /* MP_SHARED flags */ #define fMP_ADAPTER_LOWER_POWER 0x00200000 #define fMP_ADAPTER_NON_RECOVER_ERROR 0x00800000 #define fMP_ADAPTER_HARDWARE_ERROR 0x04000000 #define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000 /* Some offsets in PCI config space that are actually used. 
*/ #define ET1310_PCI_MAX_PYLD 0x4C #define ET1310_PCI_MAC_ADDRESS 0xA4 #define ET1310_PCI_EEPROM_STATUS 0xB2 #define ET1310_PCI_ACK_NACK 0xC0 #define ET1310_PCI_REPLAY 0xC2 #define ET1310_PCI_L0L1LATENCY 0xCF /* PCI Product IDs */ #define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */ #define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */ /* Define order of magnitude converter */ #define NANO_IN_A_MICRO 1000 #define PARM_RX_NUM_BUFS_DEF 4 #define PARM_RX_TIME_INT_DEF 10 #define PARM_RX_MEM_END_DEF 0x2bc #define PARM_TX_TIME_INT_DEF 40 #define PARM_TX_NUM_BUFS_DEF 4 #define PARM_DMA_CACHE_DEF 0 /* RX defines */ #define USE_FBR0 1 #define FBR_CHUNKS 32 #define MAX_DESC_PER_RING_RX 1024 /* number of RFDs - default and min */ #ifdef USE_FBR0 #define RFD_LOW_WATER_MARK 40 #define NIC_DEFAULT_NUM_RFD 1024 #define NUM_FBRS 2 #else #define RFD_LOW_WATER_MARK 20 #define NIC_DEFAULT_NUM_RFD 256 #define NUM_FBRS 1 #endif #define NIC_MIN_NUM_RFD 64 #define NUM_PACKETS_HANDLED 256 #define ALCATEL_MULTICAST_PKT 0x01000000 #define ALCATEL_BROADCAST_PKT 0x02000000 /* typedefs for Free Buffer Descriptors */ struct fbr_desc { u32 addr_lo; u32 addr_hi; u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */ }; /* Packet Status Ring Descriptors * * Word 0: * * top 16 bits are from the Alcatel Status Word as enumerated in * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2) * * 0: hp hash pass * 1: ipa IP checksum assist * 2: ipp IP checksum pass * 3: tcpa TCP checksum assist * 4: tcpp TCP checksum pass * 5: wol WOL Event * 6: rxmac_error RXMAC Error Indicator * 7: drop Drop packet * 8: ft Frame Truncated * 9: jp Jumbo Packet * 10: vp VLAN Packet * 11-15: unused * 16: asw_prev_pkt_dropped e.g. 
IFG too small on previous * 17: asw_RX_DV_event short receive event detected * 18: asw_false_carrier_event bad carrier since last good packet * 19: asw_code_err one or more nibbles signalled as errors * 20: asw_CRC_err CRC error * 21: asw_len_chk_err frame length field incorrect * 22: asw_too_long frame length > 1518 bytes * 23: asw_OK valid CRC + no code error * 24: asw_multicast has a multicast address * 25: asw_broadcast has a broadcast address * 26: asw_dribble_nibble spurious bits after EOP * 27: asw_control_frame is a control frame * 28: asw_pause_frame is a pause frame * 29: asw_unsupported_op unsupported OP code * 30: asw_VLAN_tag VLAN tag detected * 31: asw_long_evt Rx long event * * Word 1: * 0-15: length length in bytes * 16-25: bi Buffer Index * 26-27: ri Ring Index * 28-31: reserved */ struct pkt_stat_desc { u32 word0; u32 word1; }; /* Typedefs for the RX DMA status word */ /* * rx status word 0 holds part of the status bits of the Rx DMA engine * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word * which contains the Free Buffer ring 0 and 1 available offset. * * bit 0-9 FBR1 offset * bit 10 Wrap flag for FBR1 * bit 16-25 FBR0 offset * bit 26 Wrap flag for FBR0 */ /* * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word * which contains the Packet Status Ring available offset. * * bit 0-15 reserved * bit 16-27 PSRoffset * bit 28 PSRwrap * bit 29-31 unused */ /* * struct rx_status_block is a structure representing the status of the Rx * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020 */ struct rx_status_block { u32 word0; u32 word1; }; /* * Structure for look-up table holding free buffer ring pointers, addresses * and state. 
*/ struct fbr_lookup { void *virt[MAX_DESC_PER_RING_RX]; void *buffer1[MAX_DESC_PER_RING_RX]; void *buffer2[MAX_DESC_PER_RING_RX]; u32 bus_high[MAX_DESC_PER_RING_RX]; u32 bus_low[MAX_DESC_PER_RING_RX]; void *ring_virtaddr; dma_addr_t ring_physaddr; void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; uint64_t real_physaddr; uint64_t offset; u32 local_full; u32 num_entries; u32 buffsize; }; /* * struct rx_ring is the sructure representing the adaptor's local * reference(s) to the rings * ****************************************************************************** * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1 * and index 1 to refer to FRB0 ****************************************************************************** */ struct rx_ring { struct fbr_lookup *fbr[NUM_FBRS]; void *ps_ring_virtaddr; dma_addr_t ps_ring_physaddr; u32 local_psr_full; u32 psr_num_entries; struct rx_status_block *rx_status_block; dma_addr_t rx_status_bus; /* RECV */ struct list_head recv_list; u32 num_ready_recv; u32 num_rfd; bool unfinished_receives; /* lookaside lists */ struct kmem_cache *recv_lookaside; }; /* TX defines */ /* * word 2 of the control bits in the Tx Descriptor ring for the ET-1310 * * 0-15: length of packet * 16-27: VLAN tag * 28: VLAN CFI * 29-31: VLAN priority * * word 3 of the control bits in the Tx Descriptor ring for the ET-1310 * * 0: last packet in the sequence * 1: first packet in the sequence * 2: interrupt the processor when this pkt sent * 3: Control word - no packet data * 4: Issue half-duplex backpressure : XON/XOFF * 5: send pause frame * 6: Tx frame has error * 7: append CRC * 8: MAC override * 9: pad packet * 10: Packet is a Huge packet * 11: append VLAN tag * 12: IP checksum assist * 13: TCP checksum assist * 14: UDP checksum assist */ /* struct tx_desc represents each descriptor on the ring */ struct tx_desc { u32 addr_hi; u32 addr_lo; u32 len_vlan; /* control 
words how to xmit the */ u32 flags; /* data (detailed above) */ }; /* * The status of the Tx DMA engine it sits in free memory, and is pointed to * by 0x101c / 0x1020. This is a DMA10 type */ /* TCB (Transmit Control Block: Host Side) */ struct tcb { struct tcb *next; /* Next entry in ring */ u32 flags; /* Our flags for the packet */ u32 count; /* Used to spot stuck/lost packets */ u32 stale; /* Used to spot stuck/lost packets */ struct sk_buff *skb; /* Network skb we are tied to */ u32 index; /* Ring indexes */ u32 index_start; }; /* Structure representing our local reference(s) to the ring */ struct tx_ring { /* TCB (Transmit Control Block) memory and lists */ struct tcb *tcb_ring; /* List of TCBs that are ready to be used */ struct tcb *tcb_qhead; struct tcb *tcb_qtail; /* list of TCBs that are currently being sent. NOTE that access to all * three of these (including used) are controlled via the * TCBSendQLock. This lock should be secured prior to incementing / * decrementing used, or any queue manipulation on send_head / * tail */ struct tcb *send_head; struct tcb *send_tail; int used; /* The actual descriptor ring */ struct tx_desc *tx_desc_ring; dma_addr_t tx_desc_ring_pa; /* send_idx indicates where we last wrote to in the descriptor ring. */ u32 send_idx; /* The location of the write-back status block */ u32 *tx_status; dma_addr_t tx_status_pa; /* Packets since the last IRQ: used for interrupt coalescing */ int since_irq; }; /* ADAPTER defines */ /* * Do not change these values: if changed, then change also in respective * TXdma and Rxdma engines */ #define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */ #define NUM_TCB 64 /* * These values are all superseded by registry entries to facilitate tuning. 
* Once the desired performance has been achieved, the optimal registry values * should be re-populated to these #defines: */ #define TX_ERROR_PERIOD 1000 #define LO_MARK_PERCENT_FOR_PSR 15 #define LO_MARK_PERCENT_FOR_RX 15 /* RFD (Receive Frame Descriptor) */ struct rfd { struct list_head list_node; struct sk_buff *skb; u32 len; /* total size of receive frame */ u16 bufferindex; u8 ringindex; }; /* Flow Control */ #define FLOW_BOTH 0 #define FLOW_TXONLY 1 #define FLOW_RXONLY 2 #define FLOW_NONE 3 /* Struct to define some device statistics */ struct ce_stats { /* MIB II variables * * NOTE: atomic_t types are only guaranteed to store 24-bits; if we * MUST have 32, then we'll need another way to perform atomic * operations */ u32 unicast_pkts_rcvd; atomic_t unicast_pkts_xmtd; u32 multicast_pkts_rcvd; atomic_t multicast_pkts_xmtd; u32 broadcast_pkts_rcvd; atomic_t broadcast_pkts_xmtd; u32 rcvd_pkts_dropped; /* Tx Statistics. */ u32 tx_underflows; u32 tx_collisions; u32 tx_excessive_collisions; u32 tx_first_collisions; u32 tx_late_collisions; u32 tx_max_pkt_errs; u32 tx_deferred; /* Rx Statistics. 
*/ u32 rx_overflows; u32 rx_length_errs; u32 rx_align_errs; u32 rx_crc_errs; u32 rx_code_violations; u32 rx_other_errs; u32 synchronous_iterations; u32 interrupt_status; }; /* The private adapter structure */ struct et131x_adapter { struct net_device *netdev; struct pci_dev *pdev; struct mii_bus *mii_bus; struct phy_device *phydev; struct work_struct task; /* Flags that indicate current state of the adapter */ u32 flags; /* local link state, to determine if a state change has occurred */ int link; /* Configuration */ u8 rom_addr[ETH_ALEN]; u8 addr[ETH_ALEN]; bool has_eeprom; u8 eeprom_data[2]; /* Spinlocks */ spinlock_t lock; spinlock_t tcb_send_qlock; spinlock_t tcb_ready_qlock; spinlock_t send_hw_lock; spinlock_t rcv_lock; spinlock_t rcv_pend_lock; spinlock_t fbr_lock; spinlock_t phy_lock; /* Packet Filter and look ahead size */ u32 packet_filter; /* multicast list */ u32 multicast_addr_count; u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN]; /* Pointer to the device's PCI register space */ struct address_map __iomem *regs; /* Registry parameters */ u8 wanted_flow; /* Flow we want for 802.3x flow control */ u32 registry_jumbo_packet; /* Max supported ethernet packet size */ /* Derived from the registry: */ u8 flowcontrol; /* flow control validated by the far-end */ /* Minimize init-time */ struct timer_list error_timer; /* variable putting the phy into coma mode when boot up with no cable * plugged in after 5 seconds */ u8 boot_coma; /* Next two used to save power information at power down. This * information will be used during power up to set up parts of Power * Management in JAGCore */ u16 pdown_speed; u8 pdown_duplex; /* Tx Memory Variables */ struct tx_ring tx_ring; /* Rx Memory Variables */ struct rx_ring rx_ring; /* Stats */ struct ce_stats stats; struct net_device_stats net_stats; }; /* EEPROM functions */ static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) { u32 reg; int i; /* * 1. 
Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and * bits 7,1:0 both equal to 1, at least once after reset. * Subsequent operations need only to check that bits 1:0 are equal * to 1 prior to starting a single byte read/write */ for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) { /* Read registers grouped in DWORD1 */ if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg)) return -EIO; /* I2C idle and Phy Queue Avail both true */ if ((reg & 0x3000) == 0x3000) { if (status) *status = reg; return reg & 0xFF; } } return -ETIMEDOUT; } /** * eeprom_write - Write a byte to the ET1310's EEPROM * @adapter: pointer to our private adapter structure * @addr: the address to write * @data: the value to write * * Returns 1 for a successful write. */ static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data) { struct pci_dev *pdev = adapter->pdev; int index = 0; int retries; int err = 0; int i2c_wack = 0; int writeok = 0; u32 status; u32 val = 0; /* * For an EEPROM, an I2C single byte write is defined as a START * condition followed by the device address, EEPROM address, one byte * of data and a STOP condition. The STOP condition will trigger the * EEPROM's internally timed write cycle to the nonvolatile memory. * All inputs are disabled during this write cycle and the EEPROM will * not respond to any access until the internal write is complete. */ err = eeprom_wait_ready(pdev, NULL); if (err) return err; /* * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0, * and bits 1:0 both =0. Bit 5 should be set according to the * type of EEPROM being accessed (1=two byte addressing, 0=one * byte addressing). 
*/ if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE)) return -EIO; i2c_wack = 1; /* Prepare EEPROM address for Step 3 */ for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) { /* Write the address to the LBCIF Address Register */ if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) break; /* * Write the data to the LBCIF Data Register (the I2C write * will begin). */ if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data)) break; /* * Monitor bit 1:0 of the LBCIF Status Register. When bits * 1:0 are both equal to 1, the I2C write has completed and the * internal write cycle of the EEPROM is about to start. * (bits 1:0 = 01 is a legal state while waiting from both * equal to 1, but bits 1:0 = 10 is invalid and implies that * something is broken). */ err = eeprom_wait_ready(pdev, &status); if (err < 0) return 0; /* * Check bit 3 of the LBCIF Status Register. If equal to 1, * an error has occurred.Don't break here if we are revision * 1, this is so we do a blind write for load bug. */ if ((status & LBCIF_STATUS_GENERAL_ERROR) && adapter->pdev->revision == 0) break; /* * Check bit 2 of the LBCIF Status Register. If equal to 1 an * ACK error has occurred on the address phase of the write. * This could be due to an actual hardware failure or the * EEPROM may still be in its internal write cycle from a * previous write. This write operation was ignored and must be *repeated later. */ if (status & LBCIF_STATUS_ACK_ERROR) { /* * This could be due to an actual hardware failure * or the EEPROM may still be in its internal write * cycle from a previous write. This write operation * was ignored and must be repeated later. */ udelay(10); continue; } writeok = 1; break; } /* * Set bit 6 of the LBCIF Control Register = 0. 
 */
	udelay(10);

	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
						      LBCIF_DATA_REGISTER,
						      &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

/**
 * eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 on success, negative errno on failure.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/*
	 * A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */
	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;
	/*
	 * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0. Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/*
	 * Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/*
	 * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
	 * is complete. (if bit 1 = 1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/*
	 * Regardless of error status, read data byte from LBCIF Data
	 * Register.  NOTE(review): this assumes eeprom_wait_ready() returns
	 * the data byte on success -- its definition is outside this chunk;
	 * confirm.
	 */
	*pdata = err;
	/*
	 * Check bit 2 of the LBCIF Status Register. If = 1,
	 * then an error has occurred.
	 */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

/* Check the EEPROM status word, apply the rev-0x01 corruption workaround,
 * and cache the two LED-behavior bytes in adapter->eeprom_data[].
 * Returns 0 on success, -EIO on fatal EEPROM error.
 */
int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	/* We first need to check the EEPROM Status code located at offset
	 * 0xB2 of config space
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card in a
	 * LG M1 Express Dual running. I tried also a msleep before this
	 * function, because I thought there could be some time conditions
	 * but it didn't work. Call the whole function twice also work.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present. If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;
		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 *
			 * NOTE(review): the loop writes only bytes 0..2 even
			 * though eedata[] has four entries; confirm whether
			 * "i < 3" is intentional.
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
			    "Fatal EEPROM Status Error - 0x%04x\n", eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior. Refer to
	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

/**
 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 */
void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the receive dma configuration register for normal
	 * operation
	 */
	u32 csr = 0x2000;	/* FBR1 enable */

	if (adapter->rx_ring.fbr[0]->buffsize == 4096)
		csr |= 0x0800;
	else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
		csr |= 0x1000;
	else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
		csr |= 0x1800;
#ifdef USE_FBR0
	csr |= 0x0400;	/* FBR0 enable */
	if (adapter->rx_ring.fbr[1]->buffsize == 256)
		csr |= 0x0100;
	else if (adapter->rx_ring.fbr[1]->buffsize == 512)
		csr |= 0x0200;
	else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
		csr |= 0x0300;
#endif
	writel(csr, &adapter->regs->rxdma.csr);

	/* Poll the halt-status bit (bit 17); the engine should have left
	 * the halt state once the CSR write above takes effect.
	 */
	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) != 0) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) != 0) {
			dev_err(&adapter->pdev->dev,
			    "RX Dma failed to exit halt state. CSR 0x%08x\n",
			    csr);
		}
	}
}

/**
 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;

	/* Setup the receive dma configuration register (halt + single
	 * packet mode)
	 */
	writel(0x00002001, &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) == 0) {	/* Check halt status (bit 17) */
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) == 0)
			dev_err(&adapter->pdev->dev,
			"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}

/**
 * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
					&adapter->regs->txdma.csr);
}

/* Advance *v by n modulo the 10-bit ring size, preserving the wrap bit. */
static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

/* Advance *v by n modulo the 12-bit ring size, preserving the wrap bit. */
static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}

/**
 * nic_rx_pkts - Checks the hardware for available packets
 * @adapter: pointer to our adapter
 *
 * Returns rfd, a pointer to our MPRFD.
 *
 * Checks the hardware for available packets, using completion ring
 * If packets are available, it gets an RFD from the recv_list, attaches
 * the packet to it, puts the RFD in the RecvPendList, and also returns
 * the pointer to the RFD.
 *
 * NOTE(review): this kernel-doc block is not followed by the function it
 * describes; nic_rx_pkts() is presumably defined elsewhere in the file.
 */

/* MAC functions */

/**
 * et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything.  Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(0xC00F0000, &macregs->cfg1);

	/* Next lets configure the MAC Inter-packet gap register */
	ipg = 0x38005860;	/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next lets configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next lets configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(0x07, &macregs->mii_mgmt_cfg);	/* Clock reset 0x7 */

	/* Next lets configure the MAC Station Address register.  These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure.  We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low.  This
	 * station address is used for generating and checking pause control
	 * packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will passed by the mac without
	 * being truncated.  Allow the MAC to pass 4 more than our max packet
	 * size.  This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}

/**
 * et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 */
void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	cfg2 &= ~0x300;
	if (phydev && phydev->speed == SPEED_1000) {
		cfg2 |= 0x200;	/* Phy mode bit */
		ifctrl &= ~(1 << 24);
	} else {
		cfg2 |= 0x100;
		ifctrl |= (1 << 24);
	}

	/* We need to enable Rx/Tx */
	cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
	/* Initialize loop back to off */
	cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
	if (adapter->flowcontrol == FLOW_RXONLY ||
				adapter->flowcontrol == FLOW_BOTH)
		cfg1 |= CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC
	 * Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable
	 * full duplex off
	 */
	cfg2 |= 0x7016;
	cfg2 &= ~0x0021;

	/* Turn on duplex if needed */
	if (phydev && phydev->duplex == DUPLEX_FULL)
		cfg2 |= 0x01;

	ifctrl &= ~(1 << 26);
	if (phydev && phydev->duplex == DUPLEX_HALF)
		ifctrl |= (1<<26);	/* Enable ghd */

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	/* Wait for the sync bits in cfg1 to come back, bounded to ~1ms */
	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
		    "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			cfg1);
	}

	/* Enable txmac */
	ctl |= 0x09;	/* TX mac enable, FC disable */
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Ready to start the RXDMA/TXDMA engine */
	if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

/**
 * et1310_in_phy_coma - check if the device is in phy coma
 * @adapter: pointer to our adapter structure
 *
 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
 */
int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

/* Program the four multicast hash registers from the adapter's multicast
 * list (skipped if the device is in phy coma).
 */
void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	uint32_t nIndex;
	uint32_t result;
	uint32_t hash1 = 0;
	uint32_t hash2 = 0;
	uint32_t hash3 = 0;
	uint32_t hash4 = 0;
	u32 pm_csr;

	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
	 * the multi-cast LIST.  If it is NOT specified, (and "ALL" is not
	 * specified) then we should pass NO multi-cast addresses to the
	 * driver.
	 */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		/* Loop through our multicast array and set up the device */
		for (nIndex = 0; nIndex < adapter->multicast_addr_count;
		     nIndex++) {
			result = ether_crc(6, adapter->multicast_list[nIndex]);

			/* Use CRC bits 30:23 as the 7-bit hash bucket index */
			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	/* Write out the new hash to the device */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}

/* Program the unicast packet-filter address registers from adapter->addr
 * (skipped if the device is in phy coma).
 */
void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both address
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
		   adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
		   adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
		   adapter->addr[5];

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

/* Configure the RxMAC: disable WOL, program source address and packet
 * filtering, set up the MCIF, then re-enable the RxMAC.
 */
void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next.  We initialize it to
	 * its default Values of 0x00000000 because there are not WOL masks
	 * as of this time.
	 */
	writel(0, &rxmac->mask0_word0);
	writel(0, &rxmac->mask0_word1);
	writel(0, &rxmac->mask0_word2);
	writel(0, &rxmac->mask0_word3);

	writel(0, &rxmac->mask1_word0);
	writel(0, &rxmac->mask1_word1);
	writel(0, &rxmac->mask1_word2);
	writel(0, &rxmac->mask1_word3);

	writel(0, &rxmac->mask2_word0);
	writel(0, &rxmac->mask2_word1);
	writel(0, &rxmac->mask2_word2);
	writel(0, &rxmac->mask2_word3);

	writel(0, &rxmac->mask3_word0);
	writel(0, &rxmac->mask3_word1);
	writel(0, &rxmac->mask3_word2);
	writel(0, &rxmac->mask3_word3);

	writel(0, &rxmac->mask4_word0);
	writel(0, &rxmac->mask4_word1);
	writel(0, &rxmac->mask4_word2);
	writel(0, &rxmac->mask4_word3);

	/* Lets setup the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
		 adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
		       adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= 4;	/* Unicast filter */
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= 2;	/* Multicast filter */
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering.  Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
	pf_ctrl |= 8;	/* Fragment filter */

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size).  In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16).  In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	/* Initialize the MCIF water marks */
	writel(0, &rxmac->mcif_water_mark);

	/* Initialize the MIF control */
	writel(0, &rxmac->mif_ctrl);

	/* Initialize the Space Available Register */
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3:  Receive code error. One or more nibbles were signaled as
	 *	   errors during the reception of the packet.  Clear this
	 *	   bit in Gigabit, set it in 100Mbit.  This was derived
	 *	   experimentally at UNH.
	 * bit 4:  Receive CRC error. The packet's CRC did not match the
	 *	   internally generated CRC.
	 * bit 5:  Receive length check error. Indicates that frame length
	 *	   field value in the packet does not match the actual data
	 *	   byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled.  Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped.  For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(0x9, &rxmac->ctrl);
}

/* Program the TxMAC control-frame pause parameters based on flow control. */
void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	/* We need to update the Control Frame Parameters
	 * cfpt - control frame pause timer set to 64 (0x40)
	 * cfep - control frame extended pause timer set to 0x0
	 */
	if (adapter->flowcontrol == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}

/* Zero every MAC_STAT hardware counter and unmask the carry interrupts. */
void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat = &adapter->regs->macstat;

	/* Next we need to initialize all the macstat registers to zero on
	 * the device.
	 */
	writel(0, &macstat->txrx_0_64_byte_frames);
	writel(0, &macstat->txrx_65_127_byte_frames);
	writel(0, &macstat->txrx_128_255_byte_frames);
	writel(0, &macstat->txrx_256_511_byte_frames);
	writel(0, &macstat->txrx_512_1023_byte_frames);
	writel(0, &macstat->txrx_1024_1518_byte_frames);
	writel(0, &macstat->txrx_1519_1522_gvln_frames);

	writel(0, &macstat->rx_bytes);
	writel(0, &macstat->rx_packets);
	writel(0, &macstat->rx_fcs_errs);
	writel(0, &macstat->rx_multicast_packets);
	writel(0, &macstat->rx_broadcast_packets);
	writel(0, &macstat->rx_control_frames);
	writel(0, &macstat->rx_pause_frames);
	writel(0, &macstat->rx_unknown_opcodes);
	writel(0, &macstat->rx_align_errs);
	writel(0, &macstat->rx_frame_len_errs);
	writel(0, &macstat->rx_code_errs);
	writel(0, &macstat->rx_carrier_sense_errs);
	writel(0, &macstat->rx_undersize_packets);
	writel(0, &macstat->rx_oversize_packets);
	writel(0, &macstat->rx_fragment_packets);
	writel(0, &macstat->rx_jabbers);
	writel(0, &macstat->rx_drops);

	writel(0, &macstat->tx_bytes);
	writel(0, &macstat->tx_packets);
	writel(0, &macstat->tx_multicast_packets);
	writel(0, &macstat->tx_broadcast_packets);
	writel(0, &macstat->tx_pause_frames);
	writel(0,
	       &macstat->tx_deferred);
	writel(0, &macstat->tx_excessive_deferred);
	writel(0, &macstat->tx_single_collisions);
	writel(0, &macstat->tx_multiple_collisions);
	writel(0, &macstat->tx_late_collisions);
	writel(0, &macstat->tx_excessive_collisions);
	writel(0, &macstat->tx_total_collisions);
	writel(0, &macstat->tx_pause_honored_frames);
	writel(0, &macstat->tx_drops);
	writel(0, &macstat->tx_jabbers);
	writel(0, &macstat->tx_fcs_errs);
	writel(0, &macstat->tx_control_frames);
	writel(0, &macstat->tx_oversize_frames);
	writel(0, &macstat->tx_undersize_frames);
	writel(0, &macstat->tx_fragments);

	writel(0, &macstat->carry_reg1);
	writel(0, &macstat->carry_reg2);

	/* Unmask any counters that we want to track the overflow of.
	 * Initially this will be all counters.  It may become clear later
	 * that we do not need to track all counters.
	 */
	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

/**
 * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to read
 * @value: pointer to a 16-bit value in which the value will be stored
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
	      u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

	/* Poll until the MII management block is no longer busy (~2.5ms) */
	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			    "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is  0x%08x\n",
			    mii_indicator);

		status = -EIO;
	}

	/* If we hit here we were able to read the register and we need to
	 * return the value to the caller
	 */
	*value = readl(&mac->mii_mgmt_stat) & 0xFFFF;

	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

/* Convenience wrapper: read @reg from the PHY attached to this adapter. */
int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}

/**
 * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @reg: the register to write
 * @value: 16-bit value to write
 *
 * FIXME: one caller in netdev still
 *
 * Return 0 on success, errno on failure (as defined in errno.h)
 */
int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	int status = 0;
	u8 addr;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	if (!phydev)
		return -EIO;

	addr = phydev->addr;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

	/* Poll until the MII management block is no longer busy (~5ms) */
	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_BUSY) && delay < 100);

	/* If we hit the max delay, we could not write the register */
	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
		    "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is  0x%08x\n",
			    mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is  0x%08x\n",
			    readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}
	/* Stop the write operation */
	writel(0, &mac->mii_mgmt_cmd);

	/*
	 * set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

/* Still used from _mac for BIT_READ */
/* Read, set, or clear a single bit (@bitnum) of PHY register @regnum,
 * depending on @action (TRUEPHY_BIT_READ / _SET / _CLEAR).
 */
void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
			       u16 regnum, u16 bitnum, u8 *value)
{
	u16 reg;
	u16 mask = 0x0001 << bitnum;

	/* Read the requested register */
	et131x_mii_read(adapter, regnum, &reg);

	switch (action) {
	case TRUEPHY_BIT_READ:
		*value = (reg & mask) >> bitnum;
		break;

	case TRUEPHY_BIT_SET:
		et131x_mii_write(adapter, regnum, reg | mask);
		break;

	case TRUEPHY_BIT_CLEAR:
		et131x_mii_write(adapter, regnum, reg & ~mask);
		break;

	default:
		break;
	}
}

/* Derive adapter->flowcontrol from the link partner's advertised pause
 * bits (register 5, bits 10/11) and our wanted_flow setting.
 */
void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flowcontrol = FLOW_NONE;
	} else {
		char remote_pause, remote_async_pause;

		et1310_phy_access_mii_bit(adapter,
				TRUEPHY_BIT_READ, 5, 10, &remote_pause);
		et1310_phy_access_mii_bit(adapter,
				TRUEPHY_BIT_READ, 5, 11,
				&remote_async_pause);

		if ((remote_pause == TRUEPHY_BIT_SET) &&
		    (remote_async_pause == TRUEPHY_BIT_SET)) {
			adapter->flowcontrol = adapter->wanted_flow;
		} else if ((remote_pause == TRUEPHY_BIT_SET) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_BOTH;
			else
				adapter->flowcontrol = FLOW_NONE;
		} else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			adapter->flowcontrol = FLOW_NONE;
		} else { /*
			  if (remote_pause == TRUEPHY_CLEAR_BIT &&
			      remote_async_pause == TRUEPHY_SET_BIT) */
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_RXONLY;
			else
				adapter->flowcontrol = FLOW_NONE;
		}
	}
}

/**
 * et1310_update_macstat_host_counters - Update the local copy of the statistics
 * @adapter: pointer to the adapter structure
 *
 * NOTE(review): several source/destination pairs below look cross-mapped
 * (e.g. rx_crc_errs accumulates rx_code_errs and rx_code_violations
 * accumulates rx_fcs_errs) -- confirm against the MAC_STAT register map
 * before relying on these fields.
 */
void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	stats->tx_collisions	       += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions     += readl(&macstat->tx_single_collisions);
	stats->tx_deferred	       += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
				readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions      += readl(&macstat->tx_late_collisions);
	stats->tx_underflows	       += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs	       += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs	       += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs	       += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped       += readl(&macstat->rx_drops);
	stats->rx_overflows	       += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations      += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs	       += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs	       += readl(&macstat->rx_fragment_packets);
}

/**
 * et1310_handle_macstat_interrupt
 * @adapter: pointer to the adapter structure
 *
 * One of the MACSTAT counters has wrapped.  Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	/* Read the interrupt bits from the register(s).  These are Clear On
	 * Write.
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to do update the host copy of all the MAC_STAT counters.
	 * For each counter, check it's overflow bit.  If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter.  This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs	+= COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped	+= COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions	+= COUNTER_WRAP_12_BIT;
}

/* PHY functions */

/* mii_bus read hook: forward to et131x_phy_mii_read() and return either
 * the 16-bit register value or a negative errno.
 */
int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;
	else
		return value;
}

/* mii_bus write hook.
 * NOTE(review): @phy_addr is ignored here -- et131x_mii_write() always
 * uses adapter->phydev's address; confirm this is intentional for this
 * single-PHY device.
 */
int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, reg, value);
}

/* mii_bus reset hook: issue a BMCR software reset to the PHY. */
int et131x_mdio_reset(struct mii_bus *bus)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);

	return 0;
}

/**
 * et1310_phy_power_down - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
	u16 data;

	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, MII_BMCR, data);
}

/**
 * et131x_xcvr_init - Init the phy if we are setting it into force mode
 * @adapter: pointer to our private adapter structure
 *
 */
void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 imr;
	u16 isr;
	u16 lcr2;

	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);

	/* Set the link status interrupt only.  Bad behavior when link status
	 * and auto neg are set, we run into a nested interrupt problem
	 *
	 * NOTE(review): the three masks below are combined with '&', not
	 * '|', so only bits common to all three are set -- confirm against
	 * the ET_PHY_INT_MASK_* definitions that this subset is intended.
	 */
	imr |= (ET_PHY_INT_MASK_AUTONEGSTAT &
			ET_PHY_INT_MASK_LINKSTAT &
			ET_PHY_INT_MASK_ENABLE);

	et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);

	/* Set the LED behavior such that LED 1 indicates speed (off =
	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
	 * link and activity (on for link, blink off for activity).
	 *
	 * NOTE: Some customizations have been added here for specific
	 * vendors; The LED behavior is now determined by vendor data in the
	 * EEPROM. However, the above description is the default.
	 */
	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, PHY_LED_2, lcr2);
	}
}

/**
 * et131x_configure_global_regs - configure JAGCore global regs
 * @adapter: pointer to our adapter structure
 *
 * Used to configure the global registers on the JAGCore
 */
void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
		 * block of RAM that the driver can split between Tx
		 * and Rx as it desires.  Our default is to split it
		 * 50/50:
		 */
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		/* For jumbo packets > 2k but < 8k, split 50-50. */
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		/* 9216 is the only packet size greater than 8k that
		 * is available.  The Tx buffer has to be big enough
		 * for one whole packet on the Tx side.  We'll make
		 * the Tx 9408, and give the rest to Rx
		 */
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	/* Initialize the loopback register.  Disable all loopbacks. */
	writel(0, &regs->loopback);

	/* MSI Register */
	writel(0, &regs->msi_config);

	/* By default, disable the watchdog timer.  It will be enabled when
	 * a packet is queued.
	 */
	writel(0, &regs->watchdog_timer);
}

/* PM functions */

/**
 * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
 * @adapter: pointer to our adapter structure
 */
void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;

	/* Halt RXDMA to perform the reconfigure.  */
	et131x_rx_dma_disable(adapter);

	/* Load the completion writeback physical address
	 *
	 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((u32) ((u64)rx_local->rx_status_bus >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
	       &rx_dma->psr_base_hi);
	writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
	writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Now's the best time to initialize FBR1 contents */
	fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
	for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
required) into the 1310's registers */ writel((u32) (rx_local->fbr[0]->real_physaddr >> 32), &rx_dma->fbr1_base_hi); writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo); writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des); writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset); /* This variable tracks the free buffer ring 1 full position, so it * has to match the above. */ rx_local->fbr[0]->local_full = ET_DMA10_WRAP; writel( ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, &rx_dma->fbr1_min_des); #ifdef USE_FBR0 /* Now's the best time to initialize FBR0 contents */ fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr; for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) { fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry]; fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry]; fbr_entry->word2 = entry; fbr_entry++; } writel((u32) (rx_local->fbr[1]->real_physaddr >> 32), &rx_dma->fbr0_base_hi); writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo); writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des); writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset); /* This variable tracks the free buffer ring 0 full position, so it * has to match the above. */ rx_local->fbr[1]->local_full = ET_DMA10_WRAP; writel( ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, &rx_dma->fbr0_min_des); #endif /* Program the number of packets we will receive before generating an * interrupt. * For version B silicon, this value gets updated once autoneg is *complete. */ writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done); /* The "time_done" is not working correctly to coalesce interrupts * after a given time period, but rather is giving us an interrupt * regardless of whether we have received packets. * This value gets updated once autoneg is complete. 
*/ writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time); spin_unlock_irqrestore(&adapter->rcv_lock, flags); } /** * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. * @adapter: pointer to our private adapter structure * * Configure the transmit engine with the ring buffers we have created * and prepare it for use. */ void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) { struct txdma_regs __iomem *txdma = &adapter->regs->txdma; /* Load the hardware with the start of the transmit descriptor ring. */ writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32), &txdma->pr_base_hi); writel((u32) adapter->tx_ring.tx_desc_ring_pa, &txdma->pr_base_lo); /* Initialise the transmit DMA engine */ writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); /* Load the completion writeback physical address */ writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32), &txdma->dma_wb_base_hi); writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo); *adapter->tx_ring.tx_status = 0; writel(0, &txdma->service_request); adapter->tx_ring.send_idx = 0; } /** * et131x_adapter_setup - Set the adapter up as per cassini+ documentation * @adapter: pointer to our private adapter structure * * Returns 0 on success, errno on failure (as defined in errno.h) */ void et131x_adapter_setup(struct et131x_adapter *adapter) { /* Configure the JAGCore */ et131x_configure_global_regs(adapter); et1310_config_mac_regs1(adapter); /* Configure the MMC registers */ /* All we need to do is initialize the Memory Control Register */ writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl); et1310_config_rxmac_regs(adapter); et1310_config_txmac_regs(adapter); et131x_config_rx_dma_regs(adapter); et131x_config_tx_dma_regs(adapter); et1310_config_macstat_regs(adapter); et1310_phy_power_down(adapter, 0); et131x_xcvr_init(adapter); } /** * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310 * @adapter: pointer to our private adapter structure */ void 
et131x_soft_reset(struct et131x_adapter *adapter)
{
	/* Disable MAC Core */
	writel(0xc00f0000, &adapter->regs->mac.cfg1);

	/* Set everything to a reset value */
	writel(0x7F, &adapter->regs->global.sw_reset);
	writel(0x000f0000, &adapter->regs->mac.cfg1);
	writel(0x00000000, &adapter->regs->mac.cfg1);
}

/**
 * et131x_enable_interrupts - enable interrupt
 * @adapter: et131x device
 *
 * Enable the appropriate interrupts on the ET131x according to our
 * configuration; the flow-control setting decides which mask is used.
 */
void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
	u32 mask;

	/* Enable all global interrupts */
	if (adapter->flowcontrol == FLOW_TXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		mask = INT_MASK_ENABLE;
	else
		mask = INT_MASK_ENABLE_NO_FLOW;

	writel(mask, &adapter->regs->global.int_mask);
}

/**
 * et131x_disable_interrupts - interrupt disable
 * @adapter: et131x device
 *
 * Block all interrupts from the et131x device at the device itself
 */
void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	/* Disable all global interrupts */
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}

/**
 * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}

/**
 * et131x_enable_txrx - Enable tx/rx queues
 * @netdev: device to be enabled
 */
void et131x_enable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Enable the Tx and Rx DMA engines (if not already enabled) */
	et131x_rx_dma_enable(adapter);
	et131x_tx_dma_enable(adapter);

	/* Enable device interrupts */
	if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
		et131x_enable_interrupts(adapter);

	/* We're ready to move some data, so start the queue */
	netif_start_queue(netdev);
}

/**
 * et131x_disable_txrx - Disable tx/rx queues
 * @netdev: device to be disabled
 */
void
et131x_disable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* First thing is to stop the queue */
	netif_stop_queue(netdev);

	/* Stop the Tx and Rx DMA engines */
	et131x_rx_dma_disable(adapter);
	et131x_tx_dma_disable(adapter);

	/* Disable device interrupts */
	et131x_disable_interrupts(adapter);
}

/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 *
 * Zeroes the TCB array and chains the TCBs into a singly-linked free
 * queue anchored at tcb_qhead/tcb_qtail; the active send queue starts empty.
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	u32 ct;
	struct tx_ring *tx_ring;

	/* Setup some convenience pointers */
	tx_ring = &adapter->tx_ring;
	tcb = adapter->tx_ring.tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Go through and set up each TCB.
	 * Note the unusual loop: ct++ < NUM_TCB links the first NUM_TCB - 1
	 * TCBs forward; the tail's ->next is fixed up just below.
	 */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain
		 */
		tcb->next = tcb + 1;

	/* Set the tail pointer */
	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;
	/* Curr send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}

/**
 * et1310_enable_phy_coma - called when network cable is unplugged
 * @adapter: pointer to our adapter structure
 *
 * The driver receives a phy status change interrupt while in D0 and checks
 * that phy_status is down.
 *
 * -- gate off JAGCore;
 * -- set gigE PHY in Coma mode
 * -- wake on phy_interrupt; Perform software reset JAGCore,
 *    re-initialize jagcore and gigE PHY
 *
 * Add D0-ASPM-PhyLinkDown Support:
 * -- while in D0, when there is a phy_interrupt indicating phy link
 *    down status, call the MPSetPhyComa routine to enter this active
 *    state power saving mode
 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
 *    indicating linkup status, call the MPDisablePhyComa routine to
 *    restore JAGCore and gigE PHY
 */
void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Save the GbE PHY speed and duplex modes. Need to restore this
	 * when cable is plugged back in
	 */
	/*
	 * TODO - when PM is re-enabled, check if we need to
	 * perform a similar task as this -
	 * adapter->pdown_speed = adapter->ai_force_speed;
	 * adapter->pdown_duplex = adapter->ai_force_duplex;
	 */

	/* Stop sending packets. */
	spin_lock_irqsave(&adapter->send_hw_lock, flags);
	adapter->flags |= fMP_ADAPTER_LOWER_POWER;
	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);

	/* Wait for outstanding Receive packets */
	et131x_disable_txrx(adapter->netdev);

	/* Gate off JAGCore 3 clock domains */
	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Program gigE PHY in to Coma mode */
	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}

/**
 * et1310_disable_phy_coma - Disable the Phy Coma Mode
 * @adapter: pointer to our adapter structure
 */
void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Restore the GbE PHY speed and duplex modes;
	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
	 */
	/* TODO - when PM is re-enabled, check if we need to
	 * perform a similar task as this -
	 * adapter->ai_force_speed = adapter->pdown_speed;
	 * adapter->ai_force_duplex = adapter->pdown_duplex;
	 */

	/* Re-initialize the send structures */
	et131x_init_send(adapter);

	/* Bring the device back to the state it was during init prior to
	 * autonegotiation being complete. This way, when we get the auto-neg
	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
	 */
	et131x_soft_reset(adapter);

	/* setup et1310 as per the documentation ?? */
	et131x_adapter_setup(adapter);

	/* Allow Tx to restart */
	adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;

	et131x_enable_txrx(adapter->netdev);
}

/* RX functions */

/* Advance a 10-bit free-buffer-ring index (ET_DMA10 format: 10 index bits
 * plus a wrap bit) and return the new value.
 */
static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
	u32 tmp_free_buff_ring = *free_buff_ring;
	tmp_free_buff_ring++;
	/* This works for all cases where limit < 1024. The 1023 case
	 * works because 1023++ is 1024 which means the if condition is not
	 * taken but the carry of the bit into the wrap bit toggles the wrap
	 * value correctly
	 */
	if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
		tmp_free_buff_ring &= ~ET_DMA10_MASK;
		tmp_free_buff_ring ^= ET_DMA10_WRAP;
	}
	/* For the 1023 case */
	tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
	*free_buff_ring = tmp_free_buff_ring;
	return tmp_free_buff_ring;
}

/**
 * et131x_align_allocated_memory - Align allocated memory on a given boundary
 * @adapter: pointer to our adapter structure
 * @phys_addr: pointer to Physical address; updated to the aligned address
 * @offset: pointer to the offset variable; receives the adjustment applied
 * @mask: low-order bits that must be zero in the aligned address
 */
void et131x_align_allocated_memory(struct et131x_adapter *adapter,
				   uint64_t *phys_addr,
				   uint64_t *offset, uint64_t mask)
{
	uint64_t new_addr;

	*offset = 0;

	new_addr = *phys_addr & ~mask;

	if (new_addr != *phys_addr) {
		/* Move to next aligned block */
		new_addr += mask + 1;
		/* Return offset for adjusting virt addr */
		*offset = new_addr - *phys_addr;
		/* Return new physical address */
		*phys_addr = new_addr;
	}
}

/**
 * et131x_rx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 *
 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
 * and the Packet Status Ring.
*/ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) { u32 i, j; u32 bufsize; u32 pktstat_ringsize, fbr_chunksize; struct rx_ring *rx_ring; /* Setup some convenience pointers */ rx_ring = &adapter->rx_ring; /* Alloc memory for the lookup table */ #ifdef USE_FBR0 rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); #endif rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); /* The first thing we will do is configure the sizes of the buffer * rings. These will change based on jumbo packet support. Larger * jumbo packets increases the size of each entry in FBR0, and the * number of entries in FBR0, while at the same time decreasing the * number of entries in FBR1. * * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1 * entries are huge in order to accommodate a "jumbo" frame, then it * will have less entries. Conversely, FBR1 will now be relied upon * to carry more "normal" frames, thus it's entry size also increases * and the number of entries goes up too (since it now carries * "small" + "regular" packets. * * In this scheme, we try to maintain 512 entries between the two * rings. Also, FBR1 remains a constant size - when it's size doubles * the number of entries halves. FBR0 increases in size, however. 
*/ if (adapter->registry_jumbo_packet < 2048) { #ifdef USE_FBR0 rx_ring->fbr[1]->buffsize = 256; rx_ring->fbr[1]->num_entries = 512; #endif rx_ring->fbr[0]->buffsize = 2048; rx_ring->fbr[0]->num_entries = 512; } else if (adapter->registry_jumbo_packet < 4096) { #ifdef USE_FBR0 rx_ring->fbr[1]->buffsize = 512; rx_ring->fbr[1]->num_entries = 1024; #endif rx_ring->fbr[0]->buffsize = 4096; rx_ring->fbr[0]->num_entries = 512; } else { #ifdef USE_FBR0 rx_ring->fbr[1]->buffsize = 1024; rx_ring->fbr[1]->num_entries = 768; #endif rx_ring->fbr[0]->buffsize = 16384; rx_ring->fbr[0]->num_entries = 128; } #ifdef USE_FBR0 adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[1]->num_entries + adapter->rx_ring.fbr[0]->num_entries; #else adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries; #endif /* Allocate an area of memory for Free Buffer Ring 1 */ bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + 0xfff; rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, bufsize, &rx_ring->fbr[0]->ring_physaddr, GFP_KERNEL); if (!rx_ring->fbr[0]->ring_virtaddr) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Free Buffer Ring 1\n"); return -ENOMEM; } /* Save physical address * * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here * before storing the adjusted address. 
*/ rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr; /* Align Free Buffer Ring 1 on a 4K boundary */ et131x_align_allocated_memory(adapter, &rx_ring->fbr[0]->real_physaddr, &rx_ring->fbr[0]->offset, 0x0FFF); rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr + rx_ring->fbr[0]->offset); #ifdef USE_FBR0 /* Allocate an area of memory for Free Buffer Ring 0 */ bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + 0xfff; rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, bufsize, &rx_ring->fbr[1]->ring_physaddr, GFP_KERNEL); if (!rx_ring->fbr[1]->ring_virtaddr) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Free Buffer Ring 0\n"); return -ENOMEM; } /* Save physical address * * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here before * storing the adjusted address. */ rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr; /* Align Free Buffer Ring 0 on a 4K boundary */ et131x_align_allocated_memory(adapter, &rx_ring->fbr[1]->real_physaddr, &rx_ring->fbr[1]->offset, 0x0FFF); rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr + rx_ring->fbr[1]->offset); #endif for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) { u64 fbr1_offset; u64 fbr1_tmp_physaddr; u32 fbr1_align; /* This code allocates an area of memory big enough for N * free buffers + (buffer_size - 1) so that the buffers can * be aligned on 4k boundaries. If each buffer were aligned * to a buffer_size boundary, the effect would be to double * the size of FBR0. By allocating N buffers at once, we * reduce this overhead. 
*/ if (rx_ring->fbr[0]->buffsize > 4096) fbr1_align = 4096; else fbr1_align = rx_ring->fbr[0]->buffsize; fbr_chunksize = (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1; rx_ring->fbr[0]->mem_virtaddrs[i] = dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize, &rx_ring->fbr[0]->mem_physaddrs[i], GFP_KERNEL); if (!rx_ring->fbr[0]->mem_virtaddrs[i]) { dev_err(&adapter->pdev->dev, "Could not alloc memory\n"); return -ENOMEM; } /* See NOTE in "Save Physical Address" comment above */ fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i]; et131x_align_allocated_memory(adapter, &fbr1_tmp_physaddr, &fbr1_offset, (fbr1_align - 1)); for (j = 0; j < FBR_CHUNKS; j++) { u32 index = (i * FBR_CHUNKS) + j; /* Save the Virtual address of this index for quick * access later */ rx_ring->fbr[0]->virt[index] = (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] + (j * rx_ring->fbr[0]->buffsize) + fbr1_offset; /* now store the physical address in the descriptor * so the device can access it */ rx_ring->fbr[0]->bus_high[index] = (u32) (fbr1_tmp_physaddr >> 32); rx_ring->fbr[0]->bus_low[index] = (u32) fbr1_tmp_physaddr; fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize; rx_ring->fbr[0]->buffer1[index] = rx_ring->fbr[0]->virt[index]; rx_ring->fbr[0]->buffer2[index] = rx_ring->fbr[0]->virt[index] - 4; } } #ifdef USE_FBR0 /* Same for FBR0 (if in use) */ for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) { u64 fbr0_offset; u64 fbr0_tmp_physaddr; fbr_chunksize = ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1; rx_ring->fbr[1]->mem_virtaddrs[i] = dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize, &rx_ring->fbr[1]->mem_physaddrs[i], GFP_KERNEL); if (!rx_ring->fbr[1]->mem_virtaddrs[i]) { dev_err(&adapter->pdev->dev, "Could not alloc memory\n"); return -ENOMEM; } /* See NOTE in "Save Physical Address" comment above */ fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i]; et131x_align_allocated_memory(adapter, &fbr0_tmp_physaddr, &fbr0_offset, rx_ring->fbr[1]->buffsize - 1); for 
(j = 0; j < FBR_CHUNKS; j++) { u32 index = (i * FBR_CHUNKS) + j; rx_ring->fbr[1]->virt[index] = (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] + (j * rx_ring->fbr[1]->buffsize) + fbr0_offset; rx_ring->fbr[1]->bus_high[index] = (u32) (fbr0_tmp_physaddr >> 32); rx_ring->fbr[1]->bus_low[index] = (u32) fbr0_tmp_physaddr; fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize; rx_ring->fbr[1]->buffer1[index] = rx_ring->fbr[1]->virt[index]; rx_ring->fbr[1]->buffer2[index] = rx_ring->fbr[1]->virt[index] - 4; } } #endif /* Allocate an area of memory for FIFO of Packet Status ring entries */ pktstat_ringsize = sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries; rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, pktstat_ringsize, &rx_ring->ps_ring_physaddr, GFP_KERNEL); if (!rx_ring->ps_ring_virtaddr) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Packet Status Ring\n"); return -ENOMEM; } printk(KERN_INFO "Packet Status Ring %lx\n", (unsigned long) rx_ring->ps_ring_physaddr); /* * NOTE : dma_alloc_coherent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here before * storing the adjusted address. */ /* Allocate an area of memory for writeback of status information */ rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev, sizeof(struct rx_status_block), &rx_ring->rx_status_bus, GFP_KERNEL); if (!rx_ring->rx_status_block) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Status Block\n"); return -ENOMEM; } rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus); /* Recv * kmem_cache_create initializes a lookaside list. After successful * creation, nonpaged fixed-size blocks can be allocated from and * freed to the lookaside list. * RFDs will be allocated from this pool. 
*/ rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name, sizeof(struct rfd), 0, SLAB_CACHE_DMA | SLAB_HWCACHE_ALIGN, NULL); adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE; /* The RFDs are going to be put on lists later on, so initialize the * lists now. */ INIT_LIST_HEAD(&rx_ring->recv_list); return 0; } /** * et131x_rx_dma_memory_free - Free all memory allocated within this module. * @adapter: pointer to our private adapter structure */ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) { u32 index; u32 bufsize; u32 pktstat_ringsize; struct rfd *rfd; struct rx_ring *rx_ring; /* Setup some convenience pointers */ rx_ring = &adapter->rx_ring; /* Free RFDs and associated packet descriptors */ WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); while (!list_empty(&rx_ring->recv_list)) { rfd = (struct rfd *) list_entry(rx_ring->recv_list.next, struct rfd, list_node); list_del(&rfd->list_node); rfd->skb = NULL; kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd); } /* Free Free Buffer Ring 1 */ if (rx_ring->fbr[0]->ring_virtaddr) { /* First the packet memory */ for (index = 0; index < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) { if (rx_ring->fbr[0]->mem_virtaddrs[index]) { u32 fbr1_align; if (rx_ring->fbr[0]->buffsize > 4096) fbr1_align = 4096; else fbr1_align = rx_ring->fbr[0]->buffsize; bufsize = (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) + fbr1_align - 1; dma_free_coherent(&adapter->pdev->dev, bufsize, rx_ring->fbr[0]->mem_virtaddrs[index], rx_ring->fbr[0]->mem_physaddrs[index]); rx_ring->fbr[0]->mem_virtaddrs[index] = NULL; } } /* Now the FIFO itself */ rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset); bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + 0xfff; dma_free_coherent(&adapter->pdev->dev, bufsize, rx_ring->fbr[0]->ring_virtaddr, rx_ring->fbr[0]->ring_physaddr); rx_ring->fbr[0]->ring_virtaddr = NULL; } #ifdef USE_FBR0 /* Now the same for Free 
Buffer Ring 0 */ if (rx_ring->fbr[1]->ring_virtaddr) { /* First the packet memory */ for (index = 0; index < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) { if (rx_ring->fbr[1]->mem_virtaddrs[index]) { bufsize = (rx_ring->fbr[1]->buffsize * (FBR_CHUNKS + 1)) - 1; dma_free_coherent(&adapter->pdev->dev, bufsize, rx_ring->fbr[1]->mem_virtaddrs[index], rx_ring->fbr[1]->mem_physaddrs[index]); rx_ring->fbr[1]->mem_virtaddrs[index] = NULL; } } /* Now the FIFO itself */ rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset); bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + 0xfff; dma_free_coherent(&adapter->pdev->dev, bufsize, rx_ring->fbr[1]->ring_virtaddr, rx_ring->fbr[1]->ring_physaddr); rx_ring->fbr[1]->ring_virtaddr = NULL; } #endif /* Free Packet Status Ring */ if (rx_ring->ps_ring_virtaddr) { pktstat_ringsize = sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries; dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize, rx_ring->ps_ring_virtaddr, rx_ring->ps_ring_physaddr); rx_ring->ps_ring_virtaddr = NULL; } /* Free area of memory for the writeback of status information */ if (rx_ring->rx_status_block) { dma_free_coherent(&adapter->pdev->dev, sizeof(struct rx_status_block), rx_ring->rx_status_block, rx_ring->rx_status_bus); rx_ring->rx_status_block = NULL; } /* Destroy the lookaside (RFD) pool */ if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) { kmem_cache_destroy(rx_ring->recv_lookaside); adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE; } /* Free the FBR Lookup Table */ #ifdef USE_FBR0 kfree(rx_ring->fbr[1]); #endif kfree(rx_ring->fbr[0]); /* Reset Counters */ rx_ring->num_ready_recv = 0; } /** * et131x_init_recv - Initialize receive data structures. 
* @adapter: pointer to our private adapter structure * * Returns 0 on success and errno on failure (as defined in errno.h) */ int et131x_init_recv(struct et131x_adapter *adapter) { int status = -ENOMEM; struct rfd *rfd = NULL; u32 rfdct; u32 numrfd = 0; struct rx_ring *rx_ring; /* Setup some convenience pointers */ rx_ring = &adapter->rx_ring; /* Setup each RFD */ for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { rfd = kmem_cache_alloc(rx_ring->recv_lookaside, GFP_ATOMIC | GFP_DMA); if (!rfd) { dev_err(&adapter->pdev->dev, "Couldn't alloc RFD out of kmem_cache\n"); status = -ENOMEM; continue; } rfd->skb = NULL; /* Add this RFD to the recv_list */ list_add_tail(&rfd->list_node, &rx_ring->recv_list); /* Increment both the available RFD's, and the total RFD's. */ rx_ring->num_ready_recv++; numrfd++; } if (numrfd > NIC_MIN_NUM_RFD) status = 0; rx_ring->num_rfd = numrfd; if (status != 0) { kmem_cache_free(rx_ring->recv_lookaside, rfd); dev_err(&adapter->pdev->dev, "Allocation problems in et131x_init_recv\n"); } return status; } /** * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate. * @adapter: pointer to our adapter structure */ void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) { struct phy_device *phydev = adapter->phydev; if (!phydev) return; /* For version B silicon, we do not use the RxDMA timer for 10 and 100 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. 
 */
	if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
		writel(0, &adapter->regs->rxdma.max_pkt_time);
		writel(1, &adapter->regs->rxdma.num_pkt_done);
	}
}

/**
 * NICReturnRFD - Recycle a RFD and put it back onto the receive list
 * @adapter: pointer to our adapter
 * @rfd: pointer to the RFD
 *
 * Pushes the RFD's buffer back onto the appropriate free buffer ring,
 * advances the ring's full offset in hardware, and re-queues the RFD on
 * the tail of recv_list.
 */
static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (
#ifdef USE_FBR0
	    (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
#endif
	    (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
		spin_lock_irqsave(&adapter->fbr_lock, flags);

		if (ring_index == 1) {
			struct fbr_desc *next = (struct fbr_desc *)
			    (rx_local->fbr[0]->ring_virtaddr) +
			    INDEX10(rx_local->fbr[0]->local_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
			next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
			next->word2 = buff_index;

			writel(bump_free_buff_ring(
					&rx_local->fbr[0]->local_full,
					rx_local->fbr[0]->num_entries - 1),
			       &rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			struct fbr_desc *next = (struct fbr_desc *)
			    rx_local->fbr[1]->ring_virtaddr +
			    INDEX10(rx_local->fbr[1]->local_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
			next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
			next->word2 = buff_index;

			writel(bump_free_buff_ring(
					&rx_local->fbr[1]->local_full,
					rx_local->fbr[1]->num_entries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&adapter->fbr_lock, flags);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s illegal Buffer Index returned\n", __func__);
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}

/* Pull the next completed packet off the Packet Status Ring, deliver it to
 * the network stack (unless it is filtered or a runt), and recycle the RFD.
 * Returns the processed RFD, or NULL if nothing was available / the entry
 * was invalid.
 */
static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rx_status_block *status;
	struct pkt_stat_desc *psr;
	struct rfd *rfd;
	u32 i;
	u8 *buf;
	unsigned long flags;
	struct list_head *element;
	u8 ring_index;
	u16 buff_index;
	u32 len;
	u32 word0;
	u32 word1;

	/* RX Status block is written by the DMA engine prior to every
	 * interrupt. It contains the next to be used entry in the Packet
	 * Status Ring, and also the two Free Buffer rings.
	 */
	status = rx_local->rx_status_block;
	word1 = status->word1 >> 16;	/* Get the useful bits */

	/* Check the PSR and wrap bits do not match */
	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
		/* Looks like this ring is not updated yet */
		return NULL;

	/* The packet status ring indicates that data is available. */
	psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
	    (rx_local->local_psr_full & 0xFFF);

	/* Grab any information that is required once the PSR is
	 * advanced, since we can no longer rely on the memory being
	 * accurate
	 */
	len = psr->word1 & 0xFFFF;
	ring_index = (psr->word1 >> 26) & 0x03;
	buff_index = (psr->word1 >> 16) & 0x3FF;
	word0 = psr->word0;

	/* Indicate that we have used this PSR entry. */
	/* FIXME wrap 12 */
	add_12bit(&rx_local->local_psr_full, 1);
	if (
	  (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
		/* Clear psr full and toggle the wrap bit */
		rx_local->local_psr_full &= ~0xFFF;
		rx_local->local_psr_full ^= 0x1000;
	}

	writel(rx_local->local_psr_full,
	       &adapter->regs->rxdma.psr_full_offset);

#ifndef USE_FBR0
	if (ring_index != 1)
		return NULL;
#endif

#ifdef USE_FBR0
	if (ring_index > 1 ||
	    (ring_index == 0 &&
	     buff_index > rx_local->fbr[1]->num_entries - 1) ||
	    (ring_index == 1 &&
	     buff_index > rx_local->fbr[0]->num_entries - 1))
#else
	if (ring_index != 1 ||
	    buff_index > rx_local->fbr[0]->num_entries - 1)
#endif
	{
		/* Illegal buffer or ring index cannot be used by S/W*/
		dev_err(&adapter->pdev->dev,
			"NICRxPkts PSR Entry %d indicates "
			"length of %d and/or bad bi(%d)\n",
			rx_local->local_psr_full & 0xFFF, len, buff_index);
		return NULL;
	}

	/* Get and fill the RFD. */
	spin_lock_irqsave(&adapter->rcv_lock, flags);

	rfd = NULL;
	element = rx_local->recv_list.next;
	rfd = (struct rfd *) list_entry(element, struct rfd, list_node);

	if (rfd == NULL) {
		spin_unlock_irqrestore(&adapter->rcv_lock, flags);
		return NULL;
	}

	list_del(&rfd->list_node);
	rx_local->num_ready_recv--;

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	rfd->bufferindex = buff_index;
	rfd->ringindex = ring_index;

	/* In V1 silicon, there is a bug which screws up filtering of
	 * runt packets. Therefore runt packet filtering is disabled
	 * in the MAC and the packets are dropped here. They are
	 * also counted here.
	 */
	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
		adapter->stats.rx_other_errs++;
		len = 0;
	}

	if (len) {
		/* Determine if this is a multicast packet coming in */
		if ((word0 & ALCATEL_MULTICAST_PKT) &&
		    !(word0 & ALCATEL_BROADCAST_PKT)) {
			/* Promiscuous mode and Multicast mode are
			 * not mutually exclusive as was first
			 * thought. I guess Promiscuous is just
			 * considered a super-set of the other
			 * filters. Generally filter is 0x2b when in
			 * promiscuous mode.
			 */
			if ((adapter->packet_filter &
					ET131X_PACKET_TYPE_MULTICAST)
			    && !(adapter->packet_filter &
					ET131X_PACKET_TYPE_PROMISCUOUS)
			    && !(adapter->packet_filter &
					ET131X_PACKET_TYPE_ALL_MULTICAST)) {
				/*
				 * Note - ring_index for fbr[] array is reversed
				 * 1 for FBR0 etc
				 */
				buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
						virt[buff_index];

				/* Loop through our list to see if the
				 * destination address of this packet
				 * matches one in our list.
				 */
				for (i = 0;
				     i < adapter->multicast_addr_count;
				     i++) {
					if (buf[0] ==
						adapter->multicast_list[i][0]
					    && buf[1] ==
						adapter->multicast_list[i][1]
					    && buf[2] ==
						adapter->multicast_list[i][2]
					    && buf[3] ==
						adapter->multicast_list[i][3]
					    && buf[4] ==
						adapter->multicast_list[i][4]
					    && buf[5] ==
						adapter->multicast_list[i][5]) {
						break;
					}
				}

				/* If our index is equal to the number
				 * of Multicast address we have, then
				 * this means we did not find this
				 * packet's matching address in our
				 * list. Set the len to zero,
				 * so we free our RFD when we return
				 * from this function.
				 */
				if (i == adapter->multicast_addr_count)
					len = 0;
			}

			if (len > 0)
				adapter->stats.multicast_pkts_rcvd++;
		} else if (word0 & ALCATEL_BROADCAST_PKT)
			adapter->stats.broadcast_pkts_rcvd++;
		else
			/* Not sure what this counter measures in
			 * promiscuous mode. Perhaps we should check
			 * the MAC address to see if it is directed
			 * to us in promiscuous mode.
			 */
			adapter->stats.unicast_pkts_rcvd++;
	}

	if (len > 0) {
		struct sk_buff *skb = NULL;

		/*rfd->len = len - 4; */
		rfd->len = len;

		skb = dev_alloc_skb(rfd->len + 2);
		if (!skb) {
			dev_err(&adapter->pdev->dev,
				"Couldn't alloc an SKB for Rx\n");
			return NULL;
		}

		adapter->net_stats.rx_bytes += rfd->len;

		/*
		 * Note - ring_index for fbr[] array is reversed,
		 * 1 for FBR0 etc
		 */
		memcpy(skb_put(skb, rfd->len),
		       rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
				virt[buff_index],
		       rfd->len);

		skb->dev = adapter->netdev;
		skb->protocol = eth_type_trans(skb, adapter->netdev);
		skb->ip_summed = CHECKSUM_NONE;

		netif_rx(skb);
	} else {
		rfd->len = 0;
	}

	nic_return_rfd(adapter, rfd);
	return rfd;
}

/**
 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
 * @adapter: pointer to our adapter
 *
 * Assumption, Rcv spinlock has been acquired.
 */
void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
{
	struct rfd *rfd = NULL;
	u32 count = 0;
	bool done = true;

	/* Process up to available RFD's */
	while (count < NUM_PACKETS_HANDLED) {
		if (list_empty(&adapter->rx_ring.recv_list)) {
			WARN_ON(adapter->rx_ring.num_ready_recv != 0);
			done = false;
			break;
		}

		rfd = nic_rx_pkts(adapter);

		if (rfd == NULL)
			break;

		/* Do not receive any packets until a filter has been set.
		 * Do not receive any packets until we have link.
		 * If length is zero, return the RFD in order to advance the
		 * Free buffer ring.
		 */
		if (!adapter->packet_filter ||
		    !netif_carrier_ok(adapter->netdev) ||
		    rfd->len == 0)
			continue;

		/* Increment the number of packets we received */
		adapter->net_stats.rx_packets++;

		/* Set the status on the packet, either resources or success */
		if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
			dev_warn(&adapter->pdev->dev,
				 "RFD's are running out\n");
		}
		count++;
	}

	if (count == NUM_PACKETS_HANDLED || !done) {
		adapter->rx_ring.unfinished_receives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	} else
		/* Watchdog timer will disable itself if appropriate.
*/ adapter->rx_ring.unfinished_receives = false; } /* TX functions */ /** * et131x_tx_dma_memory_alloc * @adapter: pointer to our private adapter structure * * Returns 0 on success and errno on failure (as defined in errno.h). * * Allocates memory that will be visible both to the device and to the CPU. * The OS will pass us packets, pointers to which we will insert in the Tx * Descriptor queue. The device will read this queue to find the packets in * memory. The device will update the "status" in memory each time it xmits a * packet. */ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) { int desc_size = 0; struct tx_ring *tx_ring = &adapter->tx_ring; /* Allocate memory for the TCB's (Transmit Control Block) */ adapter->tx_ring.tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA); if (!adapter->tx_ring.tcb_ring) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n"); return -ENOMEM; } /* Allocate enough memory for the Tx descriptor ring, and allocate * some extra so that the ring can be aligned on a 4k boundary. */ desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1; tx_ring->tx_desc_ring = (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev, desc_size, &tx_ring->tx_desc_ring_pa, GFP_KERNEL); if (!adapter->tx_ring.tx_desc_ring) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n"); return -ENOMEM; } /* Save physical address * * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here before * storing the adjusted address. 
*/ /* Allocate memory for the Tx status block */ tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev, sizeof(u32), &tx_ring->tx_status_pa, GFP_KERNEL); if (!adapter->tx_ring.tx_status_pa) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx status block\n"); return -ENOMEM; } return 0; } /** * et131x_tx_dma_memory_free - Free all memory allocated within this module * @adapter: pointer to our private adapter structure * * Returns 0 on success and errno on failure (as defined in errno.h). */ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) { int desc_size = 0; if (adapter->tx_ring.tx_desc_ring) { /* Free memory relating to Tx rings here */ desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1; dma_free_coherent(&adapter->pdev->dev, desc_size, adapter->tx_ring.tx_desc_ring, adapter->tx_ring.tx_desc_ring_pa); adapter->tx_ring.tx_desc_ring = NULL; } /* Free memory for the Tx status block */ if (adapter->tx_ring.tx_status) { dma_free_coherent(&adapter->pdev->dev, sizeof(u32), adapter->tx_ring.tx_status, adapter->tx_ring.tx_status_pa); adapter->tx_ring.tx_status = NULL; } /* Free the memory for the tcb structures */ kfree(adapter->tx_ring.tcb_ring); } /** * nic_send_packet - NIC specific send handler for version B silicon. * @adapter: pointer to our adapter * @tcb: pointer to struct tcb * * Returns 0 or errno. */ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) { u32 i; struct tx_desc desc[24]; /* 24 x 16 byte */ u32 frag = 0; u32 thiscopy, remainder; struct sk_buff *skb = tcb->skb; u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; unsigned long flags; struct phy_device *phydev = adapter->phydev; /* Part of the optimizations of this send routine restrict us to * sending 24 fragments at a pass. In practice we should never see * more than 5 fragments. * * NOTE: The older version of this function (below) can handle any * number of fragments. 
If needed, we can call this function, * although it is less efficient. */ if (nr_frags > 23) return -EIO; memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1)); for (i = 0; i < nr_frags; i++) { /* If there is something in this element, lets get a * descriptor from the ring and get the necessary data */ if (i == 0) { /* If the fragments are smaller than a standard MTU, * then map them to a single descriptor in the Tx * Desc ring. However, if they're larger, as is * possible with support for jumbo packets, then * split them each across 2 descriptors. * * This will work until we determine why the hardware * doesn't seem to like large fragments. */ if ((skb->len - skb->data_len) <= 1514) { desc[frag].addr_hi = 0; /* Low 16bits are length, high is vlan and unused currently so zero */ desc[frag].len_vlan = skb->len - skb->data_len; /* NOTE: Here, the dma_addr_t returned from * dma_map_single() is implicitly cast as a * u32. Although dma_addr_t can be * 64-bit, the address returned by * dma_map_single() is always 32-bit * addressable (as defined by the pci/dma * subsystem) */ desc[frag++].addr_lo = dma_map_single(&adapter->pdev->dev, skb->data, skb->len - skb->data_len, DMA_TO_DEVICE); } else { desc[frag].addr_hi = 0; desc[frag].len_vlan = (skb->len - skb->data_len) / 2; /* NOTE: Here, the dma_addr_t returned from * dma_map_single() is implicitly cast as a * u32. Although dma_addr_t can be * 64-bit, the address returned by * dma_map_single() is always 32-bit * addressable (as defined by the pci/dma * subsystem) */ desc[frag++].addr_lo = dma_map_single(&adapter->pdev->dev, skb->data, ((skb->len - skb->data_len) / 2), DMA_TO_DEVICE); desc[frag].addr_hi = 0; desc[frag].len_vlan = (skb->len - skb->data_len) / 2; /* NOTE: Here, the dma_addr_t returned from * dma_map_single() is implicitly cast as a * u32. 
Although dma_addr_t can be * 64-bit, the address returned by * dma_map_single() is always 32-bit * addressable (as defined by the pci/dma * subsystem) */ desc[frag++].addr_lo = dma_map_single(&adapter->pdev->dev, skb->data + ((skb->len - skb->data_len) / 2), ((skb->len - skb->data_len) / 2), DMA_TO_DEVICE); } } else { desc[frag].addr_hi = 0; desc[frag].len_vlan = frags[i - 1].size; /* NOTE: Here, the dma_addr_t returned from * dma_map_page() is implicitly cast as a u32. * Although dma_addr_t can be 64-bit, the address * returned by dma_map_page() is always 32-bit * addressable (as defined by the pci/dma subsystem) */ desc[frag++].addr_lo = skb_frag_dma_map( &adapter->pdev->dev, &frags[i - 1], 0, frags[i - 1].size, DMA_TO_DEVICE); } } if (phydev && phydev->speed == SPEED_1000) { if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) { /* Last element & Interrupt flag */ desc[frag - 1].flags = 0x5; adapter->tx_ring.since_irq = 0; } else { /* Last element */ desc[frag - 1].flags = 0x1; } } else desc[frag - 1].flags = 0x5; desc[0].flags |= 2; /* First element flag */ tcb->index_start = adapter->tx_ring.send_idx; tcb->stale = 0; spin_lock_irqsave(&adapter->send_hw_lock, flags); thiscopy = NUM_DESC_PER_RING_TX - INDEX10(adapter->tx_ring.send_idx); if (thiscopy >= frag) { remainder = 0; thiscopy = frag; } else { remainder = frag - thiscopy; } memcpy(adapter->tx_ring.tx_desc_ring + INDEX10(adapter->tx_ring.send_idx), desc, sizeof(struct tx_desc) * thiscopy); add_10bit(&adapter->tx_ring.send_idx, thiscopy); if (INDEX10(adapter->tx_ring.send_idx) == 0 || INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) { adapter->tx_ring.send_idx &= ~ET_DMA10_MASK; adapter->tx_ring.send_idx ^= ET_DMA10_WRAP; } if (remainder) { memcpy(adapter->tx_ring.tx_desc_ring, desc + thiscopy, sizeof(struct tx_desc) * remainder); add_10bit(&adapter->tx_ring.send_idx, remainder); } if (INDEX10(adapter->tx_ring.send_idx) == 0) { if (adapter->tx_ring.send_idx) tcb->index = NUM_DESC_PER_RING_TX 
- 1; else tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); } else tcb->index = adapter->tx_ring.send_idx - 1; spin_lock(&adapter->tcb_send_qlock); if (adapter->tx_ring.send_tail) adapter->tx_ring.send_tail->next = tcb; else adapter->tx_ring.send_head = tcb; adapter->tx_ring.send_tail = tcb; WARN_ON(tcb->next != NULL); adapter->tx_ring.used++; spin_unlock(&adapter->tcb_send_qlock); /* Write the new write pointer back to the device. */ writel(adapter->tx_ring.send_idx, &adapter->regs->txdma.service_request); /* For Gig only, we use Tx Interrupt coalescing. Enable the software * timer to wake us up if this packet isn't followed by N more. */ if (phydev && phydev->speed == SPEED_1000) { writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, &adapter->regs->global.watchdog_timer); } spin_unlock_irqrestore(&adapter->send_hw_lock, flags); return 0; } /** * send_packet - Do the work to send a packet * @skb: the packet(s) to send * @adapter: a pointer to the device's private adapter structure * * Return 0 in almost all cases; non-zero value in extreme hard failure only. 
 *
 * Assumption: Send spinlock has been acquired
 */
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
	int status;
	struct tcb *tcb = NULL;
	u16 *shbufva;
	unsigned long flags;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Get a TCB for this packet; pop the head of the ready queue under
	 * the ready-queue lock.
	 */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	tcb = adapter->tx_ring.tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return -ENOMEM;
	}

	adapter->tx_ring.tcb_qhead = tcb->next;

	if (adapter->tx_ring.tcb_qhead == NULL)
		adapter->tx_ring.tcb_qtail = NULL;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);

	tcb->skb = skb;

	/* Classify the destination (broadcast/multicast) from the first
	 * 6 bytes of the linear header so xmit stats can be bumped on
	 * completion in free_send_packet().
	 */
	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
		shbufva = (u16 *) skb->data;

		if ((shbufva[0] == 0xffff) &&
		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
			tcb->flags |= fMP_DEST_BROAD;
		} else if ((shbufva[0] & 0x3) == 0x0001) {
			tcb->flags |= fMP_DEST_MULTI;
		}
	}

	tcb->next = NULL;

	/* Call the NIC specific send handler. */
	status = nic_send_packet(adapter, tcb);

	if (status != 0) {
		/* Hand the unused TCB back to the tail of the ready queue
		 * so it is not leaked on failure.
		 */
		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

		if (adapter->tx_ring.tcb_qtail)
			adapter->tx_ring.tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			adapter->tx_ring.tcb_qhead = tcb;

		adapter->tx_ring.tcb_qtail = tcb;
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(adapter->tx_ring.used > NUM_TCB);
	return 0;
}

/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and it's array used makes no sense here
	 */

	/* TCB is not available */
	if (adapter->tx_ring.used >= NUM_TCB) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
					!netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);
			skb = NULL;

			adapter->net_stats.tx_dropped++;
		} else {
			status = send_packet(skb, adapter);
			if (status != 0 && status != -ENOMEM) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);
				skb = NULL;
				adapter->net_stats.tx_dropped++;
			}
		}
	}
	return status;
}

/**
 * free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary: bump xmit stats, unmap each TX
 * descriptor's DMA mapping, free the skb, and return the zeroed TCB to
 * the ready queue.
 * Assumption - Send spinlock has been acquired
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
						struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->net_stats;

	if (tcb->flags & fMP_DEST_BROAD)
		atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
	else if (tcb->flags & fMP_DEST_MULTI)
		atomic_inc(&adapter->stats.multicast_pkts_xmtd);
	else
		atomic_inc(&adapter->stats.unicast_pkts_xmtd);

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and umap the fragments
		 * they point to.  The unmap address/length come straight
		 * from the descriptor contents written by nic_send_packet().
		 */
		do {
			desc = (struct tx_desc *)
			    (adapter->tx_ring.tx_desc_ring +
						INDEX10(tcb->index_start));

			dma_unmap_single(&adapter->pdev->dev,
					 desc->addr_lo,
					 desc->len_vlan, DMA_TO_DEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
							NUM_DESC_PER_RING_TX) {
				/* Wrapped the 10-bit ring index; flip the
				 * wrap bit and clear the index portion.
				 */
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != (adapter->tx_ring.tx_desc_ring +
				INDEX10(tcb->index)));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	adapter->net_stats.tx_packets++;

	if (adapter->tx_ring.tcb_qtail)
		adapter->tx_ring.tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		adapter->tx_ring.tcb_qhead = tcb;

	adapter->tx_ring.tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	WARN_ON(adapter->tx_ring.used < 0);
}

/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @adapter: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		adapter->tx_ring.send_head = next;

		if (next == NULL)
			adapter->tx_ring.send_tail = NULL;

		adapter->tx_ring.used--;

		/* Drop the send-queue lock around free_send_packet(): it
		 * takes the ready-queue lock and may sleep-free the skb.
		 */
		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = adapter->tx_ring.send_head;
	}

	/* Freeing a full ring's worth suggests list corruption */
	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	adapter->tx_ring.used = 0;
}

/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @adapter: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;

	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped?  Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	/* First pass: TCBs from before the hardware's wrap point */
	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		/* free_send_packet() takes the ready-queue lock, so the
		 * send-queue lock is dropped around the call.
		 */
		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}
	/* Second pass: TCBs on the same side of the wrap as the
	 * completion index
	 */
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP)
	       && index > (tcb->index & ET_DMA10_MASK)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (adapter->tx_ring.used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}

/* ETHTOOL functions */

/* Report current PHY settings via the PHY layer */
static int et131x_get_settings(struct net_device *netdev,
			       struct ethtool_cmd *cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return phy_ethtool_gset(adapter->phydev, cmd);
}

/* Apply new PHY settings via the PHY layer */
static int et131x_set_settings(struct net_device *netdev,
			       struct ethtool_cmd *cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return phy_ethtool_sset(adapter->phydev, cmd);
}

/* Size (in bytes) of the register dump produced by et131x_get_regs() */
static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
	return ET131X_REGS_LEN * sizeof(u32);
}

/*
 * et131x_get_regs - dump PHY, global, TXDMA and RXDMA registers for
 * ethtool.  Each PHY read fills only the low 16 bits of its slot; the
 * buffer was zeroed first so the high bits read back as 0.
 */
static void et131x_get_regs(struct net_device *netdev,
			    struct ethtool_regs *regs, void *regs_data)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *aregs = adapter->regs;
	u32 *regs_buff = regs_data;
	u32 num = 0;

	memset(regs_data, 0, et131x_get_regs_len(netdev));

	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
			adapter->pdev->device;

	/* PHY regs */
	et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
	/* Autoneg next page transmit reg */
	et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
	/* Link partner next page reg */
	et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);

	/* Global regs */
	regs_buff[num++] = readl(&aregs->global.txq_start_addr);
	regs_buff[num++] = readl(&aregs->global.txq_end_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
	regs_buff[num++] = readl(&aregs->global.pm_csr);
	/* Cached value, not a fresh readl of int_status (reading the live
	 * register would acknowledge pending interrupts)
	 */
	regs_buff[num++] = adapter->stats.interrupt_status;
	regs_buff[num++] = readl(&aregs->global.int_mask);
	regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
	regs_buff[num++] = readl(&aregs->global.int_status_alias);
	regs_buff[num++] = readl(&aregs->global.sw_reset);
	regs_buff[num++] = readl(&aregs->global.slv_timer);
	regs_buff[num++] = readl(&aregs->global.msi_config);
	regs_buff[num++] = readl(&aregs->global.loopback);
	regs_buff[num++] = readl(&aregs->global.watchdog_timer);

	/* TXDMA regs */
	regs_buff[num++] = readl(&aregs->txdma.csr);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
	regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.service_request);
	regs_buff[num++] = readl(&aregs->txdma.service_complete);
	regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
	regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
	regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
	regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
	regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
	regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);

	/* RXDMA regs */
	regs_buff[num++] = readl(&aregs->rxdma.csr);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
	regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
	regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
}

#define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */
static void et131x_get_drvinfo(struct net_device *netdev,
			       struct ethtool_drvinfo *info)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* NOTE(review): strncpy does not guarantee NUL termination if the
	 * source is >= ET131X_DRVINFO_LEN chars; strlcpy would be safer —
	 * confirm the string lengths before relying on termination.
	 */
	strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
	strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
	strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
}

static struct ethtool_ops et131x_ethtool_ops = {
	.get_settings = et131x_get_settings,
	.set_settings = et131x_set_settings,
	.get_drvinfo = et131x_get_drvinfo,
	.get_regs_len = et131x_get_regs_len,
	.get_regs = et131x_get_regs,
	.get_link = ethtool_op_get_link,
};

/* Attach the ethtool_ops table above to a net_device */
void et131x_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
}

/* PCI functions */

/**
 * et131x_hwaddr_init - set up the MAC Address on the ET1310
 * @adapter: pointer to our private adapter structure
 */
void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
	/* If have our default mac from init and no mac address from
	 * EEPROM then we need to generate the last octet and set it on the
	 * device
	 */
	if (adapter->rom_addr[0] == 0x00 &&
	    adapter->rom_addr[1] == 0x00 &&
	    adapter->rom_addr[2] == 0x00 &&
	    adapter->rom_addr[3] == 0x00 &&
	    adapter->rom_addr[4] == 0x00 &&
	    adapter->rom_addr[5] == 0x00) {
		/*
		 * We need to randomly generate the last octet so we
		 * decrease our chances of setting the mac address to
		 * same as another one of our cards in the system
		 */
		get_random_bytes(&adapter->addr[5], 1);
		/*
		 * We have the default value in the register we are
		 * working with so we need to copy the current
		 * address into the permanent address
		 */
		memcpy(adapter->rom_addr, adapter->addr, ETH_ALEN);
	} else {
		/* We do not have an override address, so set the
		 * current address to the permanent address and add
		 * it to the device
		 */
		memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
	}
}

/**
 * et131x_pci_init - initial PCI setup
 * @adapter: pointer to our private adapter structure
 * @pdev: our PCI device
 *
 * Perform the initial setup of PCI registers and if possible initialise
 * the MAC address.
 * At this point the I/O registers have yet to be mapped
 */
static int et131x_pci_init(struct et131x_adapter *adapter,
						struct pci_dev *pdev)
{
	int i;
	u8 max_payload;
	u8 read_size_reg;

	if (et131x_init_eeprom(adapter) < 0)
		return -EIO;

	/* Let's set up the PORT LOGIC Register.  First we need to know what
	 * the max_payload_size is
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
		dev_err(&pdev->dev,
		    "Could not read PCI config space for Max Payload Size\n");
		return -EIO;
	}

	/* Program the Ack/Nak latency and replay timers */
	max_payload &= 0x07;	/* Only the lower 3 bits are valid */

	if (max_payload < 2) {
		/* Timer values indexed by payload size (128B / 256B) */
		static const u16 acknak[2] = { 0x76, 0xD0 };
		static const u16 replay[2] = { 0x1E0, 0x2ED };

		if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
					       acknak[max_payload])) {
			dev_err(&pdev->dev,
			  "Could not write PCI config space for ACK/NAK\n");
			return -EIO;
		}
		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
					       replay[max_payload])) {
			dev_err(&pdev->dev,
			  "Could not write PCI config space for Replay Timer\n");
			return -EIO;
		}
	}

	/* l0s and l1 latency timers.  We are using default values.
	 * Representing 001 for L0s and 010 for L1
	 */
	if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
		dev_err(&pdev->dev,
		  "Could not write PCI config space for Latency Timers\n");
		return -EIO;
	}

	/* Change the max read size to 2k */
	if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for Max read size\n");
		return -EIO;
	}

	read_size_reg &= 0x8f;
	read_size_reg |= 0x40;

	if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
		dev_err(&pdev->dev,
		      "Could not write PCI config space for Max read size\n");
		return -EIO;
	}

	/* Get MAC address from config space if an eeprom exists, otherwise
	 * the MAC address there will not be valid
	 */
	if (!adapter->has_eeprom) {
		et131x_hwaddr_init(adapter);
		return 0;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
					adapter->rom_addr + i)) {
			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
			return -EIO;
		}
	}
	memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);

	return 0;
}

/**
 * et131x_error_timer_handler
 * @data: timer-specific variable; here a pointer to our adapter structure
 *
 * The routine called when the error timer expires, to track the number of
 * recurring errors.
 */
void et131x_error_timer_handler(unsigned long data)
{
	struct et131x_adapter *adapter = (struct et131x_adapter *) data;
	struct phy_device *phydev = adapter->phydev;

	if (et1310_in_phy_coma(adapter)) {
		/* Bring the device immediately out of coma, to
		 * prevent it from sleeping indefinitely, this
		 * mechanism could be improved! */
		et1310_disable_phy_coma(adapter);
		adapter->boot_coma = 20;
	} else {
		et1310_update_macstat_host_counters(adapter);
	}

	/* boot_coma counts timer ticks with no link; at 10 consecutive
	 * link-less ticks the PHY is put into coma (low-power) mode.
	 */
	if (!phydev->link && adapter->boot_coma < 11)
		adapter->boot_coma++;

	if (adapter->boot_coma == 10) {
		if (!phydev->link) {
			if (!et1310_in_phy_coma(adapter)) {
				/* NOTE - This was originally a 'sync with
				 *  interrupt'. How to do that under Linux?
				 */
				et131x_enable_interrupts(adapter);
				et1310_enable_phy_coma(adapter);
			}
		}
	}

	/* This is a periodic timer, so reschedule */
	mod_timer(&adapter->error_timer, jiffies +
					  TX_ERROR_PERIOD * HZ / 1000);
}

/**
 * et131x_adapter_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success, errno on failure (as defined in errno.h).
 *
 * Allocate all the memory blocks for send, receive and others.
 * On any failure, everything allocated up to that point is freed
 * before returning.
 */
int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
	int status;

	/* Allocate memory for the Tx Ring */
	status = et131x_tx_dma_memory_alloc(adapter);
	if (status != 0) {
		dev_err(&adapter->pdev->dev,
			  "et131x_tx_dma_memory_alloc FAILED\n");
		return status;
	}
	/* Receive buffer memory allocation */
	status = et131x_rx_dma_memory_alloc(adapter);
	if (status != 0) {
		dev_err(&adapter->pdev->dev,
			  "et131x_rx_dma_memory_alloc FAILED\n");
		et131x_tx_dma_memory_free(adapter);
		return status;
	}

	/* Init receive data structures */
	status = et131x_init_recv(adapter);
	if (status != 0) {
		dev_err(&adapter->pdev->dev,
			"et131x_init_recv FAILED\n");
		et131x_tx_dma_memory_free(adapter);
		et131x_rx_dma_memory_free(adapter);
	}
	return status;
}

/**
 * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
 * @adapter: pointer to our private adapter structure
 */
void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
	/* Free DMA memory */
	et131x_tx_dma_memory_free(adapter);
	et131x_rx_dma_memory_free(adapter);
}

/*
 * et131x_adjust_link - PHY-layer link-change callback.  Reconfigures MAC
 * and flow control when the link comes up, and resets/re-inits the send
 * path when it goes down.
 */
static void et131x_adjust_link(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct  phy_device *phydev = adapter->phydev;

	if (netif_carrier_ok(netdev)) {
		adapter->boot_coma = 20;

		if (phydev && phydev->speed == SPEED_10) {
			/*
			 * NOTE - Is there a way to query this without
			 * TruePHY?
			 * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
			 * EMI_TRUEPHY_A13O) {
			 */
			u16 register18;

			/* Magic MPHY register sequence for 10 Mbit links,
			 * inherited from the vendor driver — exact
			 * semantics undocumented; do not reorder.
			 */
			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					 &register18);
			et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, PHY_INDEX_REG,
					 register18 | 0x8402);
			et131x_mii_write(adapter, PHY_DATA_REG,
					 register18 | 511);
			et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
					 register18);
		}

		et1310_config_flow_control(adapter);

		if (phydev && phydev->speed == SPEED_1000 &&
				adapter->registry_jumbo_packet > 2048) {
			u16 reg;

			/* Deepen the PHY TX FIFO for jumbo frames at gig */
			et131x_mii_read(adapter, PHY_CONFIG, &reg);
			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
			et131x_mii_write(adapter, PHY_CONFIG, reg);
		}

		et131x_set_rx_dma_timer(adapter);
		et1310_config_mac_regs2(adapter);
	}

	if (phydev && phydev->link != adapter->link) {
		/*
		 * Check to see if we are in coma mode and if
		 * so, disable it because we will not be able
		 * to read PHY values until we are out.
		 */
		if (et1310_in_phy_coma(adapter))
			et1310_disable_phy_coma(adapter);

		if (phydev->link) {
			adapter->boot_coma = 20;
		} else {
			dev_warn(&adapter->pdev->dev,
			    "Link down - cable problem ?\n");
			adapter->boot_coma = 0;

			if (phydev->speed == SPEED_10) {
				/* NOTE - Is there a way to query this without
				 * TruePHY?
				 * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
				 * EMI_TRUEPHY_A13O)
				 */
				u16 register18;

				et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
						 &register18);
				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
						 register18 | 0x4);
				et131x_mii_write(adapter, PHY_INDEX_REG,
						 register18 | 0x8402);
				et131x_mii_write(adapter, PHY_DATA_REG,
						 register18 | 511);
				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
						 register18);
			}

			/* Free the packets being actively sent & stopped */
			et131x_free_busy_send_packets(adapter);

			/* Re-initialize the send structures */
			et131x_init_send(adapter);

			/*
			 * Bring the device back to the state it was during
			 * init prior to autonegotiation being complete. This
			 * way, when we get the auto-neg complete interrupt,
			 * we can complete init by calling config_mac_regs2.
			 */
			et131x_soft_reset(adapter);

			/* Setup ET1310 as per the documentation */
			et131x_adapter_setup(adapter);

			/* perform reset of tx/rx */
			et131x_disable_txrx(netdev);
			et131x_enable_txrx(netdev);
		}

		adapter->link = phydev->link;

		phy_print_status(phydev);
	}
}

/*
 * et131x_mii_probe - find the first PHY on the MII bus and attach it to
 * the netdev with et131x_adjust_link as the link-change handler.
 * Returns 0 or a negative errno.
 */
static int et131x_mii_probe(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct  phy_device *phydev = NULL;

	phydev = phy_find_first(adapter->mii_bus);
	if (!phydev) {
		dev_err(&adapter->pdev->dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(netdev, dev_name(&phydev->dev),
			&et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phydev->supported &= (SUPPORTED_10baseT_Half
				| SUPPORTED_10baseT_Full
				| SUPPORTED_100baseT_Half
				| SUPPORTED_100baseT_Full
				| SUPPORTED_Autoneg
				| SUPPORTED_MII
				| SUPPORTED_TP);

	/* Gigabit is only advertised on non-"FAST" (10/100) device IDs */
	if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
		phydev->supported |= SUPPORTED_1000baseT_Full;

	phydev->advertising = phydev->supported;
	adapter->phydev = phydev;

	dev_info(&adapter->pdev->dev, "attached PHY driver [%s] "
		 "(mii_bus:phy_addr=%s)\n",
		 phydev->drv->name, dev_name(&phydev->dev));

	return 0;
}

/**
 * et131x_adapter_init
 * @adapter: pointer to the private adapter struct
 * @pdev: pointer to the PCI device
 *
 * Initialize the data structures for the et131x_adapter object and link
 * them together with the platform provided device structures.
 */
static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
		struct pci_dev *pdev)
{
	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };

	struct et131x_adapter *adapter;

	/* Allocate private adapter struct and copy in relevant information */
	adapter = netdev_priv(netdev);
	/* Take a reference on the PCI device; released in et131x_pci_remove() */
	adapter->pdev = pci_dev_get(pdev);
	adapter->netdev = netdev;

	/* Do the same for the netdev struct */
	netdev->irq = pdev->irq;
	netdev->base_addr = pci_resource_start(pdev, 0);

	/* Initialize spinlocks here */
	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->tcb_send_qlock);
	spin_lock_init(&adapter->tcb_ready_qlock);
	spin_lock_init(&adapter->send_hw_lock);
	spin_lock_init(&adapter->rcv_lock);
	spin_lock_init(&adapter->rcv_pend_lock);
	spin_lock_init(&adapter->fbr_lock);
	spin_lock_init(&adapter->phy_lock);

	/* Default MTU-derived frame size; valid range is 1514-9216 */
	adapter->registry_jumbo_packet = 1514;	/* 1514-9216 */

	/* Set the MAC address to a default */
	memcpy(adapter->addr, default_mac, ETH_ALEN);

	return adapter;
}

/**
 * et131x_pci_remove
 * @pdev: a pointer to the device's pci_dev structure
 *
 * Registered in the pci_driver structure, this function is called when the
 * PCI subsystem detects that a PCI device which matches the information
 * contained in the pci_device_id table has been removed.
 *
 * NOTE(review): teardown order below is deliberate — unregister the netdev
 * first so no new I/O arrives, then tear down the MDIO bus, then free DMA
 * memory and unmap registers, and only then release PCI resources.
 */
static void __devexit et131x_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct et131x_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);
	mdiobus_unregister(adapter->mii_bus);
	kfree(adapter->mii_bus->irq);
	mdiobus_free(adapter->mii_bus);
	et131x_adapter_memory_free(adapter);
	iounmap(adapter->regs);
	/* Drop the reference taken in et131x_adapter_init() */
	pci_dev_put(pdev);

	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/**
 * et131x_up - Bring up a device for use.
* @netdev: device to be opened */ void et131x_up(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); et131x_enable_txrx(netdev); phy_start(adapter->phydev); } /** * et131x_down - Bring down the device * @netdev: device to be broght down */ void et131x_down(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); /* Save the timestamp for the TX watchdog, prevent a timeout */ netdev->trans_start = jiffies; phy_stop(adapter->phydev); et131x_disable_txrx(netdev); } #ifdef CONFIG_PM_SLEEP static int et131x_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); if (netif_running(netdev)) { netif_device_detach(netdev); et131x_down(netdev); pci_save_state(pdev); } return 0; } static int et131x_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); if (netif_running(netdev)) { pci_restore_state(pdev); et131x_up(netdev); netif_device_attach(netdev); } return 0; } static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); #define ET131X_PM_OPS (&et131x_pm_ops) #else #define ET131X_PM_OPS NULL #endif /* ISR functions */ /** * et131x_isr - The Interrupt Service Routine for the driver. * @irq: the IRQ on which the interrupt was received. * @dev_id: device-specific info (here a pointer to a net_device struct) * * Returns a value indicating if the interrupt was handled. 
 */
irqreturn_t et131x_isr(int irq, void *dev_id)
{
	bool handled = true;
	struct net_device *netdev = (struct net_device *)dev_id;
	struct et131x_adapter *adapter = NULL;
	u32 status;

	/* Interface detached (e.g. during suspend): not our interrupt */
	if (!netif_device_present(netdev)) {
		handled = false;
		goto out;
	}

	adapter = netdev_priv(netdev);

	/* If the adapter is in low power state, then it should not
	 * recognize any interrupt
	 */

	/* Disable Device Interrupts */
	et131x_disable_interrupts(adapter);

	/* Get a copy of the value in the interrupt status register
	 * so we can process the interrupting section
	 */
	status = readl(&adapter->regs->global.int_status);

	/* Mask off the sources we do not service; which mask applies
	 * depends on whether Tx flow control is enabled
	 */
	if (adapter->flowcontrol == FLOW_TXONLY ||
	    adapter->flowcontrol == FLOW_BOTH) {
		status &= ~INT_MASK_ENABLE;
	} else {
		status &= ~INT_MASK_ENABLE_NO_FLOW;
	}

	/* Make sure this is our interrupt */
	if (!status) {
		handled = false;
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* This is our interrupt, so process accordingly */

	if (status & ET_INTR_WATCHDOG) {
		struct tcb *tcb = adapter->tx_ring.send_head;

		/* A TCB still at the head after more than one watchdog
		 * period is treated as a missed Tx completion
		 */
		if (tcb)
			if (++tcb->stale > 1)
				status |= ET_INTR_TXDMA_ISR;

		if (adapter->rx_ring.unfinished_receives)
			status |= ET_INTR_RXDMA_XFR_DONE;
		else if (tcb == NULL)
			/* Nothing in flight: stop the watchdog timer */
			writel(0, &adapter->regs->global.watchdog_timer);

		status &= ~ET_INTR_WATCHDOG;
	}

	if (status == 0) {
		/* This interrupt has in some way been "handled" by
		 * the ISR. Either it was a spurious Rx interrupt, or
		 * it was a Tx interrupt that has been filtered by
		 * the ISR.
		 */
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* We need to save the interrupt status value for use in our
	 * DPC. We will clear the software copy of that in that
	 * routine.
	 */
	adapter->stats.interrupt_status = status;

	/* Schedule the ISR handler as a bottom-half task in the
	 * kernel's tq_immediate queue, and mark the queue for
	 * execution
	 */
	schedule_work(&adapter->task);
out:
	return IRQ_RETVAL(handled);
}

/**
 * et131x_isr_handler - The ISR handler
 * @p_adapter, a pointer to the device's private adapter structure
 *
 * scheduled to run in a deferred context by the ISR.
This is where the ISR's * work actually gets done. */ void et131x_isr_handler(struct work_struct *work) { struct et131x_adapter *adapter = container_of(work, struct et131x_adapter, task); u32 status = adapter->stats.interrupt_status; struct address_map __iomem *iomem = adapter->regs; /* * These first two are by far the most common. Once handled, we clear * their two bits in the status word. If the word is now zero, we * exit. */ /* Handle all the completed Transmit interrupts */ if (status & ET_INTR_TXDMA_ISR) et131x_handle_send_interrupt(adapter); /* Handle all the completed Receives interrupts */ if (status & ET_INTR_RXDMA_XFR_DONE) et131x_handle_recv_interrupt(adapter); status &= 0xffffffd7; if (status) { /* Handle the TXDMA Error interrupt */ if (status & ET_INTR_TXDMA_ERR) { u32 txdma_err; /* Following read also clears the register (COR) */ txdma_err = readl(&iomem->txdma.tx_dma_error); dev_warn(&adapter->pdev->dev, "TXDMA_ERR interrupt, error = %d\n", txdma_err); } /* Handle Free Buffer Ring 0 and 1 Low interrupt */ if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { /* * This indicates the number of unused buffers in * RXDMA free buffer ring 0 is <= the limit you * programmed. Free buffer resources need to be * returned. Free buffers are consumed as packets * are passed from the network to the host. The host * becomes aware of the packets from the contents of * the packet status ring. This ring is queried when * the packet done interrupt occurs. Packets are then * passed to the OS. When the OS is done with the * packets the resources can be returned to the * ET1310 for re-use. This interrupt is one method of * returning resources. 
*/ /* If the user has flow control on, then we will * send a pause packet, otherwise just exit */ if (adapter->flowcontrol == FLOW_TXONLY || adapter->flowcontrol == FLOW_BOTH) { u32 pm_csr; /* Tell the device to send a pause packet via * the back pressure register (bp req and * bp xon/xoff) */ pm_csr = readl(&iomem->global.pm_csr); if (!et1310_in_phy_coma(adapter)) writel(3, &iomem->txmac.bp_ctrl); } } /* Handle Packet Status Ring Low Interrupt */ if (status & ET_INTR_RXDMA_STAT_LOW) { /* * Same idea as with the two Free Buffer Rings. * Packets going from the network to the host each * consume a free buffer resource and a packet status * resource. These resoures are passed to the OS. * When the OS is done with the resources, they need * to be returned to the ET1310. This is one method * of returning the resources. */ } /* Handle RXDMA Error Interrupt */ if (status & ET_INTR_RXDMA_ERR) { /* * The rxdma_error interrupt is sent when a time-out * on a request issued by the JAGCore has occurred or * a completion is returned with an un-successful * status. In both cases the request is considered * complete. The JAGCore will automatically re-try the * request in question. Normally information on events * like these are sent to the host using the "Advanced * Error Reporting" capability. This interrupt is * another way of getting similar information. The * only thing required is to clear the interrupt by * reading the ISR in the global resources. The * JAGCore will do a re-try on the request. Normally * you should never see this interrupt. If you start * to see this interrupt occurring frequently then * something bad has occurred. A reset might be the * thing to do. */ /* TRAP();*/ dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n", readl(&iomem->txmac.tx_test)); } /* Handle the Wake on LAN Event */ if (status & ET_INTR_WOL) { /* * This is a secondary interrupt for wake on LAN. * The driver should never see this, if it does, * something serious is wrong. 
We will TRAP the * message when we are in DBG mode, otherwise we * will ignore it. */ dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); } /* Let's move on to the TxMac */ if (status & ET_INTR_TXMAC) { u32 err = readl(&iomem->txmac.err); /* * When any of the errors occur and TXMAC generates * an interrupt to report these errors, it usually * means that TXMAC has detected an error in the data * stream retrieved from the on-chip Tx Q. All of * these errors are catastrophic and TXMAC won't be * able to recover data when these errors occur. In * a nutshell, the whole Tx path will have to be reset * and re-configured afterwards. */ dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n", err); /* If we are debugging, we want to see this error, * otherwise we just want the device to be reset and * continue */ } /* Handle RXMAC Interrupt */ if (status & ET_INTR_RXMAC) { /* * These interrupts are catastrophic to the device, * what we need to do is disable the interrupts and * set the flag to cause us to reset so we can solve * this issue. */ /* MP_SET_FLAG( adapter, fMP_ADAPTER_HARDWARE_ERROR); */ dev_warn(&adapter->pdev->dev, "RXMAC interrupt, error 0x%08x. Requesting reset\n", readl(&iomem->rxmac.err_reg)); dev_warn(&adapter->pdev->dev, "Enable 0x%08x, Diag 0x%08x\n", readl(&iomem->rxmac.ctrl), readl(&iomem->rxmac.rxq_diag)); /* * If we are debugging, we want to see this error, * otherwise we just want the device to be reset and * continue */ } /* Handle MAC_STAT Interrupt */ if (status & ET_INTR_MAC_STAT) { /* * This means at least one of the un-masked counters * in the MAC_STAT block has rolled over. Use this * to maintain the top, software managed bits of the * counter(s). */ et1310_handle_macstat_interrupt(adapter); } /* Handle SLV Timeout Interrupt */ if (status & ET_INTR_SLV_TIMEOUT) { /* * This means a timeout has occurred on a read or * write request to one of the JAGCore registers. 
 The
			 * Global Resources block has terminated the request
			 * and on a read request, returned a "fake" value.
			 * The most likely reasons are: Bad Address or the
			 * addressed module is in a power-down state and
			 * can't respond.
			 */
		}
	}
	et131x_enable_interrupts(adapter);
}

/* NETDEV functions */

/**
 * et131x_stats - Return the current device statistics.
 * @netdev: device whose stats are being queried
 *
 * Translates the driver's private hardware counters (ce_stats) into the
 * generic net_device_stats structure and returns a pointer to it.
 */
static struct net_device_stats *et131x_stats(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct net_device_stats *stats = &adapter->net_stats;
	struct ce_stats *devstat = &adapter->stats;

	/* rx_errors is the sum of every distinct receive error counter */
	stats->rx_errors = devstat->rx_length_errs +
			   devstat->rx_align_errs +
			   devstat->rx_crc_errs +
			   devstat->rx_code_violations +
			   devstat->rx_other_errs;
	stats->tx_errors = devstat->tx_max_pkt_errs;
	stats->multicast = devstat->multicast_pkts_rcvd;
	stats->collisions = devstat->tx_collisions;

	stats->rx_length_errors = devstat->rx_length_errs;
	stats->rx_over_errors = devstat->rx_overflows;
	stats->rx_crc_errors = devstat->rx_crc_errs;

	/* NOTE: These stats don't have corresponding values in CE_STATS,
	 * so we're going to have to update these directly from within the
	 * TX/RX code
	 */
	/* stats->rx_bytes            = 20;  devstat->; */
	/* stats->tx_bytes            = 20;  devstat->; */
	/* stats->rx_dropped          = devstat->; */
	/* stats->tx_dropped          = devstat->; */

	/* NOTE: Not used, can't find analogous statistics */
	/* stats->rx_frame_errors     = devstat->; */
	/* stats->rx_fifo_errors      = devstat->; */
	/* stats->rx_missed_errors    = devstat->; */

	/* stats->tx_aborted_errors   = devstat->; */
	/* stats->tx_carrier_errors   = devstat->; */
	/* stats->tx_fifo_errors      = devstat->; */
	/* stats->tx_heartbeat_errors = devstat->; */
	/* stats->tx_window_errors    = devstat->; */

	return stats;
}

/**
 * et131x_open - Open the device for use.
* @netdev: device to be opened * * Returns 0 on success, errno on failure (as defined in errno.h) */ int et131x_open(struct net_device *netdev) { int result = 0; struct et131x_adapter *adapter = netdev_priv(netdev); /* Start the timer to track NIC errors */ init_timer(&adapter->error_timer); adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; adapter->error_timer.function = et131x_error_timer_handler; adapter->error_timer.data = (unsigned long)adapter; add_timer(&adapter->error_timer); /* Register our IRQ */ result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED, netdev->name, netdev); if (result) { dev_err(&adapter->pdev->dev, "could not register IRQ %d\n", netdev->irq); return result; } adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE; et131x_up(netdev); return result; } /** * et131x_close - Close the device * @netdev: device to be closed * * Returns 0 on success, errno on failure (as defined in errno.h) */ int et131x_close(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); et131x_down(netdev); adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE; free_irq(netdev->irq, netdev); /* Stop the error timer */ return del_timer_sync(&adapter->error_timer); } /** * et131x_ioctl - The I/O Control handler for the driver * @netdev: device on which the control request is being made * @reqbuf: a pointer to the IOCTL request buffer * @cmd: the IOCTL command code * * Returns 0 on success, errno on failure (as defined in errno.h) */ static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd) { struct et131x_adapter *adapter = netdev_priv(netdev); if (!adapter->phydev) return -EINVAL; return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); } /** * et131x_set_packet_filter - Configures the Rx Packet filtering on the device * @adapter: pointer to our private adapter structure * * FIXME: lot of dups with MAC code * * Returns 0 on success, errno on failure */ static int et131x_set_packet_filter(struct et131x_adapter 
*adapter) { int status = 0; uint32_t filter = adapter->packet_filter; u32 ctrl; u32 pf_ctrl; ctrl = readl(&adapter->regs->rxmac.ctrl); pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); /* Default to disabled packet filtering. Enable it in the individual * case statements that require the device to filter something */ ctrl |= 0x04; /* Set us to be in promiscuous mode so we receive everything, this * is also true when we get a packet filter of 0 */ if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) pf_ctrl &= ~7; /* Clear filter bits */ else { /* * Set us up with Multicast packet filtering. Three cases are * possible - (1) we have a multi-cast list, (2) we receive ALL * multicast entries or (3) we receive none. */ if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) pf_ctrl &= ~2; /* Multicast filter bit */ else { et1310_setup_device_for_multicast(adapter); pf_ctrl |= 2; ctrl &= ~0x04; } /* Set us up with Unicast packet filtering */ if (filter & ET131X_PACKET_TYPE_DIRECTED) { et1310_setup_device_for_unicast(adapter); pf_ctrl |= 4; ctrl &= ~0x04; } /* Set us up with Broadcast packet filtering */ if (filter & ET131X_PACKET_TYPE_BROADCAST) { pf_ctrl |= 1; /* Broadcast filter bit */ ctrl &= ~0x04; } else pf_ctrl &= ~1; /* Setup the receive mac configuration registers - Packet * Filter control + the enable / disable for packet filter * in the control reg. */ writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); writel(ctrl, &adapter->regs->rxmac.ctrl); } return status; } /** * et131x_multicast - The handler to configure multicasting on the interface * @netdev: a pointer to a net_device struct representing the device */ static void et131x_multicast(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); uint32_t packet_filter = 0; unsigned long flags; struct netdev_hw_addr *ha; int i; spin_lock_irqsave(&adapter->lock, flags); /* Before we modify the platform-independent filter flags, store them * locally. 
This allows us to determine if anything's changed and if * we even need to bother the hardware */ packet_filter = adapter->packet_filter; /* Clear the 'multicast' flag locally; because we only have a single * flag to check multicast, and multiple multicast addresses can be * set, this is the easiest way to determine if more than one * multicast address is being set. */ packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; /* Check the net_device flags and set the device independent flags * accordingly */ if (netdev->flags & IFF_PROMISC) adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS; else adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; if (netdev->flags & IFF_ALLMULTI) adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; if (netdev_mc_count(netdev) < 1) { adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; } else adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST; /* Set values in the private adapter struct */ i = 0; netdev_for_each_mc_addr(ha, netdev) { if (i == NIC_MAX_MCAST_LIST) break; memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN); } adapter->multicast_addr_count = i; /* Are the new flags different from the previous ones? If not, then no * action is required * * NOTE - This block will always update the multicast_list with the * hardware, even if the addresses aren't the same. 
*/ if (packet_filter != adapter->packet_filter) { /* Call the device's filter function */ et131x_set_packet_filter(adapter); } spin_unlock_irqrestore(&adapter->lock, flags); } /** * et131x_tx - The handler to tx a packet on the device * @skb: data to be Tx'd * @netdev: device on which data is to be Tx'd * * Returns 0 on success, errno on failure (as defined in errno.h) */ static int et131x_tx(struct sk_buff *skb, struct net_device *netdev) { int status = 0; struct et131x_adapter *adapter = netdev_priv(netdev); /* stop the queue if it's getting full */ if (adapter->tx_ring.used >= NUM_TCB - 1 && !netif_queue_stopped(netdev)) netif_stop_queue(netdev); /* Save the timestamp for the TX timeout watchdog */ netdev->trans_start = jiffies; /* Call the device-specific data Tx routine */ status = et131x_send_packets(skb, netdev); /* Check status and manage the netif queue if necessary */ if (status != 0) { if (status == -ENOMEM) status = NETDEV_TX_BUSY; else status = NETDEV_TX_OK; } return status; } /** * et131x_tx_timeout - Timeout handler * @netdev: a pointer to a net_device struct representing the device * * The handler called when a Tx request times out. The timeout period is * specified by the 'tx_timeo" element in the net_device structure (see * et131x_alloc_device() to see how this value is set). */ static void et131x_tx_timeout(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); struct tcb *tcb; unsigned long flags; /* If the device is closed, ignore the timeout */ if (~(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)) return; /* Any nonrecoverable hardware error? * Checks adapter->flags for any failure in phy reading */ if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR) return; /* Hardware failure? */ if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) { dev_err(&adapter->pdev->dev, "hardware error - reset\n"); return; } /* Is send stuck? 
*/ spin_lock_irqsave(&adapter->tcb_send_qlock, flags); tcb = adapter->tx_ring.send_head; if (tcb != NULL) { tcb->count++; if (tcb->count > NIC_SEND_HANG_THRESHOLD) { spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); dev_warn(&adapter->pdev->dev, "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n", tcb->index, tcb->flags); adapter->net_stats.tx_errors++; /* perform reset of tx/rx */ et131x_disable_txrx(netdev); et131x_enable_txrx(netdev); return; } } spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); } /** * et131x_change_mtu - The handler called to change the MTU for the device * @netdev: device whose MTU is to be changed * @new_mtu: the desired MTU * * Returns 0 on success, errno on failure (as defined in errno.h) */ static int et131x_change_mtu(struct net_device *netdev, int new_mtu) { int result = 0; struct et131x_adapter *adapter = netdev_priv(netdev); /* Make sure the requested MTU is valid */ if (new_mtu < 64 || new_mtu > 9216) return -EINVAL; et131x_disable_txrx(netdev); et131x_handle_send_interrupt(adapter); et131x_handle_recv_interrupt(adapter); /* Set the new MTU */ netdev->mtu = new_mtu; /* Free Rx DMA memory */ et131x_adapter_memory_free(adapter); /* Set the config parameter for Jumbo Packet support */ adapter->registry_jumbo_packet = new_mtu + 14; et131x_soft_reset(adapter); /* Alloc and init Rx DMA memory */ result = et131x_adapter_memory_alloc(adapter); if (result != 0) { dev_warn(&adapter->pdev->dev, "Change MTU failed; couldn't re-alloc DMA memory\n"); return result; } et131x_init_send(adapter); et131x_hwaddr_init(adapter); memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); /* Init the device with the new settings */ et131x_adapter_setup(adapter); et131x_enable_txrx(netdev); return result; } /** * et131x_set_mac_addr - handler to change the MAC address for the device * @netdev: device whose MAC is to be changed * @new_mac: the desired MAC address * * Returns 0 on success, errno on failure (as defined in errno.h) * * IMPLEMENTED BY : 
blux http://berndlux.de 22.01.2007 21:14 */ static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) { int result = 0; struct et131x_adapter *adapter = netdev_priv(netdev); struct sockaddr *address = new_mac; /* begin blux */ if (adapter == NULL) return -ENODEV; /* Make sure the requested MAC is valid */ if (!is_valid_ether_addr(address->sa_data)) return -EINVAL; et131x_disable_txrx(netdev); et131x_handle_send_interrupt(adapter); et131x_handle_recv_interrupt(adapter); /* Set the new MAC */ /* netdev->set_mac_address = &new_mac; */ memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len); printk(KERN_INFO "%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr); /* Free Rx DMA memory */ et131x_adapter_memory_free(adapter); et131x_soft_reset(adapter); /* Alloc and init Rx DMA memory */ result = et131x_adapter_memory_alloc(adapter); if (result != 0) { dev_err(&adapter->pdev->dev, "Change MAC failed; couldn't re-alloc DMA memory\n"); return result; } et131x_init_send(adapter); et131x_hwaddr_init(adapter); /* Init the device with the new settings */ et131x_adapter_setup(adapter); et131x_enable_txrx(netdev); return result; } static const struct net_device_ops et131x_netdev_ops = { .ndo_open = et131x_open, .ndo_stop = et131x_close, .ndo_start_xmit = et131x_tx, .ndo_set_rx_mode = et131x_multicast, .ndo_tx_timeout = et131x_tx_timeout, .ndo_change_mtu = et131x_change_mtu, .ndo_set_mac_address = et131x_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_get_stats = et131x_stats, .ndo_do_ioctl = et131x_ioctl, }; /** * et131x_device_alloc * * Returns pointer to the allocated and initialized net_device struct for * this device. * * Create instances of net_device and wl_private for the new adapter and * register the device's entry points in the net_device structure. 
*/ struct net_device *et131x_device_alloc(void) { struct net_device *netdev; /* Alloc net_device and adapter structs */ netdev = alloc_etherdev(sizeof(struct et131x_adapter)); if (!netdev) { printk(KERN_ERR "et131x: Alloc of net_device struct failed\n"); return NULL; } /* * Setup the function registration table (and other data) for a * net_device */ netdev->watchdog_timeo = ET131X_TX_TIMEOUT; netdev->netdev_ops = &et131x_netdev_ops; /* Poll? */ /* netdev->poll = &et131x_poll; */ /* netdev->poll_controller = &et131x_poll_controller; */ return netdev; } /** * et131x_pci_setup - Perform device initialization * @pdev: a pointer to the device's pci_dev structure * @ent: this device's entry in the pci_device_id table * * Returns 0 on success, errno on failure (as defined in errno.h) * * Registered in the pci_driver structure, this function is called when the * PCI subsystem finds a new PCI device which matches the information * contained in the pci_device_id table. This routine is the equivalent to * a device insertion routine. 
*/ static int __devinit et131x_pci_setup(struct pci_dev *pdev, const struct pci_device_id *ent) { int result; struct net_device *netdev; struct et131x_adapter *adapter; int ii; result = pci_enable_device(pdev); if (result) { dev_err(&pdev->dev, "pci_enable_device() failed\n"); goto err_out; } /* Perform some basic PCI checks */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Can't find PCI device's base address\n"); goto err_disable; } if (pci_request_regions(pdev, DRIVER_NAME)) { dev_err(&pdev->dev, "Can't get PCI resources\n"); goto err_disable; } pci_set_master(pdev); /* Check the DMA addressing support of this device */ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); if (result) { dev_err(&pdev->dev, "Unable to obtain 64 bit DMA for consistent allocations\n"); goto err_release_res; } } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (result) { dev_err(&pdev->dev, "Unable to obtain 32 bit DMA for consistent allocations\n"); goto err_release_res; } } else { dev_err(&pdev->dev, "No usable DMA addressing method\n"); result = -EIO; goto err_release_res; } /* Allocate netdev and private adapter structs */ netdev = et131x_device_alloc(); if (!netdev) { dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); result = -ENOMEM; goto err_release_res; } SET_NETDEV_DEV(netdev, &pdev->dev); et131x_set_ethtool_ops(netdev); adapter = et131x_adapter_init(netdev, pdev); /* Initialise the PCI setup for the device */ et131x_pci_init(adapter, pdev); /* Map the bus-relative registers to system virtual memory */ adapter->regs = pci_ioremap_bar(pdev, 0); if (!adapter->regs) { dev_err(&pdev->dev, "Cannot map device registers\n"); result = -ENOMEM; goto err_free_dev; } /* If Phy COMA mode was enabled when we went down, disable it here. 
*/ writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); /* Issue a global reset to the et1310 */ et131x_soft_reset(adapter); /* Disable all interrupts (paranoid) */ et131x_disable_interrupts(adapter); /* Allocate DMA memory */ result = et131x_adapter_memory_alloc(adapter); if (result) { dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n"); goto err_iounmap; } /* Init send data structures */ et131x_init_send(adapter); /* Set up the task structure for the ISR's deferred handler */ INIT_WORK(&adapter->task, et131x_isr_handler); /* Copy address into the net_device struct */ memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); /* Init variable for counting how long we do not have link status */ adapter->boot_coma = 0; et1310_disable_phy_coma(adapter); /* Setup the mii_bus struct */ adapter->mii_bus = mdiobus_alloc(); if (!adapter->mii_bus) { dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n"); goto err_mem_free; } adapter->mii_bus->name = "et131x_eth_mii"; snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); adapter->mii_bus->priv = netdev; adapter->mii_bus->read = et131x_mdio_read; adapter->mii_bus->write = et131x_mdio_write; adapter->mii_bus->reset = et131x_mdio_reset; adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); if (!adapter->mii_bus->irq) { dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); goto err_mdio_free; } for (ii = 0; ii < PHY_MAX_ADDR; ii++) adapter->mii_bus->irq[ii] = PHY_POLL; if (mdiobus_register(adapter->mii_bus)) { dev_err(&pdev->dev, "failed to register MII bus\n"); mdiobus_free(adapter->mii_bus); goto err_mdio_free_irq; } if (et131x_mii_probe(netdev)) { dev_err(&pdev->dev, "failed to probe MII bus\n"); goto err_mdio_unregister; } /* Setup et1310 as per the documentation */ et131x_adapter_setup(adapter); /* We can enable interrupts now * * NOTE - Because registration of interrupt handler is done in the * device's open(), defer enabling device 
interrupts to that * point */ /* Register the net_device struct with the Linux network layer */ result = register_netdev(netdev); if (result != 0) { dev_err(&pdev->dev, "register_netdev() failed\n"); goto err_mdio_unregister; } /* Register the net_device struct with the PCI subsystem. Save a copy * of the PCI config space for this device now that the device has * been initialized, just in case it needs to be quickly restored. */ pci_set_drvdata(pdev, netdev); pci_save_state(adapter->pdev); return result; err_mdio_unregister: mdiobus_unregister(adapter->mii_bus); err_mdio_free_irq: kfree(adapter->mii_bus->irq); err_mdio_free: mdiobus_free(adapter->mii_bus); err_mem_free: et131x_adapter_memory_free(adapter); err_iounmap: iounmap(adapter->regs); err_free_dev: pci_dev_put(pdev); free_netdev(netdev); err_release_res: pci_release_regions(pdev); err_disable: pci_disable_device(pdev); err_out: return result; } static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = { { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, {0,} }; MODULE_DEVICE_TABLE(pci, et131x_pci_table); static struct pci_driver et131x_driver = { .name = DRIVER_NAME, .id_table = et131x_pci_table, .probe = et131x_pci_setup, .remove = __devexit_p(et131x_pci_remove), .driver.pm = ET131X_PM_OPS, }; /** * et131x_init_module - The "main" entry point called on driver initialization * * Returns 0 on success, errno on failure (as defined in errno.h) */ static int __init et131x_init_module(void) { return pci_register_driver(&et131x_driver); } /** * et131x_cleanup_module - The entry point called on driver cleanup */ static void __exit et131x_cleanup_module(void) { pci_unregister_driver(&et131x_driver); } module_init(et131x_init_module); module_exit(et131x_cleanup_module);
gpl-2.0
Rover-Yu/skbtrace-kernel
arch/arm/mach-ep93xx/edb93xx.c
141
10766
/*
 * arch/arm/mach-ep93xx/edb93xx.c
 * Cirrus Logic EDB93xx Development Board support.
 *
 * EDB93XX, EDB9301, EDB9307A
 * Copyright (C) 2008-2009 H Hartley Sweeten <hsweeten@visionengravers.com>
 *
 * EDB9302
 * Copyright (C) 2006 George Kashperko <george@chas.com.ua>
 *
 * EDB9302A, EDB9315, EDB9315A
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * EDB9307
 * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org>
 *
 * EDB9312
 * Copyright (C) 2006 Infosys Technologies Limited
 * Toufeeq Hussain <toufeeq_hussain@infosys.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/spi/spi.h>

#include <sound/cs4271.h>

#include <mach/hardware.h>
#include <linux/platform_data/video-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>
#include <mach/gpio-ep93xx.h>

#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>

#include "soc.h"

/*
 * Register the on-board NOR flash on chip select CS6.  The EDB9307,
 * EDB9312 and EDB9315 boards carry a 32-bit wide, 32 MiB part; all the
 * other EDB93xx variants use a 16-bit wide, 16 MiB part.
 */
static void __init edb93xx_register_flash(void)
{
	if (machine_is_edb9307() || machine_is_edb9312() ||
	    machine_is_edb9315()) {
		ep93xx_register_flash(4, EP93XX_CS6_PHYS_BASE, SZ_32M);
	} else {
		ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M);
	}
}

/* Ethernet PHY sits at MII address 1 on every EDB93xx board */
static struct ep93xx_eth_data __initdata edb93xx_eth_data = {
	.phy_id		= 1,
};


/*************************************************************************
 * EDB93xx i2c peripheral handling
 *************************************************************************/

/* Bit-banged I2C over the EEPROM data/clock GPIO lines */
static struct i2c_gpio_platform_data __initdata edb93xx_i2c_gpio_data = {
	.sda_pin		= EP93XX_GPIO_LINE_EEDAT,
	.sda_is_open_drain	= 0,
	.scl_pin		= EP93XX_GPIO_LINE_EECLK,
	.scl_is_open_drain	= 0,
	.udelay			= 0,	/* default to 100 kHz */
	.timeout		= 0,	/* default to 100 ms */
};

/* RTC on the "A" board revisions: Intersil ISL1208 at 0x6f */
static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = {
	{
		I2C_BOARD_INFO("isl1208", 0x6f),
	},
};

/* RTC on the non-"A" board revisions: Dallas DS1337 at 0x68 */
static struct i2c_board_info __initdata edb93xx_i2c_board_info[] = {
	{
		I2C_BOARD_INFO("ds1337", 0x68),
	},
};

/*
 * Register the GPIO-based I2C bus together with the board-revision
 * specific RTC device.  EDB9301 has no I2C RTC, so it registers nothing.
 */
static void __init edb93xx_register_i2c(void)
{
	if (machine_is_edb9302a() || machine_is_edb9307a() ||
	    machine_is_edb9315a()) {
		ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
				    edb93xxa_i2c_board_info,
				    ARRAY_SIZE(edb93xxa_i2c_board_info));
	} else if (machine_is_edb9302() || machine_is_edb9307()
		|| machine_is_edb9312() || machine_is_edb9315()) {
		ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
				    edb93xx_i2c_board_info,
				    ARRAY_SIZE(edb93xx_i2c_board_info));
	}
}


/*************************************************************************
 * EDB93xx SPI peripheral handling
 *************************************************************************/

/* CS4271 audio codec; nreset GPIO is board-specific (see register_spi) */
static struct cs4271_platform_data edb93xx_cs4271_data = {
	.gpio_nreset	= -EINVAL,	/* filled in later */
};

/* Claim EGPIO6 (codec chip select), driven high = deselected */
static int edb93xx_cs4271_hw_setup(struct spi_device *spi)
{
	return gpio_request_one(EP93XX_GPIO_LINE_EGPIO6,
				GPIOF_OUT_INIT_HIGH, spi->modalias);
}

static void edb93xx_cs4271_hw_cleanup(struct spi_device *spi)
{
	gpio_free(EP93XX_GPIO_LINE_EGPIO6);
}

/* Chip-select control for the codec: EGPIO6 acts as the CS line */
static void edb93xx_cs4271_hw_cs_control(struct spi_device *spi, int value)
{
	gpio_set_value(EP93XX_GPIO_LINE_EGPIO6, value);
}

static struct ep93xx_spi_chip_ops edb93xx_cs4271_hw = {
	.setup		= edb93xx_cs4271_hw_setup,
	.cleanup	= edb93xx_cs4271_hw_cleanup,
	.cs_control	= edb93xx_cs4271_hw_cs_control,
};

static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
	{
		.modalias		= "cs4271",
		.platform_data		= &edb93xx_cs4271_data,
		.controller_data	= &edb93xx_cs4271_hw,
		.max_speed_hz		= 6000000,
		.bus_num		= 0,
		.chip_select		= 0,
		.mode			= SPI_MODE_3,
	},
};

static struct ep93xx_spi_info edb93xx_spi_info __initdata = {
	.num_chipselect	= ARRAY_SIZE(edb93xx_spi_board_info),
};

/*
 * Register the SPI host and the CS4271 codec, first patching in the
 * board-specific GPIO line that drives the codec's nRESET pin.  Boards
 * not listed leave gpio_nreset at -EINVAL (no reset line wired).
 */
static void __init edb93xx_register_spi(void)
{
	if (machine_is_edb9301() || machine_is_edb9302())
		edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO1;
	else if (machine_is_edb9302a() || machine_is_edb9307a())
		edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_H(2);
	else if (machine_is_edb9315a())
		edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO14;

	ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info,
			    ARRAY_SIZE(edb93xx_spi_board_info));
}


/*************************************************************************
 * EDB93xx I2S
 *************************************************************************/
static struct platform_device edb93xx_audio_device = {
	.name		= "edb93xx-audio",
	.id		= -1,
};

/* Boards that carry the CS4271 audio codec on the I2S bus */
static int __init edb93xx_has_audio(void)
{
	return (machine_is_edb9301() || machine_is_edb9302() ||
		machine_is_edb9302a() || machine_is_edb9307a() ||
		machine_is_edb9315a());
}

static void __init edb93xx_register_i2s(void)
{
	if (edb93xx_has_audio()) {
		ep93xx_register_i2s();
		platform_device_register(&edb93xx_audio_device);
	}
}


/*************************************************************************
 * EDB93xx pwm
 *************************************************************************/

/*
 * Register the PWM channels present on the given SoC variant; the two
 * arguments select pwm.0 (PWMOUT pin) and pwm.1 (EGPIO14) respectively.
 */
static void __init edb93xx_register_pwm(void)
{
	if (machine_is_edb9301() ||
	    machine_is_edb9302() || machine_is_edb9302a()) {
		/* EP9301 and EP9302 only have pwm.1 (EGPIO14) */
		ep93xx_register_pwm(0, 1);
	} else if (machine_is_edb9307() || machine_is_edb9307a()) {
		/* EP9307 only has pwm.0 (PWMOUT) */
		ep93xx_register_pwm(1, 0);
	} else {
		/* EP9312 and EP9315 have both */
		ep93xx_register_pwm(1, 1);
	}
}


/*************************************************************************
 * EDB93xx framebuffer
 *************************************************************************/
static struct ep93xxfb_mach_info __initdata edb93xxfb_info = {
	.num_modes	= EP93XXFB_USE_MODEDB,
	.bpp		= 16,
	.flags		= 0,
};

static int __init edb93xx_has_fb(void)
{
	/* These platforms have an ep93xx with video capability */
	return machine_is_edb9307() || machine_is_edb9307a() ||
	       machine_is_edb9312() || machine_is_edb9315() ||
	       machine_is_edb9315a();
}

/*
 * Register the framebuffer on video-capable boards.  The "A" revisions
 * have video memory on SDCSn0; the others use SDCSn3.
 */
static void __init edb93xx_register_fb(void)
{
	if (!edb93xx_has_fb())
		return;

	if (machine_is_edb9307a() || machine_is_edb9315a())
		edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN0;
	else
		edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN3;

	ep93xx_register_fb(&edb93xxfb_info);
}


/*************************************************************************
 * EDB93xx IDE
 *************************************************************************/
static int __init edb93xx_has_ide(void)
{
	/*
	 * Although EDB9312 and EDB9315 do have IDE capability, they have
	 * INTRQ line wired as pull-up, which makes using IDE interface
	 * problematic.
	 */
	return machine_is_edb9312() || machine_is_edb9315() ||
	       machine_is_edb9315a();
}

static void __init edb93xx_register_ide(void)
{
	if (!edb93xx_has_ide())
		return;

	ep93xx_register_ide();
}


/*
 * Board init entry point, shared by all EDB93xx machine descriptors
 * below.  Registers the common SoC devices first, then each peripheral
 * helper decides per-machine what (if anything) to register.
 */
static void __init edb93xx_init_machine(void)
{
	ep93xx_init_devices();
	edb93xx_register_flash();
	ep93xx_register_eth(&edb93xx_eth_data, 1);
	edb93xx_register_i2c();
	edb93xx_register_spi();
	edb93xx_register_i2s();
	edb93xx_register_pwm();
	edb93xx_register_fb();
	edb93xx_register_ide();
}


/*
 * One machine descriptor per supported board variant.  All of them share
 * the same map_io/irq/timer/init hooks; only the machine type and
 * maintainer differ.
 */
#ifdef CONFIG_MACH_EDB9301
MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
	/* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9302
MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
	/* Maintainer: George Kashperko <george@chas.com.ua> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9302A
MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9307
MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
	/* Maintainer: Herbert Valerio Riedel <hvr@gnu.org> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9307A
MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
	/* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9312
MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
	/* Maintainer: Toufeeq Hussain <toufeeq_hussain@infosys.com> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9315
MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9315A
MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif
gpl-2.0
AdrianHuang/rt-thread-for-vmm
components/external/SQLite-3.8.1/mptest/mptest.c
141
37321
/* ** 2013-04-05 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This is a program used for testing SQLite, and specifically for testing ** the ability of independent processes to access the same SQLite database ** concurrently. ** ** Compile this program as follows: ** ** gcc -g -c -Wall sqlite3.c $(OPTS) ** gcc -g -o mptest mptest.c sqlite3.o $(LIBS) ** ** Recommended options: ** ** -DHAVE_USLEEP ** -DSQLITE_NO_SYNC ** -DSQLITE_THREADSAFE=0 ** -DSQLITE_OMIT_LOAD_EXTENSION ** ** Run like this: ** ** ./mptest $database $script ** ** where $database is the database to use for testing and $script is a ** test script. */ #include "sqlite3.h" #include <stdio.h> #if defined(_WIN32) # define WIN32_LEAN_AND_MEAN # include <windows.h> #else # include <unistd.h> #endif #include <stdlib.h> #include <string.h> #include <assert.h> #include <ctype.h> /* The suffix to append to the child command lines, if any */ #if defined(_WIN32) # define GETPID (int)GetCurrentProcessId #else # define GETPID getpid #endif /* Mark a parameter as unused to suppress compiler warnings */ #define UNUSED_PARAMETER(x) (void)x /* Global data */ static struct Global { char *argv0; /* Name of the executable */ const char *zVfs; /* Name of VFS to use. Often NULL meaning "default" */ char *zDbFile; /* Name of the database */ sqlite3 *db; /* Open connection to database */ char *zErrLog; /* Filename for error log */ FILE *pErrLog; /* Where to write errors */ char *zLog; /* Name of output log file */ FILE *pLog; /* Where to write log messages */ char zName[32]; /* Symbolic name of this process */ int taskId; /* Task ID. 0 means supervisor. 
*/ int iTrace; /* Tracing level */ int bSqlTrace; /* True to trace SQL commands */ int bIgnoreSqlErrors; /* Ignore errors in SQL statements */ int nError; /* Number of errors */ int nTest; /* Number of --match operators */ int iTimeout; /* Milliseconds until a busy timeout */ int bSync; /* Call fsync() */ } g; /* Default timeout */ #define DEFAULT_TIMEOUT 10000 /* ** Print a message adding zPrefix[] to the beginning of every line. */ static void printWithPrefix(FILE *pOut, const char *zPrefix, const char *zMsg){ while( zMsg && zMsg[0] ){ int i; for(i=0; zMsg[i] && zMsg[i]!='\n' && zMsg[i]!='\r'; i++){} fprintf(pOut, "%s%.*s\n", zPrefix, i, zMsg); zMsg += i; while( zMsg[0]=='\n' || zMsg[0]=='\r' ) zMsg++; } } /* ** Compare two pointers to strings, where the pointers might be NULL. */ static int safe_strcmp(const char *a, const char *b){ if( a==b ) return 0; if( a==0 ) return -1; if( b==0 ) return 1; return strcmp(a,b); } /* ** Return TRUE if string z[] matches glob pattern zGlob[]. ** Return FALSE if the pattern does not match. ** ** Globbing rules: ** ** '*' Matches any sequence of zero or more characters. ** ** '?' Matches exactly one character. ** ** [...] Matches one character from the enclosed list of ** characters. ** ** [^...] Matches one character not in the enclosed list. ** ** '#' Matches any sequence of one or more digits with an ** optional + or - sign in front */ int strglob(const char *zGlob, const char *z){ int c, c2; int invert; int seen; while( (c = (*(zGlob++)))!=0 ){ if( c=='*' ){ while( (c=(*(zGlob++))) == '*' || c=='?' ){ if( c=='?' && (*(z++))==0 ) return 0; } if( c==0 ){ return 1; }else if( c=='[' ){ while( *z && strglob(zGlob-1,z) ){ z++; } return (*z)!=0; } while( (c2 = (*(z++)))!=0 ){ while( c2!=c ){ c2 = *(z++); if( c2==0 ) return 0; } if( strglob(zGlob,z) ) return 1; } return 0; }else if( c=='?' 
){ if( (*(z++))==0 ) return 0; }else if( c=='[' ){ int prior_c = 0; seen = 0; invert = 0; c = *(z++); if( c==0 ) return 0; c2 = *(zGlob++); if( c2=='^' ){ invert = 1; c2 = *(zGlob++); } if( c2==']' ){ if( c==']' ) seen = 1; c2 = *(zGlob++); } while( c2 && c2!=']' ){ if( c2=='-' && zGlob[0]!=']' && zGlob[0]!=0 && prior_c>0 ){ c2 = *(zGlob++); if( c>=prior_c && c<=c2 ) seen = 1; prior_c = 0; }else{ if( c==c2 ){ seen = 1; } prior_c = c2; } c2 = *(zGlob++); } if( c2==0 || (seen ^ invert)==0 ) return 0; }else if( c=='#' ){ if( (z[0]=='-' || z[0]=='+') && isdigit(z[1]) ) z++; if( !isdigit(z[0]) ) return 0; z++; while( isdigit(z[0]) ){ z++; } }else{ if( c!=(*(z++)) ) return 0; } } return *z==0; } /* ** Close output stream pOut if it is not stdout or stderr */ static void maybeClose(FILE *pOut){ if( pOut!=stdout && pOut!=stderr ) fclose(pOut); } /* ** Print an error message */ static void errorMessage(const char *zFormat, ...){ va_list ap; char *zMsg; char zPrefix[30]; va_start(ap, zFormat); zMsg = sqlite3_vmprintf(zFormat, ap); va_end(ap); sqlite3_snprintf(sizeof(zPrefix), zPrefix, "%s:ERROR: ", g.zName); if( g.pLog ){ printWithPrefix(g.pLog, zPrefix, zMsg); fflush(g.pLog); } if( g.pErrLog && safe_strcmp(g.zErrLog,g.zLog) ){ printWithPrefix(g.pErrLog, zPrefix, zMsg); fflush(g.pErrLog); } sqlite3_free(zMsg); g.nError++; } /* Forward declaration */ static int trySql(const char*, ...); /* ** Print an error message and then quit. 
*/ static void fatalError(const char *zFormat, ...){ va_list ap; char *zMsg; char zPrefix[30]; va_start(ap, zFormat); zMsg = sqlite3_vmprintf(zFormat, ap); va_end(ap); sqlite3_snprintf(sizeof(zPrefix), zPrefix, "%s:FATAL: ", g.zName); if( g.pLog ){ printWithPrefix(g.pLog, zPrefix, zMsg); fflush(g.pLog); maybeClose(g.pLog); } if( g.pErrLog && safe_strcmp(g.zErrLog,g.zLog) ){ printWithPrefix(g.pErrLog, zPrefix, zMsg); fflush(g.pErrLog); maybeClose(g.pErrLog); } sqlite3_free(zMsg); if( g.db ){ int nTry = 0; g.iTimeout = 0; while( trySql("UPDATE client SET wantHalt=1;")==SQLITE_BUSY && (nTry++)<100 ){ sqlite3_sleep(10); } } sqlite3_close(g.db); exit(1); } /* ** Print a log message */ static void logMessage(const char *zFormat, ...){ va_list ap; char *zMsg; char zPrefix[30]; va_start(ap, zFormat); zMsg = sqlite3_vmprintf(zFormat, ap); va_end(ap); sqlite3_snprintf(sizeof(zPrefix), zPrefix, "%s: ", g.zName); if( g.pLog ){ printWithPrefix(g.pLog, zPrefix, zMsg); fflush(g.pLog); } sqlite3_free(zMsg); } /* ** Return the length of a string omitting trailing whitespace */ static int clipLength(const char *z){ int n = (int)strlen(z); while( n>0 && isspace(z[n-1]) ){ n--; } return n; } /* ** Auxiliary SQL function to return the name of the VFS */ static void vfsNameFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ sqlite3 *db = sqlite3_context_db_handle(context); char *zVfs = 0; UNUSED_PARAMETER(argc); UNUSED_PARAMETER(argv); sqlite3_file_control(db, "main", SQLITE_FCNTL_VFSNAME, &zVfs); if( zVfs ){ sqlite3_result_text(context, zVfs, -1, sqlite3_free); } } /* ** Busy handler with a g.iTimeout-millisecond timeout */ static int busyHandler(void *pCD, int count){ UNUSED_PARAMETER(pCD); if( count*10>g.iTimeout ){ if( g.iTimeout>0 ) errorMessage("timeout after %dms", g.iTimeout); return 0; } sqlite3_sleep(10); return 1; } /* ** SQL Trace callback */ static void sqlTraceCallback(void *NotUsed1, const char *zSql){ UNUSED_PARAMETER(NotUsed1); logMessage("[%.*s]", 
clipLength(zSql), zSql); } /* ** SQL error log callback */ static void sqlErrorCallback(void *pArg, int iErrCode, const char *zMsg){ UNUSED_PARAMETER(pArg); if( iErrCode==SQLITE_ERROR && g.bIgnoreSqlErrors ) return; if( (iErrCode&0xff)==SQLITE_SCHEMA && g.iTrace<3 ) return; if( g.iTimeout==0 && (iErrCode&0xff)==SQLITE_BUSY && g.iTrace<3 ) return; if( (iErrCode&0xff)==SQLITE_NOTICE ){ logMessage("(info) %s", zMsg); }else{ errorMessage("(errcode=%d) %s", iErrCode, zMsg); } } /* ** Prepare an SQL statement. Issue a fatal error if unable. */ static sqlite3_stmt *prepareSql(const char *zFormat, ...){ va_list ap; char *zSql; int rc; sqlite3_stmt *pStmt = 0; va_start(ap, zFormat); zSql = sqlite3_vmprintf(zFormat, ap); va_end(ap); rc = sqlite3_prepare_v2(g.db, zSql, -1, &pStmt, 0); if( rc!=SQLITE_OK ){ sqlite3_finalize(pStmt); fatalError("%s\n%s\n", sqlite3_errmsg(g.db), zSql); } sqlite3_free(zSql); return pStmt; } /* ** Run arbitrary SQL. Issue a fatal error on failure. */ static void runSql(const char *zFormat, ...){ va_list ap; char *zSql; int rc; va_start(ap, zFormat); zSql = sqlite3_vmprintf(zFormat, ap); va_end(ap); rc = sqlite3_exec(g.db, zSql, 0, 0, 0); if( rc!=SQLITE_OK ){ fatalError("%s\n%s\n", sqlite3_errmsg(g.db), zSql); } sqlite3_free(zSql); } /* ** Try to run arbitrary SQL. Return success code. */ static int trySql(const char *zFormat, ...){ va_list ap; char *zSql; int rc; va_start(ap, zFormat); zSql = sqlite3_vmprintf(zFormat, ap); va_end(ap); rc = sqlite3_exec(g.db, zSql, 0, 0, 0); sqlite3_free(zSql); return rc; } /* Structure for holding an arbitrary length string */ typedef struct String String; struct String { char *z; /* the string */ int n; /* Slots of z[] used */ int nAlloc; /* Slots of z[] allocated */ }; /* Free a string */ static void stringFree(String *p){ if( p->z ) sqlite3_free(p->z); memset(p, 0, sizeof(*p)); } /* Append n bytes of text to a string. If n<0 append the entire string. 
*/ static void stringAppend(String *p, const char *z, int n){ if( n<0 ) n = (int)strlen(z); if( p->n+n>=p->nAlloc ){ int nAlloc = p->nAlloc*2 + n + 100; char *z = sqlite3_realloc(p->z, nAlloc); if( z==0 ) fatalError("out of memory"); p->z = z; p->nAlloc = nAlloc; } memcpy(p->z+p->n, z, n); p->n += n; p->z[p->n] = 0; } /* Reset a string to an empty string */ static void stringReset(String *p){ if( p->z==0 ) stringAppend(p, " ", 1); p->n = 0; p->z[0] = 0; } /* Append a new token onto the end of the string */ static void stringAppendTerm(String *p, const char *z){ int i; if( p->n ) stringAppend(p, " ", 1); if( z==0 ){ stringAppend(p, "nil", 3); return; } for(i=0; z[i] && !isspace(z[i]); i++){} if( i>0 && z[i]==0 ){ stringAppend(p, z, i); return; } stringAppend(p, "'", 1); while( z[0] ){ for(i=0; z[i] && z[i]!='\''; i++){} if( z[i] ){ stringAppend(p, z, i+1); stringAppend(p, "'", 1); z += i+1; }else{ stringAppend(p, z, i); break; } } stringAppend(p, "'", 1); } /* ** Callback function for evalSql() */ static int evalCallback(void *pCData, int argc, char **argv, char **azCol){ String *p = (String*)pCData; int i; UNUSED_PARAMETER(azCol); for(i=0; i<argc; i++) stringAppendTerm(p, argv[i]); return 0; } /* ** Run arbitrary SQL and record the results in an output string ** given by the first parameter. */ static int evalSql(String *p, const char *zFormat, ...){ va_list ap; char *zSql; int rc; char *zErrMsg = 0; va_start(ap, zFormat); zSql = sqlite3_vmprintf(zFormat, ap); va_end(ap); assert( g.iTimeout>0 ); rc = sqlite3_exec(g.db, zSql, evalCallback, p, &zErrMsg); sqlite3_free(zSql); if( rc ){ char zErr[30]; sqlite3_snprintf(sizeof(zErr), zErr, "error(%d)", rc); stringAppendTerm(p, zErr); if( zErrMsg ){ stringAppendTerm(p, zErrMsg); sqlite3_free(zErrMsg); } } return rc; } /* ** Auxiliary SQL function to recursively evaluate SQL. 
*/ static void evalFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ sqlite3 *db = sqlite3_context_db_handle(context); const char *zSql = (const char*)sqlite3_value_text(argv[0]); String res; char *zErrMsg = 0; int rc; UNUSED_PARAMETER(argc); memset(&res, 0, sizeof(res)); rc = sqlite3_exec(db, zSql, evalCallback, &res, &zErrMsg); if( zErrMsg ){ sqlite3_result_error(context, zErrMsg, -1); sqlite3_free(zErrMsg); }else if( rc ){ sqlite3_result_error_code(context, rc); }else{ sqlite3_result_text(context, res.z, -1, SQLITE_TRANSIENT); } stringFree(&res); } /* ** Look up the next task for client iClient in the database. ** Return the task script and the task number and mark that ** task as being under way. */ static int startScript( int iClient, /* The client number */ char **pzScript, /* Write task script here */ int *pTaskId, /* Write task number here */ char **pzTaskName /* Name of the task */ ){ sqlite3_stmt *pStmt = 0; int taskId; int rc; int totalTime = 0; *pzScript = 0; g.iTimeout = 0; while(1){ rc = trySql("BEGIN IMMEDIATE"); if( rc==SQLITE_BUSY ){ sqlite3_sleep(10); totalTime += 10; continue; } if( rc!=SQLITE_OK ){ fatalError("in startScript: %s", sqlite3_errmsg(g.db)); } if( g.nError || g.nTest ){ runSql("UPDATE counters SET nError=nError+%d, nTest=nTest+%d", g.nError, g.nTest); g.nError = 0; g.nTest = 0; } pStmt = prepareSql("SELECT 1 FROM client WHERE id=%d AND wantHalt",iClient); rc = sqlite3_step(pStmt); sqlite3_finalize(pStmt); if( rc==SQLITE_ROW ){ runSql("DELETE FROM client WHERE id=%d", iClient); g.iTimeout = DEFAULT_TIMEOUT; runSql("COMMIT TRANSACTION;"); return SQLITE_DONE; } pStmt = prepareSql( "SELECT script, id, name FROM task" " WHERE client=%d AND starttime IS NULL" " ORDER BY id LIMIT 1", iClient); rc = sqlite3_step(pStmt); if( rc==SQLITE_ROW ){ int n = sqlite3_column_bytes(pStmt, 0); *pzScript = sqlite3_malloc(n+1); strcpy(*pzScript, (const char*)sqlite3_column_text(pStmt, 0)); *pTaskId = taskId = sqlite3_column_int(pStmt, 1); 
*pzTaskName = sqlite3_mprintf("%s", sqlite3_column_text(pStmt, 2)); sqlite3_finalize(pStmt); runSql("UPDATE task" " SET starttime=strftime('%%Y-%%m-%%d %%H:%%M:%%f','now')" " WHERE id=%d;", taskId); g.iTimeout = DEFAULT_TIMEOUT; runSql("COMMIT TRANSACTION;"); return SQLITE_OK; } sqlite3_finalize(pStmt); if( rc==SQLITE_DONE ){ if( totalTime>30000 ){ errorMessage("Waited over 30 seconds with no work. Giving up."); runSql("DELETE FROM client WHERE id=%d; COMMIT;", iClient); sqlite3_close(g.db); exit(1); } while( trySql("COMMIT")==SQLITE_BUSY ){ sqlite3_sleep(10); totalTime += 10; } sqlite3_sleep(100); totalTime += 100; continue; } fatalError("%s", sqlite3_errmsg(g.db)); } g.iTimeout = DEFAULT_TIMEOUT; } /* ** Mark a script as having finished. Remove the CLIENT table entry ** if bShutdown is true. */ static int finishScript(int iClient, int taskId, int bShutdown){ runSql("UPDATE task" " SET endtime=strftime('%%Y-%%m-%%d %%H:%%M:%%f','now')" " WHERE id=%d;", taskId); if( bShutdown ){ runSql("DELETE FROM client WHERE id=%d", iClient); } return SQLITE_OK; } /* ** Start up a client process for iClient, if it is not already ** running. If the client is already running, then this routine ** is a no-op. 
*/ static void startClient(int iClient){ runSql("INSERT OR IGNORE INTO client VALUES(%d,0)", iClient); if( sqlite3_changes(g.db) ){ char *zSys; int rc; zSys = sqlite3_mprintf("%s \"%s\" --client %d --trace %d", g.argv0, g.zDbFile, iClient, g.iTrace); if( g.bSqlTrace ){ zSys = sqlite3_mprintf("%z --sqltrace", zSys); } if( g.bSync ){ zSys = sqlite3_mprintf("%z --sync", zSys); } if( g.zVfs ){ zSys = sqlite3_mprintf("%z --vfs \"%s\"", zSys, g.zVfs); } if( g.iTrace>=2 ) logMessage("system('%q')", zSys); #if !defined(_WIN32) zSys = sqlite3_mprintf("%z &", zSys); rc = system(zSys); if( rc ) errorMessage("system() fails with error code %d", rc); #else { STARTUPINFOA startupInfo; PROCESS_INFORMATION processInfo; memset(&startupInfo, 0, sizeof(startupInfo)); startupInfo.cb = sizeof(startupInfo); memset(&processInfo, 0, sizeof(processInfo)); rc = CreateProcessA(NULL, zSys, NULL, NULL, FALSE, 0, NULL, NULL, &startupInfo, &processInfo); if( rc ){ CloseHandle(processInfo.hThread); CloseHandle(processInfo.hProcess); }else{ errorMessage("CreateProcessA() fails with error code %lu", GetLastError()); } } #endif sqlite3_free(zSys); } } /* ** Read the entire content of a file into memory */ static char *readFile(const char *zFilename){ FILE *in = fopen(zFilename, "rb"); long sz; char *z; if( in==0 ){ fatalError("cannot open \"%s\" for reading", zFilename); } fseek(in, 0, SEEK_END); sz = ftell(in); rewind(in); z = sqlite3_malloc( sz+1 ); sz = (long)fread(z, 1, sz, in); z[sz] = 0; fclose(in); return z; } /* ** Return the length of the next token. 
*/ static int tokenLength(const char *z, int *pnLine){ int n = 0; if( isspace(z[0]) || (z[0]=='/' && z[1]=='*') ){ int inC = 0; int c; if( z[0]=='/' ){ inC = 1; n = 2; } while( (c = z[n++])!=0 ){ if( c=='\n' ) (*pnLine)++; if( isspace(c) ) continue; if( inC && c=='*' && z[n]=='/' ){ n++; inC = 0; }else if( !inC && c=='/' && z[n]=='*' ){ n++; inC = 1; }else if( !inC ){ break; } } n--; }else if( z[0]=='-' && z[1]=='-' ){ for(n=2; z[n] && z[n]!='\n'; n++){} if( z[n] ){ (*pnLine)++; n++; } }else if( z[0]=='"' || z[0]=='\'' ){ int delim = z[0]; for(n=1; z[n]; n++){ if( z[n]=='\n' ) (*pnLine)++; if( z[n]==delim ){ n++; if( z[n+1]!=delim ) break; } } }else{ int c; for(n=1; (c = z[n])!=0 && !isspace(c) && c!='"' && c!='\'' && c!=';'; n++){} } return n; } /* ** Copy a single token into a string buffer. */ static int extractToken(const char *zIn, int nIn, char *zOut, int nOut){ int i; if( nIn<=0 ){ zOut[0] = 0; return 0; } for(i=0; i<nIn && i<nOut-1 && !isspace(zIn[i]); i++){ zOut[i] = zIn[i]; } zOut[i] = 0; return i; } /* ** Find the number of characters up to the start of the next "--end" token. */ static int findEnd(const char *z, int *pnLine){ int n = 0; while( z[n] && (strncmp(z+n,"--end",5) || !isspace(z[n+5])) ){ n += tokenLength(z+n, pnLine); } return n; } /* ** Find the number of characters up to the first character past the ** of the next "--endif" or "--else" token. Nested --if commands are ** also skipped. 
*/ static int findEndif(const char *z, int stopAtElse, int *pnLine){ int n = 0; while( z[n] ){ int len = tokenLength(z+n, pnLine); if( (strncmp(z+n,"--endif",7)==0 && isspace(z[n+7])) || (stopAtElse && strncmp(z+n,"--else",6)==0 && isspace(z[n+6])) ){ return n+len; } if( strncmp(z+n,"--if",4)==0 && isspace(z[n+4]) ){ int skip = findEndif(z+n+len, 0, pnLine); n += skip + len; }else{ n += len; } } return n; } /* ** Wait for a client process to complete all its tasks */ static void waitForClient(int iClient, int iTimeout, char *zErrPrefix){ sqlite3_stmt *pStmt; int rc; if( iClient>0 ){ pStmt = prepareSql( "SELECT 1 FROM task" " WHERE client=%d" " AND client IN (SELECT id FROM client)" " AND endtime IS NULL", iClient); }else{ pStmt = prepareSql( "SELECT 1 FROM task" " WHERE client IN (SELECT id FROM client)" " AND endtime IS NULL"); } g.iTimeout = 0; while( ((rc = sqlite3_step(pStmt))==SQLITE_BUSY || rc==SQLITE_ROW) && iTimeout>0 ){ sqlite3_reset(pStmt); sqlite3_sleep(50); iTimeout -= 50; } sqlite3_finalize(pStmt); g.iTimeout = DEFAULT_TIMEOUT; if( rc!=SQLITE_DONE ){ if( zErrPrefix==0 ) zErrPrefix = ""; if( iClient>0 ){ errorMessage("%stimeout waiting for client %d", zErrPrefix, iClient); }else{ errorMessage("%stimeout waiting for all clients", zErrPrefix); } } } /* Return a pointer to the tail of a filename */ static char *filenameTail(char *z){ int i, j; for(i=j=0; z[i]; i++) if( z[i]=='/' ) j = i+1; return z+j; } /* ** Interpret zArg as a boolean value. Return either 0 or 1. */ static int booleanValue(char *zArg){ int i; if( zArg==0 ) return 0; for(i=0; zArg[i]>='0' && zArg[i]<='9'; i++){} if( i>0 && zArg[i]==0 ) return atoi(zArg); if( sqlite3_stricmp(zArg, "on")==0 || sqlite3_stricmp(zArg,"yes")==0 ){ return 1; } if( sqlite3_stricmp(zArg, "off")==0 || sqlite3_stricmp(zArg,"no")==0 ){ return 0; } errorMessage("unknown boolean: [%s]", zArg); return 0; } /* This routine exists as a convenient place to set a debugger ** breakpoint. 
*/ static void test_breakpoint(void){ static volatile int cnt = 0; cnt++; } /* Maximum number of arguments to a --command */ #define MX_ARG 2 /* ** Run a script. */ static void runScript( int iClient, /* The client number, or 0 for the master */ int taskId, /* The task ID for clients. 0 for master */ char *zScript, /* Text of the script */ char *zFilename /* File from which script was read. */ ){ int lineno = 1; int prevLine = 1; int ii = 0; int iBegin = 0; int n, c, j; int len; int nArg; String sResult; char zCmd[30]; char zError[1000]; char azArg[MX_ARG][100]; memset(&sResult, 0, sizeof(sResult)); stringReset(&sResult); while( (c = zScript[ii])!=0 ){ prevLine = lineno; len = tokenLength(zScript+ii, &lineno); if( isspace(c) || (c=='/' && zScript[ii+1]=='*') ){ ii += len; continue; } if( c!='-' || zScript[ii+1]!='-' || !isalpha(zScript[ii+2]) ){ ii += len; continue; } /* Run any prior SQL before processing the new --command */ if( ii>iBegin ){ char *zSql = sqlite3_mprintf("%.*s", ii-iBegin, zScript+iBegin); evalSql(&sResult, zSql); sqlite3_free(zSql); iBegin = ii + len; } /* Parse the --command */ if( g.iTrace>=2 ) logMessage("%.*s", len, zScript+ii); n = extractToken(zScript+ii+2, len-2, zCmd, sizeof(zCmd)); for(nArg=0; n<len-2 && nArg<MX_ARG; nArg++){ while( n<len-2 && isspace(zScript[ii+2+n]) ){ n++; } if( n>=len-2 ) break; n += extractToken(zScript+ii+2+n, len-2-n, azArg[nArg], sizeof(azArg[nArg])); } for(j=nArg; j<MX_ARG; j++) azArg[j++][0] = 0; /* ** --sleep N ** ** Pause for N milliseconds */ if( strcmp(zCmd, "sleep")==0 ){ sqlite3_sleep(atoi(azArg[0])); }else /* ** --exit N ** ** Exit this process. If N>0 then exit without shutting down ** SQLite. (In other words, simulate a crash.) */ if( strcmp(zCmd, "exit")==0 ){ int rc = atoi(azArg[0]); finishScript(iClient, taskId, 1); if( rc==0 ) sqlite3_close(g.db); exit(rc); }else /* ** --testcase NAME ** ** Begin a new test case. Announce in the log that the test case ** has begun. 
*/ if( strcmp(zCmd, "testcase")==0 ){ if( g.iTrace==1 ) logMessage("%.*s", len - 1, zScript+ii); stringReset(&sResult); }else /* ** --finish ** ** Mark the current task as having finished, even if it is not. ** This can be used in conjunction with --exit to simulate a crash. */ if( strcmp(zCmd, "finish")==0 && iClient>0 ){ finishScript(iClient, taskId, 1); }else /* ** --reset ** ** Reset accumulated results back to an empty string */ if( strcmp(zCmd, "reset")==0 ){ stringReset(&sResult); }else /* ** --match ANSWER... ** ** Check to see if output matches ANSWER. Report an error if not. */ if( strcmp(zCmd, "match")==0 ){ int jj; char *zAns = zScript+ii; for(jj=7; jj<len-1 && isspace(zAns[jj]); jj++){} zAns += jj; if( len-jj-1!=sResult.n || strncmp(sResult.z, zAns, len-jj-1) ){ errorMessage("line %d of %s:\nExpected [%.*s]\n Got [%s]", prevLine, zFilename, len-jj-1, zAns, sResult.z); } g.nTest++; stringReset(&sResult); }else /* ** --glob ANSWER... ** --notglob ANSWER.... ** ** Check to see if output does or does not match the glob pattern ** ANSWER. */ if( strcmp(zCmd, "glob")==0 || strcmp(zCmd, "notglob")==0 ){ int jj; char *zAns = zScript+ii; char *zCopy; int isGlob = (zCmd[0]=='g'); for(jj=9-3*isGlob; jj<len-1 && isspace(zAns[jj]); jj++){} zAns += jj; zCopy = sqlite3_mprintf("%.*s", len-jj-1, zAns); if( (sqlite3_strglob(zCopy, sResult.z)==0)^isGlob ){ errorMessage("line %d of %s:\nExpected [%s]\n Got [%s]", prevLine, zFilename, zCopy, sResult.z); } sqlite3_free(zCopy); g.nTest++; stringReset(&sResult); }else /* ** --output ** ** Output the result of the previous SQL. */ if( strcmp(zCmd, "output")==0 ){ logMessage("%s", sResult.z); }else /* ** --source FILENAME ** ** Run a subscript from a separate file. 
*/ if( strcmp(zCmd, "source")==0 ){ char *zNewFile, *zNewScript; char *zToDel = 0; zNewFile = azArg[0]; if( zNewFile[0]!='/' ){ int k; for(k=(int)strlen(zFilename)-1; k>=0 && zFilename[k]!='/'; k--){} if( k>0 ){ zNewFile = zToDel = sqlite3_mprintf("%.*s/%s", k,zFilename,zNewFile); } } zNewScript = readFile(zNewFile); if( g.iTrace ) logMessage("begin script [%s]\n", zNewFile); runScript(0, 0, zNewScript, zNewFile); sqlite3_free(zNewScript); if( g.iTrace ) logMessage("end script [%s]\n", zNewFile); sqlite3_free(zToDel); }else /* ** --print MESSAGE.... ** ** Output the remainder of the line to the log file */ if( strcmp(zCmd, "print")==0 ){ int jj; for(jj=7; jj<len && isspace(zScript[ii+jj]); jj++){} logMessage("%.*s", len-jj, zScript+ii+jj); }else /* ** --if EXPR ** ** Skip forward to the next matching --endif or --else if EXPR is false. */ if( strcmp(zCmd, "if")==0 ){ int jj, rc; sqlite3_stmt *pStmt; for(jj=4; jj<len && isspace(zScript[ii+jj]); jj++){} pStmt = prepareSql("SELECT %.*s", len-jj, zScript+ii+jj); rc = sqlite3_step(pStmt); if( rc!=SQLITE_ROW || sqlite3_column_int(pStmt, 0)==0 ){ ii += findEndif(zScript+ii+len, 1, &lineno); } sqlite3_finalize(pStmt); }else /* ** --else ** ** This command can only be encountered if currently inside an --if that ** is true. Skip forward to the next matching --endif. */ if( strcmp(zCmd, "else")==0 ){ ii += findEndif(zScript+ii+len, 0, &lineno); }else /* ** --endif ** ** This command can only be encountered if currently inside an --if that ** is true or an --else of a false if. This is a no-op. */ if( strcmp(zCmd, "endif")==0 ){ /* no-op */ }else /* ** --start CLIENT ** ** Start up the given client. */ if( strcmp(zCmd, "start")==0 && iClient==0 ){ int iNewClient = atoi(azArg[0]); if( iNewClient>0 ){ startClient(iNewClient); } }else /* ** --wait CLIENT TIMEOUT ** ** Wait until all tasks complete for the given client. If CLIENT is ** "all" then wait for all clients to complete. 
Wait no longer than ** TIMEOUT milliseconds (default 10,000) */ if( strcmp(zCmd, "wait")==0 && iClient==0 ){ int iTimeout = nArg>=2 ? atoi(azArg[1]) : 10000; sqlite3_snprintf(sizeof(zError),zError,"line %d of %s\n", prevLine, zFilename); waitForClient(atoi(azArg[0]), iTimeout, zError); }else /* ** --task CLIENT ** <task-content-here> ** --end ** ** Assign work to a client. Start the client if it is not running ** already. */ if( strcmp(zCmd, "task")==0 && iClient==0 ){ int iTarget = atoi(azArg[0]); int iEnd; char *zTask; char *zTName; iEnd = findEnd(zScript+ii+len, &lineno); if( iTarget<0 ){ errorMessage("line %d of %s: bad client number: %d", prevLine, zFilename, iTarget); }else{ zTask = sqlite3_mprintf("%.*s", iEnd, zScript+ii+len); if( nArg>1 ){ zTName = sqlite3_mprintf("%s", azArg[1]); }else{ zTName = sqlite3_mprintf("%s:%d", filenameTail(zFilename), prevLine); } startClient(iTarget); runSql("INSERT INTO task(client,script,name)" " VALUES(%d,'%q',%Q)", iTarget, zTask, zTName); sqlite3_free(zTask); sqlite3_free(zTName); } iEnd += tokenLength(zScript+ii+len+iEnd, &lineno); len += iEnd; iBegin = ii+len; }else /* ** --breakpoint ** ** This command calls "test_breakpoint()" which is a routine provided ** as a convenient place to set a debugger breakpoint. */ if( strcmp(zCmd, "breakpoint")==0 ){ test_breakpoint(); }else /* ** --show-sql-errors BOOLEAN ** ** Turn display of SQL errors on and off. */ if( strcmp(zCmd, "show-sql-errors")==0 ){ g.bIgnoreSqlErrors = nArg>=1 ? !booleanValue(azArg[0]) : 1; }else /* error */{ errorMessage("line %d of %s: unknown command --%s", prevLine, zFilename, zCmd); } ii += len; } if( iBegin<ii ){ char *zSql = sqlite3_mprintf("%.*s", ii-iBegin, zScript+iBegin); runSql(zSql); sqlite3_free(zSql); } stringFree(&sResult); } /* ** Look for a command-line option. If present, return a pointer. ** Return NULL if missing. ** ** hasArg==0 means the option is a flag. It is either present or not. ** hasArg==1 means the option has an argument. 
Return a pointer to the ** argument. */ static char *findOption( char **azArg, int *pnArg, const char *zOption, int hasArg ){ int i, j; char *zReturn = 0; int nArg = *pnArg; assert( hasArg==0 || hasArg==1 ); for(i=0; i<nArg; i++){ const char *z; if( i+hasArg >= nArg ) break; z = azArg[i]; if( z[0]!='-' ) continue; z++; if( z[0]=='-' ){ if( z[1]==0 ) break; z++; } if( strcmp(z,zOption)==0 ){ if( hasArg && i==nArg-1 ){ fatalError("command-line option \"--%s\" requires an argument", z); } if( hasArg ){ zReturn = azArg[i+1]; }else{ zReturn = azArg[i]; } j = i+1+(hasArg!=0); while( j<nArg ) azArg[i++] = azArg[j++]; *pnArg = i; return zReturn; } } return zReturn; } /* Print a usage message for the program and exit */ static void usage(const char *argv0){ int i; const char *zTail = argv0; for(i=0; argv0[i]; i++){ if( argv0[i]=='/' ) zTail = argv0+i+1; } fprintf(stderr,"Usage: %s DATABASE ?OPTIONS? ?SCRIPT?\n", zTail); exit(1); } /* Report on unrecognized arguments */ static void unrecognizedArguments( const char *argv0, int nArg, char **azArg ){ int i; fprintf(stderr,"%s: unrecognized arguments:", argv0); for(i=0; i<nArg; i++){ fprintf(stderr," %s", azArg[i]); } fprintf(stderr,"\n"); exit(1); } int main(int argc, char **argv){ const char *zClient; int iClient; int n, i; int openFlags = SQLITE_OPEN_READWRITE; int rc; char *zScript; int taskId; const char *zTrace; const char *zCOption; g.argv0 = argv[0]; g.iTrace = 1; if( argc<2 ) usage(argv[0]); g.zDbFile = argv[1]; if( strglob("*.test", g.zDbFile) ) usage(argv[0]); if( strcmp(sqlite3_sourceid(), SQLITE_SOURCE_ID)!=0 ){ fprintf(stderr, "SQLite library and header mismatch\n" "Library: %s\n" "Header: %s\n", sqlite3_sourceid(), SQLITE_SOURCE_ID); exit(1); } n = argc-2; sqlite3_snprintf(sizeof(g.zName), g.zName, "%05d.mptest", GETPID()); g.zVfs = findOption(argv+2, &n, "vfs", 1); zClient = findOption(argv+2, &n, "client", 1); g.zErrLog = findOption(argv+2, &n, "errlog", 1); g.zLog = findOption(argv+2, &n, "log", 1); zTrace = 
findOption(argv+2, &n, "trace", 1); if( zTrace ) g.iTrace = atoi(zTrace); if( findOption(argv+2, &n, "quiet", 0)!=0 ) g.iTrace = 0; g.bSqlTrace = findOption(argv+2, &n, "sqltrace", 0)!=0; g.bSync = findOption(argv+2, &n, "sync", 0)!=0; if( g.zErrLog ){ g.pErrLog = fopen(g.zErrLog, "a"); }else{ g.pErrLog = stderr; } if( g.zLog ){ g.pLog = fopen(g.zLog, "a"); }else{ g.pLog = stdout; } sqlite3_config(SQLITE_CONFIG_LOG, sqlErrorCallback, 0); if( zClient ){ iClient = atoi(zClient); if( iClient<1 ) fatalError("illegal client number: %d\n", iClient); sqlite3_snprintf(sizeof(g.zName), g.zName, "%05d.client%02d", GETPID(), iClient); }else{ if( g.iTrace>0 ){ printf("With SQLite " SQLITE_VERSION " " SQLITE_SOURCE_ID "\n" ); for(i=0; (zCOption = sqlite3_compileoption_get(i))!=0; i++){ printf("-DSQLITE_%s\n", zCOption); } fflush(stdout); } iClient = 0; unlink(g.zDbFile); openFlags |= SQLITE_OPEN_CREATE; } rc = sqlite3_open_v2(g.zDbFile, &g.db, openFlags, g.zVfs); if( rc ) fatalError("cannot open [%s]", g.zDbFile); sqlite3_enable_load_extension(g.db, 1); sqlite3_busy_handler(g.db, busyHandler, 0); sqlite3_create_function(g.db, "vfsname", 0, SQLITE_UTF8, 0, vfsNameFunc, 0, 0); sqlite3_create_function(g.db, "eval", 1, SQLITE_UTF8, 0, evalFunc, 0, 0); g.iTimeout = DEFAULT_TIMEOUT; if( g.bSqlTrace ) sqlite3_trace(g.db, sqlTraceCallback, 0); if( !g.bSync ) trySql("PRAGMA synchronous=OFF"); if( iClient>0 ){ if( n>0 ) unrecognizedArguments(argv[0], n, argv+2); if( g.iTrace ) logMessage("start-client"); while(1){ char *zTaskName = 0; rc = startScript(iClient, &zScript, &taskId, &zTaskName); if( rc==SQLITE_DONE ) break; if( g.iTrace ) logMessage("begin %s (%d)", zTaskName, taskId); runScript(iClient, taskId, zScript, zTaskName); if( g.iTrace ) logMessage("end %s (%d)", zTaskName, taskId); finishScript(iClient, taskId, 0); sqlite3_free(zTaskName); sqlite3_sleep(10); } if( g.iTrace ) logMessage("end-client"); }else{ sqlite3_stmt *pStmt; int iTimeout; if( n==0 ){ fatalError("missing script 
filename"); } if( n>1 ) unrecognizedArguments(argv[0], n, argv+2); runSql( "CREATE TABLE task(\n" " id INTEGER PRIMARY KEY,\n" " name TEXT,\n" " client INTEGER,\n" " starttime DATE,\n" " endtime DATE,\n" " script TEXT\n" ");" "CREATE INDEX task_i1 ON task(client, starttime);\n" "CREATE INDEX task_i2 ON task(client, endtime);\n" "CREATE TABLE counters(nError,nTest);\n" "INSERT INTO counters VALUES(0,0);\n" "CREATE TABLE client(id INTEGER PRIMARY KEY, wantHalt);\n" ); zScript = readFile(argv[2]); if( g.iTrace ) logMessage("begin script [%s]\n", argv[2]); runScript(0, 0, zScript, argv[2]); sqlite3_free(zScript); if( g.iTrace ) logMessage("end script [%s]\n", argv[2]); waitForClient(0, 2000, "during shutdown...\n"); trySql("UPDATE client SET wantHalt=1"); sqlite3_sleep(10); g.iTimeout = 0; iTimeout = 1000; while( ((rc = trySql("SELECT 1 FROM client"))==SQLITE_BUSY || rc==SQLITE_ROW) && iTimeout>0 ){ sqlite3_sleep(10); iTimeout -= 10; } sqlite3_sleep(100); pStmt = prepareSql("SELECT nError, nTest FROM counters"); iTimeout = 1000; while( (rc = sqlite3_step(pStmt))==SQLITE_BUSY && iTimeout>0 ){ sqlite3_sleep(10); iTimeout -= 10; } if( rc==SQLITE_ROW ){ g.nError += sqlite3_column_int(pStmt, 0); g.nTest += sqlite3_column_int(pStmt, 1); } sqlite3_finalize(pStmt); } sqlite3_close(g.db); maybeClose(g.pLog); maybeClose(g.pErrLog); if( iClient==0 ){ printf("Summary: %d errors in %d tests\n", g.nError, g.nTest); } return g.nError>0; }
gpl-2.0
frank-liu/SPADE-7x30-3.0
net/xfrm/xfrm_user.c
141
70771
/* xfrm_user.c: User interface to configure xfrm engine. * * Copyright (C) 2002 David S. Miller (davem@redhat.com) * * Changes: * Mitsuru KANDA @USAGI * Kazunori MIYAZAWA @USAGI * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * IPv6 support * */ #include <linux/crypto.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/socket.h> #include <linux/string.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/pfkeyv2.h> #include <linux/ipsec.h> #include <linux/init.h> #include <linux/security.h> #include <net/sock.h> #include <net/xfrm.h> #include <net/netlink.h> #include <net/ah.h> #include <asm/uaccess.h> #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #include <linux/in6.h> #endif static inline int aead_len(struct xfrm_algo_aead *alg) { return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); } static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type) { struct nlattr *rt = attrs[type]; struct xfrm_algo *algp; if (!rt) return 0; algp = nla_data(rt); if (nla_len(rt) < xfrm_alg_len(algp)) return -EINVAL; switch (type) { case XFRMA_ALG_AUTH: case XFRMA_ALG_CRYPT: case XFRMA_ALG_COMP: break; default: return -EINVAL; } algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0'; return 0; } static int verify_auth_trunc(struct nlattr **attrs) { struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC]; struct xfrm_algo_auth *algp; if (!rt) return 0; algp = nla_data(rt); if (nla_len(rt) < xfrm_alg_auth_len(algp)) return -EINVAL; algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0'; return 0; } static int verify_aead(struct nlattr **attrs) { struct nlattr *rt = attrs[XFRMA_ALG_AEAD]; struct xfrm_algo_aead *algp; if (!rt) return 0; algp = nla_data(rt); if (nla_len(rt) < aead_len(algp)) return -EINVAL; algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0'; return 0; } static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type, xfrm_address_t **addrp) { struct nlattr *rt = attrs[type]; if (rt && 
addrp) *addrp = nla_data(rt); } static inline int verify_sec_ctx_len(struct nlattr **attrs) { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_user_sec_ctx *uctx; if (!rt) return 0; uctx = nla_data(rt); if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) return -EINVAL; return 0; } static inline int verify_replay(struct xfrm_usersa_info *p, struct nlattr **attrs) { struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; if ((p->flags & XFRM_STATE_ESN) && !rt) return -EINVAL; if (!rt) return 0; if (p->id.proto != IPPROTO_ESP) return -EINVAL; if (p->replay_window != 0) return -EINVAL; return 0; } static int verify_newsa_info(struct xfrm_usersa_info *p, struct nlattr **attrs) { int err; err = -EINVAL; switch (p->family) { case AF_INET: break; case AF_INET6: #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) break; #else err = -EAFNOSUPPORT; goto out; #endif default: goto out; } err = -EINVAL; switch (p->id.proto) { case IPPROTO_AH: if ((!attrs[XFRMA_ALG_AUTH] && !attrs[XFRMA_ALG_AUTH_TRUNC]) || attrs[XFRMA_ALG_AEAD] || attrs[XFRMA_ALG_CRYPT] || attrs[XFRMA_ALG_COMP] || attrs[XFRMA_TFCPAD]) goto out; break; case IPPROTO_ESP: if (attrs[XFRMA_ALG_COMP]) goto out; if (!attrs[XFRMA_ALG_AUTH] && !attrs[XFRMA_ALG_AUTH_TRUNC] && !attrs[XFRMA_ALG_CRYPT] && !attrs[XFRMA_ALG_AEAD]) goto out; if ((attrs[XFRMA_ALG_AUTH] || attrs[XFRMA_ALG_AUTH_TRUNC] || attrs[XFRMA_ALG_CRYPT]) && attrs[XFRMA_ALG_AEAD]) goto out; if (attrs[XFRMA_TFCPAD] && p->mode != XFRM_MODE_TUNNEL) goto out; break; case IPPROTO_COMP: if (!attrs[XFRMA_ALG_COMP] || attrs[XFRMA_ALG_AEAD] || attrs[XFRMA_ALG_AUTH] || attrs[XFRMA_ALG_AUTH_TRUNC] || attrs[XFRMA_ALG_CRYPT] || attrs[XFRMA_TFCPAD]) goto out; break; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case IPPROTO_DSTOPTS: case IPPROTO_ROUTING: if (attrs[XFRMA_ALG_COMP] || attrs[XFRMA_ALG_AUTH] || attrs[XFRMA_ALG_AUTH_TRUNC] || attrs[XFRMA_ALG_AEAD] || attrs[XFRMA_ALG_CRYPT] || attrs[XFRMA_ENCAP] || attrs[XFRMA_SEC_CTX] || 
attrs[XFRMA_TFCPAD] || !attrs[XFRMA_COADDR]) goto out; break; #endif default: goto out; } if ((err = verify_aead(attrs))) goto out; if ((err = verify_auth_trunc(attrs))) goto out; if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH))) goto out; if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT))) goto out; if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP))) goto out; if ((err = verify_sec_ctx_len(attrs))) goto out; if ((err = verify_replay(p, attrs))) goto out; err = -EINVAL; switch (p->mode) { case XFRM_MODE_TRANSPORT: case XFRM_MODE_TUNNEL: case XFRM_MODE_ROUTEOPTIMIZATION: case XFRM_MODE_BEET: break; default: goto out; } err = 0; out: return err; } static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, struct xfrm_algo_desc *(*get_byname)(const char *, int), struct nlattr *rta) { struct xfrm_algo *p, *ualg; struct xfrm_algo_desc *algo; if (!rta) return 0; ualg = nla_data(rta); algo = get_byname(ualg->alg_name, 1); if (!algo) return -ENOSYS; *props = algo->desc.sadb_alg_id; p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL); if (!p) return -ENOMEM; strcpy(p->alg_name, algo->name); *algpp = p; return 0; } static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props, struct nlattr *rta) { struct xfrm_algo *ualg; struct xfrm_algo_auth *p; struct xfrm_algo_desc *algo; if (!rta) return 0; ualg = nla_data(rta); algo = xfrm_aalg_get_byname(ualg->alg_name, 1); if (!algo) return -ENOSYS; *props = algo->desc.sadb_alg_id; p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL); if (!p) return -ENOMEM; strcpy(p->alg_name, algo->name); p->alg_key_len = ualg->alg_key_len; p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8); *algpp = p; return 0; } static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props, struct nlattr *rta) { struct xfrm_algo_auth *p, *ualg; struct xfrm_algo_desc *algo; if (!rta) return 0; ualg = nla_data(rta); algo = xfrm_aalg_get_byname(ualg->alg_name, 1); if (!algo) 
return -ENOSYS; if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN || ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) return -EINVAL; *props = algo->desc.sadb_alg_id; p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL); if (!p) return -ENOMEM; strcpy(p->alg_name, algo->name); if (!p->alg_trunc_len) p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; *algpp = p; return 0; } static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props, struct nlattr *rta) { struct xfrm_algo_aead *p, *ualg; struct xfrm_algo_desc *algo; if (!rta) return 0; ualg = nla_data(rta); algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1); if (!algo) return -ENOSYS; *props = algo->desc.sadb_alg_id; p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL); if (!p) return -ENOMEM; strcpy(p->alg_name, algo->name); *algpp = p; return 0; } static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn, struct nlattr *rp) { struct xfrm_replay_state_esn *up; if (!replay_esn || !rp) return 0; up = nla_data(rp); if (xfrm_replay_state_esn_len(replay_esn) != xfrm_replay_state_esn_len(up)) return -EINVAL; return 0; } static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn, struct xfrm_replay_state_esn **preplay_esn, struct nlattr *rta) { struct xfrm_replay_state_esn *p, *pp, *up; if (!rta) return 0; up = nla_data(rta); p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); if (!p) return -ENOMEM; pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); if (!pp) { kfree(p); return -ENOMEM; } *replay_esn = p; *preplay_esn = pp; return 0; } static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx) { int len = 0; if (xfrm_ctx) { len += sizeof(struct xfrm_user_sec_ctx); len += xfrm_ctx->ctx_len; } return len; } static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) { memcpy(&x->id, &p->id, sizeof(x->id)); memcpy(&x->sel, &p->sel, sizeof(x->sel)); memcpy(&x->lft, &p->lft, sizeof(x->lft)); x->props.mode = 
p->mode; x->props.replay_window = p->replay_window; x->props.reqid = p->reqid; x->props.family = p->family; memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); x->props.flags = p->flags; if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC)) x->sel.family = p->family; } /* * someday when pfkey also has support, we could have the code * somehow made shareable and move it to xfrm_state.c - JHS * */ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs) { struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; if (re) { struct xfrm_replay_state_esn *replay_esn; replay_esn = nla_data(re); memcpy(x->replay_esn, replay_esn, xfrm_replay_state_esn_len(replay_esn)); memcpy(x->preplay_esn, replay_esn, xfrm_replay_state_esn_len(replay_esn)); } if (rp) { struct xfrm_replay_state *replay; replay = nla_data(rp); memcpy(&x->replay, replay, sizeof(*replay)); memcpy(&x->preplay, replay, sizeof(*replay)); } if (lt) { struct xfrm_lifetime_cur *ltime; ltime = nla_data(lt); x->curlft.bytes = ltime->bytes; x->curlft.packets = ltime->packets; x->curlft.add_time = ltime->add_time; x->curlft.use_time = ltime->use_time; } if (et) x->replay_maxage = nla_get_u32(et); if (rt) x->replay_maxdiff = nla_get_u32(rt); } static struct xfrm_state *xfrm_state_construct(struct net *net, struct xfrm_usersa_info *p, struct nlattr **attrs, int *errp) { struct xfrm_state *x = xfrm_state_alloc(net); int err = -ENOMEM; if (!x) goto error_no_put; copy_from_user_state(x, p); if ((err = attach_aead(&x->aead, &x->props.ealgo, attrs[XFRMA_ALG_AEAD]))) goto error; if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo, attrs[XFRMA_ALG_AUTH_TRUNC]))) goto error; if (!x->props.aalgo) { if ((err = attach_auth(&x->aalg, &x->props.aalgo, attrs[XFRMA_ALG_AUTH]))) goto error; } if ((err = 
attach_one_algo(&x->ealg, &x->props.ealgo, xfrm_ealg_get_byname, attrs[XFRMA_ALG_CRYPT]))) goto error; if ((err = attach_one_algo(&x->calg, &x->props.calgo, xfrm_calg_get_byname, attrs[XFRMA_ALG_COMP]))) goto error; if (attrs[XFRMA_ENCAP]) { x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), sizeof(*x->encap), GFP_KERNEL); if (x->encap == NULL) goto error; } if (attrs[XFRMA_TFCPAD]) x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]); if (attrs[XFRMA_COADDR]) { x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), sizeof(*x->coaddr), GFP_KERNEL); if (x->coaddr == NULL) goto error; } xfrm_mark_get(attrs, &x->mark); err = __xfrm_init_state(x, false); if (err) goto error; if (attrs[XFRMA_SEC_CTX] && security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX]))) goto error; if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn, attrs[XFRMA_REPLAY_ESN_VAL]))) goto error; x->km.seq = p->seq; x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth; /* sysctl_xfrm_aevent_etime is in 100ms units */ x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M; if ((err = xfrm_init_replay(x))) goto error; /* override default values from above */ xfrm_update_ae_params(x, attrs); return x; error: x->km.state = XFRM_STATE_DEAD; xfrm_state_put(x); error_no_put: *errp = err; return NULL; } static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct net *net = sock_net(skb->sk); struct xfrm_usersa_info *p = nlmsg_data(nlh); struct xfrm_state *x; int err; struct km_event c; uid_t loginuid = audit_get_loginuid(current); u32 sessionid = audit_get_sessionid(current); u32 sid; err = verify_newsa_info(p, attrs); if (err) return err; x = xfrm_state_construct(net, p, attrs, &err); if (!x) return err; xfrm_state_hold(x); if (nlh->nlmsg_type == XFRM_MSG_NEWSA) err = xfrm_state_add(x); else err = xfrm_state_update(x); security_task_getsecid(current, &sid); xfrm_audit_state_add(x, err ? 
0 : 1, loginuid, sessionid, sid); if (err < 0) { x->km.state = XFRM_STATE_DEAD; __xfrm_state_put(x); goto out; } c.seq = nlh->nlmsg_seq; c.pid = nlh->nlmsg_pid; c.event = nlh->nlmsg_type; km_state_notify(x, &c); out: xfrm_state_put(x); return err; } static struct xfrm_state *xfrm_user_state_lookup(struct net *net, struct xfrm_usersa_id *p, struct nlattr **attrs, int *errp) { struct xfrm_state *x = NULL; struct xfrm_mark m; int err; u32 mark = xfrm_mark_get(attrs, &m); if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) { err = -ESRCH; x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family); } else { xfrm_address_t *saddr = NULL; verify_one_addr(attrs, XFRMA_SRCADDR, &saddr); if (!saddr) { err = -EINVAL; goto out; } err = -ESRCH; x = xfrm_state_lookup_byaddr(net, mark, &p->daddr, saddr, p->proto, p->family); } out: if (!x && errp) *errp = err; return x; } static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct net *net = sock_net(skb->sk); struct xfrm_state *x; int err = -ESRCH; struct km_event c; struct xfrm_usersa_id *p = nlmsg_data(nlh); uid_t loginuid = audit_get_loginuid(current); u32 sessionid = audit_get_sessionid(current); u32 sid; x = xfrm_user_state_lookup(net, p, attrs, &err); if (x == NULL) return err; if ((err = security_xfrm_state_delete(x)) != 0) goto out; if (xfrm_state_kern(x)) { err = -EPERM; goto out; } err = xfrm_state_delete(x); if (err < 0) goto out; c.seq = nlh->nlmsg_seq; c.pid = nlh->nlmsg_pid; c.event = nlh->nlmsg_type; km_state_notify(x, &c); out: security_task_getsecid(current, &sid); xfrm_audit_state_delete(x, err ? 
0 : 1, loginuid, sessionid, sid); xfrm_state_put(x); return err; } static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) { memcpy(&p->id, &x->id, sizeof(p->id)); memcpy(&p->sel, &x->sel, sizeof(p->sel)); memcpy(&p->lft, &x->lft, sizeof(p->lft)); memcpy(&p->curlft, &x->curlft, sizeof(p->curlft)); memcpy(&p->stats, &x->stats, sizeof(p->stats)); memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr)); p->mode = x->props.mode; p->replay_window = x->props.replay_window; p->reqid = x->props.reqid; p->family = x->props.family; p->flags = x->props.flags; p->seq = x->km.seq; } struct xfrm_dump_info { struct sk_buff *in_skb; struct sk_buff *out_skb; u32 nlmsg_seq; u16 nlmsg_flags; }; static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) { struct xfrm_user_sec_ctx *uctx; struct nlattr *attr; int ctx_size = sizeof(*uctx) + s->ctx_len; attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size); if (attr == NULL) return -EMSGSIZE; uctx = nla_data(attr); uctx->exttype = XFRMA_SEC_CTX; uctx->len = ctx_size; uctx->ctx_doi = s->ctx_doi; uctx->ctx_alg = s->ctx_alg; uctx->ctx_len = s->ctx_len; memcpy(uctx + 1, s->ctx_str, s->ctx_len); return 0; } static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) { struct xfrm_algo *algo; struct nlattr *nla; nla = nla_reserve(skb, XFRMA_ALG_AUTH, sizeof(*algo) + (auth->alg_key_len + 7) / 8); if (!nla) return -EMSGSIZE; algo = nla_data(nla); strcpy(algo->alg_name, auth->alg_name); memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); algo->alg_key_len = auth->alg_key_len; return 0; } /* Don't change this without updating xfrm_sa_len! 
*/ static int copy_to_user_state_extra(struct xfrm_state *x, struct xfrm_usersa_info *p, struct sk_buff *skb) { copy_to_user_state(x, p); if (x->coaddr) NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); if (x->lastused) NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused); if (x->aead) NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead); if (x->aalg) { if (copy_to_user_auth(x->aalg, skb)) goto nla_put_failure; NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(x->aalg), x->aalg); } if (x->ealg) NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg); if (x->calg) NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg); if (x->encap) NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); if (x->tfcpad) NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad); if (xfrm_mark_put(skb, &x->mark)) goto nla_put_failure; if (x->replay_esn) NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL, xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn); if (x->security && copy_sec_ctx(x->security, skb) < 0) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static int dump_one_state(struct xfrm_state *x, int count, void *ptr) { struct xfrm_dump_info *sp = ptr; struct sk_buff *in_skb = sp->in_skb; struct sk_buff *skb = sp->out_skb; struct xfrm_usersa_info *p; struct nlmsghdr *nlh; int err; nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); if (nlh == NULL) return -EMSGSIZE; p = nlmsg_data(nlh); err = copy_to_user_state_extra(x, p, skb); if (err) goto nla_put_failure; nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return err; } static int xfrm_dump_sa_done(struct netlink_callback *cb) { struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; xfrm_state_walk_done(walk); return 0; } static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; struct 
xfrm_dump_info info; BUILD_BUG_ON(sizeof(struct xfrm_state_walk) > sizeof(cb->args) - sizeof(cb->args[0])); info.in_skb = cb->skb; info.out_skb = skb; info.nlmsg_seq = cb->nlh->nlmsg_seq; info.nlmsg_flags = NLM_F_MULTI; if (!cb->args[0]) { cb->args[0] = 1; xfrm_state_walk_init(walk, 0); } (void) xfrm_state_walk(net, walk, dump_one_state, &info); return skb->len; } static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, struct xfrm_state *x, u32 seq) { struct xfrm_dump_info info; struct sk_buff *skb; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return ERR_PTR(-ENOMEM); info.in_skb = in_skb; info.out_skb = skb; info.nlmsg_seq = seq; info.nlmsg_flags = 0; if (dump_one_state(x, 0, &info)) { kfree_skb(skb); return NULL; } return skb; } static inline size_t xfrm_spdinfo_msgsize(void) { return NLMSG_ALIGN(4) + nla_total_size(sizeof(struct xfrmu_spdinfo)) + nla_total_size(sizeof(struct xfrmu_spdhinfo)); } static int build_spdinfo(struct sk_buff *skb, struct net *net, u32 pid, u32 seq, u32 flags) { struct xfrmk_spdinfo si; struct xfrmu_spdinfo spc; struct xfrmu_spdhinfo sph; struct nlmsghdr *nlh; u32 *f; nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); if (nlh == NULL) /* shouldn't really happen ... 
*/ return -EMSGSIZE; f = nlmsg_data(nlh); *f = flags; xfrm_spd_getinfo(net, &si); spc.incnt = si.incnt; spc.outcnt = si.outcnt; spc.fwdcnt = si.fwdcnt; spc.inscnt = si.inscnt; spc.outscnt = si.outscnt; spc.fwdscnt = si.fwdscnt; sph.spdhcnt = si.spdhcnt; sph.spdhmcnt = si.spdhmcnt; NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc); NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph); return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct net *net = sock_net(skb->sk); struct sk_buff *r_skb; u32 *flags = nlmsg_data(nlh); u32 spid = NETLINK_CB(skb).pid; u32 seq = nlh->nlmsg_seq; r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC); if (r_skb == NULL) return -ENOMEM; if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0) BUG(); return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); } static inline size_t xfrm_sadinfo_msgsize(void) { return NLMSG_ALIGN(4) + nla_total_size(sizeof(struct xfrmu_sadhinfo)) + nla_total_size(4); /* XFRMA_SAD_CNT */ } static int build_sadinfo(struct sk_buff *skb, struct net *net, u32 pid, u32 seq, u32 flags) { struct xfrmk_sadinfo si; struct xfrmu_sadhinfo sh; struct nlmsghdr *nlh; u32 *f; nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); if (nlh == NULL) /* shouldn't really happen ... 
*/ return -EMSGSIZE; f = nlmsg_data(nlh); *f = flags; xfrm_sad_getinfo(net, &si); sh.sadhmcnt = si.sadhmcnt; sh.sadhcnt = si.sadhcnt; NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt); NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh); return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct net *net = sock_net(skb->sk); struct sk_buff *r_skb; u32 *flags = nlmsg_data(nlh); u32 spid = NETLINK_CB(skb).pid; u32 seq = nlh->nlmsg_seq; r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC); if (r_skb == NULL) return -ENOMEM; if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0) BUG(); return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); } static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct net *net = sock_net(skb->sk); struct xfrm_usersa_id *p = nlmsg_data(nlh); struct xfrm_state *x; struct sk_buff *resp_skb; int err = -ESRCH; x = xfrm_user_state_lookup(net, p, attrs, &err); if (x == NULL) goto out_noput; resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); if (IS_ERR(resp_skb)) { err = PTR_ERR(resp_skb); } else { err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid); } xfrm_state_put(x); out_noput: return err; } static int verify_userspi_info(struct xfrm_userspi_info *p) { switch (p->info.id.proto) { case IPPROTO_AH: case IPPROTO_ESP: break; case IPPROTO_COMP: /* IPCOMP spi is 16-bits. 
*/ if (p->max >= 0x10000) return -EINVAL; break; default: return -EINVAL; } if (p->min > p->max) return -EINVAL; return 0; } static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct net *net = sock_net(skb->sk); struct xfrm_state *x; struct xfrm_userspi_info *p; struct sk_buff *resp_skb; xfrm_address_t *daddr; int family; int err; u32 mark; struct xfrm_mark m; p = nlmsg_data(nlh); err = verify_userspi_info(p); if (err) goto out_noput; family = p->info.family; daddr = &p->info.id.daddr; x = NULL; mark = xfrm_mark_get(attrs, &m); if (p->info.seq) { x = xfrm_find_acq_byseq(net, mark, p->info.seq); if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) { xfrm_state_put(x); x = NULL; } } if (!x) x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid, p->info.id.proto, daddr, &p->info.saddr, 1, family); err = -ENOENT; if (x == NULL) goto out_noput; err = xfrm_alloc_spi(x, p->min, p->max); if (err) goto out; resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); if (IS_ERR(resp_skb)) { err = PTR_ERR(resp_skb); goto out; } err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid); out: xfrm_state_put(x); out_noput: return err; } static int verify_policy_dir(u8 dir) { switch (dir) { case XFRM_POLICY_IN: case XFRM_POLICY_OUT: case XFRM_POLICY_FWD: break; default: return -EINVAL; } return 0; } static int verify_policy_type(u8 type) { switch (type) { case XFRM_POLICY_TYPE_MAIN: #ifdef CONFIG_XFRM_SUB_POLICY case XFRM_POLICY_TYPE_SUB: #endif break; default: return -EINVAL; } return 0; } static int verify_newpolicy_info(struct xfrm_userpolicy_info *p) { switch (p->share) { case XFRM_SHARE_ANY: case XFRM_SHARE_SESSION: case XFRM_SHARE_USER: case XFRM_SHARE_UNIQUE: break; default: return -EINVAL; } switch (p->action) { case XFRM_POLICY_ALLOW: case XFRM_POLICY_BLOCK: break; default: return -EINVAL; } switch (p->sel.family) { case AF_INET: break; case AF_INET6: #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) break; #else 
return -EAFNOSUPPORT; #endif default: return -EINVAL; } return verify_policy_dir(p->dir); } static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs) { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_user_sec_ctx *uctx; if (!rt) return 0; uctx = nla_data(rt); return security_xfrm_policy_alloc(&pol->security, uctx); } static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, int nr) { int i; xp->xfrm_nr = nr; for (i = 0; i < nr; i++, ut++) { struct xfrm_tmpl *t = &xp->xfrm_vec[i]; memcpy(&t->id, &ut->id, sizeof(struct xfrm_id)); memcpy(&t->saddr, &ut->saddr, sizeof(xfrm_address_t)); t->reqid = ut->reqid; t->mode = ut->mode; t->share = ut->share; t->optional = ut->optional; t->aalgos = ut->aalgos; t->ealgos = ut->ealgos; t->calgos = ut->calgos; /* If all masks are ~0, then we allow all algorithms. */ t->allalgs = !~(t->aalgos & t->ealgos & t->calgos); t->encap_family = ut->family; } } static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) { int i; if (nr > XFRM_MAX_DEPTH) return -EINVAL; for (i = 0; i < nr; i++) { /* We never validated the ut->family value, so many * applications simply leave it at zero. The check was * never made and ut->family was ignored because all * templates could be assumed to have the same family as * the policy itself. Now that we will have ipv4-in-ipv6 * and ipv6-in-ipv4 tunnels, this is no longer true. 
		 */
		if (!ut[i].family)
			ut[i].family = family;

		switch (ut[i].family) {
		case AF_INET:
			break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		case AF_INET6:
			break;
#endif
		default:
			return -EINVAL;
		}
	}

	return 0;
}

/* Pull the XFRMA_TMPL attribute (if any) into the policy after validating
 * the template count and families.
 */
static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_TMPL];

	if (!rt) {
		pol->xfrm_nr = 0;
	} else {
		struct xfrm_user_tmpl *utmpl = nla_data(rt);
		int nr = nla_len(rt) / sizeof(*utmpl);
		int err;

		err = validate_tmpl(nr, utmpl, pol->family);
		if (err)
			return err;

		copy_templates(pol, utmpl, nr);
	}

	return 0;
}

/* Extract the policy type from XFRMA_POLICY_TYPE, defaulting to MAIN when
 * the attribute is absent.
 */
static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
	struct xfrm_userpolicy_type *upt;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	if (rt) {
		upt = nla_data(rt);
		type = upt->type;
	}

	err = verify_policy_type(type);
	if (err)
		return err;

	*tp = type;
	return 0;
}

/* Copy the fixed userspace policy header into a kernel policy object. */
static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
	xp->priority = p->priority;
	xp->index = p->index;
	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
	xp->action = p->action;
	xp->flags = p->flags;
	xp->family = p->sel.family;
	/* XXX xp->share = p->share; */
}

/* Fill a userspace policy header from a kernel policy object. */
static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
	p->priority = xp->priority;
	p->index = xp->index;
	p->sel.family = xp->family;
	p->dir = dir;
	p->action = xp->action;
	p->flags = xp->flags;
	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}

/* Build a new xfrm_policy from a netlink request. On failure, stores the
 * error in *errp, destroys the partially built policy and returns NULL.
 */
static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);

	err = copy_from_user_policy_type(&xp->type, attrs);
	if (err)
		goto error;

	if (!(err = copy_from_user_tmpl(xp, attrs)))
		err = copy_from_user_sec_ctx(xp, attrs);
	if (err)
		goto error;

	xfrm_mark_get(attrs, &xp->mark);

	return xp;
 error:
	*errp = err;
	/* mark dead so xfrm_policy_destroy() will accept the policy */
	xp->walk.dead = 1;
	xfrm_policy_destroy(xp);
	return NULL;
}

/* XFRM_MSG_NEWPOLICY / XFRM_MSG_UPDPOLICY handler: construct, insert,
 * audit and broadcast a policy. NEWPOLICY is exclusive (insert fails if a
 * matching policy exists); UPDPOLICY replaces.
 */
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
	struct xfrm_policy *xp;
	struct km_event c;
	int err;
	int excl;
	uid_t loginuid = audit_get_loginuid(current);
	u32 sessionid = audit_get_sessionid(current);
	u32 sid;

	err = verify_newpolicy_info(p);
	if (err)
		return err;
	err = verify_sec_ctx_len(attrs);
	if (err)
		return err;

	xp = xfrm_policy_construct(net, p, attrs, &err);
	if (!xp)
		return err;

	/* shouldn't excl be based on nlh flags??
	 * Aha! this is anti-netlink really i.e  more pfkey derived
	 * in netlink excl is a flag and you wouldnt need
	 * a type XFRM_MSG_UPDPOLICY - JHS */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);
	security_task_getsecid(current, &sid);
	/* audit both success (1) and failure (0) of the add */
	xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);

	if (err) {
		security_xfrm_policy_free(xp->security);
		kfree(xp);
		return err;
	}

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);

	xfrm_pol_put(xp);

	return 0;
}

/* Emit the policy's template vector as a single XFRMA_TMPL attribute. */
static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
	int i;

	if (xp->xfrm_nr == 0)
		return 0;

	for (i = 0; i < xp->xfrm_nr; i++) {
		struct xfrm_user_tmpl *up = &vec[i];
		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

		memcpy(&up->id, &kp->id, sizeof(up->id));
		up->family = kp->encap_family;
		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
		up->reqid = kp->reqid;
		up->mode = kp->mode;
		up->share = kp->share;
		up->optional = kp->optional;
		up->aalgos = kp->aalgos;
		up->ealgos = kp->ealgos;
		up->calgos = kp->calgos;
	}

	return nla_put(skb, XFRMA_TMPL,
		       sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}

/* Emit a state's security context attribute, if it has one. */
static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
{
	if (x->security) {
		return copy_sec_ctx(x->security, skb);
	}
	return 0;
}

/* Emit a policy's security context attribute, if it has one. */
static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
	if (xp->security) {
		return copy_sec_ctx(xp->security, skb);
	}
	return 0;
}

/* Space needed for an optional XFRMA_POLICY_TYPE attribute (zero when
 * sub-policies are compiled out, since it is then never emitted).
 */
static inline size_t userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
	return 0;
#endif
}

#ifdef CONFIG_XFRM_SUB_POLICY
/* Emit the policy type as an XFRMA_POLICY_TYPE attribute. */
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	struct xfrm_userpolicy_type upt = {
		.type = type,
	};

	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}

#else
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	return 0;
}
#endif

/* Per-policy callback for the GETPOLICY dump walk: append one NEWPOLICY
 * message describing @xp to the output skb.
 */
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid,
sp->nlmsg_seq, XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); if (nlh == NULL) return -EMSGSIZE; p = nlmsg_data(nlh); copy_to_user_policy(xp, p, dir); if (copy_to_user_tmpl(xp, skb) < 0) goto nlmsg_failure; if (copy_to_user_sec_ctx(xp, skb)) goto nlmsg_failure; if (copy_to_user_policy_type(xp->type, skb) < 0) goto nlmsg_failure; if (xfrm_mark_put(skb, &xp->mark)) goto nla_put_failure; nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int xfrm_dump_policy_done(struct netlink_callback *cb) { struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; xfrm_policy_walk_done(walk); return 0; } static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; struct xfrm_dump_info info; BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) > sizeof(cb->args) - sizeof(cb->args[0])); info.in_skb = cb->skb; info.out_skb = skb; info.nlmsg_seq = cb->nlh->nlmsg_seq; info.nlmsg_flags = NLM_F_MULTI; if (!cb->args[0]) { cb->args[0] = 1; xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); } (void) xfrm_policy_walk(net, walk, dump_one_policy, &info); return skb->len; } static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, struct xfrm_policy *xp, int dir, u32 seq) { struct xfrm_dump_info info; struct sk_buff *skb; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOMEM); info.in_skb = in_skb; info.out_skb = skb; info.nlmsg_seq = seq; info.nlmsg_flags = 0; if (dump_one_policy(xp, dir, 0, &info) < 0) { kfree_skb(skb); return NULL; } return skb; } static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct net *net = sock_net(skb->sk); struct xfrm_policy *xp; struct xfrm_userpolicy_id *p; u8 type = XFRM_POLICY_TYPE_MAIN; int err; struct km_event c; int delete; struct xfrm_mark m; u32 mark = xfrm_mark_get(attrs, 
&m); p = nlmsg_data(nlh); delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; err = copy_from_user_policy_type(&type, attrs); if (err) return err; err = verify_policy_dir(p->dir); if (err) return err; if (p->index) xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; err = verify_sec_ctx_len(attrs); if (err) return err; ctx = NULL; if (rt) { struct xfrm_user_sec_ctx *uctx = nla_data(rt); err = security_xfrm_policy_alloc(&ctx, uctx); if (err) return err; } xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel, ctx, delete, &err); security_xfrm_policy_free(ctx); } if (xp == NULL) return -ENOENT; if (!delete) { struct sk_buff *resp_skb; resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq); if (IS_ERR(resp_skb)) { err = PTR_ERR(resp_skb); } else { err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid); } } else { uid_t loginuid = audit_get_loginuid(current); u32 sessionid = audit_get_sessionid(current); u32 sid; security_task_getsecid(current, &sid); xfrm_audit_policy_delete(xp, err ? 
0 : 1, loginuid, sessionid, sid); if (err != 0) goto out; c.data.byid = p->index; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; c.pid = nlh->nlmsg_pid; km_policy_notify(xp, p->dir, &c); } out: xfrm_pol_put(xp); return err; } static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct net *net = sock_net(skb->sk); struct km_event c; struct xfrm_usersa_flush *p = nlmsg_data(nlh); struct xfrm_audit audit_info; int err; audit_info.loginuid = audit_get_loginuid(current); audit_info.sessionid = audit_get_sessionid(current); security_task_getsecid(current, &audit_info.secid); err = xfrm_state_flush(net, p->proto, &audit_info); if (err) { if (err == -ESRCH) /* empty table */ return 0; return err; } c.data.proto = p->proto; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; c.pid = nlh->nlmsg_pid; c.net = net; km_state_notify(NULL, &c); return 0; } static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x) { size_t replay_size = x->replay_esn ? xfrm_replay_state_esn_len(x->replay_esn) : sizeof(struct xfrm_replay_state); return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id)) + nla_total_size(replay_size) + nla_total_size(sizeof(struct xfrm_lifetime_cur)) + nla_total_size(sizeof(struct xfrm_mark)) + nla_total_size(4) /* XFRM_AE_RTHR */ + nla_total_size(4); /* XFRM_AE_ETHR */ } static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c) { struct xfrm_aevent_id *id; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); if (nlh == NULL) return -EMSGSIZE; id = nlmsg_data(nlh); memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr)); id->sa_id.spi = x->id.spi; id->sa_id.family = x->props.family; id->sa_id.proto = x->id.proto; memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr)); id->reqid = x->props.reqid; id->flags = c->data.aevent; if (x->replay_esn) NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL, xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn); else 
		NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
	NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);

	/* replay/expiry thresholds only when the requester asked for them */
	if (id->flags & XFRM_AE_RTHR)
		NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);

	if (id->flags & XFRM_AE_ETHR)
		NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH, x->replay_maxage * 10 / HZ);

	if (xfrm_mark_put(skb, &x->mark))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* XFRM_MSG_GETAE handler: look up the SA and unicast its current async
 * event state (replay counters, lifetime, thresholds) to the requester.
 */
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct sk_buff *r_skb;
	int err;
	struct km_event c;
	u32 mark;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct xfrm_usersa_id *id = &p->sa_id;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
	if (x == NULL)
		return -ESRCH;

	r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (r_skb == NULL) {
		xfrm_state_put(x);
		return -ENOMEM;
	}

	/*
	 * XXX: is this lock really needed - none of the other
	 * gets lock (the concern is things getting updated
	 * while we are still reading) - jhs
	 */
	spin_lock_bh(&x->lock);
	c.data.aevent = p->flags;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;

	/* message size was computed above, so a build failure is a bug */
	if (build_aevent(r_skb, x, &c) < 0)
		BUG();

	err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid);
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

/* XFRM_MSG_NEWAE handler: replace an SA's async event parameters (replay
 * state and/or lifetime). Requires NLM_F_REPLACE and at least one of the
 * replay/lifetime attributes.
 */
static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct km_event c;
	int err = - EINVAL;
	u32 mark = 0;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];

	if (!lt && !rp && !re)
		return err;

	/* pedantic mode - thou shalt sayeth replaceth */
	if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
		return err;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
	if (x == NULL)
		return -ESRCH;

	if (x->km.state != XFRM_STATE_VALID)
		goto out;

	/* an ESN state must be updated with an ESN-sized replay attr */
	err = xfrm_replay_verify_len(x->replay_esn, rp);
	if (err)
		goto out;

	spin_lock_bh(&x->lock);
	xfrm_update_ae_params(x, attrs);
	spin_unlock_bh(&x->lock);

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.data.aevent = XFRM_AE_CU;
	km_state_notify(x, &c);
	err = 0;
out:
	xfrm_state_put(x);
	return err;
}

/* XFRM_MSG_FLUSHPOLICY handler: audit and flush all policies of the given
 * type, then broadcast the flush event. An already-empty table is success.
 */
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct xfrm_audit audit_info;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	audit_info.loginuid = audit_get_loginuid(current);
	audit_info.sessionid = audit_get_sessionid(current);
	security_task_getsecid(current, &audit_info.secid);
	err = xfrm_policy_flush(net, type, &audit_info);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}

	c.data.type = type;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.net = net;
	km_policy_notify(NULL, 0, &c);
	return 0;
}

/* XFRM_MSG_POLEXPIRE handler: look up the policy (by index or by
 * selector+sec-ctx) and deliver an expire event; a hard expire also
 * deletes and audits the policy.
 */
static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_polexpire *up = nlmsg_data(nlh);
	struct xfrm_userpolicy_info *p = &up->pol;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err = -ENOENT;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = verify_policy_dir(p->dir);
	if (err)
		return err;

	if (p->index)
		xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx =
nla_data(rt); err = security_xfrm_policy_alloc(&ctx, uctx); if (err) return err; } xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel, ctx, 0, &err); security_xfrm_policy_free(ctx); } if (xp == NULL) return -ENOENT; if (unlikely(xp->walk.dead)) goto out; err = 0; if (up->hard) { uid_t loginuid = audit_get_loginuid(current); u32 sessionid = audit_get_sessionid(current); u32 sid; security_task_getsecid(current, &sid); xfrm_policy_delete(xp, p->dir); xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid); } else { // reset the timers here? WARN(1, "Dont know what to do with soft policy expire\n"); } km_policy_expired(xp, p->dir, up->hard, current->pid); out: xfrm_pol_put(xp); return err; } static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct net *net = sock_net(skb->sk); struct xfrm_state *x; int err; struct xfrm_user_expire *ue = nlmsg_data(nlh); struct xfrm_usersa_info *p = &ue->state; struct xfrm_mark m; u32 mark = xfrm_mark_get(attrs, &m); x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); err = -ENOENT; if (x == NULL) return err; spin_lock_bh(&x->lock); err = -EINVAL; if (x->km.state != XFRM_STATE_VALID) goto out; km_state_expired(x, ue->hard, current->pid); if (ue->hard) { uid_t loginuid = audit_get_loginuid(current); u32 sessionid = audit_get_sessionid(current); u32 sid; security_task_getsecid(current, &sid); __xfrm_state_delete(x); xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid); } err = 0; out: spin_unlock_bh(&x->lock); xfrm_state_put(x); return err; } static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct net *net = sock_net(skb->sk); struct xfrm_policy *xp; struct xfrm_user_tmpl *ut; int i; struct nlattr *rt = attrs[XFRMA_TMPL]; struct xfrm_mark mark; struct xfrm_user_acquire *ua = nlmsg_data(nlh); struct xfrm_state *x = xfrm_state_alloc(net); int err = -ENOMEM; if (!x) goto nomem; xfrm_mark_get(attrs, 
&mark); err = verify_newpolicy_info(&ua->policy); if (err) goto bad_policy; /* build an XP */ xp = xfrm_policy_construct(net, &ua->policy, attrs, &err); if (!xp) goto free_state; memcpy(&x->id, &ua->id, sizeof(ua->id)); memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr)); memcpy(&x->sel, &ua->sel, sizeof(ua->sel)); xp->mark.m = x->mark.m = mark.m; xp->mark.v = x->mark.v = mark.v; ut = nla_data(rt); /* extract the templates and for each call km_key */ for (i = 0; i < xp->xfrm_nr; i++, ut++) { struct xfrm_tmpl *t = &xp->xfrm_vec[i]; memcpy(&x->id, &t->id, sizeof(x->id)); x->props.mode = t->mode; x->props.reqid = t->reqid; x->props.family = ut->family; t->aalgos = ua->aalgos; t->ealgos = ua->ealgos; t->calgos = ua->calgos; err = km_query(x, t, xp); } kfree(x); kfree(xp); return 0; bad_policy: WARN(1, "BAD policy passed\n"); free_state: kfree(x); nomem: return err; } #ifdef CONFIG_XFRM_MIGRATE static int copy_from_user_migrate(struct xfrm_migrate *ma, struct xfrm_kmaddress *k, struct nlattr **attrs, int *num) { struct nlattr *rt = attrs[XFRMA_MIGRATE]; struct xfrm_user_migrate *um; int i, num_migrate; if (k != NULL) { struct xfrm_user_kmaddress *uk; uk = nla_data(attrs[XFRMA_KMADDRESS]); memcpy(&k->local, &uk->local, sizeof(k->local)); memcpy(&k->remote, &uk->remote, sizeof(k->remote)); k->family = uk->family; k->reserved = uk->reserved; } um = nla_data(rt); num_migrate = nla_len(rt) / sizeof(*um); if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) return -EINVAL; for (i = 0; i < num_migrate; i++, um++, ma++) { memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr)); memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr)); memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr)); memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr)); ma->proto = um->proto; ma->mode = um->mode; ma->reqid = um->reqid; ma->old_family = um->old_family; ma->new_family = um->new_family; } *num = i; return 0; } static int xfrm_do_migrate(struct sk_buff *skb, 
struct nlmsghdr *nlh, struct nlattr **attrs) { struct xfrm_userpolicy_id *pi = nlmsg_data(nlh); struct xfrm_migrate m[XFRM_MAX_DEPTH]; struct xfrm_kmaddress km, *kmp; u8 type; int err; int n = 0; if (attrs[XFRMA_MIGRATE] == NULL) return -EINVAL; kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL; err = copy_from_user_policy_type(&type, attrs); if (err) return err; err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n); if (err) return err; if (!n) return 0; xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp); return 0; } #else static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { return -ENOPROTOOPT; } #endif #ifdef CONFIG_XFRM_MIGRATE static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb) { struct xfrm_user_migrate um; memset(&um, 0, sizeof(um)); um.proto = m->proto; um.mode = m->mode; um.reqid = m->reqid; um.old_family = m->old_family; memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr)); memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr)); um.new_family = m->new_family; memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr)); memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr)); return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um); } static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb) { struct xfrm_user_kmaddress uk; memset(&uk, 0, sizeof(uk)); uk.family = k->family; uk.reserved = k->reserved; memcpy(&uk.local, &k->local, sizeof(uk.local)); memcpy(&uk.remote, &k->remote, sizeof(uk.remote)); return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk); } static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma) { return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id)) + (with_kma ? 
nla_total_size(sizeof(struct xfrm_kmaddress)) : 0) + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate) + userpolicy_type_attrsize(); } static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m, int num_migrate, const struct xfrm_kmaddress *k, const struct xfrm_selector *sel, u8 dir, u8 type) { const struct xfrm_migrate *mp; struct xfrm_userpolicy_id *pol_id; struct nlmsghdr *nlh; int i; nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0); if (nlh == NULL) return -EMSGSIZE; pol_id = nlmsg_data(nlh); /* copy data from selector, dir, and type to the pol_id */ memset(pol_id, 0, sizeof(*pol_id)); memcpy(&pol_id->sel, sel, sizeof(pol_id->sel)); pol_id->dir = dir; if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0)) goto nlmsg_failure; if (copy_to_user_policy_type(type, skb) < 0) goto nlmsg_failure; for (i = 0, mp = m ; i < num_migrate; i++, mp++) { if (copy_to_user_migrate(mp, skb) < 0) goto nlmsg_failure; } return nlmsg_end(skb, nlh); nlmsg_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, const struct xfrm_migrate *m, int num_migrate, const struct xfrm_kmaddress *k) { struct net *net = &init_net; struct sk_buff *skb; skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; /* build migrate */ if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0) BUG(); return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC); } #else static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, const struct xfrm_migrate *m, int num_migrate, const struct xfrm_kmaddress *k) { return -ENOPROTOOPT; } #endif #define XMSGSIZE(type) sizeof(struct type) static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), [XFRM_MSG_GETSA - XFRM_MSG_BASE] = 
XMSGSIZE(xfrm_usersa_id), [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info), [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire), [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire), [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0, [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32), [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32), }; #undef XMSGSIZE static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)}, [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)}, [XFRMA_LASTUSED] = { .type = NLA_U64}, [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)}, [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) }, [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, 
[XFRMA_ETIMER_THRESH] = { .type = NLA_U32 }, [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) }, [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) }, [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)}, [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) }, [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) }, [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) }, [XFRMA_TFCPAD] = { .type = NLA_U32 }, [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) }, }; static struct xfrm_link { int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); } xfrm_dispatch[XFRM_NR_MSGTYPES] = { [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa, .dump = xfrm_dump_sa, .done = xfrm_dump_sa_done }, [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, .dump = xfrm_dump_policy, .done = xfrm_dump_policy_done }, [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire }, [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire }, [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire}, [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa }, [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy }, [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae }, [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae }, [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate }, [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] 
						   = { .doit = xfrm_get_sadinfo },
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
};

/* Top-level dispatcher for one xfrm netlink message: validate the type,
 * enforce CAP_NET_ADMIN, start a dump for GETSA/GETPOLICY with NLM_F_DUMP,
 * otherwise parse attributes and call the per-type doit handler.
 */
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[XFRMA_MAX+1];
	struct xfrm_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;

	type -= XFRM_MSG_BASE;
	if (type < 0) {
		/* message type below XFRM_MSG_BASE: not ours */
		printk(KERN_ERR "[xfrm] xfrm_dispatch[type]: type is smaller than 0 %s!\n", __func__);
		return -EINVAL;
	}
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (security_netlink_recv(skb, CAP_NET_ADMIN))
		return -EPERM;

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		if (link->dump == NULL)
			return -EINVAL;

		return netlink_dump_start(net->xfrm.nlsk, skb, nlh,
					  link->dump, link->done);
	}

	err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
			  xfrma_policy);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}

/* Netlink input callback; xfrm_cfg_mutex serializes all configuration. */
static void xfrm_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&xfrm_cfg_mutex);
	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
	mutex_unlock(&xfrm_cfg_mutex);
}

/* Space needed for an XFRM_MSG_EXPIRE notification. */
static inline size_t xfrm_expire_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
	       + nla_total_size(sizeof(struct xfrm_mark));
}

/* Fill an XFRM_MSG_EXPIRE message for state @x into @skb. */
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ? 1 : 0;

	if (xfrm_mark_put(skb, &x->mark))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	return -EMSGSIZE;
}

/* Broadcast a state expire event to the XFRMNLGRP_EXPIRE group. */
static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_expire(skb, x, c) < 0) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

/* Broadcast an async event (replay update etc.) for state @x. */
static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* skb was sized by xfrm_aevent_msgsize(), so this cannot fail */
	if (build_aevent(skb, x, c) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
}

/* Broadcast an SA-flush event (all states of c->data.proto were removed). */
static int xfrm_notify_sa_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct xfrm_usersa_flush *p;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	p = nlmsg_data(nlh);
	p->proto = c->data.proto;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
}

/* Worst-case attribute space needed to serialize state @x; must stay in
 * sync with copy_to_user_state_extra().
 */
static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
	size_t l = 0;
	if (x->aead)
		l += nla_total_size(aead_len(x->aead));
	if (x->aalg) {
		l += nla_total_size(sizeof(struct xfrm_algo) +
				    (x->aalg->alg_key_len + 7) / 8);
		l += nla_total_size(xfrm_alg_auth_len(x->aalg));
	}
	if (x->ealg)
		l += nla_total_size(xfrm_alg_len(x->ealg));
	if (x->calg)
		l += nla_total_size(sizeof(*x->calg));
	if (x->encap)
		l += nla_total_size(sizeof(*x->encap));
	if (x->tfcpad)
		l += nla_total_size(sizeof(x->tfcpad));
	if (x->replay_esn)
		l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
	if (x->security)
		l +=
nla_total_size(sizeof(struct xfrm_user_sec_ctx) + x->security->ctx_len); if (x->coaddr) l += nla_total_size(sizeof(*x->coaddr)); /* Must count x->lastused as it may become non-zero behind our back. */ l += nla_total_size(sizeof(u64)); return l; } static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c) { struct net *net = xs_net(x); struct xfrm_usersa_info *p; struct xfrm_usersa_id *id; struct nlmsghdr *nlh; struct sk_buff *skb; int len = xfrm_sa_len(x); int headlen; headlen = sizeof(*p); if (c->event == XFRM_MSG_DELSA) { len += nla_total_size(headlen); headlen = sizeof(*id); len += nla_total_size(sizeof(struct xfrm_mark)); } len += NLMSG_ALIGN(headlen); skb = nlmsg_new(len, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); if (nlh == NULL) goto nla_put_failure; p = nlmsg_data(nlh); if (c->event == XFRM_MSG_DELSA) { struct nlattr *attr; id = nlmsg_data(nlh); memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr)); id->spi = x->id.spi; id->family = x->props.family; id->proto = x->id.proto; attr = nla_reserve(skb, XFRMA_SA, sizeof(*p)); if (attr == NULL) goto nla_put_failure; p = nla_data(attr); } if (copy_to_user_state_extra(x, p, skb)) goto nla_put_failure; nlmsg_end(skb, nlh); return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); nla_put_failure: /* Somebody screwed up with xfrm_sa_len! 
*/ WARN_ON(1); kfree_skb(skb); return -1; } static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c) { switch (c->event) { case XFRM_MSG_EXPIRE: return xfrm_exp_state_notify(x, c); case XFRM_MSG_NEWAE: return xfrm_aevent_state_notify(x, c); case XFRM_MSG_DELSA: case XFRM_MSG_UPDSA: case XFRM_MSG_NEWSA: return xfrm_notify_sa(x, c); case XFRM_MSG_FLUSHSA: return xfrm_notify_sa_flush(c); default: printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n", c->event); break; } return 0; } static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x, struct xfrm_policy *xp) { return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire)) + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) + nla_total_size(sizeof(struct xfrm_mark)) + nla_total_size(xfrm_user_sec_ctx_size(x->security)) + userpolicy_type_attrsize(); } static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, struct xfrm_tmpl *xt, struct xfrm_policy *xp, int dir) { struct xfrm_user_acquire *ua; struct nlmsghdr *nlh; __u32 seq = xfrm_get_acqseq(); nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0); if (nlh == NULL) return -EMSGSIZE; ua = nlmsg_data(nlh); memcpy(&ua->id, &x->id, sizeof(ua->id)); memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr)); memcpy(&ua->sel, &x->sel, sizeof(ua->sel)); copy_to_user_policy(xp, &ua->policy, dir); ua->aalgos = xt->aalgos; ua->ealgos = xt->ealgos; ua->calgos = xt->calgos; ua->seq = x->km.seq = seq; if (copy_to_user_tmpl(xp, skb) < 0) goto nlmsg_failure; if (copy_to_user_state_sec_ctx(x, skb)) goto nlmsg_failure; if (copy_to_user_policy_type(xp->type, skb) < 0) goto nlmsg_failure; if (xfrm_mark_put(skb, &xp->mark)) goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, struct xfrm_policy *xp, int dir) { struct net *net = xs_net(x); struct sk_buff *skb; skb = nlmsg_new(xfrm_acquire_msgsize(x, 
xp), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; if (build_acquire(skb, x, xt, xp, dir) < 0) BUG(); return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC); } /* User gives us xfrm_user_policy_info followed by an array of 0 * or more templates. */ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt, u8 *data, int len, int *dir) { struct net *net = sock_net(sk); struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data; struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1); struct xfrm_policy *xp; int nr; switch (sk->sk_family) { case AF_INET: if (opt != IP_XFRM_POLICY) { *dir = -EOPNOTSUPP; return NULL; } break; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: if (opt != IPV6_XFRM_POLICY) { *dir = -EOPNOTSUPP; return NULL; } break; #endif default: *dir = -EINVAL; return NULL; } *dir = -EINVAL; if (len < sizeof(*p) || verify_newpolicy_info(p)) return NULL; nr = ((len - sizeof(*p)) / sizeof(*ut)); if (validate_tmpl(nr, ut, p->sel.family)) return NULL; if (p->dir > XFRM_POLICY_OUT) return NULL; xp = xfrm_policy_alloc(net, GFP_ATOMIC); if (xp == NULL) { *dir = -ENOBUFS; return NULL; } copy_from_user_policy(xp, p); xp->type = XFRM_POLICY_TYPE_MAIN; copy_templates(xp, ut, nr); *dir = p->dir; return xp; } static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp) { return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire)) + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) + nla_total_size(xfrm_user_sec_ctx_size(xp->security)) + nla_total_size(sizeof(struct xfrm_mark)) + userpolicy_type_attrsize(); } static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, int dir, const struct km_event *c) { struct xfrm_user_polexpire *upe; struct nlmsghdr *nlh; int hard = c->data.hard; nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); if (nlh == NULL) return -EMSGSIZE; upe = nlmsg_data(nlh); copy_to_user_policy(xp, &upe->pol, dir); if 
(copy_to_user_tmpl(xp, skb) < 0) goto nlmsg_failure; if (copy_to_user_sec_ctx(xp, skb)) goto nlmsg_failure; if (copy_to_user_policy_type(xp->type, skb) < 0) goto nlmsg_failure; if (xfrm_mark_put(skb, &xp->mark)) goto nla_put_failure; upe->hard = !!hard; return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) { struct net *net = xp_net(xp); struct sk_buff *skb; skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; if (build_polexpire(skb, xp, dir, c) < 0) BUG(); return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); } static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c) { struct net *net = xp_net(xp); struct xfrm_userpolicy_info *p; struct xfrm_userpolicy_id *id; struct nlmsghdr *nlh; struct sk_buff *skb; int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); int headlen; headlen = sizeof(*p); if (c->event == XFRM_MSG_DELPOLICY) { len += nla_total_size(headlen); headlen = sizeof(*id); } len += userpolicy_type_attrsize(); len += nla_total_size(sizeof(struct xfrm_mark)); len += NLMSG_ALIGN(headlen); skb = nlmsg_new(len, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); if (nlh == NULL) goto nlmsg_failure; p = nlmsg_data(nlh); if (c->event == XFRM_MSG_DELPOLICY) { struct nlattr *attr; id = nlmsg_data(nlh); memset(id, 0, sizeof(*id)); id->dir = dir; if (c->data.byid) id->index = xp->index; else memcpy(&id->sel, &xp->selector, sizeof(id->sel)); attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p)); if (attr == NULL) goto nlmsg_failure; p = nla_data(attr); } copy_to_user_policy(xp, p, dir); if (copy_to_user_tmpl(xp, skb) < 0) goto nlmsg_failure; if (copy_to_user_policy_type(xp->type, skb) < 0) goto nlmsg_failure; if (xfrm_mark_put(skb, &xp->mark)) goto nla_put_failure; 
nlmsg_end(skb, nlh); return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); nla_put_failure: nlmsg_failure: kfree_skb(skb); return -1; } static int xfrm_notify_policy_flush(const struct km_event *c) { struct net *net = c->net; struct nlmsghdr *nlh; struct sk_buff *skb; skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); if (nlh == NULL) goto nlmsg_failure; if (copy_to_user_policy_type(c->data.type, skb) < 0) goto nlmsg_failure; nlmsg_end(skb, nlh); return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); nlmsg_failure: kfree_skb(skb); return -1; } static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) { switch (c->event) { case XFRM_MSG_NEWPOLICY: case XFRM_MSG_UPDPOLICY: case XFRM_MSG_DELPOLICY: return xfrm_notify_policy(xp, dir, c); case XFRM_MSG_FLUSHPOLICY: return xfrm_notify_policy_flush(c); case XFRM_MSG_POLEXPIRE: return xfrm_exp_policy_notify(xp, dir, c); default: printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", c->event); } return 0; } static inline size_t xfrm_report_msgsize(void) { return NLMSG_ALIGN(sizeof(struct xfrm_user_report)); } static int build_report(struct sk_buff *skb, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr) { struct xfrm_user_report *ur; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0); if (nlh == NULL) return -EMSGSIZE; ur = nlmsg_data(nlh); ur->proto = proto; memcpy(&ur->sel, sel, sizeof(ur->sel)); if (addr) NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr); return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int xfrm_send_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr) { struct sk_buff *skb; skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; if (build_report(skb, proto, sel, addr) < 0) 
BUG(); return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC); } static inline size_t xfrm_mapping_msgsize(void) { return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping)); } static int build_mapping(struct sk_buff *skb, struct xfrm_state *x, xfrm_address_t *new_saddr, __be16 new_sport) { struct xfrm_user_mapping *um; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0); if (nlh == NULL) return -EMSGSIZE; um = nlmsg_data(nlh); memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr)); um->id.spi = x->id.spi; um->id.family = x->props.family; um->id.proto = x->id.proto; memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr)); memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr)); um->new_sport = new_sport; um->old_sport = x->encap->encap_sport; um->reqid = x->props.reqid; return nlmsg_end(skb, nlh); } static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) { struct net *net = xs_net(x); struct sk_buff *skb; if (x->id.proto != IPPROTO_ESP) return -EINVAL; if (!x->encap) return -EINVAL; skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; if (build_mapping(skb, x, ipaddr, sport) < 0) BUG(); return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC); } static struct xfrm_mgr netlink_mgr = { .id = "netlink", .notify = xfrm_send_state_notify, .acquire = xfrm_send_acquire, .compile_policy = xfrm_compile_policy, .notify_policy = xfrm_send_policy_notify, .report = xfrm_send_report, .migrate = xfrm_send_migrate, .new_mapping = xfrm_send_mapping, }; static int __net_init xfrm_user_net_init(struct net *net) { struct sock *nlsk; nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX, xfrm_netlink_rcv, NULL, THIS_MODULE); if (nlsk == NULL) return -ENOMEM; net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */ rcu_assign_pointer(net->xfrm.nlsk, nlsk); return 0; } static void __net_exit xfrm_user_net_exit(struct list_head 
*net_exit_list) { struct net *net; list_for_each_entry(net, net_exit_list, exit_list) rcu_assign_pointer(net->xfrm.nlsk, NULL); synchronize_net(); list_for_each_entry(net, net_exit_list, exit_list) netlink_kernel_release(net->xfrm.nlsk_stash); } static struct pernet_operations xfrm_user_net_ops = { .init = xfrm_user_net_init, .exit_batch = xfrm_user_net_exit, }; static int __init xfrm_user_init(void) { int rv; printk(KERN_INFO "Initializing XFRM netlink socket\n"); rv = register_pernet_subsys(&xfrm_user_net_ops); if (rv < 0) return rv; rv = xfrm_register_km(&netlink_mgr); if (rv < 0) unregister_pernet_subsys(&xfrm_user_net_ops); return rv; } static void __exit xfrm_user_exit(void) { xfrm_unregister_km(&netlink_mgr); unregister_pernet_subsys(&xfrm_user_net_ops); } module_init(xfrm_user_init); module_exit(xfrm_user_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
gpl-2.0
Mantoui/kernel_motorola_msm8960-common
drivers/platform/x86/asus-laptop.c
653
40665
/* * asus-laptop.c - Asus Laptop Support * * * Copyright (C) 2002-2005 Julien Lerouge, 2003-2006 Karol Kozimor * Copyright (C) 2006-2007 Corentin Chary * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * The development page for this driver is located at * http://sourceforge.net/projects/acpi4asus/ * * Credits: * Pontus Fuchs - Helper functions, cleanup * Johann Wiesner - Small compile fixes * John Belmonte - ACPI code for Toshiba laptop was a good starting point. 
* Eric Burghard - LED display support for W1N * Josh Green - Light Sens support * Thomas Tuttle - His first patch for led support was very helpful * Sam Lin - GPS support */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/err.h> #include <linux/proc_fs.h> #include <linux/backlight.h> #include <linux/fb.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/rfkill.h> #include <linux/slab.h> #include <linux/dmi.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_bus.h> #define ASUS_LAPTOP_VERSION "0.42" #define ASUS_LAPTOP_NAME "Asus Laptop Support" #define ASUS_LAPTOP_CLASS "hotkey" #define ASUS_LAPTOP_DEVICE_NAME "Hotkey" #define ASUS_LAPTOP_FILE KBUILD_MODNAME #define ASUS_LAPTOP_PREFIX "\\_SB.ATKD." MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary"); MODULE_DESCRIPTION(ASUS_LAPTOP_NAME); MODULE_LICENSE("GPL"); /* * WAPF defines the behavior of the Fn+Fx wlan key * The significance of values is yet to be found, but * most of the time: * 0x0 will do nothing * 0x1 will allow to control the device with Fn+Fx key. * 0x4 will send an ACPI event (0x88) while pressing the Fn+Fx key * 0x5 like 0x1 or 0x4 * So, if something doesn't work as you want, just try other values =) */ static uint wapf = 1; module_param(wapf, uint, 0444); MODULE_PARM_DESC(wapf, "WAPF value"); static int wlan_status = 1; static int bluetooth_status = 1; static int wimax_status = -1; static int wwan_status = -1; module_param(wlan_status, int, 0444); MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot " "(0 = disabled, 1 = enabled, -1 = don't do anything). " "default is 1"); module_param(bluetooth_status, int, 0444); MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " "(0 = disabled, 1 = enabled, -1 = don't do anything). 
" "default is 1"); module_param(wimax_status, int, 0444); MODULE_PARM_DESC(wimax_status, "Set the wireless status on boot " "(0 = disabled, 1 = enabled, -1 = don't do anything). " "default is 1"); module_param(wwan_status, int, 0444); MODULE_PARM_DESC(wwan_status, "Set the wireless status on boot " "(0 = disabled, 1 = enabled, -1 = don't do anything). " "default is 1"); /* * Some events we use, same for all Asus */ #define ATKD_BR_UP 0x10 /* (event & ~ATKD_BR_UP) = brightness level */ #define ATKD_BR_DOWN 0x20 /* (event & ~ATKD_BR_DOWN) = britghness level */ #define ATKD_BR_MIN ATKD_BR_UP #define ATKD_BR_MAX (ATKD_BR_DOWN | 0xF) /* 0x2f */ #define ATKD_LCD_ON 0x33 #define ATKD_LCD_OFF 0x34 /* * Known bits returned by \_SB.ATKD.HWRS */ #define WL_HWRS 0x80 #define BT_HWRS 0x100 /* * Flags for hotk status * WL_ON and BT_ON are also used for wireless_status() */ #define WL_RSTS 0x01 /* internal Wifi */ #define BT_RSTS 0x02 /* internal Bluetooth */ #define WM_RSTS 0x08 /* internal wimax */ #define WW_RSTS 0x20 /* internal wwan */ /* LED */ #define METHOD_MLED "MLED" #define METHOD_TLED "TLED" #define METHOD_RLED "RLED" /* W1JC */ #define METHOD_PLED "PLED" /* A7J */ #define METHOD_GLED "GLED" /* G1, G2 (probably) */ /* LEDD */ #define METHOD_LEDD "SLCM" /* * Bluetooth and WLAN * WLED and BLED are not handled like other XLED, because in some dsdt * they also control the WLAN/Bluetooth device. 
*/ #define METHOD_WLAN "WLED" #define METHOD_BLUETOOTH "BLED" /* WWAN and WIMAX */ #define METHOD_WWAN "GSMC" #define METHOD_WIMAX "WMXC" #define METHOD_WL_STATUS "RSTS" /* Brightness */ #define METHOD_BRIGHTNESS_SET "SPLV" #define METHOD_BRIGHTNESS_GET "GPLV" /* Display */ #define METHOD_SWITCH_DISPLAY "SDSP" #define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */ #define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */ /* GPS */ /* R2H use different handle for GPS on/off */ #define METHOD_GPS_ON "SDON" #define METHOD_GPS_OFF "SDOF" #define METHOD_GPS_STATUS "GPST" /* Keyboard light */ #define METHOD_KBD_LIGHT_SET "SLKB" #define METHOD_KBD_LIGHT_GET "GLKB" /* * Define a specific led structure to keep the main structure clean */ struct asus_led { int wk; struct work_struct work; struct led_classdev led; struct asus_laptop *asus; const char *method; }; /* * This is the main structure, we can use it to store anything interesting * about the hotk device */ struct asus_laptop { char *name; /* laptop name */ struct acpi_table_header *dsdt_info; struct platform_device *platform_device; struct acpi_device *device; /* the device we are in */ struct backlight_device *backlight_device; struct input_dev *inputdev; struct key_entry *keymap; struct asus_led mled; struct asus_led tled; struct asus_led rled; struct asus_led pled; struct asus_led gled; struct asus_led kled; struct workqueue_struct *led_workqueue; int wireless_status; bool have_rsts; struct rfkill *gps_rfkill; acpi_handle handle; /* the handle of the hotk device */ u32 ledd_status; /* status of the LED display */ u8 light_level; /* light sensor level */ u8 light_switch; /* light sensor switch value */ u16 event_count[128]; /* count for each event TODO make this better */ }; static const struct key_entry asus_keymap[] = { /* Lenovo SL Specific keycodes */ {KE_KEY, 0x02, { KEY_SCREENLOCK } }, {KE_KEY, 0x05, { KEY_WLAN } }, {KE_KEY, 0x08, { KEY_F13 } }, {KE_KEY, 0x17, { KEY_ZOOM } }, {KE_KEY, 0x1f, { KEY_BATTERY } }, /* End of 
Lenovo SL Specific keycodes */ {KE_KEY, 0x30, { KEY_VOLUMEUP } }, {KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, {KE_KEY, 0x32, { KEY_MUTE } }, {KE_KEY, 0x33, { KEY_SWITCHVIDEOMODE } }, {KE_KEY, 0x34, { KEY_SWITCHVIDEOMODE } }, {KE_KEY, 0x40, { KEY_PREVIOUSSONG } }, {KE_KEY, 0x41, { KEY_NEXTSONG } }, {KE_KEY, 0x43, { KEY_STOPCD } }, {KE_KEY, 0x45, { KEY_PLAYPAUSE } }, {KE_KEY, 0x4c, { KEY_MEDIA } }, {KE_KEY, 0x50, { KEY_EMAIL } }, {KE_KEY, 0x51, { KEY_WWW } }, {KE_KEY, 0x55, { KEY_CALC } }, {KE_KEY, 0x5C, { KEY_SCREENLOCK } }, /* Screenlock */ {KE_KEY, 0x5D, { KEY_WLAN } }, {KE_KEY, 0x5E, { KEY_WLAN } }, {KE_KEY, 0x5F, { KEY_WLAN } }, {KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } }, {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, {KE_KEY, 0x6B, { KEY_F13 } }, /* Lock Touchpad */ {KE_KEY, 0x7E, { KEY_BLUETOOTH } }, {KE_KEY, 0x7D, { KEY_BLUETOOTH } }, {KE_KEY, 0x82, { KEY_CAMERA } }, {KE_KEY, 0x88, { KEY_WLAN } }, {KE_KEY, 0x8A, { KEY_PROG1 } }, {KE_KEY, 0x95, { KEY_MEDIA } }, {KE_KEY, 0x99, { KEY_PHONE } }, {KE_KEY, 0xc4, { KEY_KBDILLUMUP } }, {KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } }, {KE_KEY, 0xb5, { KEY_CALC } }, {KE_END, 0}, }; /* * This function evaluates an ACPI method, given an int as parameter, the * method is searched within the scope of the handle, can be NULL. The output * of the method is written is output, which can also be NULL * * returns 0 if write is successful, -1 else. 
*/ static int write_acpi_int_ret(acpi_handle handle, const char *method, int val, struct acpi_buffer *output) { struct acpi_object_list params; /* list of input parameters (an int) */ union acpi_object in_obj; /* the only param we use */ acpi_status status; if (!handle) return -1; params.count = 1; params.pointer = &in_obj; in_obj.type = ACPI_TYPE_INTEGER; in_obj.integer.value = val; status = acpi_evaluate_object(handle, (char *)method, &params, output); if (status == AE_OK) return 0; else return -1; } static int write_acpi_int(acpi_handle handle, const char *method, int val) { return write_acpi_int_ret(handle, method, val, NULL); } static int acpi_check_handle(acpi_handle handle, const char *method, acpi_handle *ret) { acpi_status status; if (method == NULL) return -ENODEV; if (ret) status = acpi_get_handle(handle, (char *)method, ret); else { acpi_handle dummy; status = acpi_get_handle(handle, (char *)method, &dummy); } if (status != AE_OK) { if (ret) pr_warn("Error finding %s\n", method); return -ENODEV; } return 0; } /* Generic LED function */ static int asus_led_set(struct asus_laptop *asus, const char *method, int value) { if (!strcmp(method, METHOD_MLED)) value = !value; else if (!strcmp(method, METHOD_GLED)) value = !value + 1; else value = !!value; return write_acpi_int(asus->handle, method, value); } /* * LEDs */ /* /sys/class/led handlers */ static void asus_led_cdev_set(struct led_classdev *led_cdev, enum led_brightness value) { struct asus_led *led = container_of(led_cdev, struct asus_led, led); struct asus_laptop *asus = led->asus; led->wk = !!value; queue_work(asus->led_workqueue, &led->work); } static void asus_led_cdev_update(struct work_struct *work) { struct asus_led *led = container_of(work, struct asus_led, work); struct asus_laptop *asus = led->asus; asus_led_set(asus, led->method, led->wk); } static enum led_brightness asus_led_cdev_get(struct led_classdev *led_cdev) { return led_cdev->brightness; } /* * Keyboard backlight (also a LED) */ 
static int asus_kled_lvl(struct asus_laptop *asus) { unsigned long long kblv; struct acpi_object_list params; union acpi_object in_obj; acpi_status rv; params.count = 1; params.pointer = &in_obj; in_obj.type = ACPI_TYPE_INTEGER; in_obj.integer.value = 2; rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET, &params, &kblv); if (ACPI_FAILURE(rv)) { pr_warn("Error reading kled level\n"); return -ENODEV; } return kblv; } static int asus_kled_set(struct asus_laptop *asus, int kblv) { if (kblv > 0) kblv = (1 << 7) | (kblv & 0x7F); else kblv = 0; if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) { pr_warn("Keyboard LED display write failed\n"); return -EINVAL; } return 0; } static void asus_kled_cdev_set(struct led_classdev *led_cdev, enum led_brightness value) { struct asus_led *led = container_of(led_cdev, struct asus_led, led); struct asus_laptop *asus = led->asus; led->wk = value; queue_work(asus->led_workqueue, &led->work); } static void asus_kled_cdev_update(struct work_struct *work) { struct asus_led *led = container_of(work, struct asus_led, work); struct asus_laptop *asus = led->asus; asus_kled_set(asus, led->wk); } static enum led_brightness asus_kled_cdev_get(struct led_classdev *led_cdev) { struct asus_led *led = container_of(led_cdev, struct asus_led, led); struct asus_laptop *asus = led->asus; return asus_kled_lvl(asus); } static void asus_led_exit(struct asus_laptop *asus) { if (asus->mled.led.dev) led_classdev_unregister(&asus->mled.led); if (asus->tled.led.dev) led_classdev_unregister(&asus->tled.led); if (asus->pled.led.dev) led_classdev_unregister(&asus->pled.led); if (asus->rled.led.dev) led_classdev_unregister(&asus->rled.led); if (asus->gled.led.dev) led_classdev_unregister(&asus->gled.led); if (asus->kled.led.dev) led_classdev_unregister(&asus->kled.led); if (asus->led_workqueue) { destroy_workqueue(asus->led_workqueue); asus->led_workqueue = NULL; } } /* Ugly macro, need to fix that later */ static int asus_led_register(struct 
asus_laptop *asus, struct asus_led *led, const char *name, const char *method) { struct led_classdev *led_cdev = &led->led; if (!method || acpi_check_handle(asus->handle, method, NULL)) return 0; /* Led not present */ led->asus = asus; led->method = method; INIT_WORK(&led->work, asus_led_cdev_update); led_cdev->name = name; led_cdev->brightness_set = asus_led_cdev_set; led_cdev->brightness_get = asus_led_cdev_get; led_cdev->max_brightness = 1; return led_classdev_register(&asus->platform_device->dev, led_cdev); } static int asus_led_init(struct asus_laptop *asus) { int r; /* * Functions that actually update the LED's are called from a * workqueue. By doing this as separate work rather than when the LED * subsystem asks, we avoid messing with the Asus ACPI stuff during a * potentially bad time, such as a timer interrupt. */ asus->led_workqueue = create_singlethread_workqueue("led_workqueue"); if (!asus->led_workqueue) return -ENOMEM; r = asus_led_register(asus, &asus->mled, "asus::mail", METHOD_MLED); if (r) goto error; r = asus_led_register(asus, &asus->tled, "asus::touchpad", METHOD_TLED); if (r) goto error; r = asus_led_register(asus, &asus->rled, "asus::record", METHOD_RLED); if (r) goto error; r = asus_led_register(asus, &asus->pled, "asus::phone", METHOD_PLED); if (r) goto error; r = asus_led_register(asus, &asus->gled, "asus::gaming", METHOD_GLED); if (r) goto error; if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL) && !acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_GET, NULL)) { struct asus_led *led = &asus->kled; struct led_classdev *cdev = &led->led; led->asus = asus; INIT_WORK(&led->work, asus_kled_cdev_update); cdev->name = "asus::kbd_backlight"; cdev->brightness_set = asus_kled_cdev_set; cdev->brightness_get = asus_kled_cdev_get; cdev->max_brightness = 3; r = led_classdev_register(&asus->platform_device->dev, cdev); } error: if (r) asus_led_exit(asus); return r; } /* * Backlight device */ static int asus_read_brightness(struct 
backlight_device *bd) { struct asus_laptop *asus = bl_get_data(bd); unsigned long long value; acpi_status rv = AE_OK; rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET, NULL, &value); if (ACPI_FAILURE(rv)) pr_warn("Error reading brightness\n"); return value; } static int asus_set_brightness(struct backlight_device *bd, int value) { struct asus_laptop *asus = bl_get_data(bd); if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) { pr_warn("Error changing brightness\n"); return -EIO; } return 0; } static int update_bl_status(struct backlight_device *bd) { int value = bd->props.brightness; return asus_set_brightness(bd, value); } static const struct backlight_ops asusbl_ops = { .get_brightness = asus_read_brightness, .update_status = update_bl_status, }; static int asus_backlight_notify(struct asus_laptop *asus) { struct backlight_device *bd = asus->backlight_device; int old = bd->props.brightness; backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY); return old; } static int asus_backlight_init(struct asus_laptop *asus) { struct backlight_device *bd; struct backlight_properties props; if (acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) || acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL)) return 0; memset(&props, 0, sizeof(struct backlight_properties)); props.max_brightness = 15; props.type = BACKLIGHT_PLATFORM; bd = backlight_device_register(ASUS_LAPTOP_FILE, &asus->platform_device->dev, asus, &asusbl_ops, &props); if (IS_ERR(bd)) { pr_err("Could not register asus backlight device\n"); asus->backlight_device = NULL; return PTR_ERR(bd); } asus->backlight_device = bd; bd->props.brightness = asus_read_brightness(bd); bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); return 0; } static void asus_backlight_exit(struct asus_laptop *asus) { if (asus->backlight_device) backlight_device_unregister(asus->backlight_device); asus->backlight_device = NULL; } /* * Platform device handlers */ /* * We write our info in 
page, we begin at offset off and cannot write more * than count bytes. We set eof to 1 if we handle those 2 values. We return the * number of bytes written in page */ static ssize_t show_infos(struct device *dev, struct device_attribute *attr, char *page) { struct asus_laptop *asus = dev_get_drvdata(dev); int len = 0; unsigned long long temp; char buf[16]; /* enough for all info */ acpi_status rv = AE_OK; /* * We use the easy way, we don't care of off and count, * so we don't set eof to 1 */ len += sprintf(page, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n"); len += sprintf(page + len, "Model reference : %s\n", asus->name); /* * The SFUN method probably allows the original driver to get the list * of features supported by a given model. For now, 0x0100 or 0x0800 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card. * The significance of others is yet to be found. */ rv = acpi_evaluate_integer(asus->handle, "SFUN", NULL, &temp); if (!ACPI_FAILURE(rv)) len += sprintf(page + len, "SFUN value : %#x\n", (uint) temp); /* * The HWRS method return informations about the hardware. * 0x80 bit is for WLAN, 0x100 for Bluetooth. * 0x40 for WWAN, 0x10 for WIMAX. * The significance of others is yet to be found. * We don't currently use this for device detection, and it * takes several seconds to run on some systems. */ rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp); if (!ACPI_FAILURE(rv)) len += sprintf(page + len, "HWRS value : %#x\n", (uint) temp); /* * Another value for userspace: the ASYM method returns 0x02 for * battery low and 0x04 for battery critical, its readings tend to be * more accurate than those provided by _BST. * Note: since not all the laptops provide this method, errors are * silently ignored. 
*/ rv = acpi_evaluate_integer(asus->handle, "ASYM", NULL, &temp); if (!ACPI_FAILURE(rv)) len += sprintf(page + len, "ASYM value : %#x\n", (uint) temp); if (asus->dsdt_info) { snprintf(buf, 16, "%d", asus->dsdt_info->length); len += sprintf(page + len, "DSDT length : %s\n", buf); snprintf(buf, 16, "%d", asus->dsdt_info->checksum); len += sprintf(page + len, "DSDT checksum : %s\n", buf); snprintf(buf, 16, "%d", asus->dsdt_info->revision); len += sprintf(page + len, "DSDT revision : %s\n", buf); snprintf(buf, 7, "%s", asus->dsdt_info->oem_id); len += sprintf(page + len, "OEM id : %s\n", buf); snprintf(buf, 9, "%s", asus->dsdt_info->oem_table_id); len += sprintf(page + len, "OEM table id : %s\n", buf); snprintf(buf, 16, "%x", asus->dsdt_info->oem_revision); len += sprintf(page + len, "OEM revision : 0x%s\n", buf); snprintf(buf, 5, "%s", asus->dsdt_info->asl_compiler_id); len += sprintf(page + len, "ASL comp vendor id : %s\n", buf); snprintf(buf, 16, "%x", asus->dsdt_info->asl_compiler_revision); len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf); } return len; } static int parse_arg(const char *buf, unsigned long count, int *val) { if (!count) return 0; if (count > 31) return -EINVAL; if (sscanf(buf, "%i", val) != 1) return -EINVAL; return count; } static ssize_t sysfs_acpi_set(struct asus_laptop *asus, const char *buf, size_t count, const char *method) { int rv, value; int out = 0; rv = parse_arg(buf, count, &value); if (rv > 0) out = value ? 
1 : 0; if (write_acpi_int(asus->handle, method, value)) return -ENODEV; return rv; } /* * LEDD display */ static ssize_t show_ledd(struct device *dev, struct device_attribute *attr, char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "0x%08x\n", asus->ledd_status); } static ssize_t store_ledd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; rv = parse_arg(buf, count, &value); if (rv > 0) { if (write_acpi_int(asus->handle, METHOD_LEDD, value)) { pr_warn("LED display write failed\n"); return -ENODEV; } asus->ledd_status = (u32) value; } return rv; } /* * Wireless */ static int asus_wireless_status(struct asus_laptop *asus, int mask) { unsigned long long status; acpi_status rv = AE_OK; if (!asus->have_rsts) return (asus->wireless_status & mask) ? 1 : 0; rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS, NULL, &status); if (ACPI_FAILURE(rv)) { pr_warn("Error reading Wireless status\n"); return -EINVAL; } return !!(status & mask); } /* * WLAN */ static int asus_wlan_set(struct asus_laptop *asus, int status) { if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) { pr_warn("Error setting wlan status to %d\n", status); return -EIO; } return 0; } static ssize_t show_wlan(struct device *dev, struct device_attribute *attr, char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS)); } static ssize_t store_wlan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); return sysfs_acpi_set(asus, buf, count, METHOD_WLAN); } /* * Bluetooth */ static int asus_bluetooth_set(struct asus_laptop *asus, int status) { if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) { pr_warn("Error setting bluetooth status to %d\n", status); return -EIO; } return 0; } static ssize_t show_bluetooth(struct device 
*dev, struct device_attribute *attr, char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS)); } static ssize_t store_bluetooth(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH); } /* * Wimax */ static int asus_wimax_set(struct asus_laptop *asus, int status) { if (write_acpi_int(asus->handle, METHOD_WIMAX, !!status)) { pr_warn("Error setting wimax status to %d\n", status); return -EIO; } return 0; } static ssize_t show_wimax(struct device *dev, struct device_attribute *attr, char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS)); } static ssize_t store_wimax(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX); } /* * Wwan */ static int asus_wwan_set(struct asus_laptop *asus, int status) { if (write_acpi_int(asus->handle, METHOD_WWAN, !!status)) { pr_warn("Error setting wwan status to %d\n", status); return -EIO; } return 0; } static ssize_t show_wwan(struct device *dev, struct device_attribute *attr, char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS)); } static ssize_t store_wwan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); return sysfs_acpi_set(asus, buf, count, METHOD_WWAN); } /* * Display */ static void asus_set_display(struct asus_laptop *asus, int value) { /* no sanity check needed for now */ if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value)) pr_warn("Error setting display\n"); return; } /* * Experimental support for display switching. 
As of now: 1 should activate * the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI. * Any combination (bitwise) of these will suffice. I never actually tested 4 * displays hooked up simultaneously, so be warned. See the acpi4asus README * for more info. */ static ssize_t store_disp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; rv = parse_arg(buf, count, &value); if (rv > 0) asus_set_display(asus, value); return rv; } /* * Light Sens */ static void asus_als_switch(struct asus_laptop *asus, int value) { if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value)) pr_warn("Error setting light sensor switch\n"); asus->light_switch = value; } static ssize_t show_lssw(struct device *dev, struct device_attribute *attr, char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus->light_switch); } static ssize_t store_lssw(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; rv = parse_arg(buf, count, &value); if (rv > 0) asus_als_switch(asus, value ? 1 : 0); return rv; } static void asus_als_level(struct asus_laptop *asus, int value) { if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value)) pr_warn("Error setting light sensor level\n"); asus->light_level = value; } static ssize_t show_lslvl(struct device *dev, struct device_attribute *attr, char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus->light_level); } static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; rv = parse_arg(buf, count, &value); if (rv > 0) { value = (0 < value) ? ((15 < value) ? 
15 : value) : 0; /* 0 <= value <= 15 */ asus_als_level(asus, value); } return rv; } /* * GPS */ static int asus_gps_status(struct asus_laptop *asus) { unsigned long long status; acpi_status rv = AE_OK; rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS, NULL, &status); if (ACPI_FAILURE(rv)) { pr_warn("Error reading GPS status\n"); return -ENODEV; } return !!status; } static int asus_gps_switch(struct asus_laptop *asus, int status) { const char *meth = status ? METHOD_GPS_ON : METHOD_GPS_OFF; if (write_acpi_int(asus->handle, meth, 0x02)) return -ENODEV; return 0; } static ssize_t show_gps(struct device *dev, struct device_attribute *attr, char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus_gps_status(asus)); } static ssize_t store_gps(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; int ret; rv = parse_arg(buf, count, &value); if (rv <= 0) return -EINVAL; ret = asus_gps_switch(asus, !!value); if (ret) return ret; rfkill_set_sw_state(asus->gps_rfkill, !value); return rv; } /* * rfkill */ static int asus_gps_rfkill_set(void *data, bool blocked) { struct asus_laptop *asus = data; return asus_gps_switch(asus, !blocked); } static const struct rfkill_ops asus_gps_rfkill_ops = { .set_block = asus_gps_rfkill_set, }; static void asus_rfkill_exit(struct asus_laptop *asus) { if (asus->gps_rfkill) { rfkill_unregister(asus->gps_rfkill); rfkill_destroy(asus->gps_rfkill); asus->gps_rfkill = NULL; } } static int asus_rfkill_init(struct asus_laptop *asus) { int result; if (acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) || acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) || acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL)) return 0; asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev, RFKILL_TYPE_GPS, &asus_gps_rfkill_ops, asus); if (!asus->gps_rfkill) return -EINVAL; result = 
rfkill_register(asus->gps_rfkill); if (result) { rfkill_destroy(asus->gps_rfkill); asus->gps_rfkill = NULL; } return result; } /* * Input device (i.e. hotkeys) */ static void asus_input_notify(struct asus_laptop *asus, int event) { if (asus->inputdev) sparse_keymap_report_event(asus->inputdev, event, 1, true); } static int asus_input_init(struct asus_laptop *asus) { struct input_dev *input; int error; input = input_allocate_device(); if (!input) { pr_info("Unable to allocate input device\n"); return -ENOMEM; } input->name = "Asus Laptop extra buttons"; input->phys = ASUS_LAPTOP_FILE "/input0"; input->id.bustype = BUS_HOST; input->dev.parent = &asus->platform_device->dev; error = sparse_keymap_setup(input, asus_keymap, NULL); if (error) { pr_err("Unable to setup input device keymap\n"); goto err_free_dev; } error = input_register_device(input); if (error) { pr_info("Unable to register input device\n"); goto err_free_keymap; } asus->inputdev = input; return 0; err_free_keymap: sparse_keymap_free(input); err_free_dev: input_free_device(input); return error; } static void asus_input_exit(struct asus_laptop *asus) { if (asus->inputdev) { sparse_keymap_free(asus->inputdev); input_unregister_device(asus->inputdev); } asus->inputdev = NULL; } /* * ACPI driver */ static void asus_acpi_notify(struct acpi_device *device, u32 event) { struct asus_laptop *asus = acpi_driver_data(device); u16 count; /* TODO Find a better way to handle events count. */ count = asus->event_count[event % 128]++; acpi_bus_generate_proc_event(asus->device, event, count); acpi_bus_generate_netlink_event(asus->device->pnp.device_class, dev_name(&asus->device->dev), event, count); /* Brightness events are special */ if (event >= ATKD_BR_MIN && event <= ATKD_BR_MAX) { /* Ignore them completely if the acpi video driver is used */ if (asus->backlight_device != NULL) { /* Update the backlight device. 
*/ asus_backlight_notify(asus); } return ; } asus_input_notify(asus, event); } static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL); static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan); static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, show_bluetooth, store_bluetooth); static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax); static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan); static DEVICE_ATTR(display, S_IWUSR, NULL, store_disp); static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd); static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl); static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw); static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps); static struct attribute *asus_attributes[] = { &dev_attr_infos.attr, &dev_attr_wlan.attr, &dev_attr_bluetooth.attr, &dev_attr_wimax.attr, &dev_attr_wwan.attr, &dev_attr_display.attr, &dev_attr_ledd.attr, &dev_attr_ls_level.attr, &dev_attr_ls_switch.attr, &dev_attr_gps.attr, NULL }; static mode_t asus_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct device *dev = container_of(kobj, struct device, kobj); struct platform_device *pdev = to_platform_device(dev); struct asus_laptop *asus = platform_get_drvdata(pdev); acpi_handle handle = asus->handle; bool supported; if (attr == &dev_attr_wlan.attr) { supported = !acpi_check_handle(handle, METHOD_WLAN, NULL); } else if (attr == &dev_attr_bluetooth.attr) { supported = !acpi_check_handle(handle, METHOD_BLUETOOTH, NULL); } else if (attr == &dev_attr_display.attr) { supported = !acpi_check_handle(handle, METHOD_SWITCH_DISPLAY, NULL); } else if (attr == &dev_attr_wimax.attr) { supported = !acpi_check_handle(asus->handle, METHOD_WIMAX, NULL); } else if (attr == &dev_attr_wwan.attr) { supported = !acpi_check_handle(asus->handle, METHOD_WWAN, NULL); } else if (attr == &dev_attr_ledd.attr) { supported = !acpi_check_handle(handle, METHOD_LEDD, NULL); } 
else if (attr == &dev_attr_ls_switch.attr || attr == &dev_attr_ls_level.attr) { supported = !acpi_check_handle(handle, METHOD_ALS_CONTROL, NULL) && !acpi_check_handle(handle, METHOD_ALS_LEVEL, NULL); } else if (attr == &dev_attr_gps.attr) { supported = !acpi_check_handle(handle, METHOD_GPS_ON, NULL) && !acpi_check_handle(handle, METHOD_GPS_OFF, NULL) && !acpi_check_handle(handle, METHOD_GPS_STATUS, NULL); } else { supported = true; } return supported ? attr->mode : 0; } static const struct attribute_group asus_attr_group = { .is_visible = asus_sysfs_is_visible, .attrs = asus_attributes, }; static int asus_platform_init(struct asus_laptop *asus) { int result; asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, -1); if (!asus->platform_device) return -ENOMEM; platform_set_drvdata(asus->platform_device, asus); result = platform_device_add(asus->platform_device); if (result) goto fail_platform_device; result = sysfs_create_group(&asus->platform_device->dev.kobj, &asus_attr_group); if (result) goto fail_sysfs; return 0; fail_sysfs: platform_device_del(asus->platform_device); fail_platform_device: platform_device_put(asus->platform_device); return result; } static void asus_platform_exit(struct asus_laptop *asus) { sysfs_remove_group(&asus->platform_device->dev.kobj, &asus_attr_group); platform_device_unregister(asus->platform_device); } static struct platform_driver platform_driver = { .driver = { .name = ASUS_LAPTOP_FILE, .owner = THIS_MODULE, } }; /* * This function is used to initialize the context with right values. 
In this * method, we can make all the detection we want, and modify the asus_laptop * struct */ static int asus_laptop_get_info(struct asus_laptop *asus) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *model = NULL; unsigned long long bsts_result; char *string = NULL; acpi_status status; /* * Get DSDT headers early enough to allow for differentiating between * models, but late enough to allow acpi_bus_register_driver() to fail * before doing anything ACPI-specific. Should we encounter a machine, * which needs special handling (i.e. its hotkey device has a different * HID), this bit will be moved. */ status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info); if (ACPI_FAILURE(status)) pr_warn("Couldn't get the DSDT table header\n"); /* We have to write 0 on init this far for all ASUS models */ if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) { pr_err("Hotkey initialization failed\n"); return -ENODEV; } /* This needs to be called for some laptops to init properly */ status = acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result); if (ACPI_FAILURE(status)) pr_warn("Error calling BSTS\n"); else if (bsts_result) pr_notice("BSTS called, 0x%02x returned\n", (uint) bsts_result); /* This too ... */ if (write_acpi_int(asus->handle, "CWAP", wapf)) pr_err("Error calling CWAP(%d)\n", wapf); /* * Try to match the object returned by INIT to the specific model. * Handle every possible object (or the lack of thereof) the DSDT * writers might throw at us. When in trouble, we pass NULL to * asus_model_match() and try something completely different. 
*/ if (buffer.pointer) { model = buffer.pointer; switch (model->type) { case ACPI_TYPE_STRING: string = model->string.pointer; break; case ACPI_TYPE_BUFFER: string = model->buffer.pointer; break; default: string = ""; break; } } asus->name = kstrdup(string, GFP_KERNEL); if (!asus->name) { kfree(buffer.pointer); return -ENOMEM; } if (*string) pr_notice(" %s model detected\n", string); if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL)) asus->have_rsts = true; kfree(model); return AE_OK; } static int __devinit asus_acpi_init(struct asus_laptop *asus) { int result = 0; result = acpi_bus_get_status(asus->device); if (result) return result; if (!asus->device->status.present) { pr_err("Hotkey device not present, aborting\n"); return -ENODEV; } result = asus_laptop_get_info(asus); if (result) return result; /* WLED and BLED are on by default */ if (bluetooth_status >= 0) asus_bluetooth_set(asus, !!bluetooth_status); if (wlan_status >= 0) asus_wlan_set(asus, !!wlan_status); if (wimax_status >= 0) asus_wimax_set(asus, !!wimax_status); if (wwan_status >= 0) asus_wwan_set(asus, !!wwan_status); /* Keyboard Backlight is on by default */ if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL)) asus_kled_set(asus, 1); /* LED display is off by default */ asus->ledd_status = 0xFFF; /* Set initial values of light sensor and level */ asus->light_switch = 0; /* Default to light sensor disabled */ asus->light_level = 5; /* level 5 for sensor sensitivity */ if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) && !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) { asus_als_switch(asus, asus->light_switch); asus_als_level(asus, asus->light_level); } return result; } static void __devinit asus_dmi_check(void) { const char *model; model = dmi_get_system_info(DMI_PRODUCT_NAME); if (!model) return; /* On L1400B WLED control the sound card, don't mess with it ... 
*/ if (strncmp(model, "L1400B", 6) == 0) { wlan_status = -1; } } static bool asus_device_present; static int __devinit asus_acpi_add(struct acpi_device *device) { struct asus_laptop *asus; int result; pr_notice("Asus Laptop Support version %s\n", ASUS_LAPTOP_VERSION); asus = kzalloc(sizeof(struct asus_laptop), GFP_KERNEL); if (!asus) return -ENOMEM; asus->handle = device->handle; strcpy(acpi_device_name(device), ASUS_LAPTOP_DEVICE_NAME); strcpy(acpi_device_class(device), ASUS_LAPTOP_CLASS); device->driver_data = asus; asus->device = device; asus_dmi_check(); result = asus_acpi_init(asus); if (result) goto fail_platform; /* * Register the platform device first. It is used as a parent for the * sub-devices below. */ result = asus_platform_init(asus); if (result) goto fail_platform; if (!acpi_video_backlight_support()) { result = asus_backlight_init(asus); if (result) goto fail_backlight; } else pr_info("Backlight controlled by ACPI video driver\n"); result = asus_input_init(asus); if (result) goto fail_input; result = asus_led_init(asus); if (result) goto fail_led; result = asus_rfkill_init(asus); if (result) goto fail_rfkill; asus_device_present = true; return 0; fail_rfkill: asus_led_exit(asus); fail_led: asus_input_exit(asus); fail_input: asus_backlight_exit(asus); fail_backlight: asus_platform_exit(asus); fail_platform: kfree(asus->name); kfree(asus); return result; } static int asus_acpi_remove(struct acpi_device *device, int type) { struct asus_laptop *asus = acpi_driver_data(device); asus_backlight_exit(asus); asus_rfkill_exit(asus); asus_led_exit(asus); asus_input_exit(asus); asus_platform_exit(asus); kfree(asus->name); kfree(asus); return 0; } static const struct acpi_device_id asus_device_ids[] = { {"ATK0100", 0}, {"ATK0101", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, asus_device_ids); static struct acpi_driver asus_acpi_driver = { .name = ASUS_LAPTOP_NAME, .class = ASUS_LAPTOP_CLASS, .owner = THIS_MODULE, .ids = asus_device_ids, .flags = 
ACPI_DRIVER_ALL_NOTIFY_EVENTS, .ops = { .add = asus_acpi_add, .remove = asus_acpi_remove, .notify = asus_acpi_notify, }, }; static int __init asus_laptop_init(void) { int result; result = platform_driver_register(&platform_driver); if (result < 0) return result; result = acpi_bus_register_driver(&asus_acpi_driver); if (result < 0) goto fail_acpi_driver; if (!asus_device_present) { result = -ENODEV; goto fail_no_device; } return 0; fail_no_device: acpi_bus_unregister_driver(&asus_acpi_driver); fail_acpi_driver: platform_driver_unregister(&platform_driver); return result; } static void __exit asus_laptop_exit(void) { acpi_bus_unregister_driver(&asus_acpi_driver); platform_driver_unregister(&platform_driver); } module_init(asus_laptop_init); module_exit(asus_laptop_exit);
gpl-2.0
defer/semc-kernel-msm8x60
arch/arm/mach-davinci/irq.c
909
4437
/* * Interrupt handler for DaVinci boards. * * Copyright (C) 2006 Texas Instruments. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/cputype.h> #include <mach/common.h> #include <asm/mach/irq.h> #define IRQ_BIT(irq) ((irq) & 0x1f) #define FIQ_REG0_OFFSET 0x0000 #define FIQ_REG1_OFFSET 0x0004 #define IRQ_REG0_OFFSET 0x0008 #define IRQ_REG1_OFFSET 0x000C #define IRQ_ENT_REG0_OFFSET 0x0018 #define IRQ_ENT_REG1_OFFSET 0x001C #define IRQ_INCTL_REG_OFFSET 0x0020 #define IRQ_EABASE_REG_OFFSET 0x0024 #define IRQ_INTPRI0_REG_OFFSET 0x0030 #define IRQ_INTPRI7_REG_OFFSET 0x004C static inline unsigned int davinci_irq_readl(int offset) { return __raw_readl(davinci_intc_base + offset); } static inline void davinci_irq_writel(unsigned long value, int offset) { __raw_writel(value, davinci_intc_base + offset); } /* Disable interrupt */ static void davinci_mask_irq(unsigned int irq) { unsigned int mask; u32 l; mask = 1 << IRQ_BIT(irq); if (irq > 31) { l = davinci_irq_readl(IRQ_ENT_REG1_OFFSET); l &= ~mask; davinci_irq_writel(l, IRQ_ENT_REG1_OFFSET); } else { l = davinci_irq_readl(IRQ_ENT_REG0_OFFSET); l &= ~mask; davinci_irq_writel(l, IRQ_ENT_REG0_OFFSET); } } /* Enable interrupt */ static 
void davinci_unmask_irq(unsigned int irq) { unsigned int mask; u32 l; mask = 1 << IRQ_BIT(irq); if (irq > 31) { l = davinci_irq_readl(IRQ_ENT_REG1_OFFSET); l |= mask; davinci_irq_writel(l, IRQ_ENT_REG1_OFFSET); } else { l = davinci_irq_readl(IRQ_ENT_REG0_OFFSET); l |= mask; davinci_irq_writel(l, IRQ_ENT_REG0_OFFSET); } } /* EOI interrupt */ static void davinci_ack_irq(unsigned int irq) { unsigned int mask; mask = 1 << IRQ_BIT(irq); if (irq > 31) davinci_irq_writel(mask, IRQ_REG1_OFFSET); else davinci_irq_writel(mask, IRQ_REG0_OFFSET); } static struct irq_chip davinci_irq_chip_0 = { .name = "AINTC", .ack = davinci_ack_irq, .mask = davinci_mask_irq, .unmask = davinci_unmask_irq, }; /* ARM Interrupt Controller Initialization */ void __init davinci_irq_init(void) { unsigned i; const u8 *davinci_def_priorities = davinci_soc_info.intc_irq_prios; davinci_intc_type = DAVINCI_INTC_TYPE_AINTC; davinci_intc_base = ioremap(davinci_soc_info.intc_base, SZ_4K); if (WARN_ON(!davinci_intc_base)) return; /* Clear all interrupt requests */ davinci_irq_writel(~0x0, FIQ_REG0_OFFSET); davinci_irq_writel(~0x0, FIQ_REG1_OFFSET); davinci_irq_writel(~0x0, IRQ_REG0_OFFSET); davinci_irq_writel(~0x0, IRQ_REG1_OFFSET); /* Disable all interrupts */ davinci_irq_writel(0x0, IRQ_ENT_REG0_OFFSET); davinci_irq_writel(0x0, IRQ_ENT_REG1_OFFSET); /* Interrupts disabled immediately, IRQ entry reflects all */ davinci_irq_writel(0x0, IRQ_INCTL_REG_OFFSET); /* we don't use the hardware vector table, just its entry addresses */ davinci_irq_writel(0, IRQ_EABASE_REG_OFFSET); /* Clear all interrupt requests */ davinci_irq_writel(~0x0, FIQ_REG0_OFFSET); davinci_irq_writel(~0x0, FIQ_REG1_OFFSET); davinci_irq_writel(~0x0, IRQ_REG0_OFFSET); davinci_irq_writel(~0x0, IRQ_REG1_OFFSET); for (i = IRQ_INTPRI0_REG_OFFSET; i <= IRQ_INTPRI7_REG_OFFSET; i += 4) { unsigned j; u32 pri; for (j = 0, pri = 0; j < 32; j += 4, davinci_def_priorities++) pri |= (*davinci_def_priorities & 0x07) << j; davinci_irq_writel(pri, i); } /* 
set up genirq dispatch for ARM INTC */ for (i = 0; i < davinci_soc_info.intc_irq_num; i++) { set_irq_chip(i, &davinci_irq_chip_0); set_irq_flags(i, IRQF_VALID | IRQF_PROBE); if (i != IRQ_TINT1_TINT34) set_irq_handler(i, handle_edge_irq); else set_irq_handler(i, handle_level_irq); } }
gpl-2.0
ErikAndren/linux
net/sched/ematch.c
1421
14961
/* * net/sched/ematch.c Extended Match API * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Thomas Graf <tgraf@suug.ch> * * ========================================================================== * * An extended match (ematch) is a small classification tool not worth * writing a full classifier for. Ematches can be interconnected to form * a logic expression and get attached to classifiers to extend their * functionatlity. * * The userspace part transforms the logic expressions into an array * consisting of multiple sequences of interconnected ematches separated * by markers. Precedence is implemented by a special ematch kind * referencing a sequence beyond the marker of the current sequence * causing the current position in the sequence to be pushed onto a stack * to allow the current position to be overwritten by the position referenced * in the special ematch. Matching continues in the new sequence until a * marker is reached causing the position to be restored from the stack. * * Example: * A AND (B1 OR B2) AND C AND D * * ------->-PUSH------- * -->-- / -->-- \ -->-- * / \ / / \ \ / \ * +-------+-------+-------+-------+-------+--------+ * | A AND | B AND | C AND | D END | B1 OR | B2 END | * +-------+-------+-------+-------+-------+--------+ * \ / * --------<-POP--------- * * where B is a virtual ematch referencing to sequence starting with B1. * * ========================================================================== * * How to write an ematch in 60 seconds * ------------------------------------ * * 1) Provide a matcher function: * static int my_match(struct sk_buff *skb, struct tcf_ematch *m, * struct tcf_pkt_info *info) * { * struct mydata *d = (struct mydata *) m->data; * * if (...matching goes here...) 
* return 1; * else * return 0; * } * * 2) Fill out a struct tcf_ematch_ops: * static struct tcf_ematch_ops my_ops = { * .kind = unique id, * .datalen = sizeof(struct mydata), * .match = my_match, * .owner = THIS_MODULE, * }; * * 3) Register/Unregister your ematch: * static int __init init_my_ematch(void) * { * return tcf_em_register(&my_ops); * } * * static void __exit exit_my_ematch(void) * { * tcf_em_unregister(&my_ops); * } * * module_init(init_my_ematch); * module_exit(exit_my_ematch); * * 4) By now you should have two more seconds left, barely enough to * open up a beer to watch the compilation going. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <net/pkt_cls.h> static LIST_HEAD(ematch_ops); static DEFINE_RWLOCK(ematch_mod_lock); static struct tcf_ematch_ops *tcf_em_lookup(u16 kind) { struct tcf_ematch_ops *e = NULL; read_lock(&ematch_mod_lock); list_for_each_entry(e, &ematch_ops, link) { if (kind == e->kind) { if (!try_module_get(e->owner)) e = NULL; read_unlock(&ematch_mod_lock); return e; } } read_unlock(&ematch_mod_lock); return NULL; } /** * tcf_em_register - register an extended match * * @ops: ematch operations lookup table * * This function must be called by ematches to announce their presence. * The given @ops must have kind set to a unique identifier and the * callback match() must be implemented. All other callbacks are optional * and a fallback implementation is used instead. * * Returns -EEXISTS if an ematch of the same kind has already registered. 
*/ int tcf_em_register(struct tcf_ematch_ops *ops) { int err = -EEXIST; struct tcf_ematch_ops *e; if (ops->match == NULL) return -EINVAL; write_lock(&ematch_mod_lock); list_for_each_entry(e, &ematch_ops, link) if (ops->kind == e->kind) goto errout; list_add_tail(&ops->link, &ematch_ops); err = 0; errout: write_unlock(&ematch_mod_lock); return err; } EXPORT_SYMBOL(tcf_em_register); /** * tcf_em_unregister - unregster and extended match * * @ops: ematch operations lookup table * * This function must be called by ematches to announce their disappearance * for examples when the module gets unloaded. The @ops parameter must be * the same as the one used for registration. * * Returns -ENOENT if no matching ematch was found. */ void tcf_em_unregister(struct tcf_ematch_ops *ops) { write_lock(&ematch_mod_lock); list_del(&ops->link); write_unlock(&ematch_mod_lock); } EXPORT_SYMBOL(tcf_em_unregister); static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree, int index) { return &tree->matches[index]; } static int tcf_em_validate(struct tcf_proto *tp, struct tcf_ematch_tree_hdr *tree_hdr, struct tcf_ematch *em, struct nlattr *nla, int idx) { int err = -EINVAL; struct tcf_ematch_hdr *em_hdr = nla_data(nla); int data_len = nla_len(nla) - sizeof(*em_hdr); void *data = (void *) em_hdr + sizeof(*em_hdr); struct net *net = dev_net(qdisc_dev(tp->q)); if (!TCF_EM_REL_VALID(em_hdr->flags)) goto errout; if (em_hdr->kind == TCF_EM_CONTAINER) { /* Special ematch called "container", carries an index * referencing an external ematch sequence. */ u32 ref; if (data_len < sizeof(ref)) goto errout; ref = *(u32 *) data; if (ref >= tree_hdr->nmatches) goto errout; /* We do not allow backward jumps to avoid loops and jumps * to our own position are of course illegal. */ if (ref <= idx) goto errout; em->data = ref; } else { /* Note: This lookup will increase the module refcnt * of the ematch module referenced. 
In case of a failure, * a destroy function is called by the underlying layer * which automatically releases the reference again, therefore * the module MUST not be given back under any circumstances * here. Be aware, the destroy function assumes that the * module is held if the ops field is non zero. */ em->ops = tcf_em_lookup(em_hdr->kind); if (em->ops == NULL) { err = -ENOENT; #ifdef CONFIG_MODULES __rtnl_unlock(); request_module("ematch-kind-%u", em_hdr->kind); rtnl_lock(); em->ops = tcf_em_lookup(em_hdr->kind); if (em->ops) { /* We dropped the RTNL mutex in order to * perform the module load. Tell the caller * to replay the request. */ module_put(em->ops->owner); em->ops = NULL; err = -EAGAIN; } #endif goto errout; } /* ematch module provides expected length of data, so we * can do a basic sanity check. */ if (em->ops->datalen && data_len < em->ops->datalen) goto errout; if (em->ops->change) { err = em->ops->change(net, data, data_len, em); if (err < 0) goto errout; } else if (data_len > 0) { /* ematch module doesn't provide an own change * procedure and expects us to allocate and copy * the ematch data. * * TCF_EM_SIMPLE may be specified stating that the * data only consists of a u32 integer and the module * does not expected a memory reference but rather * the value carried. 
*/ if (em_hdr->flags & TCF_EM_SIMPLE) { if (data_len < sizeof(u32)) goto errout; em->data = *(u32 *) data; } else { void *v = kmemdup(data, data_len, GFP_KERNEL); if (v == NULL) { err = -ENOBUFS; goto errout; } em->data = (unsigned long) v; } } } em->matchid = em_hdr->matchid; em->flags = em_hdr->flags; em->datalen = data_len; em->net = net; err = 0; errout: return err; } static const struct nla_policy em_policy[TCA_EMATCH_TREE_MAX + 1] = { [TCA_EMATCH_TREE_HDR] = { .len = sizeof(struct tcf_ematch_tree_hdr) }, [TCA_EMATCH_TREE_LIST] = { .type = NLA_NESTED }, }; /** * tcf_em_tree_validate - validate ematch config TLV and build ematch tree * * @tp: classifier kind handle * @nla: ematch tree configuration TLV * @tree: destination ematch tree variable to store the resulting * ematch tree. * * This function validates the given configuration TLV @nla and builds an * ematch tree in @tree. The resulting tree must later be copied into * the private classifier data using tcf_em_tree_change(). You MUST NOT * provide the ematch tree variable of the private classifier data directly, * the changes would not be locked properly. * * Returns a negative error code if the configuration TLV contains errors. 
*/ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, struct tcf_ematch_tree *tree) { int idx, list_len, matches_len, err; struct nlattr *tb[TCA_EMATCH_TREE_MAX + 1]; struct nlattr *rt_match, *rt_hdr, *rt_list; struct tcf_ematch_tree_hdr *tree_hdr; struct tcf_ematch *em; memset(tree, 0, sizeof(*tree)); if (!nla) return 0; err = nla_parse_nested(tb, TCA_EMATCH_TREE_MAX, nla, em_policy); if (err < 0) goto errout; err = -EINVAL; rt_hdr = tb[TCA_EMATCH_TREE_HDR]; rt_list = tb[TCA_EMATCH_TREE_LIST]; if (rt_hdr == NULL || rt_list == NULL) goto errout; tree_hdr = nla_data(rt_hdr); memcpy(&tree->hdr, tree_hdr, sizeof(*tree_hdr)); rt_match = nla_data(rt_list); list_len = nla_len(rt_list); matches_len = tree_hdr->nmatches * sizeof(*em); tree->matches = kzalloc(matches_len, GFP_KERNEL); if (tree->matches == NULL) goto errout; /* We do not use nla_parse_nested here because the maximum * number of attributes is unknown. This saves us the allocation * for a tb buffer which would serve no purpose at all. * * The array of rt attributes is parsed in the order as they are * provided, their type must be incremental from 1 to n. Even * if it does not serve any real purpose, a failure of sticking * to this policy will result in parsing failure. */ for (idx = 0; nla_ok(rt_match, list_len); idx++) { err = -EINVAL; if (rt_match->nla_type != (idx + 1)) goto errout_abort; if (idx >= tree_hdr->nmatches) goto errout_abort; if (nla_len(rt_match) < sizeof(struct tcf_ematch_hdr)) goto errout_abort; em = tcf_em_get_match(tree, idx); err = tcf_em_validate(tp, tree_hdr, em, rt_match, idx); if (err < 0) goto errout_abort; rt_match = nla_next(rt_match, &list_len); } /* Check if the number of matches provided by userspace actually * complies with the array of matches. The number was used for * the validation of references and a mismatch could lead to * undefined references during the matching process. 
*/ if (idx != tree_hdr->nmatches) { err = -EINVAL; goto errout_abort; } err = 0; errout: return err; errout_abort: tcf_em_tree_destroy(tree); return err; } EXPORT_SYMBOL(tcf_em_tree_validate); /** * tcf_em_tree_destroy - destroy an ematch tree * * @tp: classifier kind handle * @tree: ematch tree to be deleted * * This functions destroys an ematch tree previously created by * tcf_em_tree_validate()/tcf_em_tree_change(). You must ensure that * the ematch tree is not in use before calling this function. */ void tcf_em_tree_destroy(struct tcf_ematch_tree *tree) { int i; if (tree->matches == NULL) return; for (i = 0; i < tree->hdr.nmatches; i++) { struct tcf_ematch *em = tcf_em_get_match(tree, i); if (em->ops) { if (em->ops->destroy) em->ops->destroy(em); else if (!tcf_em_is_simple(em)) kfree((void *) em->data); module_put(em->ops->owner); } } tree->hdr.nmatches = 0; kfree(tree->matches); tree->matches = NULL; } EXPORT_SYMBOL(tcf_em_tree_destroy); /** * tcf_em_tree_dump - dump ematch tree into a rtnl message * * @skb: skb holding the rtnl message * @t: ematch tree to be dumped * @tlv: TLV type to be used to encapsulate the tree * * This function dumps a ematch tree into a rtnl message. It is valid to * call this function while the ematch tree is in use. * * Returns -1 if the skb tailroom is insufficient. */ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) { int i; u8 *tail; struct nlattr *top_start; struct nlattr *list_start; top_start = nla_nest_start(skb, tlv); if (top_start == NULL) goto nla_put_failure; if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr)) goto nla_put_failure; list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST); if (list_start == NULL) goto nla_put_failure; tail = skb_tail_pointer(skb); for (i = 0; i < tree->hdr.nmatches; i++) { struct nlattr *match_start = (struct nlattr *)tail; struct tcf_ematch *em = tcf_em_get_match(tree, i); struct tcf_ematch_hdr em_hdr = { .kind = em->ops ? 
em->ops->kind : TCF_EM_CONTAINER, .matchid = em->matchid, .flags = em->flags }; if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr)) goto nla_put_failure; if (em->ops && em->ops->dump) { if (em->ops->dump(skb, em) < 0) goto nla_put_failure; } else if (tcf_em_is_container(em) || tcf_em_is_simple(em)) { u32 u = em->data; nla_put_nohdr(skb, sizeof(u), &u); } else if (em->datalen > 0) nla_put_nohdr(skb, em->datalen, (void *) em->data); tail = skb_tail_pointer(skb); match_start->nla_len = tail - (u8 *)match_start; } nla_nest_end(skb, list_start); nla_nest_end(skb, top_start); return 0; nla_put_failure: return -1; } EXPORT_SYMBOL(tcf_em_tree_dump); static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em, struct tcf_pkt_info *info) { int r = em->ops->match(skb, em, info); return tcf_em_is_inverted(em) ? !r : r; } /* Do not use this function directly, use tcf_em_tree_match instead */ int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree, struct tcf_pkt_info *info) { int stackp = 0, match_idx = 0, res = 0; struct tcf_ematch *cur_match; int stack[CONFIG_NET_EMATCH_STACK]; proceed: while (match_idx < tree->hdr.nmatches) { cur_match = tcf_em_get_match(tree, match_idx); if (tcf_em_is_container(cur_match)) { if (unlikely(stackp >= CONFIG_NET_EMATCH_STACK)) goto stack_overflow; stack[stackp++] = match_idx; match_idx = cur_match->data; goto proceed; } res = tcf_em_match(skb, cur_match, info); if (tcf_em_early_end(cur_match, res)) break; match_idx++; } pop_stack: if (stackp > 0) { match_idx = stack[--stackp]; cur_match = tcf_em_get_match(tree, match_idx); if (tcf_em_is_inverted(cur_match)) res = !res; if (tcf_em_early_end(cur_match, res)) { goto pop_stack; } else { match_idx++; goto proceed; } } return res; stack_overflow: net_warn_ratelimited("tc ematch: local stack overflow, increase NET_EMATCH_STACK\n"); return -1; } EXPORT_SYMBOL(__tcf_em_tree_match);
gpl-2.0
linux-pmfs/pmfs
arch/arm/mach-at91/at91sam9g45_devices.c
2189
52381
/* * On-Chip devices setup code for the AT91SAM9G45 family * * Copyright (C) 2009 Atmel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/i2c-gpio.h> #include <linux/atmel-mci.h> #include <linux/platform_data/crypto-atmel.h> #include <linux/platform_data/at91_adc.h> #include <linux/fb.h> #include <video/atmel_lcdc.h> #include <mach/at91_adc.h> #include <mach/at91sam9g45.h> #include <mach/at91sam9g45_matrix.h> #include <mach/at91_matrix.h> #include <mach/at91sam9_smc.h> #include <linux/platform_data/dma-atmel.h> #include <mach/atmel-mci.h> #include <media/atmel-isi.h> #include "board.h" #include "generic.h" #include "clock.h" /* -------------------------------------------------------------------- * HDMAC - AHB DMA Controller * -------------------------------------------------------------------- */ #if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE) static u64 hdmac_dmamask = DMA_BIT_MASK(32); static struct resource hdmac_resources[] = { [0] = { .start = AT91SAM9G45_BASE_DMA, .end = AT91SAM9G45_BASE_DMA + SZ_512 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_DMA, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_DMA, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at_hdmac_device = { .name = "at91sam9g45_dma", .id = -1, .dev = { .dma_mask = &hdmac_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = hdmac_resources, .num_resources = ARRAY_SIZE(hdmac_resources), }; void __init at91_add_device_hdmac(void) { platform_device_register(&at_hdmac_device); } #else void __init at91_add_device_hdmac(void) {} #endif /* 
-------------------------------------------------------------------- * USB Host (OHCI) * -------------------------------------------------------------------- */ #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) static u64 ohci_dmamask = DMA_BIT_MASK(32); static struct at91_usbh_data usbh_ohci_data; static struct resource usbh_ohci_resources[] = { [0] = { .start = AT91SAM9G45_OHCI_BASE, .end = AT91SAM9G45_OHCI_BASE + SZ_1M - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_UHPHS, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_UHPHS, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91_usbh_ohci_device = { .name = "at91_ohci", .id = -1, .dev = { .dma_mask = &ohci_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &usbh_ohci_data, }, .resource = usbh_ohci_resources, .num_resources = ARRAY_SIZE(usbh_ohci_resources), }; void __init at91_add_device_usbh_ohci(struct at91_usbh_data *data) { int i; if (!data) return; /* Enable VBus control for UHP ports */ for (i = 0; i < data->ports; i++) { if (gpio_is_valid(data->vbus_pin[i])) at91_set_gpio_output(data->vbus_pin[i], data->vbus_pin_active_low[i]); } /* Enable overcurrent notification */ for (i = 0; i < data->ports; i++) { if (gpio_is_valid(data->overcurrent_pin[i])) at91_set_gpio_input(data->overcurrent_pin[i], 1); } usbh_ohci_data = *data; platform_device_register(&at91_usbh_ohci_device); } #else void __init at91_add_device_usbh_ohci(struct at91_usbh_data *data) {} #endif /* -------------------------------------------------------------------- * USB Host HS (EHCI) * Needs an OHCI host for low and full speed management * -------------------------------------------------------------------- */ #if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_EHCI_HCD_MODULE) static u64 ehci_dmamask = DMA_BIT_MASK(32); static struct at91_usbh_data usbh_ehci_data; static struct resource usbh_ehci_resources[] = { [0] = { .start = AT91SAM9G45_EHCI_BASE, .end = 
AT91SAM9G45_EHCI_BASE + SZ_1M - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_UHPHS, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_UHPHS, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91_usbh_ehci_device = { .name = "atmel-ehci", .id = -1, .dev = { .dma_mask = &ehci_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &usbh_ehci_data, }, .resource = usbh_ehci_resources, .num_resources = ARRAY_SIZE(usbh_ehci_resources), }; void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data) { int i; if (!data) return; /* Enable VBus control for UHP ports */ for (i = 0; i < data->ports; i++) { if (gpio_is_valid(data->vbus_pin[i])) at91_set_gpio_output(data->vbus_pin[i], data->vbus_pin_active_low[i]); } usbh_ehci_data = *data; platform_device_register(&at91_usbh_ehci_device); } #else void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data) {} #endif /* -------------------------------------------------------------------- * USB HS Device (Gadget) * -------------------------------------------------------------------- */ #if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE) static struct resource usba_udc_resources[] = { [0] = { .start = AT91SAM9G45_UDPHS_FIFO, .end = AT91SAM9G45_UDPHS_FIFO + SZ_512K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9G45_BASE_UDPHS, .end = AT91SAM9G45_BASE_UDPHS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, [2] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_UDPHS, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_UDPHS, .flags = IORESOURCE_IRQ, }, }; #define EP(nam, idx, maxpkt, maxbk, dma, isoc) \ [idx] = { \ .name = nam, \ .index = idx, \ .fifo_size = maxpkt, \ .nr_banks = maxbk, \ .can_dma = dma, \ .can_isoc = isoc, \ } static struct usba_ep_data usba_udc_ep[] __initdata = { EP("ep0", 0, 64, 1, 0, 0), EP("ep1", 1, 1024, 2, 1, 1), EP("ep2", 2, 1024, 2, 1, 1), EP("ep3", 3, 1024, 3, 1, 0), EP("ep4", 4, 1024, 3, 1, 0), EP("ep5", 5, 1024, 3, 1, 1), EP("ep6", 6, 1024, 
3, 1, 1), }; #undef EP /* * pdata doesn't have room for any endpoints, so we need to * append room for the ones we need right after it. */ static struct { struct usba_platform_data pdata; struct usba_ep_data ep[7]; } usba_udc_data; static struct platform_device at91_usba_udc_device = { .name = "atmel_usba_udc", .id = -1, .dev = { .platform_data = &usba_udc_data.pdata, }, .resource = usba_udc_resources, .num_resources = ARRAY_SIZE(usba_udc_resources), }; void __init at91_add_device_usba(struct usba_platform_data *data) { usba_udc_data.pdata.vbus_pin = -EINVAL; usba_udc_data.pdata.num_ep = ARRAY_SIZE(usba_udc_ep); memcpy(usba_udc_data.ep, usba_udc_ep, sizeof(usba_udc_ep)); if (data && gpio_is_valid(data->vbus_pin)) { at91_set_gpio_input(data->vbus_pin, 0); at91_set_deglitch(data->vbus_pin, 1); usba_udc_data.pdata.vbus_pin = data->vbus_pin; } /* Pullup pin is handled internally by USB device peripheral */ platform_device_register(&at91_usba_udc_device); } #else void __init at91_add_device_usba(struct usba_platform_data *data) {} #endif /* -------------------------------------------------------------------- * Ethernet * -------------------------------------------------------------------- */ #if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE) static u64 eth_dmamask = DMA_BIT_MASK(32); static struct macb_platform_data eth_data; static struct resource eth_resources[] = { [0] = { .start = AT91SAM9G45_BASE_EMAC, .end = AT91SAM9G45_BASE_EMAC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_EMAC, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_EMAC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_eth_device = { .name = "macb", .id = -1, .dev = { .dma_mask = &eth_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &eth_data, }, .resource = eth_resources, .num_resources = ARRAY_SIZE(eth_resources), }; void __init at91_add_device_eth(struct macb_platform_data *data) { if (!data) return; if 
(gpio_is_valid(data->phy_irq_pin)) { at91_set_gpio_input(data->phy_irq_pin, 0); at91_set_deglitch(data->phy_irq_pin, 1); } /* Pins used for MII and RMII */ at91_set_A_periph(AT91_PIN_PA17, 0); /* ETXCK_EREFCK */ at91_set_A_periph(AT91_PIN_PA15, 0); /* ERXDV */ at91_set_A_periph(AT91_PIN_PA12, 0); /* ERX0 */ at91_set_A_periph(AT91_PIN_PA13, 0); /* ERX1 */ at91_set_A_periph(AT91_PIN_PA16, 0); /* ERXER */ at91_set_A_periph(AT91_PIN_PA14, 0); /* ETXEN */ at91_set_A_periph(AT91_PIN_PA10, 0); /* ETX0 */ at91_set_A_periph(AT91_PIN_PA11, 0); /* ETX1 */ at91_set_A_periph(AT91_PIN_PA19, 0); /* EMDIO */ at91_set_A_periph(AT91_PIN_PA18, 0); /* EMDC */ if (!data->is_rmii) { at91_set_B_periph(AT91_PIN_PA29, 0); /* ECRS */ at91_set_B_periph(AT91_PIN_PA30, 0); /* ECOL */ at91_set_B_periph(AT91_PIN_PA8, 0); /* ERX2 */ at91_set_B_periph(AT91_PIN_PA9, 0); /* ERX3 */ at91_set_B_periph(AT91_PIN_PA28, 0); /* ERXCK */ at91_set_B_periph(AT91_PIN_PA6, 0); /* ETX2 */ at91_set_B_periph(AT91_PIN_PA7, 0); /* ETX3 */ at91_set_B_periph(AT91_PIN_PA27, 0); /* ETXER */ } eth_data = *data; platform_device_register(&at91sam9g45_eth_device); } #else void __init at91_add_device_eth(struct macb_platform_data *data) {} #endif /* -------------------------------------------------------------------- * MMC / SD * -------------------------------------------------------------------- */ #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) static u64 mmc_dmamask = DMA_BIT_MASK(32); static struct mci_platform_data mmc0_data, mmc1_data; static struct resource mmc0_resources[] = { [0] = { .start = AT91SAM9G45_BASE_MCI0, .end = AT91SAM9G45_BASE_MCI0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_MCI0, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_MCI0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_mmc0_device = { .name = "atmel_mci", .id = 0, .dev = { .dma_mask = &mmc_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = 
&mmc0_data, }, .resource = mmc0_resources, .num_resources = ARRAY_SIZE(mmc0_resources), }; static struct resource mmc1_resources[] = { [0] = { .start = AT91SAM9G45_BASE_MCI1, .end = AT91SAM9G45_BASE_MCI1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_MCI1, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_MCI1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_mmc1_device = { .name = "atmel_mci", .id = 1, .dev = { .dma_mask = &mmc_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &mmc1_data, }, .resource = mmc1_resources, .num_resources = ARRAY_SIZE(mmc1_resources), }; /* Consider only one slot : slot 0 */ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) { if (!data) return; /* Must have at least one usable slot */ if (!data->slot[0].bus_width) return; #if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE) { struct at_dma_slave *atslave; struct mci_dma_data *alt_atslave; alt_atslave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL); atslave = &alt_atslave->sdata; /* DMA slave channel configuration */ atslave->dma_dev = &at_hdmac_device.dev; atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW; if (mmc_id == 0) /* MCI0 */ atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI0) | ATC_DST_PER(AT_DMA_ID_MCI0); else /* MCI1 */ atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI1) | ATC_DST_PER(AT_DMA_ID_MCI1); data->dma_slave = alt_atslave; } #endif /* input/irq */ if (gpio_is_valid(data->slot[0].detect_pin)) { at91_set_gpio_input(data->slot[0].detect_pin, 1); at91_set_deglitch(data->slot[0].detect_pin, 1); } if (gpio_is_valid(data->slot[0].wp_pin)) at91_set_gpio_input(data->slot[0].wp_pin, 1); if (mmc_id == 0) { /* MCI0 */ /* CLK */ at91_set_A_periph(AT91_PIN_PA0, 0); /* CMD */ at91_set_A_periph(AT91_PIN_PA1, 1); /* DAT0, maybe DAT1..DAT3 and maybe DAT4..DAT7 */ at91_set_A_periph(AT91_PIN_PA2, 1); if (data->slot[0].bus_width == 4) { 
at91_set_A_periph(AT91_PIN_PA3, 1); at91_set_A_periph(AT91_PIN_PA4, 1); at91_set_A_periph(AT91_PIN_PA5, 1); if (data->slot[0].bus_width == 8) { at91_set_A_periph(AT91_PIN_PA6, 1); at91_set_A_periph(AT91_PIN_PA7, 1); at91_set_A_periph(AT91_PIN_PA8, 1); at91_set_A_periph(AT91_PIN_PA9, 1); } } mmc0_data = *data; platform_device_register(&at91sam9g45_mmc0_device); } else { /* MCI1 */ /* CLK */ at91_set_A_periph(AT91_PIN_PA31, 0); /* CMD */ at91_set_A_periph(AT91_PIN_PA22, 1); /* DAT0, maybe DAT1..DAT3 and maybe DAT4..DAT7 */ at91_set_A_periph(AT91_PIN_PA23, 1); if (data->slot[0].bus_width == 4) { at91_set_A_periph(AT91_PIN_PA24, 1); at91_set_A_periph(AT91_PIN_PA25, 1); at91_set_A_periph(AT91_PIN_PA26, 1); if (data->slot[0].bus_width == 8) { at91_set_A_periph(AT91_PIN_PA27, 1); at91_set_A_periph(AT91_PIN_PA28, 1); at91_set_A_periph(AT91_PIN_PA29, 1); at91_set_A_periph(AT91_PIN_PA30, 1); } } mmc1_data = *data; platform_device_register(&at91sam9g45_mmc1_device); } } #else void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) {} #endif /* -------------------------------------------------------------------- * NAND / SmartMedia * -------------------------------------------------------------------- */ #if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE) static struct atmel_nand_data nand_data; #define NAND_BASE AT91_CHIPSELECT_3 static struct resource nand_resources[] = { [0] = { .start = NAND_BASE, .end = NAND_BASE + SZ_256M - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9G45_BASE_ECC, .end = AT91SAM9G45_BASE_ECC + SZ_512 - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device at91sam9g45_nand_device = { .name = "atmel_nand", .id = -1, .dev = { .platform_data = &nand_data, }, .resource = nand_resources, .num_resources = ARRAY_SIZE(nand_resources), }; void __init at91_add_device_nand(struct atmel_nand_data *data) { unsigned long csa; if (!data) return; csa = at91_matrix_read(AT91_MATRIX_EBICSA); 
at91_matrix_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_CS3A_SMC_SMARTMEDIA); /* enable pin */ if (gpio_is_valid(data->enable_pin)) at91_set_gpio_output(data->enable_pin, 1); /* ready/busy pin */ if (gpio_is_valid(data->rdy_pin)) at91_set_gpio_input(data->rdy_pin, 1); /* card detect pin */ if (gpio_is_valid(data->det_pin)) at91_set_gpio_input(data->det_pin, 1); nand_data = *data; platform_device_register(&at91sam9g45_nand_device); } #else void __init at91_add_device_nand(struct atmel_nand_data *data) {} #endif /* -------------------------------------------------------------------- * TWI (i2c) * -------------------------------------------------------------------- */ /* * Prefer the GPIO code since the TWI controller isn't robust * (gets overruns and underruns under load) and can only issue * repeated STARTs in one scenario (the driver doesn't yet handle them). */ #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) static struct i2c_gpio_platform_data pdata_i2c0 = { .sda_pin = AT91_PIN_PA20, .sda_is_open_drain = 1, .scl_pin = AT91_PIN_PA21, .scl_is_open_drain = 1, .udelay = 5, /* ~100 kHz */ }; static struct platform_device at91sam9g45_twi0_device = { .name = "i2c-gpio", .id = 0, .dev.platform_data = &pdata_i2c0, }; static struct i2c_gpio_platform_data pdata_i2c1 = { .sda_pin = AT91_PIN_PB10, .sda_is_open_drain = 1, .scl_pin = AT91_PIN_PB11, .scl_is_open_drain = 1, .udelay = 5, /* ~100 kHz */ }; static struct platform_device at91sam9g45_twi1_device = { .name = "i2c-gpio", .id = 1, .dev.platform_data = &pdata_i2c1, }; void __init at91_add_device_i2c(short i2c_id, struct i2c_board_info *devices, int nr_devices) { i2c_register_board_info(i2c_id, devices, nr_devices); if (i2c_id == 0) { at91_set_GPIO_periph(AT91_PIN_PA20, 1); /* TWD (SDA) */ at91_set_multi_drive(AT91_PIN_PA20, 1); at91_set_GPIO_periph(AT91_PIN_PA21, 1); /* TWCK (SCL) */ at91_set_multi_drive(AT91_PIN_PA21, 1); platform_device_register(&at91sam9g45_twi0_device); } else { 
at91_set_GPIO_periph(AT91_PIN_PB10, 1); /* TWD (SDA) */ at91_set_multi_drive(AT91_PIN_PB10, 1); at91_set_GPIO_periph(AT91_PIN_PB11, 1); /* TWCK (SCL) */ at91_set_multi_drive(AT91_PIN_PB11, 1); platform_device_register(&at91sam9g45_twi1_device); } } #elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE) static struct resource twi0_resources[] = { [0] = { .start = AT91SAM9G45_BASE_TWI0, .end = AT91SAM9G45_BASE_TWI0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TWI0, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TWI0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_twi0_device = { .name = "i2c-at91sam9g10", .id = 0, .resource = twi0_resources, .num_resources = ARRAY_SIZE(twi0_resources), }; static struct resource twi1_resources[] = { [0] = { .start = AT91SAM9G45_BASE_TWI1, .end = AT91SAM9G45_BASE_TWI1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TWI1, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TWI1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_twi1_device = { .name = "i2c-at91sam9g10", .id = 1, .resource = twi1_resources, .num_resources = ARRAY_SIZE(twi1_resources), }; void __init at91_add_device_i2c(short i2c_id, struct i2c_board_info *devices, int nr_devices) { i2c_register_board_info(i2c_id, devices, nr_devices); /* pins used for TWI interface */ if (i2c_id == 0) { at91_set_A_periph(AT91_PIN_PA20, 0); /* TWD */ at91_set_A_periph(AT91_PIN_PA21, 0); /* TWCK */ platform_device_register(&at91sam9g45_twi0_device); } else { at91_set_A_periph(AT91_PIN_PB10, 0); /* TWD */ at91_set_A_periph(AT91_PIN_PB11, 0); /* TWCK */ platform_device_register(&at91sam9g45_twi1_device); } } #else void __init at91_add_device_i2c(short i2c_id, struct i2c_board_info *devices, int nr_devices) {} #endif /* -------------------------------------------------------------------- * SPI * -------------------------------------------------------------------- */ 
#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE) static u64 spi_dmamask = DMA_BIT_MASK(32); static struct resource spi0_resources[] = { [0] = { .start = AT91SAM9G45_BASE_SPI0, .end = AT91SAM9G45_BASE_SPI0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_SPI0, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_SPI0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_spi0_device = { .name = "atmel_spi", .id = 0, .dev = { .dma_mask = &spi_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = spi0_resources, .num_resources = ARRAY_SIZE(spi0_resources), }; static const unsigned spi0_standard_cs[4] = { AT91_PIN_PB3, AT91_PIN_PB18, AT91_PIN_PB19, AT91_PIN_PD27 }; static struct resource spi1_resources[] = { [0] = { .start = AT91SAM9G45_BASE_SPI1, .end = AT91SAM9G45_BASE_SPI1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_SPI1, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_SPI1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_spi1_device = { .name = "atmel_spi", .id = 1, .dev = { .dma_mask = &spi_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = spi1_resources, .num_resources = ARRAY_SIZE(spi1_resources), }; static const unsigned spi1_standard_cs[4] = { AT91_PIN_PB17, AT91_PIN_PD28, AT91_PIN_PD18, AT91_PIN_PD19 }; void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) { int i; unsigned long cs_pin; short enable_spi0 = 0; short enable_spi1 = 0; /* Choose SPI chip-selects */ for (i = 0; i < nr_devices; i++) { if (devices[i].controller_data) cs_pin = (unsigned long) devices[i].controller_data; else if (devices[i].bus_num == 0) cs_pin = spi0_standard_cs[devices[i].chip_select]; else cs_pin = spi1_standard_cs[devices[i].chip_select]; if (!gpio_is_valid(cs_pin)) continue; if (devices[i].bus_num == 0) enable_spi0 = 1; else enable_spi1 = 1; /* enable chip-select pin */ at91_set_gpio_output(cs_pin, 1); /* 
pass chip-select pin to driver */ devices[i].controller_data = (void *) cs_pin; } spi_register_board_info(devices, nr_devices); /* Configure SPI bus(es) */ if (enable_spi0) { at91_set_A_periph(AT91_PIN_PB0, 0); /* SPI0_MISO */ at91_set_A_periph(AT91_PIN_PB1, 0); /* SPI0_MOSI */ at91_set_A_periph(AT91_PIN_PB2, 0); /* SPI0_SPCK */ platform_device_register(&at91sam9g45_spi0_device); } if (enable_spi1) { at91_set_A_periph(AT91_PIN_PB14, 0); /* SPI1_MISO */ at91_set_A_periph(AT91_PIN_PB15, 0); /* SPI1_MOSI */ at91_set_A_periph(AT91_PIN_PB16, 0); /* SPI1_SPCK */ platform_device_register(&at91sam9g45_spi1_device); } } #else void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {} #endif /* -------------------------------------------------------------------- * AC97 * -------------------------------------------------------------------- */ #if defined(CONFIG_SND_ATMEL_AC97C) || defined(CONFIG_SND_ATMEL_AC97C_MODULE) static u64 ac97_dmamask = DMA_BIT_MASK(32); static struct ac97c_platform_data ac97_data; static struct resource ac97_resources[] = { [0] = { .start = AT91SAM9G45_BASE_AC97C, .end = AT91SAM9G45_BASE_AC97C + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AC97C, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AC97C, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_ac97_device = { .name = "atmel_ac97c", .id = 0, .dev = { .dma_mask = &ac97_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &ac97_data, }, .resource = ac97_resources, .num_resources = ARRAY_SIZE(ac97_resources), }; void __init at91_add_device_ac97(struct ac97c_platform_data *data) { if (!data) return; at91_set_A_periph(AT91_PIN_PD8, 0); /* AC97FS */ at91_set_A_periph(AT91_PIN_PD9, 0); /* AC97CK */ at91_set_A_periph(AT91_PIN_PD7, 0); /* AC97TX */ at91_set_A_periph(AT91_PIN_PD6, 0); /* AC97RX */ /* reset */ if (gpio_is_valid(data->reset_pin)) at91_set_gpio_output(data->reset_pin, 0); ac97_data = *data; 
platform_device_register(&at91sam9g45_ac97_device); } #else void __init at91_add_device_ac97(struct ac97c_platform_data *data) {} #endif /* -------------------------------------------------------------------- * Image Sensor Interface * -------------------------------------------------------------------- */ #if defined(CONFIG_VIDEO_ATMEL_ISI) || defined(CONFIG_VIDEO_ATMEL_ISI_MODULE) static u64 isi_dmamask = DMA_BIT_MASK(32); static struct isi_platform_data isi_data; struct resource isi_resources[] = { [0] = { .start = AT91SAM9G45_BASE_ISI, .end = AT91SAM9G45_BASE_ISI + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_ISI, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_ISI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_isi_device = { .name = "atmel_isi", .id = 0, .dev = { .dma_mask = &isi_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &isi_data, }, .resource = isi_resources, .num_resources = ARRAY_SIZE(isi_resources), }; static struct clk_lookup isi_mck_lookups[] = { CLKDEV_CON_DEV_ID("isi_mck", "atmel_isi.0", NULL), }; void __init at91_add_device_isi(struct isi_platform_data *data, bool use_pck_as_mck) { struct clk *pck; struct clk *parent; if (!data) return; isi_data = *data; at91_set_A_periph(AT91_PIN_PB20, 0); /* ISI_D0 */ at91_set_A_periph(AT91_PIN_PB21, 0); /* ISI_D1 */ at91_set_A_periph(AT91_PIN_PB22, 0); /* ISI_D2 */ at91_set_A_periph(AT91_PIN_PB23, 0); /* ISI_D3 */ at91_set_A_periph(AT91_PIN_PB24, 0); /* ISI_D4 */ at91_set_A_periph(AT91_PIN_PB25, 0); /* ISI_D5 */ at91_set_A_periph(AT91_PIN_PB26, 0); /* ISI_D6 */ at91_set_A_periph(AT91_PIN_PB27, 0); /* ISI_D7 */ at91_set_A_periph(AT91_PIN_PB28, 0); /* ISI_PCK */ at91_set_A_periph(AT91_PIN_PB30, 0); /* ISI_HSYNC */ at91_set_A_periph(AT91_PIN_PB29, 0); /* ISI_VSYNC */ at91_set_B_periph(AT91_PIN_PB8, 0); /* ISI_PD8 */ at91_set_B_periph(AT91_PIN_PB9, 0); /* ISI_PD9 */ at91_set_B_periph(AT91_PIN_PB10, 0); /* ISI_PD10 */ 
at91_set_B_periph(AT91_PIN_PB11, 0); /* ISI_PD11 */ platform_device_register(&at91sam9g45_isi_device); if (use_pck_as_mck) { at91_set_B_periph(AT91_PIN_PB31, 0); /* ISI_MCK (PCK1) */ pck = clk_get(NULL, "pck1"); parent = clk_get(NULL, "plla"); BUG_ON(IS_ERR(pck) || IS_ERR(parent)); if (clk_set_parent(pck, parent)) { pr_err("Failed to set PCK's parent\n"); } else { /* Register PCK as ISI_MCK */ isi_mck_lookups[0].clk = pck; clkdev_add_table(isi_mck_lookups, ARRAY_SIZE(isi_mck_lookups)); } clk_put(pck); clk_put(parent); } } #else void __init at91_add_device_isi(struct isi_platform_data *data, bool use_pck_as_mck) {} #endif /* -------------------------------------------------------------------- * LCD Controller * -------------------------------------------------------------------- */ #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE) static u64 lcdc_dmamask = DMA_BIT_MASK(32); static struct atmel_lcdfb_info lcdc_data; static struct resource lcdc_resources[] = { [0] = { .start = AT91SAM9G45_LCDC_BASE, .end = AT91SAM9G45_LCDC_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_LCDC, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_LCDC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91_lcdc_device = { .id = 0, .dev = { .dma_mask = &lcdc_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &lcdc_data, }, .resource = lcdc_resources, .num_resources = ARRAY_SIZE(lcdc_resources), }; void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) { if (!data) return; if (cpu_is_at91sam9g45es()) at91_lcdc_device.name = "at91sam9g45es-lcdfb"; else at91_lcdc_device.name = "at91sam9g45-lcdfb"; at91_set_A_periph(AT91_PIN_PE0, 0); /* LCDDPWR */ at91_set_A_periph(AT91_PIN_PE2, 0); /* LCDCC */ at91_set_A_periph(AT91_PIN_PE3, 0); /* LCDVSYNC */ at91_set_A_periph(AT91_PIN_PE4, 0); /* LCDHSYNC */ at91_set_A_periph(AT91_PIN_PE5, 0); /* LCDDOTCK */ at91_set_A_periph(AT91_PIN_PE6, 0); /* LCDDEN */ 
at91_set_A_periph(AT91_PIN_PE7, 0); /* LCDD0 */ at91_set_A_periph(AT91_PIN_PE8, 0); /* LCDD1 */ at91_set_A_periph(AT91_PIN_PE9, 0); /* LCDD2 */ at91_set_A_periph(AT91_PIN_PE10, 0); /* LCDD3 */ at91_set_A_periph(AT91_PIN_PE11, 0); /* LCDD4 */ at91_set_A_periph(AT91_PIN_PE12, 0); /* LCDD5 */ at91_set_A_periph(AT91_PIN_PE13, 0); /* LCDD6 */ at91_set_A_periph(AT91_PIN_PE14, 0); /* LCDD7 */ at91_set_A_periph(AT91_PIN_PE15, 0); /* LCDD8 */ at91_set_A_periph(AT91_PIN_PE16, 0); /* LCDD9 */ at91_set_A_periph(AT91_PIN_PE17, 0); /* LCDD10 */ at91_set_A_periph(AT91_PIN_PE18, 0); /* LCDD11 */ at91_set_A_periph(AT91_PIN_PE19, 0); /* LCDD12 */ at91_set_A_periph(AT91_PIN_PE20, 0); /* LCDD13 */ at91_set_A_periph(AT91_PIN_PE21, 0); /* LCDD14 */ at91_set_A_periph(AT91_PIN_PE22, 0); /* LCDD15 */ at91_set_A_periph(AT91_PIN_PE23, 0); /* LCDD16 */ at91_set_A_periph(AT91_PIN_PE24, 0); /* LCDD17 */ at91_set_A_periph(AT91_PIN_PE25, 0); /* LCDD18 */ at91_set_A_periph(AT91_PIN_PE26, 0); /* LCDD19 */ at91_set_A_periph(AT91_PIN_PE27, 0); /* LCDD20 */ at91_set_A_periph(AT91_PIN_PE28, 0); /* LCDD21 */ at91_set_A_periph(AT91_PIN_PE29, 0); /* LCDD22 */ at91_set_A_periph(AT91_PIN_PE30, 0); /* LCDD23 */ lcdc_data = *data; platform_device_register(&at91_lcdc_device); } #else void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {} #endif /* -------------------------------------------------------------------- * Timer/Counter block * -------------------------------------------------------------------- */ #ifdef CONFIG_ATMEL_TCLIB static struct resource tcb0_resources[] = { [0] = { .start = AT91SAM9G45_BASE_TCB0, .end = AT91SAM9G45_BASE_TCB0 + SZ_256 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TCB, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TCB, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_tcb0_device = { .name = "atmel_tcb", .id = 0, .resource = tcb0_resources, .num_resources = ARRAY_SIZE(tcb0_resources), }; /* TCB1 begins with TC3 
*/ static struct resource tcb1_resources[] = { [0] = { .start = AT91SAM9G45_BASE_TCB1, .end = AT91SAM9G45_BASE_TCB1 + SZ_256 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TCB, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TCB, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_tcb1_device = { .name = "atmel_tcb", .id = 1, .resource = tcb1_resources, .num_resources = ARRAY_SIZE(tcb1_resources), }; static void __init at91_add_device_tc(void) { platform_device_register(&at91sam9g45_tcb0_device); platform_device_register(&at91sam9g45_tcb1_device); } #else static void __init at91_add_device_tc(void) { } #endif /* -------------------------------------------------------------------- * RTC * -------------------------------------------------------------------- */ #if defined(CONFIG_RTC_DRV_AT91RM9200) || defined(CONFIG_RTC_DRV_AT91RM9200_MODULE) static struct resource rtc_resources[] = { [0] = { .start = AT91SAM9G45_BASE_RTC, .end = AT91SAM9G45_BASE_RTC + SZ_256 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91_ID_SYS, .end = NR_IRQS_LEGACY + AT91_ID_SYS, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_rtc_device = { .name = "at91_rtc", .id = -1, .resource = rtc_resources, .num_resources = ARRAY_SIZE(rtc_resources), }; static void __init at91_add_device_rtc(void) { platform_device_register(&at91sam9g45_rtc_device); } #else static void __init at91_add_device_rtc(void) {} #endif /* -------------------------------------------------------------------- * Touchscreen * -------------------------------------------------------------------- */ #if defined(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) || defined(CONFIG_TOUCHSCREEN_ATMEL_TSADCC_MODULE) static u64 tsadcc_dmamask = DMA_BIT_MASK(32); static struct at91_tsadcc_data tsadcc_data; static struct resource tsadcc_resources[] = { [0] = { .start = AT91SAM9G45_BASE_TSC, .end = AT91SAM9G45_BASE_TSC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { 
.start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TSC, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TSC, .flags = IORESOURCE_IRQ, } }; static struct platform_device at91sam9g45_tsadcc_device = { .name = "atmel_tsadcc", .id = -1, .dev = { .dma_mask = &tsadcc_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &tsadcc_data, }, .resource = tsadcc_resources, .num_resources = ARRAY_SIZE(tsadcc_resources), }; void __init at91_add_device_tsadcc(struct at91_tsadcc_data *data) { if (!data) return; at91_set_gpio_input(AT91_PIN_PD20, 0); /* AD0_XR */ at91_set_gpio_input(AT91_PIN_PD21, 0); /* AD1_XL */ at91_set_gpio_input(AT91_PIN_PD22, 0); /* AD2_YT */ at91_set_gpio_input(AT91_PIN_PD23, 0); /* AD3_TB */ tsadcc_data = *data; platform_device_register(&at91sam9g45_tsadcc_device); } #else void __init at91_add_device_tsadcc(struct at91_tsadcc_data *data) {} #endif /* -------------------------------------------------------------------- * ADC * -------------------------------------------------------------------- */ #if IS_ENABLED(CONFIG_AT91_ADC) static struct at91_adc_data adc_data; static struct resource adc_resources[] = { [0] = { .start = AT91SAM9G45_BASE_TSC, .end = AT91SAM9G45_BASE_TSC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TSC, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TSC, .flags = IORESOURCE_IRQ, } }; static struct platform_device at91_adc_device = { .name = "at91_adc", .id = -1, .dev = { .platform_data = &adc_data, }, .resource = adc_resources, .num_resources = ARRAY_SIZE(adc_resources), }; static struct at91_adc_trigger at91_adc_triggers[] = { [0] = { .name = "external-rising", .value = 1, .is_external = true, }, [1] = { .name = "external-falling", .value = 2, .is_external = true, }, [2] = { .name = "external-any", .value = 3, .is_external = true, }, [3] = { .name = "continuous", .value = 6, .is_external = false, }, }; static struct at91_adc_reg_desc at91_adc_register_g45 = { .channel_base = AT91_ADC_CHR(0), .drdy_mask = 
AT91_ADC_DRDY, .status_register = AT91_ADC_SR, .trigger_register = 0x08, }; void __init at91_add_device_adc(struct at91_adc_data *data) { if (!data) return; if (test_bit(0, &data->channels_used)) at91_set_gpio_input(AT91_PIN_PD20, 0); if (test_bit(1, &data->channels_used)) at91_set_gpio_input(AT91_PIN_PD21, 0); if (test_bit(2, &data->channels_used)) at91_set_gpio_input(AT91_PIN_PD22, 0); if (test_bit(3, &data->channels_used)) at91_set_gpio_input(AT91_PIN_PD23, 0); if (test_bit(4, &data->channels_used)) at91_set_gpio_input(AT91_PIN_PD24, 0); if (test_bit(5, &data->channels_used)) at91_set_gpio_input(AT91_PIN_PD25, 0); if (test_bit(6, &data->channels_used)) at91_set_gpio_input(AT91_PIN_PD26, 0); if (test_bit(7, &data->channels_used)) at91_set_gpio_input(AT91_PIN_PD27, 0); if (data->use_external_triggers) at91_set_A_periph(AT91_PIN_PD28, 0); data->num_channels = 8; data->startup_time = 40; data->registers = &at91_adc_register_g45; data->trigger_number = 4; data->trigger_list = at91_adc_triggers; adc_data = *data; platform_device_register(&at91_adc_device); } #else void __init at91_add_device_adc(struct at91_adc_data *data) {} #endif /* -------------------------------------------------------------------- * RTT * -------------------------------------------------------------------- */ static struct resource rtt_resources[] = { { .start = AT91SAM9G45_BASE_RTT, .end = AT91SAM9G45_BASE_RTT + SZ_16 - 1, .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_IRQ, } }; static struct platform_device at91sam9g45_rtt_device = { .name = "at91_rtt", .id = 0, .resource = rtt_resources, }; #if IS_ENABLED(CONFIG_RTC_DRV_AT91SAM9) static void __init at91_add_device_rtt_rtc(void) { at91sam9g45_rtt_device.name = "rtc-at91sam9"; /* * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ at91sam9g45_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9G45_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = 
rtt_resources[1].start + 3; rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) { /* Only one resource is needed: RTT not used as RTC */ at91sam9g45_rtt_device.num_resources = 1; } #endif static void __init at91_add_device_rtt(void) { at91_add_device_rtt_rtc(); platform_device_register(&at91sam9g45_rtt_device); } /* -------------------------------------------------------------------- * TRNG * -------------------------------------------------------------------- */ #if defined(CONFIG_HW_RANDOM_ATMEL) || defined(CONFIG_HW_RANDOM_ATMEL_MODULE) static struct resource trng_resources[] = { { .start = AT91SAM9G45_BASE_TRNG, .end = AT91SAM9G45_BASE_TRNG + SZ_16K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device at91sam9g45_trng_device = { .name = "atmel-trng", .id = -1, .resource = trng_resources, .num_resources = ARRAY_SIZE(trng_resources), }; static void __init at91_add_device_trng(void) { platform_device_register(&at91sam9g45_trng_device); } #else static void __init at91_add_device_trng(void) {} #endif /* -------------------------------------------------------------------- * Watchdog * -------------------------------------------------------------------- */ #if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE) static struct resource wdt_resources[] = { { .start = AT91SAM9G45_BASE_WDT, .end = AT91SAM9G45_BASE_WDT + SZ_16 - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device at91sam9g45_wdt_device = { .name = "at91_wdt", .id = -1, .resource = wdt_resources, .num_resources = ARRAY_SIZE(wdt_resources), }; static void __init at91_add_device_watchdog(void) { platform_device_register(&at91sam9g45_wdt_device); } #else static void __init at91_add_device_watchdog(void) {} #endif /* -------------------------------------------------------------------- * PWM * 
--------------------------------------------------------------------*/ #if defined(CONFIG_ATMEL_PWM) || defined(CONFIG_ATMEL_PWM_MODULE) static u32 pwm_mask; static struct resource pwm_resources[] = { [0] = { .start = AT91SAM9G45_BASE_PWMC, .end = AT91SAM9G45_BASE_PWMC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_PWMC, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_PWMC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_pwm0_device = { .name = "atmel_pwm", .id = -1, .dev = { .platform_data = &pwm_mask, }, .resource = pwm_resources, .num_resources = ARRAY_SIZE(pwm_resources), }; void __init at91_add_device_pwm(u32 mask) { if (mask & (1 << AT91_PWM0)) at91_set_B_periph(AT91_PIN_PD24, 1); /* enable PWM0 */ if (mask & (1 << AT91_PWM1)) at91_set_B_periph(AT91_PIN_PD31, 1); /* enable PWM1 */ if (mask & (1 << AT91_PWM2)) at91_set_B_periph(AT91_PIN_PD26, 1); /* enable PWM2 */ if (mask & (1 << AT91_PWM3)) at91_set_B_periph(AT91_PIN_PD0, 1); /* enable PWM3 */ pwm_mask = mask; platform_device_register(&at91sam9g45_pwm0_device); } #else void __init at91_add_device_pwm(u32 mask) {} #endif /* -------------------------------------------------------------------- * SSC -- Synchronous Serial Controller * -------------------------------------------------------------------- */ #if defined(CONFIG_ATMEL_SSC) || defined(CONFIG_ATMEL_SSC_MODULE) static u64 ssc0_dmamask = DMA_BIT_MASK(32); static struct resource ssc0_resources[] = { [0] = { .start = AT91SAM9G45_BASE_SSC0, .end = AT91SAM9G45_BASE_SSC0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_SSC0, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_SSC0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_ssc0_device = { .name = "at91sam9g45_ssc", .id = 0, .dev = { .dma_mask = &ssc0_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = ssc0_resources, .num_resources = ARRAY_SIZE(ssc0_resources), }; static inline 
void configure_ssc0_pins(unsigned pins) { if (pins & ATMEL_SSC_TF) at91_set_A_periph(AT91_PIN_PD1, 1); if (pins & ATMEL_SSC_TK) at91_set_A_periph(AT91_PIN_PD0, 1); if (pins & ATMEL_SSC_TD) at91_set_A_periph(AT91_PIN_PD2, 1); if (pins & ATMEL_SSC_RD) at91_set_A_periph(AT91_PIN_PD3, 1); if (pins & ATMEL_SSC_RK) at91_set_A_periph(AT91_PIN_PD4, 1); if (pins & ATMEL_SSC_RF) at91_set_A_periph(AT91_PIN_PD5, 1); } static u64 ssc1_dmamask = DMA_BIT_MASK(32); static struct resource ssc1_resources[] = { [0] = { .start = AT91SAM9G45_BASE_SSC1, .end = AT91SAM9G45_BASE_SSC1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_SSC1, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_SSC1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_ssc1_device = { .name = "at91sam9g45_ssc", .id = 1, .dev = { .dma_mask = &ssc1_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = ssc1_resources, .num_resources = ARRAY_SIZE(ssc1_resources), }; static inline void configure_ssc1_pins(unsigned pins) { if (pins & ATMEL_SSC_TF) at91_set_A_periph(AT91_PIN_PD14, 1); if (pins & ATMEL_SSC_TK) at91_set_A_periph(AT91_PIN_PD12, 1); if (pins & ATMEL_SSC_TD) at91_set_A_periph(AT91_PIN_PD10, 1); if (pins & ATMEL_SSC_RD) at91_set_A_periph(AT91_PIN_PD11, 1); if (pins & ATMEL_SSC_RK) at91_set_A_periph(AT91_PIN_PD13, 1); if (pins & ATMEL_SSC_RF) at91_set_A_periph(AT91_PIN_PD15, 1); } /* * SSC controllers are accessed through library code, instead of any * kind of all-singing/all-dancing driver. For example one could be * used by a particular I2S audio codec's driver, while another one * on the same system might be used by a custom data capture driver. */ void __init at91_add_device_ssc(unsigned id, unsigned pins) { struct platform_device *pdev; /* * NOTE: caller is responsible for passing information matching * "pins" to whatever will be using each particular controller. 
*/ switch (id) { case AT91SAM9G45_ID_SSC0: pdev = &at91sam9g45_ssc0_device; configure_ssc0_pins(pins); break; case AT91SAM9G45_ID_SSC1: pdev = &at91sam9g45_ssc1_device; configure_ssc1_pins(pins); break; default: return; } platform_device_register(pdev); } #else void __init at91_add_device_ssc(unsigned id, unsigned pins) {} #endif /* -------------------------------------------------------------------- * UART * -------------------------------------------------------------------- */ #if defined(CONFIG_SERIAL_ATMEL) static struct resource dbgu_resources[] = { [0] = { .start = AT91SAM9G45_BASE_DBGU, .end = AT91SAM9G45_BASE_DBGU + SZ_512 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91_ID_SYS, .end = NR_IRQS_LEGACY + AT91_ID_SYS, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data dbgu_data = { .use_dma_tx = 0, .use_dma_rx = 0, }; static u64 dbgu_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9g45_dbgu_device = { .name = "atmel_usart", .id = 0, .dev = { .dma_mask = &dbgu_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &dbgu_data, }, .resource = dbgu_resources, .num_resources = ARRAY_SIZE(dbgu_resources), }; static inline void configure_dbgu_pins(void) { at91_set_A_periph(AT91_PIN_PB12, 0); /* DRXD */ at91_set_A_periph(AT91_PIN_PB13, 1); /* DTXD */ } static struct resource uart0_resources[] = { [0] = { .start = AT91SAM9G45_BASE_US0, .end = AT91SAM9G45_BASE_US0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_US0, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_US0, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart0_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart0_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9g45_uart0_device = { .name = "atmel_usart", .id = 1, .dev = { .dma_mask = &uart0_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart0_data, }, .resource = uart0_resources, .num_resources = 
ARRAY_SIZE(uart0_resources), }; static inline void configure_usart0_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB19, 1); /* TXD0 */ at91_set_A_periph(AT91_PIN_PB18, 0); /* RXD0 */ if (pins & ATMEL_UART_RTS) at91_set_B_periph(AT91_PIN_PB17, 0); /* RTS0 */ if (pins & ATMEL_UART_CTS) at91_set_B_periph(AT91_PIN_PB15, 0); /* CTS0 */ } static struct resource uart1_resources[] = { [0] = { .start = AT91SAM9G45_BASE_US1, .end = AT91SAM9G45_BASE_US1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_US1, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_US1, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart1_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart1_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9g45_uart1_device = { .name = "atmel_usart", .id = 2, .dev = { .dma_mask = &uart1_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart1_data, }, .resource = uart1_resources, .num_resources = ARRAY_SIZE(uart1_resources), }; static inline void configure_usart1_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB4, 1); /* TXD1 */ at91_set_A_periph(AT91_PIN_PB5, 0); /* RXD1 */ if (pins & ATMEL_UART_RTS) at91_set_A_periph(AT91_PIN_PD16, 0); /* RTS1 */ if (pins & ATMEL_UART_CTS) at91_set_A_periph(AT91_PIN_PD17, 0); /* CTS1 */ } static struct resource uart2_resources[] = { [0] = { .start = AT91SAM9G45_BASE_US2, .end = AT91SAM9G45_BASE_US2 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_US2, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_US2, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart2_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart2_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9g45_uart2_device = { .name = "atmel_usart", .id = 3, .dev = { .dma_mask = &uart2_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart2_data, }, .resource = uart2_resources, .num_resources = 
ARRAY_SIZE(uart2_resources), }; static inline void configure_usart2_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB6, 1); /* TXD2 */ at91_set_A_periph(AT91_PIN_PB7, 0); /* RXD2 */ if (pins & ATMEL_UART_RTS) at91_set_B_periph(AT91_PIN_PC9, 0); /* RTS2 */ if (pins & ATMEL_UART_CTS) at91_set_B_periph(AT91_PIN_PC11, 0); /* CTS2 */ } static struct resource uart3_resources[] = { [0] = { .start = AT91SAM9G45_BASE_US3, .end = AT91SAM9G45_BASE_US3 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_US3, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_US3, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart3_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart3_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9g45_uart3_device = { .name = "atmel_usart", .id = 4, .dev = { .dma_mask = &uart3_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart3_data, }, .resource = uart3_resources, .num_resources = ARRAY_SIZE(uart3_resources), }; static inline void configure_usart3_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB8, 1); /* TXD3 */ at91_set_A_periph(AT91_PIN_PB9, 0); /* RXD3 */ if (pins & ATMEL_UART_RTS) at91_set_B_periph(AT91_PIN_PA23, 0); /* RTS3 */ if (pins & ATMEL_UART_CTS) at91_set_B_periph(AT91_PIN_PA24, 0); /* CTS3 */ } static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART]; /* the UARTs to use */ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) { struct platform_device *pdev; struct atmel_uart_data *pdata; switch (id) { case 0: /* DBGU */ pdev = &at91sam9g45_dbgu_device; configure_dbgu_pins(); break; case AT91SAM9G45_ID_US0: pdev = &at91sam9g45_uart0_device; configure_usart0_pins(pins); break; case AT91SAM9G45_ID_US1: pdev = &at91sam9g45_uart1_device; configure_usart1_pins(pins); break; case AT91SAM9G45_ID_US2: pdev = &at91sam9g45_uart2_device; configure_usart2_pins(pins); break; case AT91SAM9G45_ID_US3: pdev = &at91sam9g45_uart3_device; 
configure_usart3_pins(pins); break; default: return; } pdata = pdev->dev.platform_data; pdata->num = portnr; /* update to mapped ID */ if (portnr < ATMEL_MAX_UART) at91_uarts[portnr] = pdev; } void __init at91_add_device_serial(void) { int i; for (i = 0; i < ATMEL_MAX_UART; i++) { if (at91_uarts[i]) platform_device_register(at91_uarts[i]); } } #else void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {} void __init at91_add_device_serial(void) {} #endif /* -------------------------------------------------------------------- * SHA1/SHA256 * -------------------------------------------------------------------- */ #if defined(CONFIG_CRYPTO_DEV_ATMEL_SHA) || defined(CONFIG_CRYPTO_DEV_ATMEL_SHA_MODULE) static struct resource sha_resources[] = { { .start = AT91SAM9G45_BASE_SHA, .end = AT91SAM9G45_BASE_SHA + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_sha_device = { .name = "atmel_sha", .id = -1, .resource = sha_resources, .num_resources = ARRAY_SIZE(sha_resources), }; static void __init at91_add_device_sha(void) { platform_device_register(&at91sam9g45_sha_device); } #else static void __init at91_add_device_sha(void) {} #endif /* -------------------------------------------------------------------- * DES/TDES * -------------------------------------------------------------------- */ #if defined(CONFIG_CRYPTO_DEV_ATMEL_TDES) || defined(CONFIG_CRYPTO_DEV_ATMEL_TDES_MODULE) static struct resource tdes_resources[] = { [0] = { .start = AT91SAM9G45_BASE_TDES, .end = AT91SAM9G45_BASE_TDES + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_tdes_device = { .name = "atmel_tdes", .id = -1, .resource = 
tdes_resources, .num_resources = ARRAY_SIZE(tdes_resources), }; static void __init at91_add_device_tdes(void) { platform_device_register(&at91sam9g45_tdes_device); } #else static void __init at91_add_device_tdes(void) {} #endif /* -------------------------------------------------------------------- * AES * -------------------------------------------------------------------- */ #if defined(CONFIG_CRYPTO_DEV_ATMEL_AES) || defined(CONFIG_CRYPTO_DEV_ATMEL_AES_MODULE) static struct crypto_platform_data aes_data; static struct crypto_dma_data alt_atslave; static u64 aes_dmamask = DMA_BIT_MASK(32); static struct resource aes_resources[] = { [0] = { .start = AT91SAM9G45_BASE_AES, .end = AT91SAM9G45_BASE_AES + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9g45_aes_device = { .name = "atmel_aes", .id = -1, .dev = { .dma_mask = &aes_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &aes_data, }, .resource = aes_resources, .num_resources = ARRAY_SIZE(aes_resources), }; static void __init at91_add_device_aes(void) { struct at_dma_slave *atslave; /* DMA TX slave channel configuration */ atslave = &alt_atslave.txdata; atslave->dma_dev = &at_hdmac_device.dev; atslave->cfg = ATC_FIFOCFG_ENOUGHSPACE | ATC_SRC_H2SEL_HW | ATC_SRC_PER(AT_DMA_ID_AES_RX); /* DMA RX slave channel configuration */ atslave = &alt_atslave.rxdata; atslave->dma_dev = &at_hdmac_device.dev; atslave->cfg = ATC_FIFOCFG_ENOUGHSPACE | ATC_DST_H2SEL_HW | ATC_DST_PER(AT_DMA_ID_AES_TX); aes_data.dma_slave = &alt_atslave; platform_device_register(&at91sam9g45_aes_device); } #else static void __init at91_add_device_aes(void) {} #endif /* -------------------------------------------------------------------- */ /* * These devices are always present and don't need any board-specific * setup. 
*/ static int __init at91_add_standard_devices(void) { if (of_have_populated_dt()) return 0; at91_add_device_hdmac(); at91_add_device_rtc(); at91_add_device_rtt(); at91_add_device_trng(); at91_add_device_watchdog(); at91_add_device_tc(); at91_add_device_sha(); at91_add_device_tdes(); at91_add_device_aes(); return 0; } arch_initcall(at91_add_standard_devices);
gpl-2.0
NooNameR/bravo_kernel_3.0
arch/sh/kernel/cpu/sh4a/clock-sh7763.c
2445
2766
/* * arch/sh/kernel/cpu/sh4a/clock-sh7763.c * * SH7763 support for the clock framework * * Copyright (C) 2005 Paul Mundt * Copyright (C) 2007 Yoshihiro Shimoda * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/clkdev.h> #include <asm/clock.h> #include <asm/freq.h> #include <asm/io.h> static int bfc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 }; static int p0fc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 }; static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 }; static void master_clk_init(struct clk *clk) { clk->rate *= p0fc_divisors[(__raw_readl(FRQCR) >> 4) & 0x07]; } static struct clk_ops sh7763_master_clk_ops = { .init = master_clk_init, }; static unsigned long module_clk_recalc(struct clk *clk) { int idx = ((__raw_readl(FRQCR) >> 4) & 0x07); return clk->parent->rate / p0fc_divisors[idx]; } static struct clk_ops sh7763_module_clk_ops = { .recalc = module_clk_recalc, }; static unsigned long bus_clk_recalc(struct clk *clk) { int idx = ((__raw_readl(FRQCR) >> 16) & 0x07); return clk->parent->rate / bfc_divisors[idx]; } static struct clk_ops sh7763_bus_clk_ops = { .recalc = bus_clk_recalc, }; static struct clk_ops sh7763_cpu_clk_ops = { .recalc = followparent_recalc, }; static struct clk_ops *sh7763_clk_ops[] = { &sh7763_master_clk_ops, &sh7763_module_clk_ops, &sh7763_bus_clk_ops, &sh7763_cpu_clk_ops, }; void __init arch_init_clk_ops(struct clk_ops **ops, int idx) { if (idx < ARRAY_SIZE(sh7763_clk_ops)) *ops = sh7763_clk_ops[idx]; } static unsigned long shyway_clk_recalc(struct clk *clk) { int idx = ((__raw_readl(FRQCR) >> 20) & 0x07); return clk->parent->rate / cfc_divisors[idx]; } static struct clk_ops sh7763_shyway_clk_ops = { .recalc = shyway_clk_recalc, }; static struct clk sh7763_shyway_clk = { .flags = CLK_ENABLE_ON_INIT, .ops = &sh7763_shyway_clk_ops, }; /* * 
Additional SH7763-specific on-chip clocks that aren't already part of the * clock framework */ static struct clk *sh7763_onchip_clocks[] = { &sh7763_shyway_clk, }; #define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk } static struct clk_lookup lookups[] = { /* main clocks */ CLKDEV_CON_ID("shyway_clk", &sh7763_shyway_clk), }; int __init arch_clk_init(void) { struct clk *clk; int i, ret = 0; cpg_clk_init(); clk = clk_get(NULL, "master_clk"); for (i = 0; i < ARRAY_SIZE(sh7763_onchip_clocks); i++) { struct clk *clkp = sh7763_onchip_clocks[i]; clkp->parent = clk; ret |= clk_register(clkp); } clk_put(clk); clkdev_add_table(lookups, ARRAY_SIZE(lookups)); return ret; }
gpl-2.0
Clumsy-Kernel-Development/M8_Kernel
arch/arm/mach-msm/msm_bus/msm_bus_config.c
2445
2350
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/radix-tree.h> #include <linux/clk.h> #include <mach/msm_bus_board.h> #include <mach/msm_bus.h> #include "msm_bus_core.h" static DEFINE_MUTEX(msm_bus_config_lock); /** * msm_bus_axi_porthalt() - Halt the given axi master port * @master_port: AXI Master port to be halted */ int msm_bus_axi_porthalt(int master_port) { int ret = 0; int priv_id; struct msm_bus_fabric_device *fabdev; priv_id = msm_bus_board_get_iid(master_port); MSM_BUS_DBG("master_port: %d iid: %d fabid%d\n", master_port, priv_id, GET_FABID(priv_id)); fabdev = msm_bus_get_fabric_device(GET_FABID(priv_id)); if (IS_ERR_OR_NULL(fabdev)) { MSM_BUS_ERR("Fabric device not found for mport: %d\n", master_port); return -ENODEV; } mutex_lock(&msm_bus_config_lock); ret = fabdev->algo->port_halt(fabdev, priv_id); mutex_unlock(&msm_bus_config_lock); return ret; } EXPORT_SYMBOL(msm_bus_axi_porthalt); /** * msm_bus_axi_portunhalt() - Unhalt the given axi master port * @master_port: AXI Master port to be unhalted */ int msm_bus_axi_portunhalt(int master_port) { int ret = 0; int priv_id; struct msm_bus_fabric_device *fabdev; priv_id = msm_bus_board_get_iid(master_port); MSM_BUS_DBG("master_port: %d iid: %d fabid: %d\n", master_port, priv_id, GET_FABID(priv_id)); fabdev = msm_bus_get_fabric_device(GET_FABID(priv_id)); if 
(IS_ERR_OR_NULL(fabdev)) { MSM_BUS_ERR("Fabric device not found for mport: %d\n", master_port); return -ENODEV; } mutex_lock(&msm_bus_config_lock); ret = fabdev->algo->port_unhalt(fabdev, priv_id); mutex_unlock(&msm_bus_config_lock); return ret; } EXPORT_SYMBOL(msm_bus_axi_portunhalt);
gpl-2.0
JAV-Team-qcom/android_kernel_wingtech_msm8916
lib/raid6/avx2.c
3469
8675
/* -*- linux-c -*- ------------------------------------------------------- * * * Copyright (C) 2012 Intel Corporation * Author: Yuanhan Liu <yuanhan.liu@linux.intel.com> * * Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, Inc., 53 Temple Place Ste 330, * Boston MA 02111-1307, USA; either version 2 of the License, or * (at your option) any later version; incorporated herein by reference. * * ----------------------------------------------------------------------- */ /* * AVX2 implementation of RAID-6 syndrome functions * */ #ifdef CONFIG_AS_AVX2 #include <linux/raid/pq.h> #include "x86.h" static const struct raid6_avx2_constants { u64 x1d[4]; } raid6_avx2_constants __aligned(32) = { { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,}, }; static int raid6_have_avx2(void) { return boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX); } /* * Plain AVX2 implementation */ static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs) { u8 **dptr = (u8 **)ptrs; u8 *p, *q; int d, z, z0; z0 = disks - 3; /* Highest data disk */ p = dptr[z0+1]; /* XOR parity */ q = dptr[z0+2]; /* RS syndrome */ kernel_fpu_begin(); asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0])); asm volatile("vpxor %ymm3,%ymm3,%ymm3"); /* Zero temp */ for (d = 0; d < bytes; d += 32) { asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */ asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d])); asm volatile("vmovdqa %ymm2,%ymm4");/* Q[0] */ asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d])); for (z = z0-2; z >= 0; z--) { asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5"); asm volatile("vpaddb 
%ymm4,%ymm4,%ymm4"); asm volatile("vpand %ymm0,%ymm5,%ymm5"); asm volatile("vpxor %ymm5,%ymm4,%ymm4"); asm volatile("vpxor %ymm6,%ymm2,%ymm2"); asm volatile("vpxor %ymm6,%ymm4,%ymm4"); asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d])); } asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5"); asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); asm volatile("vpand %ymm0,%ymm5,%ymm5"); asm volatile("vpxor %ymm5,%ymm4,%ymm4"); asm volatile("vpxor %ymm6,%ymm2,%ymm2"); asm volatile("vpxor %ymm6,%ymm4,%ymm4"); asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d])); asm volatile("vpxor %ymm2,%ymm2,%ymm2"); asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); asm volatile("vpxor %ymm4,%ymm4,%ymm4"); } asm volatile("sfence" : : : "memory"); kernel_fpu_end(); } const struct raid6_calls raid6_avx2x1 = { raid6_avx21_gen_syndrome, raid6_have_avx2, "avx2x1", 1 /* Has cache hints */ }; /* * Unrolled-by-2 AVX2 implementation */ static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs) { u8 **dptr = (u8 **)ptrs; u8 *p, *q; int d, z, z0; z0 = disks - 3; /* Highest data disk */ p = dptr[z0+1]; /* XOR parity */ q = dptr[z0+2]; /* RS syndrome */ kernel_fpu_begin(); asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0])); asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */ /* We uniformly assume a single prefetch covers at least 32 bytes */ for (d = 0; d < bytes; d += 64) { asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); asm volatile("prefetchnta %0" : : "m" (dptr[z0][d+32])); asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */ asm volatile("vmovdqa %0,%%ymm3" : : "m" (dptr[z0][d+32]));/* P[1] */ asm volatile("vmovdqa %ymm2,%ymm4"); /* Q[0] */ asm volatile("vmovdqa %ymm3,%ymm6"); /* Q[1] */ for (z = z0-1; z >= 0; z--) { asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32])); asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5"); asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7"); asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); 
asm volatile("vpaddb %ymm6,%ymm6,%ymm6"); asm volatile("vpand %ymm0,%ymm5,%ymm5"); asm volatile("vpand %ymm0,%ymm7,%ymm7"); asm volatile("vpxor %ymm5,%ymm4,%ymm4"); asm volatile("vpxor %ymm7,%ymm6,%ymm6"); asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d])); asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32])); asm volatile("vpxor %ymm5,%ymm2,%ymm2"); asm volatile("vpxor %ymm7,%ymm3,%ymm3"); asm volatile("vpxor %ymm5,%ymm4,%ymm4"); asm volatile("vpxor %ymm7,%ymm6,%ymm6"); } asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d])); asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32])); asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32])); } asm volatile("sfence" : : : "memory"); kernel_fpu_end(); } const struct raid6_calls raid6_avx2x2 = { raid6_avx22_gen_syndrome, raid6_have_avx2, "avx2x2", 1 /* Has cache hints */ }; #ifdef CONFIG_X86_64 /* * Unrolled-by-4 AVX2 implementation */ static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs) { u8 **dptr = (u8 **)ptrs; u8 *p, *q; int d, z, z0; z0 = disks - 3; /* Highest data disk */ p = dptr[z0+1]; /* XOR parity */ q = dptr[z0+2]; /* RS syndrome */ kernel_fpu_begin(); asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0])); asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */ asm volatile("vpxor %ymm2,%ymm2,%ymm2"); /* P[0] */ asm volatile("vpxor %ymm3,%ymm3,%ymm3"); /* P[1] */ asm volatile("vpxor %ymm4,%ymm4,%ymm4"); /* Q[0] */ asm volatile("vpxor %ymm6,%ymm6,%ymm6"); /* Q[1] */ asm volatile("vpxor %ymm10,%ymm10,%ymm10"); /* P[2] */ asm volatile("vpxor %ymm11,%ymm11,%ymm11"); /* P[3] */ asm volatile("vpxor %ymm12,%ymm12,%ymm12"); /* Q[2] */ asm volatile("vpxor %ymm14,%ymm14,%ymm14"); /* Q[3] */ for (d = 0; d < bytes; d += 128) { for (z = z0; z >= 0; z--) { asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32])); asm volatile("prefetchnta %0" : : "m" (dptr[z][d+64])); asm 
volatile("prefetchnta %0" : : "m" (dptr[z][d+96])); asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5"); asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7"); asm volatile("vpcmpgtb %ymm12,%ymm1,%ymm13"); asm volatile("vpcmpgtb %ymm14,%ymm1,%ymm15"); asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); asm volatile("vpaddb %ymm6,%ymm6,%ymm6"); asm volatile("vpaddb %ymm12,%ymm12,%ymm12"); asm volatile("vpaddb %ymm14,%ymm14,%ymm14"); asm volatile("vpand %ymm0,%ymm5,%ymm5"); asm volatile("vpand %ymm0,%ymm7,%ymm7"); asm volatile("vpand %ymm0,%ymm13,%ymm13"); asm volatile("vpand %ymm0,%ymm15,%ymm15"); asm volatile("vpxor %ymm5,%ymm4,%ymm4"); asm volatile("vpxor %ymm7,%ymm6,%ymm6"); asm volatile("vpxor %ymm13,%ymm12,%ymm12"); asm volatile("vpxor %ymm15,%ymm14,%ymm14"); asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d])); asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32])); asm volatile("vmovdqa %0,%%ymm13" : : "m" (dptr[z][d+64])); asm volatile("vmovdqa %0,%%ymm15" : : "m" (dptr[z][d+96])); asm volatile("vpxor %ymm5,%ymm2,%ymm2"); asm volatile("vpxor %ymm7,%ymm3,%ymm3"); asm volatile("vpxor %ymm13,%ymm10,%ymm10"); asm volatile("vpxor %ymm15,%ymm11,%ymm11"); asm volatile("vpxor %ymm5,%ymm4,%ymm4"); asm volatile("vpxor %ymm7,%ymm6,%ymm6"); asm volatile("vpxor %ymm13,%ymm12,%ymm12"); asm volatile("vpxor %ymm15,%ymm14,%ymm14"); } asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d])); asm volatile("vpxor %ymm2,%ymm2,%ymm2"); asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32])); asm volatile("vpxor %ymm3,%ymm3,%ymm3"); asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64])); asm volatile("vpxor %ymm10,%ymm10,%ymm10"); asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96])); asm volatile("vpxor %ymm11,%ymm11,%ymm11"); asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); asm volatile("vpxor %ymm4,%ymm4,%ymm4"); asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32])); asm volatile("vpxor %ymm6,%ymm6,%ymm6"); asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64])); asm volatile("vpxor %ymm12,%ymm12,%ymm12"); asm 
volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96])); asm volatile("vpxor %ymm14,%ymm14,%ymm14"); } asm volatile("sfence" : : : "memory"); kernel_fpu_end(); } const struct raid6_calls raid6_avx2x4 = { raid6_avx24_gen_syndrome, raid6_have_avx2, "avx2x4", 1 /* Has cache hints */ }; #endif #endif /* CONFIG_AS_AVX2 */
gpl-2.0
FennyFatal/SGS4-M919-FennyKernel
kernel/compat.c
4749
31208
/* * linux/kernel/compat.c * * Kernel compatibililty routines for e.g. 32 bit syscall support * on 64 bit kernels. * * Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <linux/compat.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/signal.h> #include <linux/sched.h> /* for MAX_SCHEDULE_TIMEOUT */ #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/security.h> #include <linux/timex.h> #include <linux/export.h> #include <linux/migrate.h> #include <linux/posix-timers.h> #include <linux/times.h> #include <linux/ptrace.h> #include <linux/gfp.h> #include <asm/uaccess.h> /* * Get/set struct timeval with struct timespec on the native side */ static int compat_get_timeval_convert(struct timespec *o, struct compat_timeval __user *i) { long usec; if (get_user(o->tv_sec, &i->tv_sec) || get_user(usec, &i->tv_usec)) return -EFAULT; o->tv_nsec = usec * 1000; return 0; } static int compat_put_timeval_convert(struct compat_timeval __user *o, struct timeval *i) { return (put_user(i->tv_sec, &o->tv_sec) || put_user(i->tv_usec, &o->tv_usec)) ? 
-EFAULT : 0; } static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp) { memset(txc, 0, sizeof(struct timex)); if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) || __get_user(txc->modes, &utp->modes) || __get_user(txc->offset, &utp->offset) || __get_user(txc->freq, &utp->freq) || __get_user(txc->maxerror, &utp->maxerror) || __get_user(txc->esterror, &utp->esterror) || __get_user(txc->status, &utp->status) || __get_user(txc->constant, &utp->constant) || __get_user(txc->precision, &utp->precision) || __get_user(txc->tolerance, &utp->tolerance) || __get_user(txc->time.tv_sec, &utp->time.tv_sec) || __get_user(txc->time.tv_usec, &utp->time.tv_usec) || __get_user(txc->tick, &utp->tick) || __get_user(txc->ppsfreq, &utp->ppsfreq) || __get_user(txc->jitter, &utp->jitter) || __get_user(txc->shift, &utp->shift) || __get_user(txc->stabil, &utp->stabil) || __get_user(txc->jitcnt, &utp->jitcnt) || __get_user(txc->calcnt, &utp->calcnt) || __get_user(txc->errcnt, &utp->errcnt) || __get_user(txc->stbcnt, &utp->stbcnt)) return -EFAULT; return 0; } static int compat_put_timex(struct compat_timex __user *utp, struct timex *txc) { if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) || __put_user(txc->modes, &utp->modes) || __put_user(txc->offset, &utp->offset) || __put_user(txc->freq, &utp->freq) || __put_user(txc->maxerror, &utp->maxerror) || __put_user(txc->esterror, &utp->esterror) || __put_user(txc->status, &utp->status) || __put_user(txc->constant, &utp->constant) || __put_user(txc->precision, &utp->precision) || __put_user(txc->tolerance, &utp->tolerance) || __put_user(txc->time.tv_sec, &utp->time.tv_sec) || __put_user(txc->time.tv_usec, &utp->time.tv_usec) || __put_user(txc->tick, &utp->tick) || __put_user(txc->ppsfreq, &utp->ppsfreq) || __put_user(txc->jitter, &utp->jitter) || __put_user(txc->shift, &utp->shift) || __put_user(txc->stabil, &utp->stabil) || __put_user(txc->jitcnt, &utp->jitcnt) || __put_user(txc->calcnt, 
&utp->calcnt) || __put_user(txc->errcnt, &utp->errcnt) || __put_user(txc->stbcnt, &utp->stbcnt) || __put_user(txc->tai, &utp->tai)) return -EFAULT; return 0; } asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz) { if (tv) { struct timeval ktv; do_gettimeofday(&ktv); if (compat_put_timeval_convert(tv, &ktv)) return -EFAULT; } if (tz) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz) { struct timespec kts; struct timezone ktz; if (tv) { if (compat_get_timeval_convert(&kts, tv)) return -EFAULT; } if (tz) { if (copy_from_user(&ktz, tz, sizeof(ktz))) return -EFAULT; } return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); } int get_compat_timeval(struct timeval *tv, const struct compat_timeval __user *ctv) { return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) || __get_user(tv->tv_sec, &ctv->tv_sec) || __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; } EXPORT_SYMBOL_GPL(get_compat_timeval); int put_compat_timeval(const struct timeval *tv, struct compat_timeval __user *ctv) { return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) || __put_user(tv->tv_sec, &ctv->tv_sec) || __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; } EXPORT_SYMBOL_GPL(put_compat_timeval); int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts) { return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || __get_user(ts->tv_sec, &cts->tv_sec) || __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; } EXPORT_SYMBOL_GPL(get_compat_timespec); int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts) { return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) || __put_user(ts->tv_sec, &cts->tv_sec) || __put_user(ts->tv_nsec, &cts->tv_nsec)) ? 
-EFAULT : 0; } EXPORT_SYMBOL_GPL(put_compat_timespec); int compat_get_timeval(struct timeval *tv, const void __user *utv) { if (COMPAT_USE_64BIT_TIME) return copy_from_user(tv, utv, sizeof *tv) ? -EFAULT : 0; else return get_compat_timeval(tv, utv); } EXPORT_SYMBOL_GPL(compat_get_timeval); int compat_put_timeval(const struct timeval *tv, void __user *utv) { if (COMPAT_USE_64BIT_TIME) return copy_to_user(utv, tv, sizeof *tv) ? -EFAULT : 0; else return put_compat_timeval(tv, utv); } EXPORT_SYMBOL_GPL(compat_put_timeval); int compat_get_timespec(struct timespec *ts, const void __user *uts) { if (COMPAT_USE_64BIT_TIME) return copy_from_user(ts, uts, sizeof *ts) ? -EFAULT : 0; else return get_compat_timespec(ts, uts); } EXPORT_SYMBOL_GPL(compat_get_timespec); int compat_put_timespec(const struct timespec *ts, void __user *uts) { if (COMPAT_USE_64BIT_TIME) return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0; else return put_compat_timespec(ts, uts); } EXPORT_SYMBOL_GPL(compat_put_timespec); static long compat_nanosleep_restart(struct restart_block *restart) { struct compat_timespec __user *rmtp; struct timespec rmt; mm_segment_t oldfs; long ret; restart->nanosleep.rmtp = (struct timespec __user *) &rmt; oldfs = get_fs(); set_fs(KERNEL_DS); ret = hrtimer_nanosleep_restart(restart); set_fs(oldfs); if (ret) { rmtp = restart->nanosleep.compat_rmtp; if (rmtp && put_compat_timespec(&rmt, rmtp)) return -EFAULT; } return ret; } asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, struct compat_timespec __user *rmtp) { struct timespec tu, rmt; mm_segment_t oldfs; long ret; if (get_compat_timespec(&tu, rqtp)) return -EFAULT; if (!timespec_valid(&tu)) return -EINVAL; oldfs = get_fs(); set_fs(KERNEL_DS); ret = hrtimer_nanosleep(&tu, rmtp ? 
(struct timespec __user *)&rmt : NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); set_fs(oldfs); if (ret) { struct restart_block *restart = &current_thread_info()->restart_block; restart->fn = compat_nanosleep_restart; restart->nanosleep.compat_rmtp = rmtp; if (rmtp && put_compat_timespec(&rmt, rmtp)) return -EFAULT; } return ret; } static inline long get_compat_itimerval(struct itimerval *o, struct compat_itimerval __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) | __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) | __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) | __get_user(o->it_value.tv_usec, &i->it_value.tv_usec))); } static inline long put_compat_itimerval(struct compat_itimerval __user *o, struct itimerval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) | __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) | __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) | __put_user(i->it_value.tv_usec, &o->it_value.tv_usec))); } asmlinkage long compat_sys_getitimer(int which, struct compat_itimerval __user *it) { struct itimerval kit; int error; error = do_getitimer(which, &kit); if (!error && put_compat_itimerval(it, &kit)) error = -EFAULT; return error; } asmlinkage long compat_sys_setitimer(int which, struct compat_itimerval __user *in, struct compat_itimerval __user *out) { struct itimerval kin, kout; int error; if (in) { if (get_compat_itimerval(&kin, in)) return -EFAULT; } else memset(&kin, 0, sizeof(kin)); error = do_setitimer(which, &kin, out ? 
&kout : NULL); if (error || !out) return error; if (put_compat_itimerval(out, &kout)) return -EFAULT; return 0; } static compat_clock_t clock_t_to_compat_clock_t(clock_t x) { return compat_jiffies_to_clock_t(clock_t_to_jiffies(x)); } asmlinkage long compat_sys_times(struct compat_tms __user *tbuf) { if (tbuf) { struct tms tms; struct compat_tms tmp; do_sys_times(&tms); /* Convert our struct tms to the compat version. */ tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime); tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime); tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime); tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime); if (copy_to_user(tbuf, &tmp, sizeof(tmp))) return -EFAULT; } force_successful_syscall_return(); return compat_jiffies_to_clock_t(jiffies); } #ifdef __ARCH_WANT_SYS_SIGPENDING /* * Assumption: old_sigset_t and compat_old_sigset_t are both * types that can be passed to put_user()/get_user(). */ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set) { old_sigset_t s; long ret; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_sigpending((old_sigset_t __user *) &s); set_fs(old_fs); if (ret == 0) ret = put_user(s, set); return ret; } #endif #ifdef __ARCH_WANT_SYS_SIGPROCMASK /* * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the * blocked set of signals to the supplied signal set */ static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set) { memcpy(blocked->sig, &set, sizeof(set)); } asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset, compat_old_sigset_t __user *oset) { old_sigset_t old_set, new_set; sigset_t new_blocked; old_set = current->blocked.sig[0]; if (nset) { if (get_user(new_set, nset)) return -EFAULT; new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); new_blocked = current->blocked; switch (how) { case SIG_BLOCK: sigaddsetmask(&new_blocked, new_set); break; case SIG_UNBLOCK: sigdelsetmask(&new_blocked, new_set); break; 
case SIG_SETMASK: compat_sig_setmask(&new_blocked, new_set); break; default: return -EINVAL; } set_current_blocked(&new_blocked); } if (oset) { if (put_user(old_set, oset)) return -EFAULT; } return 0; } #endif asmlinkage long compat_sys_setrlimit(unsigned int resource, struct compat_rlimit __user *rlim) { struct rlimit r; if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) || __get_user(r.rlim_cur, &rlim->rlim_cur) || __get_user(r.rlim_max, &rlim->rlim_max)) return -EFAULT; if (r.rlim_cur == COMPAT_RLIM_INFINITY) r.rlim_cur = RLIM_INFINITY; if (r.rlim_max == COMPAT_RLIM_INFINITY) r.rlim_max = RLIM_INFINITY; return do_prlimit(current, resource, &r, NULL); } #ifdef COMPAT_RLIM_OLD_INFINITY asmlinkage long compat_sys_old_getrlimit(unsigned int resource, struct compat_rlimit __user *rlim) { struct rlimit r; int ret; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_old_getrlimit(resource, &r); set_fs(old_fs); if (!ret) { if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY) r.rlim_cur = COMPAT_RLIM_INFINITY; if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY) r.rlim_max = COMPAT_RLIM_INFINITY; if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) || __put_user(r.rlim_cur, &rlim->rlim_cur) || __put_user(r.rlim_max, &rlim->rlim_max)) return -EFAULT; } return ret; } #endif asmlinkage long compat_sys_getrlimit(unsigned int resource, struct compat_rlimit __user *rlim) { struct rlimit r; int ret; ret = do_prlimit(current, resource, NULL, &r); if (!ret) { if (r.rlim_cur > COMPAT_RLIM_INFINITY) r.rlim_cur = COMPAT_RLIM_INFINITY; if (r.rlim_max > COMPAT_RLIM_INFINITY) r.rlim_max = COMPAT_RLIM_INFINITY; if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) || __put_user(r.rlim_cur, &rlim->rlim_cur) || __put_user(r.rlim_max, &rlim->rlim_max)) return -EFAULT; } return ret; } int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru) { if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) || __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) || __put_user(r->ru_utime.tv_usec, 
&ru->ru_utime.tv_usec) || __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) || __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) || __put_user(r->ru_maxrss, &ru->ru_maxrss) || __put_user(r->ru_ixrss, &ru->ru_ixrss) || __put_user(r->ru_idrss, &ru->ru_idrss) || __put_user(r->ru_isrss, &ru->ru_isrss) || __put_user(r->ru_minflt, &ru->ru_minflt) || __put_user(r->ru_majflt, &ru->ru_majflt) || __put_user(r->ru_nswap, &ru->ru_nswap) || __put_user(r->ru_inblock, &ru->ru_inblock) || __put_user(r->ru_oublock, &ru->ru_oublock) || __put_user(r->ru_msgsnd, &ru->ru_msgsnd) || __put_user(r->ru_msgrcv, &ru->ru_msgrcv) || __put_user(r->ru_nsignals, &ru->ru_nsignals) || __put_user(r->ru_nvcsw, &ru->ru_nvcsw) || __put_user(r->ru_nivcsw, &ru->ru_nivcsw)) return -EFAULT; return 0; } asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru) { struct rusage r; int ret; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_getrusage(who, (struct rusage __user *) &r); set_fs(old_fs); if (ret) return ret; if (put_compat_rusage(&r, ru)) return -EFAULT; return 0; } asmlinkage long compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, struct compat_rusage __user *ru) { if (!ru) { return sys_wait4(pid, stat_addr, options, NULL); } else { struct rusage r; int ret; unsigned int status; mm_segment_t old_fs = get_fs(); set_fs (KERNEL_DS); ret = sys_wait4(pid, (stat_addr ? 
(unsigned int __user *) &status : NULL), options, (struct rusage __user *) &r); set_fs (old_fs); if (ret > 0) { if (put_compat_rusage(&r, ru)) return -EFAULT; if (stat_addr && put_user(status, stat_addr)) return -EFAULT; } return ret; } } asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, struct compat_siginfo __user *uinfo, int options, struct compat_rusage __user *uru) { siginfo_t info; struct rusage ru; long ret; mm_segment_t old_fs = get_fs(); memset(&info, 0, sizeof(info)); set_fs(KERNEL_DS); ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options, uru ? (struct rusage __user *)&ru : NULL); set_fs(old_fs); if ((ret < 0) || (info.si_signo == 0)) return ret; if (uru) { ret = put_compat_rusage(&ru, uru); if (ret) return ret; } BUG_ON(info.si_code & __SI_MASK); info.si_code |= __SI_CHLD; return copy_siginfo_to_user32(uinfo, &info); } static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr, unsigned len, struct cpumask *new_mask) { unsigned long *k; if (len < cpumask_size()) memset(new_mask, 0, cpumask_size()); else if (len > cpumask_size()) len = cpumask_size(); k = cpumask_bits(new_mask); return compat_get_bitmap(k, user_mask_ptr, len * 8); } asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr) { cpumask_var_t new_mask; int retval; if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) return -ENOMEM; retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask); if (retval) goto out; retval = sched_setaffinity(pid, new_mask); out: free_cpumask_var(new_mask); return retval; } asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr) { int ret; cpumask_var_t mask; if ((len * BITS_PER_BYTE) < nr_cpu_ids) return -EINVAL; if (len & (sizeof(compat_ulong_t)-1)) return -EINVAL; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; ret = sched_getaffinity(pid, mask); if (ret == 0) { size_t retlen = 
min_t(size_t, len, cpumask_size()); if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8)) ret = -EFAULT; else ret = retlen; } free_cpumask_var(mask); return ret; } int get_compat_itimerspec(struct itimerspec *dst, const struct compat_itimerspec __user *src) { if (get_compat_timespec(&dst->it_interval, &src->it_interval) || get_compat_timespec(&dst->it_value, &src->it_value)) return -EFAULT; return 0; } int put_compat_itimerspec(struct compat_itimerspec __user *dst, const struct itimerspec *src) { if (put_compat_timespec(&src->it_interval, &dst->it_interval) || put_compat_timespec(&src->it_value, &dst->it_value)) return -EFAULT; return 0; } long compat_sys_timer_create(clockid_t which_clock, struct compat_sigevent __user *timer_event_spec, timer_t __user *created_timer_id) { struct sigevent __user *event = NULL; if (timer_event_spec) { struct sigevent kevent; event = compat_alloc_user_space(sizeof(*event)); if (get_compat_sigevent(&kevent, timer_event_spec) || copy_to_user(event, &kevent, sizeof(*event))) return -EFAULT; } return sys_timer_create(which_clock, event, created_timer_id); } long compat_sys_timer_settime(timer_t timer_id, int flags, struct compat_itimerspec __user *new, struct compat_itimerspec __user *old) { long err; mm_segment_t oldfs; struct itimerspec newts, oldts; if (!new) return -EINVAL; if (get_compat_itimerspec(&newts, new)) return -EFAULT; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_timer_settime(timer_id, flags, (struct itimerspec __user *) &newts, (struct itimerspec __user *) &oldts); set_fs(oldfs); if (!err && old && put_compat_itimerspec(old, &oldts)) return -EFAULT; return err; } long compat_sys_timer_gettime(timer_t timer_id, struct compat_itimerspec __user *setting) { long err; mm_segment_t oldfs; struct itimerspec ts; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_timer_gettime(timer_id, (struct itimerspec __user *) &ts); set_fs(oldfs); if (!err && put_compat_itimerspec(setting, &ts)) return -EFAULT; return err; } 
long compat_sys_clock_settime(clockid_t which_clock, struct compat_timespec __user *tp) { long err; mm_segment_t oldfs; struct timespec ts; if (get_compat_timespec(&ts, tp)) return -EFAULT; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_clock_settime(which_clock, (struct timespec __user *) &ts); set_fs(oldfs); return err; } long compat_sys_clock_gettime(clockid_t which_clock, struct compat_timespec __user *tp) { long err; mm_segment_t oldfs; struct timespec ts; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_clock_gettime(which_clock, (struct timespec __user *) &ts); set_fs(oldfs); if (!err && put_compat_timespec(&ts, tp)) return -EFAULT; return err; } long compat_sys_clock_adjtime(clockid_t which_clock, struct compat_timex __user *utp) { struct timex txc; mm_segment_t oldfs; int err, ret; err = compat_get_timex(&txc, utp); if (err) return err; oldfs = get_fs(); set_fs(KERNEL_DS); ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc); set_fs(oldfs); err = compat_put_timex(utp, &txc); if (err) return err; return ret; } long compat_sys_clock_getres(clockid_t which_clock, struct compat_timespec __user *tp) { long err; mm_segment_t oldfs; struct timespec ts; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_clock_getres(which_clock, (struct timespec __user *) &ts); set_fs(oldfs); if (!err && tp && put_compat_timespec(&ts, tp)) return -EFAULT; return err; } static long compat_clock_nanosleep_restart(struct restart_block *restart) { long err; mm_segment_t oldfs; struct timespec tu; struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp; restart->nanosleep.rmtp = (struct timespec __user *) &tu; oldfs = get_fs(); set_fs(KERNEL_DS); err = clock_nanosleep_restart(restart); set_fs(oldfs); if ((err == -ERESTART_RESTARTBLOCK) && rmtp && put_compat_timespec(&tu, rmtp)) return -EFAULT; if (err == -ERESTART_RESTARTBLOCK) { restart->fn = compat_clock_nanosleep_restart; restart->nanosleep.compat_rmtp = rmtp; } return err; } long compat_sys_clock_nanosleep(clockid_t 
which_clock, int flags, struct compat_timespec __user *rqtp, struct compat_timespec __user *rmtp) { long err; mm_segment_t oldfs; struct timespec in, out; struct restart_block *restart; if (get_compat_timespec(&in, rqtp)) return -EFAULT; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_clock_nanosleep(which_clock, flags, (struct timespec __user *) &in, (struct timespec __user *) &out); set_fs(oldfs); if ((err == -ERESTART_RESTARTBLOCK) && rmtp && put_compat_timespec(&out, rmtp)) return -EFAULT; if (err == -ERESTART_RESTARTBLOCK) { restart = &current_thread_info()->restart_block; restart->fn = compat_clock_nanosleep_restart; restart->nanosleep.compat_rmtp = rmtp; } return err; } /* * We currently only need the following fields from the sigevent * structure: sigev_value, sigev_signo, sig_notify and (sometimes * sigev_notify_thread_id). The others are handled in user mode. * We also assume that copying sigev_value.sival_int is sufficient * to keep all the bits of sigev_value.sival_ptr intact. */ int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event) { memset(event, 0, sizeof(*event)); return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) || __get_user(event->sigev_value.sival_int, &u_event->sigev_value.sival_int) || __get_user(event->sigev_signo, &u_event->sigev_signo) || __get_user(event->sigev_notify, &u_event->sigev_notify) || __get_user(event->sigev_notify_thread_id, &u_event->sigev_notify_thread_id)) ? 
-EFAULT : 0; } long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, unsigned long bitmap_size) { int i, j; unsigned long m; compat_ulong_t um; unsigned long nr_compat_longs; /* align bitmap up to nearest compat_long_t boundary */ bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); if (!access_ok(VERIFY_READ, umask, bitmap_size / 8)) return -EFAULT; nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) { m = 0; for (j = 0; j < sizeof(m)/sizeof(um); j++) { /* * We dont want to read past the end of the userspace * bitmap. We must however ensure the end of the * kernel bitmap is zeroed. */ if (nr_compat_longs-- > 0) { if (__get_user(um, umask)) return -EFAULT; } else { um = 0; } umask++; m |= (long)um << (j * BITS_PER_COMPAT_LONG); } *mask++ = m; } return 0; } long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, unsigned long bitmap_size) { int i, j; unsigned long m; compat_ulong_t um; unsigned long nr_compat_longs; /* align bitmap up to nearest compat_long_t boundary */ bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8)) return -EFAULT; nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) { m = *mask++; for (j = 0; j < sizeof(m)/sizeof(um); j++) { um = m; /* * We dont want to write past the end of the userspace * bitmap. 
*/ if (nr_compat_longs-- > 0) { if (__put_user(um, umask)) return -EFAULT; } umask++; m >>= 4*sizeof(um); m >>= 4*sizeof(um); } } return 0; } void sigset_from_compat (sigset_t *set, compat_sigset_t *compat) { switch (_NSIG_WORDS) { case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 ); case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 ); case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 ); case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 ); } } EXPORT_SYMBOL_GPL(sigset_from_compat); asmlinkage long compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese, struct compat_siginfo __user *uinfo, struct compat_timespec __user *uts, compat_size_t sigsetsize) { compat_sigset_t s32; sigset_t s; struct timespec t; siginfo_t info; long ret; if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t))) return -EFAULT; sigset_from_compat(&s, &s32); if (uts) { if (get_compat_timespec(&t, uts)) return -EFAULT; } ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); if (ret > 0 && uinfo) { if (copy_siginfo_to_user32(uinfo, &info)) ret = -EFAULT; } return ret; } asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo) { siginfo_t info; if (copy_siginfo_from_user32(&info, uinfo)) return -EFAULT; return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); } #ifdef __ARCH_WANT_COMPAT_SYS_TIME /* compat_time_t is a 32 bit "long" and needs to get converted. 
*/ asmlinkage long compat_sys_time(compat_time_t __user * tloc) { compat_time_t i; struct timeval tv; do_gettimeofday(&tv); i = tv.tv_sec; if (tloc) { if (put_user(i,tloc)) return -EFAULT; } force_successful_syscall_return(); return i; } asmlinkage long compat_sys_stime(compat_time_t __user *tptr) { struct timespec tv; int err; if (get_user(tv.tv_sec, tptr)) return -EFAULT; tv.tv_nsec = 0; err = security_settime(&tv, NULL); if (err) return err; do_settimeofday(&tv); return 0; } #endif /* __ARCH_WANT_COMPAT_SYS_TIME */ #ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize) { sigset_t newset; compat_sigset_t newset32; /* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t))) return -EFAULT; sigset_from_compat(&newset, &newset32); sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); current->saved_sigmask = current->blocked; set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); set_restore_sigmask(); return -ERESTARTNOHAND; } #endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp) { struct timex txc; int err, ret; err = compat_get_timex(&txc, utp); if (err) return err; ret = do_adjtimex(&txc); err = compat_put_timex(utp, &txc); if (err) return err; return ret; } #ifdef CONFIG_NUMA asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages, compat_uptr_t __user *pages32, const int __user *nodes, int __user *status, int flags) { const void __user * __user *pages; int i; pages = compat_alloc_user_space(nr_pages * sizeof(void *)); for (i = 0; i < nr_pages; i++) { compat_uptr_t p; if (get_user(p, pages32 + i) || put_user(compat_ptr(p), pages + i)) return -EFAULT; } return sys_move_pages(pid, nr_pages, pages, nodes, status, flags); } asmlinkage long 
compat_sys_migrate_pages(compat_pid_t pid, compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, const compat_ulong_t __user *new_nodes) { unsigned long __user *old = NULL; unsigned long __user *new = NULL; nodemask_t tmp_mask; unsigned long nr_bits; unsigned long size; nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (old_nodes) { if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) return -EFAULT; old = compat_alloc_user_space(new_nodes ? size * 2 : size); if (new_nodes) new = old + size / sizeof(unsigned long); if (copy_to_user(old, nodes_addr(tmp_mask), size)) return -EFAULT; } if (new_nodes) { if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) return -EFAULT; if (new == NULL) new = compat_alloc_user_space(size); if (copy_to_user(new, nodes_addr(tmp_mask), size)) return -EFAULT; } return sys_migrate_pages(pid, nr_bits + 1, old, new); } #endif struct compat_sysinfo { s32 uptime; u32 loads[3]; u32 totalram; u32 freeram; u32 sharedram; u32 bufferram; u32 totalswap; u32 freeswap; u16 procs; u16 pad; u32 totalhigh; u32 freehigh; u32 mem_unit; char _f[20-2*sizeof(u32)-sizeof(int)]; }; asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info) { struct sysinfo s; do_sysinfo(&s); /* Check to see if any memory value is too large for 32-bit and scale * down if needed */ if ((s.totalram >> 32) || (s.totalswap >> 32)) { int bitcount = 0; while (s.mem_unit < PAGE_SIZE) { s.mem_unit <<= 1; bitcount++; } s.totalram >>= bitcount; s.freeram >>= bitcount; s.sharedram >>= bitcount; s.bufferram >>= bitcount; s.totalswap >>= bitcount; s.freeswap >>= bitcount; s.totalhigh >>= bitcount; s.freehigh >>= bitcount; } if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) || __put_user (s.uptime, &info->uptime) || __put_user (s.loads[0], &info->loads[0]) || __put_user (s.loads[1], &info->loads[1]) || __put_user (s.loads[2], &info->loads[2]) || __put_user (s.totalram, 
&info->totalram) || __put_user (s.freeram, &info->freeram) || __put_user (s.sharedram, &info->sharedram) || __put_user (s.bufferram, &info->bufferram) || __put_user (s.totalswap, &info->totalswap) || __put_user (s.freeswap, &info->freeswap) || __put_user (s.procs, &info->procs) || __put_user (s.totalhigh, &info->totalhigh) || __put_user (s.freehigh, &info->freehigh) || __put_user (s.mem_unit, &info->mem_unit)) return -EFAULT; return 0; } /* * Allocate user-space memory for the duration of a single system call, * in order to marshall parameters inside a compat thunk. */ void __user *compat_alloc_user_space(unsigned long len) { void __user *ptr; /* If len would occupy more than half of the entire compat space... */ if (unlikely(len > (((compat_uptr_t)~0) >> 1))) return NULL; ptr = arch_compat_alloc_user_space(len); if (unlikely(!access_ok(VERIFY_WRITE, ptr, len))) return NULL; return ptr; } EXPORT_SYMBOL_GPL(compat_alloc_user_space);
gpl-2.0
finnq/android_kernel_lge_g3
drivers/power/sbs-battery.c
4749
21347
/*
 * Gas Gauge driver for SBS Compliant Batteries
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/power_supply.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>

#include <linux/power/sbs-battery.h>

/* Indices into the sbs_data[] register-description table below. */
enum {
	REG_MANUFACTURER_DATA,
	REG_TEMPERATURE,
	REG_VOLTAGE,
	REG_CURRENT,
	REG_CAPACITY,
	REG_TIME_TO_EMPTY,
	REG_TIME_TO_FULL,
	REG_STATUS,
	REG_CYCLE_COUNT,
	REG_SERIAL_NUMBER,
	REG_REMAINING_CAPACITY,
	REG_REMAINING_CAPACITY_CHARGE,
	REG_FULL_CHARGE_CAPACITY,
	REG_FULL_CHARGE_CAPACITY_CHARGE,
	REG_DESIGN_CAPACITY,
	REG_DESIGN_CAPACITY_CHARGE,
	REG_DESIGN_VOLTAGE,
};

/* Battery Mode defines */
#define BATTERY_MODE_OFFSET		0x03
#define BATTERY_MODE_MASK		0x8000
/*
 * NOTE(review): BATTERY_MODE_AMPS/WATTS are 0 and 1, but the CAPACITY_MODE
 * bit in the BatteryMode register is 0x8000.  sbs_set_battery_mode() below
 * compares the masked register value (0 or 0x8000) against these enum
 * values, so the "already in requested mode" shortcut only ever matches
 * for AMPS mode — confirm against the SBS spec / later upstream versions
 * (which define BATTERY_MODE_WATTS as 0x8000).
 */
enum sbs_battery_mode {
	BATTERY_MODE_AMPS,
	BATTERY_MODE_WATTS
};

/* manufacturer access defines */
#define MANUFACTURER_ACCESS_STATUS	0x0006
#define MANUFACTURER_ACCESS_SLEEP	0x0011

/* battery status value bits */
#define BATTERY_DISCHARGING		0x40
#define BATTERY_FULL_CHARGED		0x20
#define BATTERY_FULL_DISCHARGED		0x10

/* Builds one entry of the sbs_data[] table below. */
#define SBS_DATA(_psp, _addr, _min_value, _max_value) { \
	.psp = _psp, \
	.addr = _addr, \
	.min_value = _min_value, \
	.max_value = _max_value, \
}

/*
 * Maps each power-supply property to its SBS register address and the
 * sanity range of raw values accepted from the gauge.
 */
static const struct chip_data {
	enum power_supply_property psp;
	u8 addr;
	int min_value;
	int max_value;
} sbs_data[] = {
	[REG_MANUFACTURER_DATA] =
		SBS_DATA(POWER_SUPPLY_PROP_PRESENT, 0x00, 0, 65535),
	[REG_TEMPERATURE] =
		SBS_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535),
	[REG_VOLTAGE] =
		SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000),
	[REG_CURRENT] =
		SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
	[REG_CAPACITY] =
		SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
	[REG_REMAINING_CAPACITY] =
		SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
	[REG_REMAINING_CAPACITY_CHARGE] =
		SBS_DATA(POWER_SUPPLY_PROP_CHARGE_NOW, 0x0F, 0, 65535),
	[REG_FULL_CHARGE_CAPACITY] =
		SBS_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
	[REG_FULL_CHARGE_CAPACITY_CHARGE] =
		SBS_DATA(POWER_SUPPLY_PROP_CHARGE_FULL, 0x10, 0, 65535),
	[REG_TIME_TO_EMPTY] =
		SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0, 65535),
	[REG_TIME_TO_FULL] =
		SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0, 65535),
	[REG_STATUS] =
		SBS_DATA(POWER_SUPPLY_PROP_STATUS, 0x16, 0, 65535),
	[REG_CYCLE_COUNT] =
		SBS_DATA(POWER_SUPPLY_PROP_CYCLE_COUNT, 0x17, 0, 65535),
	[REG_DESIGN_CAPACITY] =
		SBS_DATA(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 0x18, 0, 65535),
	[REG_DESIGN_CAPACITY_CHARGE] =
		SBS_DATA(POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 0x18, 0, 65535),
	[REG_DESIGN_VOLTAGE] =
		SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 0x19, 0, 65535),
	[REG_SERIAL_NUMBER] =
		SBS_DATA(POWER_SUPPLY_PROP_SERIAL_NUMBER, 0x1C, 0, 65535),
};

/* Properties exported through the power-supply class. */
static enum power_supply_property sbs_properties[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_HEALTH,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_CYCLE_COUNT,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_CAPACITY,
	POWER_SUPPLY_PROP_TEMP,
	POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
	POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
	POWER_SUPPLY_PROP_SERIAL_NUMBER,
	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
	POWER_SUPPLY_PROP_ENERGY_NOW,
	POWER_SUPPLY_PROP_ENERGY_FULL,
	POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
	POWER_SUPPLY_PROP_CHARGE_NOW,
	POWER_SUPPLY_PROP_CHARGE_FULL,
	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
};

/* Per-device driver state, stored as the i2c client's drvdata. */
struct sbs_info {
	struct i2c_client	*client;
	struct power_supply	power_supply;
	struct sbs_platform_data *pdata;
	bool			is_present;	/* battery detected last time we checked */
	bool			gpio_detect;	/* use GPIO (not i2c) for presence */
	bool			enable_detection; /* set once probe completed */
	int			irq;		/* detect-GPIO irq, 0 if unused */
	int			last_state;	/* last POWER_SUPPLY_STATUS_* seen */
	int			poll_time;	/* remaining status-poll iterations */
	struct delayed_work	work;		/* status polling work */
	int			ignore_changes;	/* swallow first external_power_changed */
};

/*
 * Read one 16-bit SBS register, retrying pdata->i2c_retry_count extra
 * times on i2c failure.  Returns the register value or a negative errno.
 *
 * NOTE(review): i2c_smbus_read_word_data() already returns the SMBus
 * little-endian word converted to host order, so the extra le16_to_cpu()
 * here looks like a double byte-swap on big-endian hosts — confirm
 * (later upstream versions dropped these conversions).
 */
static int sbs_read_word_data(struct i2c_client *client, u8 address)
{
	struct sbs_info *chip = i2c_get_clientdata(client);
	s32 ret = 0;
	int retries = 1;

	if (chip->pdata)
		retries = max(chip->pdata->i2c_retry_count + 1, 1);

	while (retries > 0) {
		ret = i2c_smbus_read_word_data(client, address);
		if (ret >= 0)
			break;
		retries--;
	}

	if (ret < 0) {
		dev_dbg(&client->dev,
			"%s: i2c read at address 0x%x failed\n",
			__func__, address);
		return ret;
	}

	return le16_to_cpu(ret);
}

/*
 * Write one 16-bit SBS register with the same retry policy as reads.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): cpu_to_le16() would be the semantically correct
 * conversion here; le16_to_cpu() happens to be the identical swap for
 * 16-bit values, but see the endianness note on sbs_read_word_data().
 */
static int sbs_write_word_data(struct i2c_client *client, u8 address,
	u16 value)
{
	struct sbs_info *chip = i2c_get_clientdata(client);
	s32 ret = 0;
	int retries = 1;

	if (chip->pdata)
		retries = max(chip->pdata->i2c_retry_count + 1, 1);

	while (retries > 0) {
		ret = i2c_smbus_write_word_data(client, address,
			le16_to_cpu(value));
		if (ret >= 0)
			break;
		retries--;
	}

	if (ret < 0) {
		dev_dbg(&client->dev,
			"%s: i2c write to address 0x%x failed\n",
			__func__, address);
		return ret;
	}

	return 0;
}

/*
 * Report PRESENT or HEALTH.  Presence is taken from the detect GPIO when
 * one is configured; otherwise (and for HEALTH) the gauge is probed via a
 * ManufacturerAccess status query and the 4-bit status nibble decoded.
 */
static int sbs_get_battery_presence_and_health(
	struct i2c_client *client, enum power_supply_property psp,
	union power_supply_propval *val)
{
	s32 ret;
	struct sbs_info *chip = i2c_get_clientdata(client);

	if (psp == POWER_SUPPLY_PROP_PRESENT &&
		chip->gpio_detect) {
		ret = gpio_get_value(chip->pdata->battery_detect);
		if (ret == chip->pdata->battery_detect_present)
			val->intval = 1;
		else
			val->intval = 0;
		chip->is_present = val->intval;
		return ret;
	}

	/* Write to ManufacturerAccess with
	 * ManufacturerAccess command and then
	 * read the status */
	ret = sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
					MANUFACTURER_ACCESS_STATUS);
	if (ret < 0) {
		if (psp == POWER_SUPPLY_PROP_PRESENT)
			val->intval = 0; /* battery removed */
		return ret;
	}

	ret = sbs_read_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr);
	if (ret < 0)
		return ret;

	if (ret < sbs_data[REG_MANUFACTURER_DATA].min_value ||
	    ret > sbs_data[REG_MANUFACTURER_DATA].max_value) {
		val->intval = 0;
		return 0;
	}

	/* Mask the upper nibble of 2nd byte and
	 * lower byte of response then
	 * shift the result by 8 to get status*/
	ret &= 0x0F00;
	ret >>= 8;
	if (psp == POWER_SUPPLY_PROP_PRESENT) {
		if (ret == 0x0F)
			/* battery removed */
			val->intval = 0;
		else
			val->intval = 1;
	} else if (psp == POWER_SUPPLY_PROP_HEALTH) {
		/* Status-nibble values per the gauge's ManufacturerAccess
		 * status encoding. */
		if (ret == 0x09)
			val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
		else if (ret == 0x0B)
			val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
		else if (ret == 0x0C)
			val->intval = POWER_SUPPLY_HEALTH_DEAD;
		else
			val->intval = POWER_SUPPLY_HEALTH_GOOD;
	}

	return 0;
}

/*
 * Generic register-backed property read: fetch the raw word, sign-extend
 * when the table allows negative values, range-check it, and for STATUS
 * additionally decode the charging/discharging bits and keep the
 * charger-poll state machine (last_state/poll_time) in sync.
 */
static int sbs_get_battery_property(struct i2c_client *client,
	int reg_offset, enum power_supply_property psp,
	union power_supply_propval *val)
{
	struct sbs_info *chip = i2c_get_clientdata(client);
	s32 ret;

	ret = sbs_read_word_data(client, sbs_data[reg_offset].addr);
	if (ret < 0)
		return ret;

	/* returned values are 16 bit */
	if (sbs_data[reg_offset].min_value < 0)
		ret = (s16)ret;

	if (ret >= sbs_data[reg_offset].min_value &&
	    ret <= sbs_data[reg_offset].max_value) {
		val->intval = ret;
		if (psp != POWER_SUPPLY_PROP_STATUS)
			return 0;

		if (ret & BATTERY_FULL_CHARGED)
			val->intval = POWER_SUPPLY_STATUS_FULL;
		else if (ret & BATTERY_FULL_DISCHARGED)
			val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
		else if (ret & BATTERY_DISCHARGING)
			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
		else
			val->intval = POWER_SUPPLY_STATUS_CHARGING;

		/* If we are polling for a status change, stop polling as
		 * soon as the state actually changed and notify userspace. */
		if (chip->poll_time == 0)
			chip->last_state = val->intval;
		else if (chip->last_state != val->intval) {
			cancel_delayed_work_sync(&chip->work);
			power_supply_changed(&chip->power_supply);
			chip->poll_time = 0;
		}
	} else {
		if (psp == POWER_SUPPLY_PROP_STATUS)
			val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
		else
			val->intval = 0;
	}

	return 0;
}

/*
 * Scale raw SBS register values into the units the power-supply class
 * expects (µV/µA/µWh/µAh, 0.1°C, seconds).
 */
static void  sbs_unit_adjustment(struct i2c_client *client,
	enum power_supply_property psp, union power_supply_propval *val)
{
#define BASE_UNIT_CONVERSION		1000
#define BATTERY_MODE_CAP_MULT_WATT	(10 * BASE_UNIT_CONVERSION)
#define TIME_UNIT_CONVERSION		60
#define TEMP_KELVIN_TO_CELSIUS		2731
	switch (psp) {
	case POWER_SUPPLY_PROP_ENERGY_NOW:
	case POWER_SUPPLY_PROP_ENERGY_FULL:
	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
		/* sbs provides energy in units of 10mWh.
		 * Convert to µWh
		 */
		val->intval *= BATTERY_MODE_CAP_MULT_WATT;
		break;

	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
	case POWER_SUPPLY_PROP_CURRENT_NOW:
	case POWER_SUPPLY_PROP_CHARGE_NOW:
	case POWER_SUPPLY_PROP_CHARGE_FULL:
	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
		val->intval *= BASE_UNIT_CONVERSION;
		break;

	case POWER_SUPPLY_PROP_TEMP:
		/* sbs provides battery temperature in 0.1K
		 * so convert it to 0.1°C
		 */
		val->intval -= TEMP_KELVIN_TO_CELSIUS;
		break;

	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
	case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
		/* sbs provides time to empty and time to full in minutes.
		 * Convert to seconds
		 */
		val->intval *= TIME_UNIT_CONVERSION;
		break;

	default:
		dev_dbg(&client->dev,
			"%s: no need for unit conversion %d\n", __func__, psp);
	}
}

/*
 * Switch the gauge's CAPACITY_MODE bit (amps vs watts reporting) and
 * return the previous mode so the caller can restore it afterwards.
 * Returns a negative errno on i2c failure.
 *
 * NOTE(review): see the comment at the enum definition — the early-out
 * comparison and the returned value are the masked register bit
 * (0/0x8000), not the 0/1 enum values; the restore path works by
 * accident of "!= BATTERY_MODE_AMPS" semantics.  Confirm.
 */
static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
	enum sbs_battery_mode mode)
{
	int ret, original_val;

	original_val = sbs_read_word_data(client, BATTERY_MODE_OFFSET);
	if (original_val < 0)
		return original_val;

	if ((original_val & BATTERY_MODE_MASK) == mode)
		return mode;

	if (mode == BATTERY_MODE_AMPS)
		ret = original_val & ~BATTERY_MODE_MASK;
	else
		ret = original_val | BATTERY_MODE_MASK;

	ret = sbs_write_word_data(client, BATTERY_MODE_OFFSET, ret);
	if (ret < 0)
		return ret;

	return original_val & BATTERY_MODE_MASK;
}

/*
 * Read a capacity-class register.  The gauge reports either charge (mAh)
 * or energy (mWh) depending on CAPACITY_MODE, so temporarily switch to
 * the mode matching the requested property and restore it afterwards.
 */
static int sbs_get_battery_capacity(struct i2c_client *client,
	int reg_offset, enum power_supply_property psp,
	union power_supply_propval *val)
{
	s32 ret;
	enum sbs_battery_mode mode = BATTERY_MODE_WATTS;

	if (power_supply_is_amp_property(psp))
		mode = BATTERY_MODE_AMPS;

	mode = sbs_set_battery_mode(client, mode);
	if (mode < 0)
		return mode;

	ret = sbs_read_word_data(client, sbs_data[reg_offset].addr);
	if (ret < 0)
		return ret;

	if (psp == POWER_SUPPLY_PROP_CAPACITY) {
		/* sbs spec says that this can be >100 %
		 * even if max value is 100 %
		 */
		val->intval = min(ret, 100);
	} else
		val->intval = ret;

	ret = sbs_set_battery_mode(client, mode);
	if (ret < 0)
		return ret;

	return 0;
}

/* Static buffer for the formatted serial number ("%04x" + NUL). */
static char sbs_serial[5];
/* Format the 16-bit SerialNumber register into sbs_serial. */
static int sbs_get_battery_serial_number(struct i2c_client *client,
	union power_supply_propval *val)
{
	int ret;

	ret = sbs_read_word_data(client, sbs_data[REG_SERIAL_NUMBER].addr);
	if (ret < 0)
		return ret;

	ret = sprintf(sbs_serial, "%04x", ret);
	val->strval = sbs_serial;

	return 0;
}

/* Find the sbs_data[] index describing @psp, or -EINVAL. */
static int sbs_get_property_index(struct i2c_client *client,
	enum power_supply_property psp)
{
	int count;
	for (count = 0; count < ARRAY_SIZE(sbs_data); count++)
		if (psp == sbs_data[count].psp)
			return count;

	dev_warn(&client->dev,
		"%s: Invalid Property - %d\n", __func__, psp);

	return -EINVAL;
}

/*
 * power-supply class get_property callback: dispatch to the helpers
 * above, track presence changes when no detect GPIO exists, apply unit
 * conversion on success, and map i2c failures of an absent battery to
 * -ENODATA.
 */
static int sbs_get_property(struct power_supply *psy,
	enum power_supply_property psp,
	union power_supply_propval *val)
{
	int ret = 0;
	struct sbs_info *chip = container_of(psy,
				struct sbs_info, power_supply);
	struct i2c_client *client = chip->client;

	switch (psp) {
	case POWER_SUPPLY_PROP_PRESENT:
	case POWER_SUPPLY_PROP_HEALTH:
		ret = sbs_get_battery_presence_and_health(client, psp, val);
		if (psp == POWER_SUPPLY_PROP_PRESENT)
			return 0;
		break;

	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
		break;

	case POWER_SUPPLY_PROP_ENERGY_NOW:
	case POWER_SUPPLY_PROP_ENERGY_FULL:
	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
	case POWER_SUPPLY_PROP_CHARGE_NOW:
	case POWER_SUPPLY_PROP_CHARGE_FULL:
	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
	case POWER_SUPPLY_PROP_CAPACITY:
		ret = sbs_get_property_index(client, psp);
		if (ret < 0)
			break;

		ret = sbs_get_battery_capacity(client, ret, psp, val);
		break;

	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
		ret = sbs_get_battery_serial_number(client, val);
		break;

	case POWER_SUPPLY_PROP_STATUS:
	case POWER_SUPPLY_PROP_CYCLE_COUNT:
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
	case POWER_SUPPLY_PROP_CURRENT_NOW:
	case POWER_SUPPLY_PROP_TEMP:
	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
	case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
		ret = sbs_get_property_index(client, psp);
		if (ret < 0)
			break;

		ret = sbs_get_battery_property(client, ret, psp, val);
		break;

	default:
		dev_err(&client->dev,
			"%s: INVALID property\n", __func__);
		return -EINVAL;
	}

	if (!chip->enable_detection)
		goto done;

	/* Without a detect GPIO, infer presence from i2c success. */
	if (!chip->gpio_detect &&
		chip->is_present != (ret >= 0)) {
		chip->is_present = (ret >= 0);
		power_supply_changed(&chip->power_supply);
	}

done:
	if (!ret) {
		/* Convert units to match requirements for power supply class */
		sbs_unit_adjustment(client, psp, val);
	}

	dev_dbg(&client->dev,
		"%s: property = %d, value = %x\n", __func__, psp, val->intval);

	if (ret && chip->is_present)
		return ret;

	/* battery not present, so return NODATA for properties */
	if (ret)
		return -ENODATA;

	return 0;
}

/* Detect-GPIO interrupt: just notify the power-supply core. */
static irqreturn_t sbs_irq(int irq, void *devid)
{
	struct power_supply *battery = devid;

	power_supply_changed(battery);

	return IRQ_HANDLED;
}

/*
 * Charger plug/unplug notification from the power-supply core: start
 * polling STATUS until it reflects the new charger state (chargers can
 * take a while to update the gauge).
 *
 * NOTE(review): chip->pdata is dereferenced unconditionally here while
 * the rest of the driver guards it with "if (chip->pdata)" — confirm a
 * pdata-less configuration cannot reach this callback.
 */
static void sbs_external_power_changed(struct power_supply *psy)
{
	struct sbs_info *chip;

	chip = container_of(psy, struct sbs_info, power_supply);

	if (chip->ignore_changes > 0) {
		chip->ignore_changes--;
		return;
	}

	/* cancel outstanding work */
	cancel_delayed_work_sync(&chip->work);

	schedule_delayed_work(&chip->work, HZ);
	chip->poll_time = chip->pdata->poll_retry_count;
}

/*
 * Delayed-work body for the status-change poll started above: re-read
 * STATUS once a second until it changes, the retry budget runs out, or a
 * read fails.
 */
static void sbs_delayed_work(struct work_struct *work)
{
	struct sbs_info *chip;
	s32 ret;

	chip = container_of(work, struct sbs_info, work.work);

	ret = sbs_read_word_data(chip->client, sbs_data[REG_STATUS].addr);
	/* if the read failed, give up on this work */
	if (ret < 0) {
		chip->poll_time = 0;
		return;
	}

	if (ret & BATTERY_FULL_CHARGED)
		ret = POWER_SUPPLY_STATUS_FULL;
	else if (ret & BATTERY_FULL_DISCHARGED)
		ret = POWER_SUPPLY_STATUS_NOT_CHARGING;
	else if (ret & BATTERY_DISCHARGING)
		ret = POWER_SUPPLY_STATUS_DISCHARGING;
	else
		ret = POWER_SUPPLY_STATUS_CHARGING;

	if (chip->last_state != ret) {
		chip->poll_time = 0;
		power_supply_changed(&chip->power_supply);
		return;
	}
	if (chip->poll_time > 0) {
		schedule_delayed_work(&chip->work, HZ);
		chip->poll_time--;
		return;
	}
}

#if defined(CONFIG_OF)

#include <linux/of_device.h>
#include <linux/of_gpio.h>

static const struct of_device_id sbs_dt_ids[] = {
	{ .compatible = "sbs,sbs-battery" },
	{ .compatible = "ti,bq20z75" },
	{ }
};
MODULE_DEVICE_TABLE(of, sbs_dt_ids);

/*
 * Build platform data from device-tree properties.  Board-file platform
 * data, when present, takes precedence; returns NULL when there is no
 * DT node, and may return NULL when allocation fails or no relevant
 * property is set.
 */
static struct sbs_platform_data *sbs_of_populate_pdata(
		struct i2c_client *client)
{
	struct device_node *of_node = client->dev.of_node;
	struct sbs_platform_data *pdata = client->dev.platform_data;
	enum of_gpio_flags gpio_flags;
	int rc;
	u32 prop;

	/* verify this driver matches this device */
	if (!of_node)
		return NULL;

	/* if platform data is set, honor it */
	if (pdata)
		return pdata;

	/* first make sure at least one property is set, otherwise
	 * it won't change behavior from running without pdata.
	 */
	if (!of_get_property(of_node, "sbs,i2c-retry-count", NULL) &&
		!of_get_property(of_node, "sbs,poll-retry-count", NULL) &&
		!of_get_property(of_node, "sbs,battery-detect-gpios", NULL))
		goto of_out;

	pdata = devm_kzalloc(&client->dev, sizeof(struct sbs_platform_data),
				GFP_KERNEL);
	if (!pdata)
		goto of_out;

	rc = of_property_read_u32(of_node, "sbs,i2c-retry-count", &prop);
	if (!rc)
		pdata->i2c_retry_count = prop;

	rc = of_property_read_u32(of_node, "sbs,poll-retry-count", &prop);
	if (!rc)
		pdata->poll_retry_count = prop;

	if (!of_get_property(of_node, "sbs,battery-detect-gpios", NULL)) {
		pdata->battery_detect = -1;
		goto of_out;
	}

	pdata->battery_detect = of_get_named_gpio_flags(of_node,
			"sbs,battery-detect-gpios", 0, &gpio_flags);

	if (gpio_flags & OF_GPIO_ACTIVE_LOW)
		pdata->battery_detect_present = 0;
	else
		pdata->battery_detect_present = 1;

of_out:
	return pdata;
}
#else
#define sbs_dt_ids NULL
static struct sbs_platform_data *sbs_of_populate_pdata(
	struct i2c_client *client)
{
	return client->dev.platform_data;
}
#endif

/*
 * Probe: allocate state, resolve platform data (board file or DT),
 * optionally wire up the battery-detect GPIO + irq, and register with
 * the power-supply class.  GPIO setup failures are non-fatal — the
 * driver falls back to i2c-based presence detection.
 */
static int __devinit sbs_probe(struct i2c_client *client,
	const struct i2c_device_id *id)
{
	struct sbs_info *chip;
	struct sbs_platform_data *pdata = client->dev.platform_data;
	int rc;
	int irq;
	char *name;

	name = kasprintf(GFP_KERNEL, "sbs-%s", dev_name(&client->dev));
	if (!name) {
		dev_err(&client->dev, "Failed to allocate device name\n");
		return -ENOMEM;
	}

	chip = kzalloc(sizeof(struct sbs_info), GFP_KERNEL);
	if (!chip) {
		rc = -ENOMEM;
		goto exit_free_name;
	}

	chip->client = client;
	chip->enable_detection = false;
	chip->gpio_detect = false;
	chip->power_supply.name = name;
	chip->power_supply.type = POWER_SUPPLY_TYPE_BATTERY;
	chip->power_supply.properties = sbs_properties;
	chip->power_supply.num_properties = ARRAY_SIZE(sbs_properties);
	chip->power_supply.get_property = sbs_get_property;
	/* ignore first notification of external change, it is generated
	 * from the power_supply_register call back
	 */
	chip->ignore_changes = 1;
	chip->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
	chip->power_supply.external_power_changed = sbs_external_power_changed;

	/* May replace the board-file pdata read above with DT-derived data. */
	pdata = sbs_of_populate_pdata(client);

	if (pdata) {
		chip->gpio_detect = gpio_is_valid(pdata->battery_detect);
		chip->pdata = pdata;
	}

	i2c_set_clientdata(client, chip);

	if (!chip->gpio_detect)
		goto skip_gpio;

	rc = gpio_request(pdata->battery_detect, dev_name(&client->dev));
	if (rc) {
		dev_warn(&client->dev, "Failed to request gpio: %d\n", rc);
		chip->gpio_detect = false;
		goto skip_gpio;
	}

	rc = gpio_direction_input(pdata->battery_detect);
	if (rc) {
		dev_warn(&client->dev, "Failed to get gpio as input: %d\n", rc);
		gpio_free(pdata->battery_detect);
		chip->gpio_detect = false;
		goto skip_gpio;
	}

	irq = gpio_to_irq(pdata->battery_detect);
	if (irq <= 0) {
		dev_warn(&client->dev, "Failed to get gpio as irq: %d\n", irq);
		gpio_free(pdata->battery_detect);
		chip->gpio_detect = false;
		goto skip_gpio;
	}

	rc = request_irq(irq, sbs_irq,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
		dev_name(&client->dev), &chip->power_supply);
	if (rc) {
		dev_warn(&client->dev, "Failed to request irq: %d\n", rc);
		gpio_free(pdata->battery_detect);
		chip->gpio_detect = false;
		goto skip_gpio;
	}

	chip->irq = irq;

skip_gpio:

	rc = power_supply_register(&client->dev, &chip->power_supply);
	if (rc) {
		dev_err(&client->dev,
			"%s: Failed to register power supply\n", __func__);
		goto exit_psupply;
	}

	dev_info(&client->dev,
		"%s: battery gas gauge device registered\n", client->name);

	INIT_DELAYED_WORK(&chip->work, sbs_delayed_work);

	chip->enable_detection = true;

	return 0;

exit_psupply:
	if (chip->irq)
		free_irq(chip->irq, &chip->power_supply);
	if (chip->gpio_detect)
		gpio_free(pdata->battery_detect);

	kfree(chip);

exit_free_name:
	kfree(name);

	return rc;
}

/* Tear down everything set up in probe, in reverse order. */
static int __devexit sbs_remove(struct i2c_client *client)
{
	struct sbs_info *chip = i2c_get_clientdata(client);

	if (chip->irq)
		free_irq(chip->irq, &chip->power_supply);
	if (chip->gpio_detect)
		gpio_free(chip->pdata->battery_detect);

	power_supply_unregister(&chip->power_supply);

	cancel_delayed_work_sync(&chip->work);

	kfree(chip->power_supply.name);
	kfree(chip);
	chip = NULL;

	return 0;
}

#if defined CONFIG_PM
/*
 * Suspend: stop any pending status poll and ask the gauge to enter its
 * low-power sleep state.  A write failure is only reported when a
 * battery is actually present.
 */
static int sbs_suspend(struct i2c_client *client,
	pm_message_t state)
{
	struct sbs_info *chip = i2c_get_clientdata(client);
	s32 ret;

	if (chip->poll_time > 0)
		cancel_delayed_work_sync(&chip->work);

	/* write to manufacturer access with sleep command */
	ret = sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
		MANUFACTURER_ACCESS_SLEEP);
	if (chip->is_present && ret < 0)
		return ret;

	return 0;
}
#else
#define sbs_suspend		NULL
#endif
/* any smbus transaction will wake up sbs */
#define sbs_resume		NULL

static const struct i2c_device_id sbs_id[] = {
	{ "bq20z75", 0 },
	{ "sbs-battery", 1 },
	{}
};
MODULE_DEVICE_TABLE(i2c, sbs_id);

static struct i2c_driver sbs_battery_driver = {
	.probe		= sbs_probe,
	.remove		= __devexit_p(sbs_remove),
	.suspend	= sbs_suspend,
	.resume		= sbs_resume,
	.id_table	= sbs_id,
	.driver = {
		.name	= "sbs-battery",
		.of_match_table = sbs_dt_ids,
	},
};
module_i2c_driver(sbs_battery_driver);

MODULE_DESCRIPTION("SBS battery monitor driver");
MODULE_LICENSE("GPL");
gpl-2.0
dsb9938/DNA_JB_KERNEL
arch/arm/mach-ixp4xx/nas100d-setup.c
5005
8137
/*
 * arch/arm/mach-ixp4xx/nas100d-setup.c
 *
 * NAS 100d board-setup
 *
 * Copyright (C) 2008 Rod Whitby <rod@whitby.id.au>
 *
 * based on ixdp425-setup.c:
 *      Copyright (C) 2003-2004 MontaVista Software, Inc.
 * based on nas100d-power.c:
 *	Copyright (C) 2005 Tower Technologies
 * based on nas100d-io.c
 *	Copyright (C) 2004 Karen Spearel
 *
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 * Author: Rod Whitby <rod@whitby.id.au>
 * Maintainers: http://www.nslu2-linux.org/
 *
 */

#include <linux/gpio.h>
#include <linux/if_ether.h>
#include <linux/irq.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/leds.h>
#include <linux/reboot.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/io.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>

/* GPIO lines used for the bit-banged i2c bus. */
#define NAS100D_SDA_PIN		5
#define NAS100D_SCL_PIN		6

/* Buttons */
#define NAS100D_PB_GPIO         14   /* power button */
#define NAS100D_RB_GPIO         4    /* reset button */

/* Power control */
#define NAS100D_PO_GPIO         12   /* power off */

/* LEDs */
#define NAS100D_LED_WLAN_GPIO	0
#define NAS100D_LED_DISK_GPIO	3
#define NAS100D_LED_PWR_GPIO	15

/* On-board CFI flash, mapped on expansion bus CS0 (range set in init). */
static struct flash_platform_data nas100d_flash_data = {
	.map_name		= "cfi_probe",
	.width			= 2,
};

static struct resource nas100d_flash_resource = {
	.flags			= IORESOURCE_MEM,
};

static struct platform_device nas100d_flash = {
	.name			= "IXP4XX-Flash",
	.id			= 0,
	.dev.platform_data	= &nas100d_flash_data,
	.num_resources		= 1,
	.resource		= &nas100d_flash_resource,
};

/* PCF8563 RTC on the bit-banged i2c bus. */
static struct i2c_board_info __initdata nas100d_i2c_board_info [] = {
	{
		I2C_BOARD_INFO("pcf8563", 0x51),
	},
};

static struct gpio_led nas100d_led_pins[] = {
	{
		.name		= "nas100d:green:wlan",
		.gpio		= NAS100D_LED_WLAN_GPIO,
		.active_low	= true,
	},
	{
		.name		= "nas100d:blue:power",  /* (off=flashing) */
		.gpio		= NAS100D_LED_PWR_GPIO,
		.active_low	= true,
	},
	{
		.name		= "nas100d:yellow:disk",
		.gpio		= NAS100D_LED_DISK_GPIO,
		.active_low	= true,
	},
};

static struct gpio_led_platform_data nas100d_led_data = {
	.num_leds		= ARRAY_SIZE(nas100d_led_pins),
	.leds			= nas100d_led_pins,
};

static struct platform_device nas100d_leds = {
	.name			= "leds-gpio",
	.id			= -1,
	.dev.platform_data	= &nas100d_led_data,
};

static struct i2c_gpio_platform_data nas100d_i2c_gpio_data = {
	.sda_pin		= NAS100D_SDA_PIN,
	.scl_pin		= NAS100D_SCL_PIN,
};

static struct platform_device nas100d_i2c_gpio = {
	.name			= "i2c-gpio",
	.id			= 0,
	.dev	 = {
		.platform_data	= &nas100d_i2c_gpio_data,
	},
};

static struct resource nas100d_uart_resources[] = {
	{
		.start		= IXP4XX_UART1_BASE_PHYS,
		.end		= IXP4XX_UART1_BASE_PHYS + 0x0fff,
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= IXP4XX_UART2_BASE_PHYS,
		.end		= IXP4XX_UART2_BASE_PHYS + 0x0fff,
		.flags		= IORESOURCE_MEM,
	}
};

static struct plat_serial8250_port nas100d_uart_data[] = {
	{
		.mapbase	= IXP4XX_UART1_BASE_PHYS,
		.membase	= (char *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET,
		.irq		= IRQ_IXP4XX_UART1,
		.flags		= UPF_BOOT_AUTOCONF,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
		.uartclk	= IXP4XX_UART_XTAL,
	},
	{
		.mapbase	= IXP4XX_UART2_BASE_PHYS,
		.membase	= (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
		.irq		= IRQ_IXP4XX_UART2,
		.flags		= UPF_BOOT_AUTOCONF,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
		.uartclk	= IXP4XX_UART_XTAL,
	},
	{ }
};

static struct platform_device nas100d_uart = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev.platform_data	= nas100d_uart_data,
	.num_resources		= 2,
	.resource		= nas100d_uart_resources,
};

/* Built-in 10/100 Ethernet MAC interfaces */
static struct eth_plat_info nas100d_plat_eth[] = {
	{
		.phy		= 0,
		.rxq		= 3,
		.txreadyq	= 20,
	}
};

static struct platform_device nas100d_eth[] = {
	{
		.name			= "ixp4xx_eth",
		.id			= IXP4XX_ETH_NPEB,
		.dev.platform_data	= nas100d_plat_eth,
	}
};

/* Devices registered in one batch; the UART is registered separately
 * first (see comment in nas100d_init). */
static struct platform_device *nas100d_devices[] __initdata = {
	&nas100d_i2c_gpio,
	&nas100d_flash,
	&nas100d_leds,
	&nas100d_eth[0],
};

/* pm_power_off hook: drive the power-control GPIO to cut the supply. */
static void nas100d_power_off(void)
{
	/* This causes the box to drop the power and go dead. */

	/* enable the pwr cntl gpio */
	gpio_line_config(NAS100D_PO_GPIO, IXP4XX_GPIO_OUT);

	/* do the deed */
	gpio_line_set(NAS100D_PO_GPIO, IXP4XX_GPIO_HIGH);
}

/* This is used to make sure the power-button pusher is serious.  The button
 * must be held until the value of this counter reaches zero.
 */
static int power_button_countdown;

/* Must hold the button down for at least this many counts to be processed */
#define PBUTTON_HOLDDOWN_COUNT 4 /* 2 secs */

static void nas100d_power_handler(unsigned long data);
static DEFINE_TIMER(nas100d_power_timer, nas100d_power_handler, 0, 0);

/*
 * Timer callback polling the power button every 500 ms (the button's
 * GPIO cannot generate interrupts on this board — see nas100d_init).
 * Shutdown is triggered on *release* after a long-enough hold, so
 * auto-power-on modifications keep working.
 */
static void nas100d_power_handler(unsigned long data)
{
	/* This routine is called twice per second to check the
	 * state of the power button.
	 */

	if (gpio_get_value(NAS100D_PB_GPIO)) {

		/* IO Pin is 1 (button pushed) */
		if (power_button_countdown > 0)
			power_button_countdown--;

	} else {

		/* Done on button release, to allow for auto-power-on mods. */
		if (power_button_countdown == 0) {
			/* Signal init to do the ctrlaltdel action,
			 * this will bypass init if it hasn't started
			 * and do a kernel_restart.
			 */
			ctrl_alt_del();

			/* Change the state of the power LED to "blink" */
			gpio_line_set(NAS100D_LED_PWR_GPIO, IXP4XX_GPIO_LOW);
		} else {
			power_button_countdown = PBUTTON_HOLDDOWN_COUNT;
		}
	}

	mod_timer(&nas100d_power_timer, jiffies + msecs_to_jiffies(500));
}

/* Reset-button irq: immediate hard power-off (paper-clip reset). */
static irqreturn_t nas100d_reset_handler(int irq, void *dev_id)
{
	/* This is the paper-clip reset, it shuts the machine down directly. */
	machine_power_off();

	return IRQ_HANDLED;
}

/*
 * Board init: set up the flash window, register devices, hook power-off,
 * wire the reset-button irq, start the power-button poll timer, and read
 * the Ethernet MAC address out of flash.
 */
static void __init nas100d_init(void)
{
	uint8_t __iomem *f;
	int i;

	ixp4xx_sys_init();

	/* gpio 14 and 15 are _not_ clocks */
	*IXP4XX_GPIO_GPCLKR = 0;

	nas100d_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
	nas100d_flash_resource.end =
		IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;

	i2c_register_board_info(0, nas100d_i2c_board_info,
				ARRAY_SIZE(nas100d_i2c_board_info));

	/*
	 * This is only useful on a modified machine, but it is valuable
	 * to have it first in order to see debug messages, and so that
	 * it does *not* get removed if platform_add_devices fails!
	 */
	(void)platform_device_register(&nas100d_uart);

	platform_add_devices(nas100d_devices, ARRAY_SIZE(nas100d_devices));

	pm_power_off = nas100d_power_off;

	if (request_irq(gpio_to_irq(NAS100D_RB_GPIO), &nas100d_reset_handler,
		IRQF_DISABLED | IRQF_TRIGGER_LOW,
		"NAS100D reset button", NULL) < 0) {

		printk(KERN_DEBUG "Reset Button IRQ %d not available\n",
			gpio_to_irq(NAS100D_RB_GPIO));
	}

	/* The power button on the Iomega NAS100d is on GPIO 14, but
	 * it cannot handle interrupts on that GPIO line.  So we'll
	 * have to poll it with a kernel timer.
	 */

	/* Make sure that the power button GPIO is set up as an input */
	gpio_line_config(NAS100D_PB_GPIO, IXP4XX_GPIO_IN);

	/* Set the initial value for the power button IRQ handler */
	power_button_countdown = PBUTTON_HOLDDOWN_COUNT;

	mod_timer(&nas100d_power_timer, jiffies + msecs_to_jiffies(500));

	/*
	 * Map in a portion of the flash and read the MAC address.
	 * Since it is stored in BE in the flash itself, we need to
	 * byteswap it if we're in LE mode.
	 */
	f = ioremap(IXP4XX_EXP_BUS_BASE(0), 0x1000000);
	if (f) {
		for (i = 0; i < 6; i++)
#ifdef __ARMEB__
			nas100d_plat_eth[0].hwaddr[i] = readb(f + 0xFC0FD8 + i);
#else
			nas100d_plat_eth[0].hwaddr[i] =
				readb(f + 0xFC0FD8 + (i^3));
#endif
		iounmap(f);
	}
	printk(KERN_INFO "NAS100D: Using MAC address %pM for port 0\n",
	       nas100d_plat_eth[0].hwaddr);

}

MACHINE_START(NAS100D, "Iomega NAS 100d")
	/* Maintainer: www.nslu2-linux.org */
	.atag_offset	= 0x100,
	.map_io		= ixp4xx_map_io,
	.init_early	= ixp4xx_init_early,
	.init_irq	= ixp4xx_init_irq,
	.timer		= &ixp4xx_timer,
	.init_machine	= nas100d_init,
#if defined(CONFIG_PCI)
	.dma_zone_size	= SZ_64M,
#endif
	.restart	= ixp4xx_restart,
MACHINE_END
gpl-2.0
flar2/jewel-ElementalX
drivers/input/misc/cma3000_d0x.c
5005
9517
/*
 * VTI CMA3000_D0x Accelerometer driver
 *
 * Copyright (C) 2010 Texas Instruments
 * Author: Hemanth V <hemanthv@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/cma3000.h>
#include <linux/module.h>

#include "cma3000_d0x.h"

/* CMA3000 register map. */
#define CMA3000_WHOAMI      0x00
#define CMA3000_REVID       0x01
#define CMA3000_CTRL        0x02
#define CMA3000_STATUS      0x03
#define CMA3000_RSTR        0x04
#define CMA3000_INTSTATUS   0x05
#define CMA3000_DOUTX       0x06
#define CMA3000_DOUTY       0x07
#define CMA3000_DOUTZ       0x08
#define CMA3000_MDTHR       0x09
#define CMA3000_MDFFTMR     0x0A
#define CMA3000_FFTHR       0x0B

/* CTRL register fields. */
#define CMA3000_RANGE2G    (1 << 7)
#define CMA3000_RANGE8G    (0 << 7)
#define CMA3000_BUSI2C     (0 << 4)
#define CMA3000_MODEMASK   (7 << 1)
#define CMA3000_GRANGEMASK (1 << 7)

#define CMA3000_STATUS_PERR    1
#define CMA3000_INTSTATUS_FFDET (1 << 2)

/* Settling time delay in ms */
#define CMA3000_SETDELAY    30

/* Delay for clearing interrupt in us */
#define CMA3000_INTDELAY    44

/*
 * Bit weights in mg for bit 0, other bits need
 * multipy factor 2^n. Eight bit is the sign bit.
 */
#define BIT_TO_2G  18
#define BIT_TO_8G  71

/* Per-device state shared between the bus glue and this core. */
struct cma3000_accl_data {
	const struct cma3000_bus_ops *bus_ops;	/* i2c/spi register accessors */
	const struct cma3000_platform_data *pdata;

	struct device *dev;
	struct input_dev *input_dev;

	int bit_to_mg;		/* current LSB weight, from mode_to_mg[][] */
	int irq;

	int g_range;		/* CMARANGE_2G or CMARANGE_8G */
	u8 mode;

	struct mutex mutex;	/* guards opened/suspended vs power on/off */
	bool opened;
	bool suspended;
};

/*
 * Register access goes through the bus glue (i2c or spi); msg is a short
 * label used by the glue for error reporting.
 * NOTE(review): CMA3000_READ does not parenthesize its "data" argument
 * the way CMA3000_SET does — harmless for current callers, but worth
 * aligning.
 */
#define CMA3000_READ(data, reg, msg) (data->bus_ops->read(data->dev, reg, msg))
#define CMA3000_SET(data, reg, val, msg) \
	((data)->bus_ops->write(data->dev, reg, val, msg))

/*
 * Conversion for each of the eight modes to g, depending
 * on G range i.e 2G or 8G. Some modes always operate in
 * 8G.
 */
static int mode_to_mg[8][2] = {
	{ 0, 0 },
	{ BIT_TO_8G, BIT_TO_2G },
	{ BIT_TO_8G, BIT_TO_2G },
	{ BIT_TO_8G, BIT_TO_8G },
	{ BIT_TO_8G, BIT_TO_8G },
	{ BIT_TO_8G, BIT_TO_2G },
	{ BIT_TO_8G, BIT_TO_2G },
	{ 0, 0},
};

/* Convert raw 8-bit two's-complement axis readings into milli-g in place. */
static void decode_mg(struct cma3000_accl_data *data, int *datax,
				int *datay, int *dataz)
{
	/* Data in 2's complement, convert to mg */
	*datax = ((s8)*datax) * data->bit_to_mg;
	*datay = ((s8)*datay) * data->bit_to_mg;
	*dataz = ((s8)*dataz) * data->bit_to_mg;
}

/*
 * Threaded irq handler: report free-fall via ABS_MISC, then read all
 * three axes, refresh the LSB weight from the current CTRL mode/range,
 * and report the decoded milli-g values.
 */
static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
{
	struct cma3000_accl_data *data = dev_id;
	int datax, datay, dataz, intr_status;
	u8 ctrl, mode, range;

	intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
	if (intr_status < 0)
		return IRQ_NONE;

	/* Check if free fall is detected, report immediately */
	if (intr_status & CMA3000_INTSTATUS_FFDET) {
		input_report_abs(data->input_dev, ABS_MISC, 1);
		input_sync(data->input_dev);
	} else {
		input_report_abs(data->input_dev, ABS_MISC, 0);
	}

	datax = CMA3000_READ(data, CMA3000_DOUTX, "X");
	datay = CMA3000_READ(data, CMA3000_DOUTY, "Y");
	dataz = CMA3000_READ(data, CMA3000_DOUTZ, "Z");

	ctrl = CMA3000_READ(data, CMA3000_CTRL, "ctrl");
	mode = (ctrl & CMA3000_MODEMASK) >> 1;
	range = (ctrl & CMA3000_GRANGEMASK) >> 7;

	data->bit_to_mg = mode_to_mg[mode][range];

	/* Interrupt not for this device */
	if (data->bit_to_mg == 0)
		return IRQ_NONE;

	/* Decode register values to milli g */
	decode_mg(data, &datax, &datay, &dataz);

	input_report_abs(data->input_dev, ABS_X, datax);
	input_report_abs(data->input_dev, ABS_Y, datay);
	input_report_abs(data->input_dev, ABS_Z, dataz);
	input_sync(data->input_dev);

	return IRQ_HANDLED;
}

/*
 * Soft-reset the chip via the magic RSTR sequence, then verify it came
 * back without a parity error.  Returns 0 or a negative errno.
 */
static int cma3000_reset(struct cma3000_accl_data *data)
{
	int val;

	/* Reset sequence */
	CMA3000_SET(data, CMA3000_RSTR, 0x02, "Reset");
	CMA3000_SET(data, CMA3000_RSTR, 0x0A, "Reset");
	CMA3000_SET(data, CMA3000_RSTR, 0x04, "Reset");

	/* Settling time delay */
	mdelay(10);

	val = CMA3000_READ(data, CMA3000_STATUS, "Status");
	if (val < 0) {
		dev_err(data->dev, "Reset failed\n");
		return val;
	}

	if (val & CMA3000_STATUS_PERR) {
		dev_err(data->dev, "Parity Error\n");
		return -EIO;
	}

	return 0;
}

/*
 * Program thresholds/timers from platform data and write CTRL with the
 * requested mode, g-range and bus flag, then wait for the part to settle.
 */
static int cma3000_poweron(struct cma3000_accl_data *data)
{
	const struct cma3000_platform_data *pdata = data->pdata;
	u8 ctrl = 0;
	int ret;

	if (data->g_range == CMARANGE_2G) {
		ctrl = (data->mode << 1) | CMA3000_RANGE2G;
	} else if (data->g_range == CMARANGE_8G) {
		ctrl = (data->mode << 1) | CMA3000_RANGE8G;
	} else {
		dev_info(data->dev,
			 "Invalid G range specified, assuming 8G\n");
		ctrl = (data->mode << 1) | CMA3000_RANGE8G;
	}

	ctrl |= data->bus_ops->ctrl_mod;

	CMA3000_SET(data, CMA3000_MDTHR, pdata->mdthr,
		    "Motion Detect Threshold");
	CMA3000_SET(data, CMA3000_MDFFTMR, pdata->mdfftmr,
		    "Time register");
	CMA3000_SET(data, CMA3000_FFTHR, pdata->ffthr,
		    "Free fall threshold");
	ret = CMA3000_SET(data, CMA3000_CTRL, ctrl, "Mode setting");
	if (ret < 0)
		return -EIO;

	msleep(CMA3000_SETDELAY);

	return 0;
}

/* Put the chip into power-off mode. */
static int cma3000_poweroff(struct cma3000_accl_data *data)
{
	int ret;

	ret = CMA3000_SET(data, CMA3000_CTRL, CMAMODE_POFF, "Mode setting");
	msleep(CMA3000_SETDELAY);

	return ret;
}

/* input open(): power up unless suspended; mutex guards the state pair. */
static int cma3000_open(struct input_dev *input_dev)
{
	struct cma3000_accl_data *data = input_get_drvdata(input_dev);

	mutex_lock(&data->mutex);

	if (!data->suspended)
		cma3000_poweron(data);

	data->opened = true;

	mutex_unlock(&data->mutex);

	return 0;
}

/* input close(): power down unless suspended already powered us off. */
static void cma3000_close(struct input_dev *input_dev)
{
	struct cma3000_accl_data *data = input_get_drvdata(input_dev);

	mutex_lock(&data->mutex);

	if (!data->suspended)
		cma3000_poweroff(data);

	data->opened = false;

	mutex_unlock(&data->mutex);
}

/* Bus-glue suspend hook: power off only if a user has the device open. */
void cma3000_suspend(struct cma3000_accl_data *data)
{
	mutex_lock(&data->mutex);

	if (!data->suspended && data->opened)
		cma3000_poweroff(data);

	data->suspended = true;

	mutex_unlock(&data->mutex);
}
EXPORT_SYMBOL(cma3000_suspend);


/* Bus-glue resume hook: mirror of cma3000_suspend(). */
void cma3000_resume(struct cma3000_accl_data *data)
{
	mutex_lock(&data->mutex);

	if (data->suspended && data->opened)
		cma3000_poweron(data);

	data->suspended = false;

	mutex_unlock(&data->mutex);
}
EXPORT_SYMBOL(cma3000_resume);

/*
 * Common init called from the i2c/spi glue: validate platform data,
 * allocate state and the input device, reset the chip, hook the
 * threaded irq and register with the input core.  Returns the state
 * pointer or an ERR_PTR.
 */
struct cma3000_accl_data *cma3000_init(struct device *dev, int irq,
				       const struct cma3000_bus_ops *bops)
{
	const struct cma3000_platform_data *pdata = dev->platform_data;
	struct cma3000_accl_data *data;
	struct input_dev *input_dev;
	int rev;
	int error;

	if (!pdata) {
		dev_err(dev, "platform data not found\n");
		error = -EINVAL;
		goto err_out;
	}


	/* if no IRQ return error */
	if (irq == 0) {
		error = -EINVAL;
		goto err_out;
	}

	data = kzalloc(sizeof(struct cma3000_accl_data), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!data || !input_dev) {
		error = -ENOMEM;
		goto err_free_mem;
	}

	data->dev = dev;
	data->input_dev = input_dev;
	data->bus_ops = bops;
	data->pdata = pdata;
	data->irq = irq;
	mutex_init(&data->mutex);

	data->mode = pdata->mode;
	if (data->mode < CMAMODE_DEFAULT || data->mode > CMAMODE_POFF) {
		data->mode = CMAMODE_MOTDET;
		dev_warn(dev,
			 "Invalid mode specified, assuming Motion Detect\n");
	}

	data->g_range = pdata->g_range;
	if (data->g_range != CMARANGE_2G && data->g_range != CMARANGE_8G) {
		dev_info(dev,
			 "Invalid G range specified, assuming 8G\n");
		data->g_range = CMARANGE_8G;
	}

	input_dev->name = "cma3000-accelerometer";
	input_dev->id.bustype = bops->bustype;
	input_dev->open = cma3000_open;
	input_dev->close = cma3000_close;

	 __set_bit(EV_ABS, input_dev->evbit);

	input_set_abs_params(input_dev, ABS_X,
			-data->g_range, data->g_range, pdata->fuzz_x, 0);
	input_set_abs_params(input_dev, ABS_Y,
			-data->g_range, data->g_range, pdata->fuzz_y, 0);
	input_set_abs_params(input_dev, ABS_Z,
			-data->g_range, data->g_range, pdata->fuzz_z, 0);
	input_set_abs_params(input_dev, ABS_MISC, 0, 1, 0, 0);

	input_set_drvdata(input_dev, data);

	error = cma3000_reset(data);
	if (error)
		goto err_free_mem;

	rev = CMA3000_READ(data, CMA3000_REVID, "Revid");
	if (rev < 0) {
		error = rev;
		goto err_free_mem;
	}

	pr_info("CMA3000 Accelerometer: Revision %x\n", rev);

	error = request_threaded_irq(irq, NULL, cma3000_thread_irq,
				     pdata->irqflags | IRQF_ONESHOT,
				     "cma3000_d0x", data);
	if (error) {
		dev_err(dev, "request_threaded_irq failed\n");
		goto err_free_mem;
	}

	error = input_register_device(data->input_dev);
	if (error) {
		dev_err(dev, "Unable to register input device\n");
		goto err_free_irq;
	}

	return data;

err_free_irq:
	free_irq(irq, data);
err_free_mem:
	input_free_device(input_dev);
	kfree(data);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(cma3000_init);

/* Tear down everything created by cma3000_init(). */
void cma3000_exit(struct cma3000_accl_data *data)
{
	free_irq(data->irq, data);
	input_unregister_device(data->input_dev);
	kfree(data);
}
EXPORT_SYMBOL(cma3000_exit);

MODULE_DESCRIPTION("CMA3000-D0x Accelerometer Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
gpl-2.0
playfulgod/Kernel_AS85-LG-Ignite
drivers/input/touchscreen/elo.c
9101
9104
/* * Elo serial touchscreen driver * * Copyright (c) 2004 Vojtech Pavlik */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ /* * This driver can handle serial Elo touchscreens using either the Elo standard * 'E271-2210' 10-byte protocol, Elo legacy 'E281A-4002' 6-byte protocol, Elo * legacy 'E271-140' 4-byte protocol and Elo legacy 'E261-280' 3-byte protocol. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #include <linux/ctype.h> #define DRIVER_DESC "Elo serial touchscreen driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Definitions & global arrays. */ #define ELO_MAX_LENGTH 10 #define ELO10_PACKET_LEN 8 #define ELO10_TOUCH 0x03 #define ELO10_PRESSURE 0x80 #define ELO10_LEAD_BYTE 'U' #define ELO10_ID_CMD 'i' #define ELO10_TOUCH_PACKET 'T' #define ELO10_ACK_PACKET 'A' #define ELI10_ID_PACKET 'I' /* * Per-touchscreen data. 
*/ struct elo { struct input_dev *dev; struct serio *serio; struct mutex cmd_mutex; struct completion cmd_done; int id; int idx; unsigned char expected_packet; unsigned char csum; unsigned char data[ELO_MAX_LENGTH]; unsigned char response[ELO10_PACKET_LEN]; char phys[32]; }; static void elo_process_data_10(struct elo *elo, unsigned char data) { struct input_dev *dev = elo->dev; elo->data[elo->idx] = data; switch (elo->idx++) { case 0: elo->csum = 0xaa; if (data != ELO10_LEAD_BYTE) { dev_dbg(&elo->serio->dev, "unsynchronized data: 0x%02x\n", data); elo->idx = 0; } break; case 9: elo->idx = 0; if (data != elo->csum) { dev_dbg(&elo->serio->dev, "bad checksum: 0x%02x, expected 0x%02x\n", data, elo->csum); break; } if (elo->data[1] != elo->expected_packet) { if (elo->data[1] != ELO10_TOUCH_PACKET) dev_dbg(&elo->serio->dev, "unexpected packet: 0x%02x\n", elo->data[1]); break; } if (likely(elo->data[1] == ELO10_TOUCH_PACKET)) { input_report_abs(dev, ABS_X, (elo->data[4] << 8) | elo->data[3]); input_report_abs(dev, ABS_Y, (elo->data[6] << 8) | elo->data[5]); if (elo->data[2] & ELO10_PRESSURE) input_report_abs(dev, ABS_PRESSURE, (elo->data[8] << 8) | elo->data[7]); input_report_key(dev, BTN_TOUCH, elo->data[2] & ELO10_TOUCH); input_sync(dev); } else if (elo->data[1] == ELO10_ACK_PACKET) { if (elo->data[2] == '0') elo->expected_packet = ELO10_TOUCH_PACKET; complete(&elo->cmd_done); } else { memcpy(elo->response, &elo->data[1], ELO10_PACKET_LEN); elo->expected_packet = ELO10_ACK_PACKET; } break; } elo->csum += data; } static void elo_process_data_6(struct elo *elo, unsigned char data) { struct input_dev *dev = elo->dev; elo->data[elo->idx] = data; switch (elo->idx++) { case 0: if ((data & 0xc0) != 0xc0) elo->idx = 0; break; case 1: if ((data & 0xc0) != 0x80) elo->idx = 0; break; case 2: if ((data & 0xc0) != 0x40) elo->idx = 0; break; case 3: if (data & 0xc0) { elo->idx = 0; break; } input_report_abs(dev, ABS_X, ((elo->data[0] & 0x3f) << 6) | (elo->data[1] & 0x3f)); 
input_report_abs(dev, ABS_Y, ((elo->data[2] & 0x3f) << 6) | (elo->data[3] & 0x3f)); if (elo->id == 2) { input_report_key(dev, BTN_TOUCH, 1); input_sync(dev); elo->idx = 0; } break; case 4: if (data) { input_sync(dev); elo->idx = 0; } break; case 5: if ((data & 0xf0) == 0) { input_report_abs(dev, ABS_PRESSURE, elo->data[5]); input_report_key(dev, BTN_TOUCH, !!elo->data[5]); } input_sync(dev); elo->idx = 0; break; } } static void elo_process_data_3(struct elo *elo, unsigned char data) { struct input_dev *dev = elo->dev; elo->data[elo->idx] = data; switch (elo->idx++) { case 0: if ((data & 0x7f) != 0x01) elo->idx = 0; break; case 2: input_report_key(dev, BTN_TOUCH, !(elo->data[1] & 0x80)); input_report_abs(dev, ABS_X, elo->data[1]); input_report_abs(dev, ABS_Y, elo->data[2]); input_sync(dev); elo->idx = 0; break; } } static irqreturn_t elo_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct elo *elo = serio_get_drvdata(serio); switch (elo->id) { case 0: elo_process_data_10(elo, data); break; case 1: case 2: elo_process_data_6(elo, data); break; case 3: elo_process_data_3(elo, data); break; } return IRQ_HANDLED; } static int elo_command_10(struct elo *elo, unsigned char *packet) { int rc = -1; int i; unsigned char csum = 0xaa + ELO10_LEAD_BYTE; mutex_lock(&elo->cmd_mutex); serio_pause_rx(elo->serio); elo->expected_packet = toupper(packet[0]); init_completion(&elo->cmd_done); serio_continue_rx(elo->serio); if (serio_write(elo->serio, ELO10_LEAD_BYTE)) goto out; for (i = 0; i < ELO10_PACKET_LEN; i++) { csum += packet[i]; if (serio_write(elo->serio, packet[i])) goto out; } if (serio_write(elo->serio, csum)) goto out; wait_for_completion_timeout(&elo->cmd_done, HZ); if (elo->expected_packet == ELO10_TOUCH_PACKET) { /* We are back in reporting mode, the command was ACKed */ memcpy(packet, elo->response, ELO10_PACKET_LEN); rc = 0; } out: mutex_unlock(&elo->cmd_mutex); return rc; } static int elo_setup_10(struct elo *elo) { static const char 
*elo_types[] = { "Accu", "Dura", "Intelli", "Carroll" }; struct input_dev *dev = elo->dev; unsigned char packet[ELO10_PACKET_LEN] = { ELO10_ID_CMD }; if (elo_command_10(elo, packet)) return -1; dev->id.version = (packet[5] << 8) | packet[4]; input_set_abs_params(dev, ABS_X, 96, 4000, 0, 0); input_set_abs_params(dev, ABS_Y, 96, 4000, 0, 0); if (packet[3] & ELO10_PRESSURE) input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0); dev_info(&elo->serio->dev, "%sTouch touchscreen, fw: %02x.%02x, features: 0x%02x, controller: 0x%02x\n", elo_types[(packet[1] -'0') & 0x03], packet[5], packet[4], packet[3], packet[7]); return 0; } /* * elo_disconnect() is the opposite of elo_connect() */ static void elo_disconnect(struct serio *serio) { struct elo *elo = serio_get_drvdata(serio); input_get_device(elo->dev); input_unregister_device(elo->dev); serio_close(serio); serio_set_drvdata(serio, NULL); input_put_device(elo->dev); kfree(elo); } /* * elo_connect() is the routine that is called when someone adds a * new serio device that supports Gunze protocol and registers it as * an input device. 
*/ static int elo_connect(struct serio *serio, struct serio_driver *drv) { struct elo *elo; struct input_dev *input_dev; int err; elo = kzalloc(sizeof(struct elo), GFP_KERNEL); input_dev = input_allocate_device(); if (!elo || !input_dev) { err = -ENOMEM; goto fail1; } elo->serio = serio; elo->id = serio->id.id; elo->dev = input_dev; elo->expected_packet = ELO10_TOUCH_PACKET; mutex_init(&elo->cmd_mutex); init_completion(&elo->cmd_done); snprintf(elo->phys, sizeof(elo->phys), "%s/input0", serio->phys); input_dev->name = "Elo Serial TouchScreen"; input_dev->phys = elo->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_ELO; input_dev->id.product = elo->id; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); serio_set_drvdata(serio, elo); err = serio_open(serio, drv); if (err) goto fail2; switch (elo->id) { case 0: /* 10-byte protocol */ if (elo_setup_10(elo)) goto fail3; break; case 1: /* 6-byte protocol */ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0); case 2: /* 4-byte protocol */ input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0); input_set_abs_params(input_dev, ABS_Y, 96, 4000, 0, 0); break; case 3: /* 3-byte protocol */ input_set_abs_params(input_dev, ABS_X, 0, 255, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, 255, 0, 0); break; } err = input_register_device(elo->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(elo); return err; } /* * The serio driver structure. 
*/ static struct serio_device_id elo_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_ELO, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, elo_serio_ids); static struct serio_driver elo_drv = { .driver = { .name = "elo", }, .description = DRIVER_DESC, .id_table = elo_serio_ids, .interrupt = elo_interrupt, .connect = elo_connect, .disconnect = elo_disconnect, }; /* * The functions for inserting/removing us as a module. */ static int __init elo_init(void) { return serio_register_driver(&elo_drv); } static void __exit elo_exit(void) { serio_unregister_driver(&elo_drv); } module_init(elo_init); module_exit(elo_exit);
gpl-2.0
drsn0w/android_kernel_zte_msm7627
drivers/misc/iwmc3200top/log.c
9101
8042
/* * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver * drivers/misc/iwmc3200top/log.c * * Copyright (C) 2009 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> * - * */ #include <linux/kernel.h> #include <linux/mmc/sdio_func.h> #include <linux/slab.h> #include <linux/ctype.h> #include "fw-msg.h" #include "iwmc3200top.h" #include "log.h" /* Maximal hexadecimal string size of the FW memdump message */ #define LOG_MSG_SIZE_MAX 12400 /* iwmct_logdefs is a global used by log macros */ u8 iwmct_logdefs[LOG_SRC_MAX]; static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX]; static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask) { int i; if (src < size) logdefs[src] = logmask; else if (src == LOG_SRC_ALL) for (i = 0; i < size; i++) logdefs[i] = logmask; else return -1; return 0; } int iwmct_log_set_filter(u8 src, u8 logmask) { return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask); } int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return _log_set_log_filter(iwmct_fw_logdefs, FW_LOG_SRC_MAX, src, logmask); } static int log_msg_format_hex(char *str, int slen, u8 *ibuf, int ilen, char *pref) { int pos = 0; int i; int len; for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++) str[pos] = pref[i]; for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++) 
len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]); if (i < ilen) return -1; return 0; } /* NOTE: This function is not thread safe. Currently it's called only from sdio rx worker - no race there */ void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len) { struct top_msg *msg; static char logbuf[LOG_MSG_SIZE_MAX]; msg = (struct top_msg *)buf; if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) { LOG_ERROR(priv, FW_MSG, "Log message from TOP " "is too short %d (expected %zd)\n", len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)); return; } if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] & BIT(msg->u.log.log_hdr.severity)) || !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity))) return; switch (msg->hdr.category) { case COMM_CATEGORY_TESTABILITY: if (!(iwmct_logdefs[LOG_SRC_TST] & BIT(msg->u.log.log_hdr.severity))) return; if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf, le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr), "<TST>")) LOG_WARNING(priv, TST, "TOP TST message is too long, truncating..."); LOG_WARNING(priv, TST, "%s\n", logbuf); break; case COMM_CATEGORY_DEBUG: if (msg->hdr.opcode == OP_DBG_ZSTR_MSG) LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>", ((u8 *)msg) + sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)); else { if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf, le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr), "<DBG>")) LOG_WARNING(priv, FW_MSG, "TOP DBG message is too long," "truncating..."); LOG_WARNING(priv, FW_MSG, "%s\n", logbuf); } break; default: break; } } static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size) { int i, pos, len; for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) { len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,", i, logdefs[i]); pos += len; } buf[pos-1] = '\n'; buf[pos] = '\0'; if (i < logdefsz) return -1; return 0; } int log_get_filter_str(char *buf, int size) { return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size); } int 
log_get_fw_filter_str(char *buf, int size) { return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size); } #define HEXADECIMAL_RADIX 16 #define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */ ssize_t show_iwmct_log_level(struct device *d, struct device_attribute *attr, char *buf) { struct iwmct_priv *priv = dev_get_drvdata(d); char *str_buf; int buf_size; ssize_t ret; buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1; str_buf = kzalloc(buf_size, GFP_KERNEL); if (!str_buf) { LOG_ERROR(priv, DEBUGFS, "failed to allocate %d bytes\n", buf_size); ret = -ENOMEM; goto exit; } if (log_get_filter_str(str_buf, buf_size) < 0) { ret = -EINVAL; goto exit; } ret = sprintf(buf, "%s", str_buf); exit: kfree(str_buf); return ret; } ssize_t store_iwmct_log_level(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct iwmct_priv *priv = dev_get_drvdata(d); char *token, *str_buf = NULL; long val; ssize_t ret = count; u8 src, mask; if (!count) goto exit; str_buf = kzalloc(count, GFP_KERNEL); if (!str_buf) { LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n", count); ret = -ENOMEM; goto exit; } memcpy(str_buf, buf, count); while ((token = strsep(&str_buf, ",")) != NULL) { while (isspace(*token)) ++token; if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) { LOG_ERROR(priv, DEBUGFS, "failed to convert string to long %s\n", token); ret = -EINVAL; goto exit; } mask = val & 0xFF; src = (val & 0XFF00) >> 8; iwmct_log_set_filter(src, mask); } exit: kfree(str_buf); return ret; } ssize_t show_iwmct_log_level_fw(struct device *d, struct device_attribute *attr, char *buf) { struct iwmct_priv *priv = dev_get_drvdata(d); char *str_buf; int buf_size; ssize_t ret; buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2; str_buf = kzalloc(buf_size, GFP_KERNEL); if (!str_buf) { LOG_ERROR(priv, DEBUGFS, "failed to allocate %d bytes\n", buf_size); ret = -ENOMEM; goto exit; } if (log_get_fw_filter_str(str_buf, buf_size) < 0) { ret = -EINVAL; goto exit; } 
ret = sprintf(buf, "%s", str_buf); exit: kfree(str_buf); return ret; } ssize_t store_iwmct_log_level_fw(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct iwmct_priv *priv = dev_get_drvdata(d); struct top_msg cmd; char *token, *str_buf = NULL; ssize_t ret = count; u16 cmdlen = 0; int i; long val; u8 src, mask; if (!count) goto exit; str_buf = kzalloc(count, GFP_KERNEL); if (!str_buf) { LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n", count); ret = -ENOMEM; goto exit; } memcpy(str_buf, buf, count); cmd.hdr.type = COMM_TYPE_H2D; cmd.hdr.category = COMM_CATEGORY_DEBUG; cmd.hdr.opcode = CMD_DBG_LOG_LEVEL; for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) && (i < FW_LOG_SRC_MAX); i++) { while (isspace(*token)) ++token; if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) { LOG_ERROR(priv, DEBUGFS, "failed to convert string to long %s\n", token); ret = -EINVAL; goto exit; } mask = val & 0xFF; /* LSB */ src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */ iwmct_log_set_fw_filter(src, mask); cmd.u.logdefs[i].logsource = src; cmd.u.logdefs[i].sevmask = mask; } cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0])); cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr)); ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen); if (ret) { LOG_ERROR(priv, DEBUGFS, "Failed to send %d bytes of fwcmd, ret=%zd\n", cmdlen, ret); goto exit; } else LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen); ret = count; exit: kfree(str_buf); return ret; }
gpl-2.0
charles1018/Nexus_5
arch/powerpc/platforms/8xx/ep88xc.c
11661
4431
/*
 * Platform setup for the Embedded Planet EP88xC board
 *
 * Author: Scott Wood <scottwood@freescale.com>
 * Copyright 2007 Freescale Semiconductor, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/init.h>
#include <linux/of_platform.h>

#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/udbg.h>
#include <asm/cpm1.h>

#include "mpc8xx.h"

/* One CPM parallel-I/O pin: port index, pin number, direction/mux flags. */
struct cpm_pin {
	int port, pin, flags;
};

/*
 * Board pin-mux table, grouped by function; applied in order by
 * init_ioports() below.
 */
static struct cpm_pin ep88xc_pins[] = {
	/* SMC1 */
	{1, 24, CPM_PIN_INPUT}, /* RX */
	{1, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */

	/* SCC2 */
	{0, 12, CPM_PIN_INPUT}, /* TX */
	{0, 13, CPM_PIN_INPUT}, /* RX */
	{2, 8, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CD */
	{2, 9, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CTS */
	{2, 14, CPM_PIN_INPUT}, /* RTS */

	/* MII1 */
	{0, 0, CPM_PIN_INPUT},
	{0, 1, CPM_PIN_INPUT},
	{0, 2, CPM_PIN_INPUT},
	{0, 3, CPM_PIN_INPUT},
	{0, 4, CPM_PIN_OUTPUT},
	{0, 10, CPM_PIN_OUTPUT},
	{0, 11, CPM_PIN_OUTPUT},
	{1, 19, CPM_PIN_INPUT},
	{1, 31, CPM_PIN_INPUT},
	{2, 12, CPM_PIN_INPUT},
	{2, 13, CPM_PIN_INPUT},
	{3, 8, CPM_PIN_INPUT},
	{4, 30, CPM_PIN_OUTPUT},
	{4, 31, CPM_PIN_OUTPUT},

	/* MII2 */
	{4, 14, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{4, 15, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{4, 16, CPM_PIN_OUTPUT},
	{4, 17, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{4, 18, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{4, 19, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{4, 20, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{4, 21, CPM_PIN_OUTPUT},
	{4, 22, CPM_PIN_OUTPUT},
	{4, 23, CPM_PIN_OUTPUT},
	{4, 24, CPM_PIN_OUTPUT},
	{4, 25, CPM_PIN_OUTPUT},
	{4, 26, CPM_PIN_OUTPUT},
	{4, 27, CPM_PIN_OUTPUT},
	{4, 28, CPM_PIN_OUTPUT},
	{4, 29, CPM_PIN_OUTPUT},

	/* USB */
	{0, 6, CPM_PIN_INPUT}, /* CLK2 */
	{0, 14, CPM_PIN_INPUT}, /* USBOE */
	{0, 15, CPM_PIN_INPUT}, /* USBRXD */
	{2, 6, CPM_PIN_OUTPUT}, /* USBTXN */
	{2, 7, CPM_PIN_OUTPUT}, /* USBTXP */
	{2, 10, CPM_PIN_INPUT}, /* USBRXN */
	{2, 11, CPM_PIN_INPUT}, /* USBRXP */

	/* Misc */
	{1, 26, CPM_PIN_INPUT}, /* BRGO2 */
	{1, 27, CPM_PIN_INPUT}, /* BRGO1 */
};

/*
 * Apply the pin-mux table, then route clocks: SMC1 from BRG1,
 * SCC1 (USB, per the pin group above) from CLK2, SCC2 from BRG2.
 */
static void __init init_ioports(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ep88xc_pins); i++) {
		struct cpm_pin *pin = &ep88xc_pins[i];
		cpm1_set_pin(pin->port, pin->pin, pin->flags);
	}

	cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
	cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK2, CPM_CLK_TX); /* USB */
	cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK2, CPM_CLK_RX);
	cpm1_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX);
	cpm1_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX);
}

/* Mapped board control/status register block; NULL if mapping failed. */
static u8 __iomem *ep88xc_bcsr;

/* Bit definitions for BCSR bytes 7-9 (board-specific enables). */
#define BCSR7_SCC2_ENABLE 0x10

#define BCSR8_PHY1_ENABLE 0x80
#define BCSR8_PHY1_POWER  0x40
#define BCSR8_PHY2_ENABLE 0x20
#define BCSR8_PHY2_POWER  0x10

#define BCSR9_USB_ENABLE  0x80
#define BCSR9_USB_POWER   0x40
#define BCSR9_USB_HOST    0x20
#define BCSR9_USB_FULL_SPEED_TARGET 0x10

/*
 * Board setup: reset the CPM, configure I/O, map the BCSR from the
 * device tree, and switch on SCC2 and both Ethernet PHYs.  Failures
 * to find/map the BCSR are logged but not fatal.
 */
static void __init ep88xc_setup_arch(void)
{
	struct device_node *np;

	cpm_reset();
	init_ioports();

	np = of_find_compatible_node(NULL, NULL, "fsl,ep88xc-bcsr");
	if (!np) {
		printk(KERN_CRIT "Could not find fsl,ep88xc-bcsr node\n");
		return;
	}

	ep88xc_bcsr = of_iomap(np, 0);
	of_node_put(np);

	if (!ep88xc_bcsr) {
		printk(KERN_CRIT "Could not remap BCSR\n");
		return;
	}

	setbits8(&ep88xc_bcsr[7], BCSR7_SCC2_ENABLE);
	setbits8(&ep88xc_bcsr[8], BCSR8_PHY1_ENABLE | BCSR8_PHY1_POWER |
	                          BCSR8_PHY2_ENABLE | BCSR8_PHY2_POWER);
}

/* Match this machine against the flat device tree root compatible. */
static int __init ep88xc_probe(void)
{
	unsigned long root = of_get_flat_dt_root();
	return of_flat_dt_is_compatible(root, "fsl,ep88xc");
}

static struct of_device_id __initdata of_bus_ids[] = {
	{ .name = "soc", },
	{ .name = "cpm", },
	{ .name = "localbus", },
	{},
};

/* Publish the soc/cpm/localbus children as platform devices. */
static int __init declare_of_platform_devices(void)
{
	/* Publish the QE devices */
	of_platform_bus_probe(NULL, of_bus_ids, NULL);

	return 0;
}
machine_device_initcall(ep88xc, declare_of_platform_devices);

define_machine(ep88xc) {
	.name = "Embedded Planet EP88xC",
	.probe = ep88xc_probe,
	.setup_arch = ep88xc_setup_arch,
	.init_IRQ = mpc8xx_pics_init,
	.get_irq	= mpc8xx_get_irq,
	.restart = mpc8xx_restart,
	.calibrate_decr = mpc8xx_calibrate_decr,
	.set_rtc_time = mpc8xx_set_rtc_time,
	.get_rtc_time = mpc8xx_get_rtc_time,
	.progress = udbg_progress,
};
gpl-2.0
SlimRoms/kernel_sony_msm8x60
arch/arm/vfp/vfpdouble.c
12429
29135
/* * linux/arch/arm/vfp/vfpdouble.c * * This code is derived in part from John R. Housers softfloat library, which * carries the following notice: * * =========================================================================== * This C source file is part of the SoftFloat IEC/IEEE Floating-point * Arithmetic Package, Release 2. * * Written by John R. Hauser. This work was made possible in part by the * International Computer Science Institute, located at Suite 600, 1947 Center * Street, Berkeley, California 94704. Funding was partially provided by the * National Science Foundation under grant MIP-9311980. The original version * of this code was written as part of a project to build a fixed-point vector * processor in collaboration with the University of California at Berkeley, * overseen by Profs. Nelson Morgan and John Wawrzynek. More information * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ * arithmetic/softfloat.html'. * * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT * TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. * * Derivative works are acceptable, even for commercial purposes, so long as * (1) they include prominent notice that the work is derivative, and (2) they * include prominent notice akin to these three paragraphs for those parts of * this code that are retained. 
* =========================================================================== */ #include <linux/kernel.h> #include <linux/bitops.h> #include <asm/div64.h> #include <asm/vfp.h> #include "vfpinstr.h" #include "vfp.h" static struct vfp_double vfp_double_default_qnan = { .exponent = 2047, .sign = 0, .significand = VFP_DOUBLE_SIGNIFICAND_QNAN, }; static void vfp_double_dump(const char *str, struct vfp_double *d) { pr_debug("VFP: %s: sign=%d exponent=%d significand=%016llx\n", str, d->sign != 0, d->exponent, d->significand); } static void vfp_double_normalise_denormal(struct vfp_double *vd) { int bits = 31 - fls(vd->significand >> 32); if (bits == 31) bits = 63 - fls(vd->significand); vfp_double_dump("normalise_denormal: in", vd); if (bits) { vd->exponent -= bits - 1; vd->significand <<= bits; } vfp_double_dump("normalise_denormal: out", vd); } u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func) { u64 significand, incr; int exponent, shift, underflow; u32 rmode; vfp_double_dump("pack: in", vd); /* * Infinities and NaNs are a special case. */ if (vd->exponent == 2047 && (vd->significand == 0 || exceptions)) goto pack; /* * Special-case zero. */ if (vd->significand == 0) { vd->exponent = 0; goto pack; } exponent = vd->exponent; significand = vd->significand; shift = 32 - fls(significand >> 32); if (shift == 32) shift = 64 - fls(significand); if (shift) { exponent -= shift; significand <<= shift; } #ifdef DEBUG vd->exponent = exponent; vd->significand = significand; vfp_double_dump("pack: normalised", vd); #endif /* * Tiny number? */ underflow = exponent < 0; if (underflow) { significand = vfp_shiftright64jamming(significand, -exponent); exponent = 0; #ifdef DEBUG vd->exponent = exponent; vd->significand = significand; vfp_double_dump("pack: tiny number", vd); #endif if (!(significand & ((1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1))) underflow = 0; } /* * Select rounding increment. 
*/ incr = 0; rmode = fpscr & FPSCR_RMODE_MASK; if (rmode == FPSCR_ROUND_NEAREST) { incr = 1ULL << VFP_DOUBLE_LOW_BITS; if ((significand & (1ULL << (VFP_DOUBLE_LOW_BITS + 1))) == 0) incr -= 1; } else if (rmode == FPSCR_ROUND_TOZERO) { incr = 0; } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vd->sign != 0)) incr = (1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1; pr_debug("VFP: rounding increment = 0x%08llx\n", incr); /* * Is our rounding going to overflow? */ if ((significand + incr) < significand) { exponent += 1; significand = (significand >> 1) | (significand & 1); incr >>= 1; #ifdef DEBUG vd->exponent = exponent; vd->significand = significand; vfp_double_dump("pack: overflow", vd); #endif } /* * If any of the low bits (which will be shifted out of the * number) are non-zero, the result is inexact. */ if (significand & ((1 << (VFP_DOUBLE_LOW_BITS + 1)) - 1)) exceptions |= FPSCR_IXC; /* * Do our rounding. */ significand += incr; /* * Infinity? */ if (exponent >= 2046) { exceptions |= FPSCR_OFC | FPSCR_IXC; if (incr == 0) { vd->exponent = 2045; vd->significand = 0x7fffffffffffffffULL; } else { vd->exponent = 2047; /* infinity */ vd->significand = 0; } } else { if (significand >> (VFP_DOUBLE_LOW_BITS + 1) == 0) exponent = 0; if (exponent || significand > 0x8000000000000000ULL) underflow = 0; if (underflow) exceptions |= FPSCR_UFC; vd->exponent = exponent; vd->significand = significand >> 1; } pack: vfp_double_dump("pack: final", vd); { s64 d = vfp_double_pack(vd); pr_debug("VFP: %s: d(d%d)=%016llx exceptions=%08x\n", func, dd, d, exceptions); vfp_put_double(d, dd); } return exceptions; } /* * Propagate the NaN, setting exceptions if it is signalling. * 'n' is always a NaN. 'm' may be a number, NaN or infinity. 
*/ static u32 vfp_propagate_nan(struct vfp_double *vdd, struct vfp_double *vdn, struct vfp_double *vdm, u32 fpscr) { struct vfp_double *nan; int tn, tm = 0; tn = vfp_double_type(vdn); if (vdm) tm = vfp_double_type(vdm); if (fpscr & FPSCR_DEFAULT_NAN) /* * Default NaN mode - always returns a quiet NaN */ nan = &vfp_double_default_qnan; else { /* * Contemporary mode - select the first signalling * NAN, or if neither are signalling, the first * quiet NAN. */ if (tn == VFP_SNAN || (tm != VFP_SNAN && tn == VFP_QNAN)) nan = vdn; else nan = vdm; /* * Make the NaN quiet. */ nan->significand |= VFP_DOUBLE_SIGNIFICAND_QNAN; } *vdd = *nan; /* * If one was a signalling NAN, raise invalid operation. */ return tn == VFP_SNAN || tm == VFP_SNAN ? FPSCR_IOC : VFP_NAN_FLAG; } /* * Extended operations */ static u32 vfp_double_fabs(int dd, int unused, int dm, u32 fpscr) { vfp_put_double(vfp_double_packed_abs(vfp_get_double(dm)), dd); return 0; } static u32 vfp_double_fcpy(int dd, int unused, int dm, u32 fpscr) { vfp_put_double(vfp_get_double(dm), dd); return 0; } static u32 vfp_double_fneg(int dd, int unused, int dm, u32 fpscr) { vfp_put_double(vfp_double_packed_negate(vfp_get_double(dm)), dd); return 0; } static u32 vfp_double_fsqrt(int dd, int unused, int dm, u32 fpscr) { struct vfp_double vdm, vdd; int ret, tm; vfp_double_unpack(&vdm, vfp_get_double(dm)); tm = vfp_double_type(&vdm); if (tm & (VFP_NAN|VFP_INFINITY)) { struct vfp_double *vdp = &vdd; if (tm & VFP_NAN) ret = vfp_propagate_nan(vdp, &vdm, NULL, fpscr); else if (vdm.sign == 0) { sqrt_copy: vdp = &vdm; ret = 0; } else { sqrt_invalid: vdp = &vfp_double_default_qnan; ret = FPSCR_IOC; } vfp_put_double(vfp_double_pack(vdp), dd); return ret; } /* * sqrt(+/- 0) == +/- 0 */ if (tm & VFP_ZERO) goto sqrt_copy; /* * Normalise a denormalised number */ if (tm & VFP_DENORMAL) vfp_double_normalise_denormal(&vdm); /* * sqrt(<0) = invalid */ if (vdm.sign) goto sqrt_invalid; vfp_double_dump("sqrt", &vdm); /* * Estimate the square root. 
*/ vdd.sign = 0; vdd.exponent = ((vdm.exponent - 1023) >> 1) + 1023; vdd.significand = (u64)vfp_estimate_sqrt_significand(vdm.exponent, vdm.significand >> 32) << 31; vfp_double_dump("sqrt estimate1", &vdd); vdm.significand >>= 1 + (vdm.exponent & 1); vdd.significand += 2 + vfp_estimate_div128to64(vdm.significand, 0, vdd.significand); vfp_double_dump("sqrt estimate2", &vdd); /* * And now adjust. */ if ((vdd.significand & VFP_DOUBLE_LOW_BITS_MASK) <= 5) { if (vdd.significand < 2) { vdd.significand = ~0ULL; } else { u64 termh, terml, remh, reml; vdm.significand <<= 2; mul64to128(&termh, &terml, vdd.significand, vdd.significand); sub128(&remh, &reml, vdm.significand, 0, termh, terml); while ((s64)remh < 0) { vdd.significand -= 1; shift64left(&termh, &terml, vdd.significand); terml |= 1; add128(&remh, &reml, remh, reml, termh, terml); } vdd.significand |= (remh | reml) != 0; } } vdd.significand = vfp_shiftright64jamming(vdd.significand, 1); return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fsqrt"); } /* * Equal := ZC * Less than := N * Greater than := C * Unordered := CV */ static u32 vfp_compare(int dd, int signal_on_qnan, int dm, u32 fpscr) { s64 d, m; u32 ret = 0; m = vfp_get_double(dm); if (vfp_double_packed_exponent(m) == 2047 && vfp_double_packed_mantissa(m)) { ret |= FPSCR_C | FPSCR_V; if (signal_on_qnan || !(vfp_double_packed_mantissa(m) & (1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1)))) /* * Signalling NaN, or signalling on quiet NaN */ ret |= FPSCR_IOC; } d = vfp_get_double(dd); if (vfp_double_packed_exponent(d) == 2047 && vfp_double_packed_mantissa(d)) { ret |= FPSCR_C | FPSCR_V; if (signal_on_qnan || !(vfp_double_packed_mantissa(d) & (1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1)))) /* * Signalling NaN, or signalling on quiet NaN */ ret |= FPSCR_IOC; } if (ret == 0) { if (d == m || vfp_double_packed_abs(d | m) == 0) { /* * equal */ ret |= FPSCR_Z | FPSCR_C; } else if (vfp_double_packed_sign(d ^ m)) { /* * different signs */ if (vfp_double_packed_sign(d)) /* * d is 
negative, so d < m */ ret |= FPSCR_N; else /* * d is positive, so d > m */ ret |= FPSCR_C; } else if ((vfp_double_packed_sign(d) != 0) ^ (d < m)) { /* * d < m */ ret |= FPSCR_N; } else if ((vfp_double_packed_sign(d) != 0) ^ (d > m)) { /* * d > m */ ret |= FPSCR_C; } } return ret; } static u32 vfp_double_fcmp(int dd, int unused, int dm, u32 fpscr) { return vfp_compare(dd, 0, dm, fpscr); } static u32 vfp_double_fcmpe(int dd, int unused, int dm, u32 fpscr) { return vfp_compare(dd, 1, dm, fpscr); } static u32 vfp_double_fcmpz(int dd, int unused, int dm, u32 fpscr) { return vfp_compare(dd, 0, VFP_REG_ZERO, fpscr); } static u32 vfp_double_fcmpez(int dd, int unused, int dm, u32 fpscr) { return vfp_compare(dd, 1, VFP_REG_ZERO, fpscr); } static u32 vfp_double_fcvts(int sd, int unused, int dm, u32 fpscr) { struct vfp_double vdm; struct vfp_single vsd; int tm; u32 exceptions = 0; vfp_double_unpack(&vdm, vfp_get_double(dm)); tm = vfp_double_type(&vdm); /* * If we have a signalling NaN, signal invalid operation. 
*/ if (tm == VFP_SNAN) exceptions = FPSCR_IOC; if (tm & VFP_DENORMAL) vfp_double_normalise_denormal(&vdm); vsd.sign = vdm.sign; vsd.significand = vfp_hi64to32jamming(vdm.significand); /* * If we have an infinity or a NaN, the exponent must be 255 */ if (tm & (VFP_INFINITY|VFP_NAN)) { vsd.exponent = 255; if (tm == VFP_QNAN) vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN; goto pack_nan; } else if (tm & VFP_ZERO) vsd.exponent = 0; else vsd.exponent = vdm.exponent - (1023 - 127); return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fcvts"); pack_nan: vfp_put_float(vfp_single_pack(&vsd), sd); return exceptions; } static u32 vfp_double_fuito(int dd, int unused, int dm, u32 fpscr) { struct vfp_double vdm; u32 m = vfp_get_float(dm); vdm.sign = 0; vdm.exponent = 1023 + 63 - 1; vdm.significand = (u64)m; return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fuito"); } static u32 vfp_double_fsito(int dd, int unused, int dm, u32 fpscr) { struct vfp_double vdm; u32 m = vfp_get_float(dm); vdm.sign = (m & 0x80000000) >> 16; vdm.exponent = 1023 + 63 - 1; vdm.significand = vdm.sign ? -m : m; return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fsito"); } static u32 vfp_double_ftoui(int sd, int unused, int dm, u32 fpscr) { struct vfp_double vdm; u32 d, exceptions = 0; int rmode = fpscr & FPSCR_RMODE_MASK; int tm; vfp_double_unpack(&vdm, vfp_get_double(dm)); /* * Do we have a denormalised number? */ tm = vfp_double_type(&vdm); if (tm & VFP_DENORMAL) exceptions |= FPSCR_IDC; if (tm & VFP_NAN) vdm.sign = 0; if (vdm.exponent >= 1023 + 32) { d = vdm.sign ? 
0 : 0xffffffff; exceptions = FPSCR_IOC; } else if (vdm.exponent >= 1023 - 1) { int shift = 1023 + 63 - vdm.exponent; u64 rem, incr = 0; /* * 2^0 <= m < 2^32-2^8 */ d = (vdm.significand << 1) >> shift; rem = vdm.significand << (65 - shift); if (rmode == FPSCR_ROUND_NEAREST) { incr = 0x8000000000000000ULL; if ((d & 1) == 0) incr -= 1; } else if (rmode == FPSCR_ROUND_TOZERO) { incr = 0; } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) { incr = ~0ULL; } if ((rem + incr) < rem) { if (d < 0xffffffff) d += 1; else exceptions |= FPSCR_IOC; } if (d && vdm.sign) { d = 0; exceptions |= FPSCR_IOC; } else if (rem) exceptions |= FPSCR_IXC; } else { d = 0; if (vdm.exponent | vdm.significand) { exceptions |= FPSCR_IXC; if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0) d = 1; else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign) { d = 0; exceptions |= FPSCR_IOC; } } } pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions); vfp_put_float(d, sd); return exceptions; } static u32 vfp_double_ftouiz(int sd, int unused, int dm, u32 fpscr) { return vfp_double_ftoui(sd, unused, dm, FPSCR_ROUND_TOZERO); } static u32 vfp_double_ftosi(int sd, int unused, int dm, u32 fpscr) { struct vfp_double vdm; u32 d, exceptions = 0; int rmode = fpscr & FPSCR_RMODE_MASK; int tm; vfp_double_unpack(&vdm, vfp_get_double(dm)); vfp_double_dump("VDM", &vdm); /* * Do we have denormalised number? 
*/ tm = vfp_double_type(&vdm); if (tm & VFP_DENORMAL) exceptions |= FPSCR_IDC; if (tm & VFP_NAN) { d = 0; exceptions |= FPSCR_IOC; } else if (vdm.exponent >= 1023 + 32) { d = 0x7fffffff; if (vdm.sign) d = ~d; exceptions |= FPSCR_IOC; } else if (vdm.exponent >= 1023 - 1) { int shift = 1023 + 63 - vdm.exponent; /* 58 */ u64 rem, incr = 0; d = (vdm.significand << 1) >> shift; rem = vdm.significand << (65 - shift); if (rmode == FPSCR_ROUND_NEAREST) { incr = 0x8000000000000000ULL; if ((d & 1) == 0) incr -= 1; } else if (rmode == FPSCR_ROUND_TOZERO) { incr = 0; } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) { incr = ~0ULL; } if ((rem + incr) < rem && d < 0xffffffff) d += 1; if (d > 0x7fffffff + (vdm.sign != 0)) { d = 0x7fffffff + (vdm.sign != 0); exceptions |= FPSCR_IOC; } else if (rem) exceptions |= FPSCR_IXC; if (vdm.sign) d = -d; } else { d = 0; if (vdm.exponent | vdm.significand) { exceptions |= FPSCR_IXC; if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0) d = 1; else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign) d = -1; } } pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions); vfp_put_float((s32)d, sd); return exceptions; } static u32 vfp_double_ftosiz(int dd, int unused, int dm, u32 fpscr) { return vfp_double_ftosi(dd, unused, dm, FPSCR_ROUND_TOZERO); } static struct op fops_ext[32] = { [FEXT_TO_IDX(FEXT_FCPY)] = { vfp_double_fcpy, 0 }, [FEXT_TO_IDX(FEXT_FABS)] = { vfp_double_fabs, 0 }, [FEXT_TO_IDX(FEXT_FNEG)] = { vfp_double_fneg, 0 }, [FEXT_TO_IDX(FEXT_FSQRT)] = { vfp_double_fsqrt, 0 }, [FEXT_TO_IDX(FEXT_FCMP)] = { vfp_double_fcmp, OP_SCALAR }, [FEXT_TO_IDX(FEXT_FCMPE)] = { vfp_double_fcmpe, OP_SCALAR }, [FEXT_TO_IDX(FEXT_FCMPZ)] = { vfp_double_fcmpz, OP_SCALAR }, [FEXT_TO_IDX(FEXT_FCMPEZ)] = { vfp_double_fcmpez, OP_SCALAR }, [FEXT_TO_IDX(FEXT_FCVT)] = { vfp_double_fcvts, OP_SCALAR|OP_SD }, [FEXT_TO_IDX(FEXT_FUITO)] = { vfp_double_fuito, OP_SCALAR|OP_SM }, [FEXT_TO_IDX(FEXT_FSITO)] = { vfp_double_fsito, OP_SCALAR|OP_SM }, 
[FEXT_TO_IDX(FEXT_FTOUI)] = { vfp_double_ftoui, OP_SCALAR|OP_SD }, [FEXT_TO_IDX(FEXT_FTOUIZ)] = { vfp_double_ftouiz, OP_SCALAR|OP_SD }, [FEXT_TO_IDX(FEXT_FTOSI)] = { vfp_double_ftosi, OP_SCALAR|OP_SD }, [FEXT_TO_IDX(FEXT_FTOSIZ)] = { vfp_double_ftosiz, OP_SCALAR|OP_SD }, }; static u32 vfp_double_fadd_nonnumber(struct vfp_double *vdd, struct vfp_double *vdn, struct vfp_double *vdm, u32 fpscr) { struct vfp_double *vdp; u32 exceptions = 0; int tn, tm; tn = vfp_double_type(vdn); tm = vfp_double_type(vdm); if (tn & tm & VFP_INFINITY) { /* * Two infinities. Are they different signs? */ if (vdn->sign ^ vdm->sign) { /* * different signs -> invalid */ exceptions = FPSCR_IOC; vdp = &vfp_double_default_qnan; } else { /* * same signs -> valid */ vdp = vdn; } } else if (tn & VFP_INFINITY && tm & VFP_NUMBER) { /* * One infinity and one number -> infinity */ vdp = vdn; } else { /* * 'n' is a NaN of some type */ return vfp_propagate_nan(vdd, vdn, vdm, fpscr); } *vdd = *vdp; return exceptions; } static u32 vfp_double_add(struct vfp_double *vdd, struct vfp_double *vdn, struct vfp_double *vdm, u32 fpscr) { u32 exp_diff; u64 m_sig; if (vdn->significand & (1ULL << 63) || vdm->significand & (1ULL << 63)) { pr_info("VFP: bad FP values in %s\n", __func__); vfp_double_dump("VDN", vdn); vfp_double_dump("VDM", vdm); } /* * Ensure that 'n' is the largest magnitude number. Note that * if 'n' and 'm' have equal exponents, we do not swap them. * This ensures that NaN propagation works correctly. */ if (vdn->exponent < vdm->exponent) { struct vfp_double *t = vdn; vdn = vdm; vdm = t; } /* * Is 'n' an infinity or a NaN? Note that 'm' may be a number, * infinity or a NaN here. */ if (vdn->exponent == 2047) return vfp_double_fadd_nonnumber(vdd, vdn, vdm, fpscr); /* * We have two proper numbers, where 'vdn' is the larger magnitude. * * Copy 'n' to 'd' before doing the arithmetic. */ *vdd = *vdn; /* * Align 'm' with the result. 
*/ exp_diff = vdn->exponent - vdm->exponent; m_sig = vfp_shiftright64jamming(vdm->significand, exp_diff); /* * If the signs are different, we are really subtracting. */ if (vdn->sign ^ vdm->sign) { m_sig = vdn->significand - m_sig; if ((s64)m_sig < 0) { vdd->sign = vfp_sign_negate(vdd->sign); m_sig = -m_sig; } else if (m_sig == 0) { vdd->sign = (fpscr & FPSCR_RMODE_MASK) == FPSCR_ROUND_MINUSINF ? 0x8000 : 0; } } else { m_sig += vdn->significand; } vdd->significand = m_sig; return 0; } static u32 vfp_double_multiply(struct vfp_double *vdd, struct vfp_double *vdn, struct vfp_double *vdm, u32 fpscr) { vfp_double_dump("VDN", vdn); vfp_double_dump("VDM", vdm); /* * Ensure that 'n' is the largest magnitude number. Note that * if 'n' and 'm' have equal exponents, we do not swap them. * This ensures that NaN propagation works correctly. */ if (vdn->exponent < vdm->exponent) { struct vfp_double *t = vdn; vdn = vdm; vdm = t; pr_debug("VFP: swapping M <-> N\n"); } vdd->sign = vdn->sign ^ vdm->sign; /* * If 'n' is an infinity or NaN, handle it. 'm' may be anything. */ if (vdn->exponent == 2047) { if (vdn->significand || (vdm->exponent == 2047 && vdm->significand)) return vfp_propagate_nan(vdd, vdn, vdm, fpscr); if ((vdm->exponent | vdm->significand) == 0) { *vdd = vfp_double_default_qnan; return FPSCR_IOC; } vdd->exponent = vdn->exponent; vdd->significand = 0; return 0; } /* * If 'm' is zero, the result is always zero. In this case, * 'n' may be zero or a number, but it doesn't matter which. */ if ((vdm->exponent | vdm->significand) == 0) { vdd->exponent = 0; vdd->significand = 0; return 0; } /* * We add 2 to the destination exponent for the same reason * as the addition case - though this time we have +1 from * each input operand. 
*/ vdd->exponent = vdn->exponent + vdm->exponent - 1023 + 2; vdd->significand = vfp_hi64multiply64(vdn->significand, vdm->significand); vfp_double_dump("VDD", vdd); return 0; } #define NEG_MULTIPLY (1 << 0) #define NEG_SUBTRACT (1 << 1) static u32 vfp_double_multiply_accumulate(int dd, int dn, int dm, u32 fpscr, u32 negate, char *func) { struct vfp_double vdd, vdp, vdn, vdm; u32 exceptions; vfp_double_unpack(&vdn, vfp_get_double(dn)); if (vdn.exponent == 0 && vdn.significand) vfp_double_normalise_denormal(&vdn); vfp_double_unpack(&vdm, vfp_get_double(dm)); if (vdm.exponent == 0 && vdm.significand) vfp_double_normalise_denormal(&vdm); exceptions = vfp_double_multiply(&vdp, &vdn, &vdm, fpscr); if (negate & NEG_MULTIPLY) vdp.sign = vfp_sign_negate(vdp.sign); vfp_double_unpack(&vdn, vfp_get_double(dd)); if (negate & NEG_SUBTRACT) vdn.sign = vfp_sign_negate(vdn.sign); exceptions |= vfp_double_add(&vdd, &vdn, &vdp, fpscr); return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, func); } /* * Standard operations */ /* * sd = sd + (sn * sm) */ static u32 vfp_double_fmac(int dd, int dn, int dm, u32 fpscr) { return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, 0, "fmac"); } /* * sd = sd - (sn * sm) */ static u32 vfp_double_fnmac(int dd, int dn, int dm, u32 fpscr) { return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_MULTIPLY, "fnmac"); } /* * sd = -sd + (sn * sm) */ static u32 vfp_double_fmsc(int dd, int dn, int dm, u32 fpscr) { return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_SUBTRACT, "fmsc"); } /* * sd = -sd - (sn * sm) */ static u32 vfp_double_fnmsc(int dd, int dn, int dm, u32 fpscr) { return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc"); } /* * sd = sn * sm */ static u32 vfp_double_fmul(int dd, int dn, int dm, u32 fpscr) { struct vfp_double vdd, vdn, vdm; u32 exceptions; vfp_double_unpack(&vdn, vfp_get_double(dn)); if (vdn.exponent == 0 && vdn.significand) vfp_double_normalise_denormal(&vdn); 
vfp_double_unpack(&vdm, vfp_get_double(dm)); if (vdm.exponent == 0 && vdm.significand) vfp_double_normalise_denormal(&vdm); exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr); return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fmul"); } /* * sd = -(sn * sm) */ static u32 vfp_double_fnmul(int dd, int dn, int dm, u32 fpscr) { struct vfp_double vdd, vdn, vdm; u32 exceptions; vfp_double_unpack(&vdn, vfp_get_double(dn)); if (vdn.exponent == 0 && vdn.significand) vfp_double_normalise_denormal(&vdn); vfp_double_unpack(&vdm, vfp_get_double(dm)); if (vdm.exponent == 0 && vdm.significand) vfp_double_normalise_denormal(&vdm); exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr); vdd.sign = vfp_sign_negate(vdd.sign); return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fnmul"); } /* * sd = sn + sm */ static u32 vfp_double_fadd(int dd, int dn, int dm, u32 fpscr) { struct vfp_double vdd, vdn, vdm; u32 exceptions; vfp_double_unpack(&vdn, vfp_get_double(dn)); if (vdn.exponent == 0 && vdn.significand) vfp_double_normalise_denormal(&vdn); vfp_double_unpack(&vdm, vfp_get_double(dm)); if (vdm.exponent == 0 && vdm.significand) vfp_double_normalise_denormal(&vdm); exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr); return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fadd"); } /* * sd = sn - sm */ static u32 vfp_double_fsub(int dd, int dn, int dm, u32 fpscr) { struct vfp_double vdd, vdn, vdm; u32 exceptions; vfp_double_unpack(&vdn, vfp_get_double(dn)); if (vdn.exponent == 0 && vdn.significand) vfp_double_normalise_denormal(&vdn); vfp_double_unpack(&vdm, vfp_get_double(dm)); if (vdm.exponent == 0 && vdm.significand) vfp_double_normalise_denormal(&vdm); /* * Subtraction is like addition, but with a negated operand. 
*/ vdm.sign = vfp_sign_negate(vdm.sign); exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr); return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fsub"); } /* * sd = sn / sm */ static u32 vfp_double_fdiv(int dd, int dn, int dm, u32 fpscr) { struct vfp_double vdd, vdn, vdm; u32 exceptions = 0; int tm, tn; vfp_double_unpack(&vdn, vfp_get_double(dn)); vfp_double_unpack(&vdm, vfp_get_double(dm)); vdd.sign = vdn.sign ^ vdm.sign; tn = vfp_double_type(&vdn); tm = vfp_double_type(&vdm); /* * Is n a NAN? */ if (tn & VFP_NAN) goto vdn_nan; /* * Is m a NAN? */ if (tm & VFP_NAN) goto vdm_nan; /* * If n and m are infinity, the result is invalid * If n and m are zero, the result is invalid */ if (tm & tn & (VFP_INFINITY|VFP_ZERO)) goto invalid; /* * If n is infinity, the result is infinity */ if (tn & VFP_INFINITY) goto infinity; /* * If m is zero, raise div0 exceptions */ if (tm & VFP_ZERO) goto divzero; /* * If m is infinity, or n is zero, the result is zero */ if (tm & VFP_INFINITY || tn & VFP_ZERO) goto zero; if (tn & VFP_DENORMAL) vfp_double_normalise_denormal(&vdn); if (tm & VFP_DENORMAL) vfp_double_normalise_denormal(&vdm); /* * Ok, we have two numbers, we can perform division. 
*/ vdd.exponent = vdn.exponent - vdm.exponent + 1023 - 1; vdm.significand <<= 1; if (vdm.significand <= (2 * vdn.significand)) { vdn.significand >>= 1; vdd.exponent++; } vdd.significand = vfp_estimate_div128to64(vdn.significand, 0, vdm.significand); if ((vdd.significand & 0x1ff) <= 2) { u64 termh, terml, remh, reml; mul64to128(&termh, &terml, vdm.significand, vdd.significand); sub128(&remh, &reml, vdn.significand, 0, termh, terml); while ((s64)remh < 0) { vdd.significand -= 1; add128(&remh, &reml, remh, reml, 0, vdm.significand); } vdd.significand |= (reml != 0); } return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fdiv"); vdn_nan: exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr); pack: vfp_put_double(vfp_double_pack(&vdd), dd); return exceptions; vdm_nan: exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr); goto pack; zero: vdd.exponent = 0; vdd.significand = 0; goto pack; divzero: exceptions = FPSCR_DZC; infinity: vdd.exponent = 2047; vdd.significand = 0; goto pack; invalid: vfp_put_double(vfp_double_pack(&vfp_double_default_qnan), dd); return FPSCR_IOC; } static struct op fops[16] = { [FOP_TO_IDX(FOP_FMAC)] = { vfp_double_fmac, 0 }, [FOP_TO_IDX(FOP_FNMAC)] = { vfp_double_fnmac, 0 }, [FOP_TO_IDX(FOP_FMSC)] = { vfp_double_fmsc, 0 }, [FOP_TO_IDX(FOP_FNMSC)] = { vfp_double_fnmsc, 0 }, [FOP_TO_IDX(FOP_FMUL)] = { vfp_double_fmul, 0 }, [FOP_TO_IDX(FOP_FNMUL)] = { vfp_double_fnmul, 0 }, [FOP_TO_IDX(FOP_FADD)] = { vfp_double_fadd, 0 }, [FOP_TO_IDX(FOP_FSUB)] = { vfp_double_fsub, 0 }, [FOP_TO_IDX(FOP_FDIV)] = { vfp_double_fdiv, 0 }, }; #define FREG_BANK(x) ((x) & 0x0c) #define FREG_IDX(x) ((x) & 3) u32 vfp_double_cpdo(u32 inst, u32 fpscr) { u32 op = inst & FOP_MASK; u32 exceptions = 0; unsigned int dest; unsigned int dn = vfp_get_dn(inst); unsigned int dm; unsigned int vecitr, veclen, vecstride; struct op *fop; vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK)); fop = (op == FOP_EXT) ? 
&fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)]; /* * fcvtds takes an sN register number as destination, not dN. * It also always operates on scalars. */ if (fop->flags & OP_SD) dest = vfp_get_sd(inst); else dest = vfp_get_dd(inst); /* * f[us]ito takes a sN operand, not a dN operand. */ if (fop->flags & OP_SM) dm = vfp_get_sm(inst); else dm = vfp_get_dm(inst); /* * If destination bank is zero, vector length is always '1'. * ARM DDI0100F C5.1.3, C5.3.2. */ if ((fop->flags & OP_SCALAR) || (FREG_BANK(dest) == 0)) veclen = 0; else veclen = fpscr & FPSCR_LENGTH_MASK; pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride, (veclen >> FPSCR_LENGTH_BIT) + 1); if (!fop->fn) goto invalid; for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) { u32 except; char type; type = fop->flags & OP_SD ? 's' : 'd'; if (op == FOP_EXT) pr_debug("VFP: itr%d (%c%u) = op[%u] (d%u)\n", vecitr >> FPSCR_LENGTH_BIT, type, dest, dn, dm); else pr_debug("VFP: itr%d (%c%u) = (d%u) op[%u] (d%u)\n", vecitr >> FPSCR_LENGTH_BIT, type, dest, dn, FOP_TO_IDX(op), dm); except = fop->fn(dest, dn, dm, fpscr); pr_debug("VFP: itr%d: exceptions=%08x\n", vecitr >> FPSCR_LENGTH_BIT, except); exceptions |= except; /* * CHECK: It appears to be undefined whether we stop when * we encounter an exception. We continue. */ dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 3); dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 3); if (FREG_BANK(dm) != 0) dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 3); } return exceptions; invalid: return ~0; }
gpl-2.0
javelinanddart/samsung_GS2
drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
12685
2567
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* This source file is specifically designed to interface with the v4l-dvb cs53l32a module. */ #include "pvrusb2-cs53l32a.h" #include "pvrusb2-hdw-internal.h" #include "pvrusb2-debug.h" #include <linux/videodev2.h> #include <media/v4l2-common.h> #include <linux/errno.h> struct routing_scheme { const int *def; unsigned int cnt; }; static const int routing_scheme1[] = { [PVR2_CVAL_INPUT_TV] = 2, /* 1 or 2 seems to work here */ [PVR2_CVAL_INPUT_RADIO] = 2, [PVR2_CVAL_INPUT_COMPOSITE] = 0, [PVR2_CVAL_INPUT_SVIDEO] = 0, }; static const struct routing_scheme routing_def1 = { .def = routing_scheme1, .cnt = ARRAY_SIZE(routing_scheme1), }; static const struct routing_scheme *routing_schemes[] = { [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1, }; void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) { if (hdw->input_dirty || hdw->force_dirty) { const struct routing_scheme *sp; unsigned int sid = hdw->hdw_desc->signal_routing_scheme; u32 input; pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)", hdw->input_val); sp = (sid < ARRAY_SIZE(routing_schemes)) ? 
routing_schemes[sid] : NULL; if ((sp == NULL) || (hdw->input_val < 0) || (hdw->input_val >= sp->cnt)) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "*** WARNING *** subdev v4l2 set_input:" " Invalid routing scheme (%u)" " and/or input (%d)", sid, hdw->input_val); return; } input = sp->def[hdw->input_val]; sd->ops->audio->s_routing(sd, input, 0, 0); } } /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 70 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
paulocastro31/android_kernel_motorola_msm8226
drivers/parisc/eisa.c
12685
11782
/* * eisa.c - provide support for EISA adapters in PA-RISC machines * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Copyright (c) 2001 Matthew Wilcox for Hewlett Packard * Copyright (c) 2001 Daniel Engstrom <5116@telia.com> * * There are two distinct EISA adapters. Mongoose is found in machines * before the 712; then the Wax ASIC is used. To complicate matters, the * Wax ASIC also includes a PS/2 and RS-232 controller, but those are * dealt with elsewhere; this file is concerned only with the EISA portions * of Wax. * * * HINT: * ----- * To allow an ISA card to work properly in the EISA slot you need to * set an edge trigger level. This may be done on the palo command line * by adding the kernel parameter "eisa_irq_edge=n,n2,[...]]", with * n and n2 as the irq levels you want to use. * * Example: "eisa_irq_edge=10,11" allows ISA cards to operate at * irq levels 10 and 11. */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/eisa.h> #include <asm/byteorder.h> #include <asm/io.h> #include <asm/hardware.h> #include <asm/processor.h> #include <asm/parisc-device.h> #include <asm/delay.h> #include <asm/eisa_bus.h> #include <asm/eisa_eeprom.h> #if 0 #define EISA_DBG(msg, arg... ) printk(KERN_DEBUG "eisa: " msg , ## arg ) #else #define EISA_DBG(msg, arg... ) #endif #define SNAKES_EEPROM_BASE_ADDR 0xF0810400 #define MIRAGE_EEPROM_BASE_ADDR 0xF00C0400 static DEFINE_SPINLOCK(eisa_irq_lock); void __iomem *eisa_eeprom_addr __read_mostly; /* We can only have one EISA adapter in the system because neither * implementation can be flexed. 
*/ static struct eisa_ba { struct pci_hba_data hba; unsigned long eeprom_addr; struct eisa_root_device root; } eisa_dev; /* Port ops */ static inline unsigned long eisa_permute(unsigned short port) { if (port & 0x300) { return 0xfc000000 | ((port & 0xfc00) >> 6) | ((port & 0x3f8) << 9) | (port & 7); } else { return 0xfc000000 | port; } } unsigned char eisa_in8(unsigned short port) { if (EISA_bus) return gsc_readb(eisa_permute(port)); return 0xff; } unsigned short eisa_in16(unsigned short port) { if (EISA_bus) return le16_to_cpu(gsc_readw(eisa_permute(port))); return 0xffff; } unsigned int eisa_in32(unsigned short port) { if (EISA_bus) return le32_to_cpu(gsc_readl(eisa_permute(port))); return 0xffffffff; } void eisa_out8(unsigned char data, unsigned short port) { if (EISA_bus) gsc_writeb(data, eisa_permute(port)); } void eisa_out16(unsigned short data, unsigned short port) { if (EISA_bus) gsc_writew(cpu_to_le16(data), eisa_permute(port)); } void eisa_out32(unsigned int data, unsigned short port) { if (EISA_bus) gsc_writel(cpu_to_le32(data), eisa_permute(port)); } #ifndef CONFIG_PCI /* We call these directly without PCI. See asm/io.h. */ EXPORT_SYMBOL(eisa_in8); EXPORT_SYMBOL(eisa_in16); EXPORT_SYMBOL(eisa_in32); EXPORT_SYMBOL(eisa_out8); EXPORT_SYMBOL(eisa_out16); EXPORT_SYMBOL(eisa_out32); #endif /* Interrupt handling */ /* cached interrupt mask registers */ static int master_mask; static int slave_mask; /* the trig level can be set with the * eisa_irq_edge=n,n,n commandline parameter * We should really read this from the EEPROM * in the furure. 
*/ /* irq 13,8,2,1,0 must be edge */ static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered */ /* called by free irq */ static void eisa_mask_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long flags; EISA_DBG("disable irq %d\n", irq); /* just mask for now */ spin_lock_irqsave(&eisa_irq_lock, flags); if (irq & 8) { slave_mask |= (1 << (irq&7)); eisa_out8(slave_mask, 0xa1); } else { master_mask |= (1 << (irq&7)); eisa_out8(master_mask, 0x21); } spin_unlock_irqrestore(&eisa_irq_lock, flags); EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21)); EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1)); } /* called by request irq */ static void eisa_unmask_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long flags; EISA_DBG("enable irq %d\n", irq); spin_lock_irqsave(&eisa_irq_lock, flags); if (irq & 8) { slave_mask &= ~(1 << (irq&7)); eisa_out8(slave_mask, 0xa1); } else { master_mask &= ~(1 << (irq&7)); eisa_out8(master_mask, 0x21); } spin_unlock_irqrestore(&eisa_irq_lock, flags); EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21)); EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1)); } static struct irq_chip eisa_interrupt_type = { .name = "EISA", .irq_unmask = eisa_unmask_irq, .irq_mask = eisa_mask_irq, }; static irqreturn_t eisa_irq(int wax_irq, void *intr_dev) { int irq = gsc_readb(0xfc01f000); /* EISA supports 16 irqs */ unsigned long flags; spin_lock_irqsave(&eisa_irq_lock, flags); /* read IRR command */ eisa_out8(0x0a, 0x20); eisa_out8(0x0a, 0xa0); EISA_DBG("irq IAR %02x 8259-1 irr %02x 8259-2 irr %02x\n", irq, eisa_in8(0x20), eisa_in8(0xa0)); /* read ISR command */ eisa_out8(0x0a, 0x20); eisa_out8(0x0a, 0xa0); EISA_DBG("irq 8259-1 isr %02x imr %02x 8259-2 isr %02x imr %02x\n", eisa_in8(0x20), eisa_in8(0x21), eisa_in8(0xa0), eisa_in8(0xa1)); irq &= 0xf; /* mask irq and write eoi */ if (irq & 8) { slave_mask |= (1 << (irq&7)); eisa_out8(slave_mask, 0xa1); eisa_out8(0x60 | (irq&7),0xa0);/* 'Specific EOI' to slave */ eisa_out8(0x62,0x20); /* 
'Specific EOI' to master-IRQ2 */ } else { master_mask |= (1 << (irq&7)); eisa_out8(master_mask, 0x21); eisa_out8(0x60|irq,0x20); /* 'Specific EOI' to master */ } spin_unlock_irqrestore(&eisa_irq_lock, flags); generic_handle_irq(irq); spin_lock_irqsave(&eisa_irq_lock, flags); /* unmask */ if (irq & 8) { slave_mask &= ~(1 << (irq&7)); eisa_out8(slave_mask, 0xa1); } else { master_mask &= ~(1 << (irq&7)); eisa_out8(master_mask, 0x21); } spin_unlock_irqrestore(&eisa_irq_lock, flags); return IRQ_HANDLED; } static irqreturn_t dummy_irq2_handler(int _, void *dev) { printk(KERN_ALERT "eisa: uhh, irq2?\n"); return IRQ_HANDLED; } static struct irqaction irq2_action = { .handler = dummy_irq2_handler, .name = "cascade", }; static void init_eisa_pic(void) { unsigned long flags; spin_lock_irqsave(&eisa_irq_lock, flags); eisa_out8(0xff, 0x21); /* mask during init */ eisa_out8(0xff, 0xa1); /* mask during init */ /* master pic */ eisa_out8(0x11,0x20); /* ICW1 */ eisa_out8(0x00,0x21); /* ICW2 */ eisa_out8(0x04,0x21); /* ICW3 */ eisa_out8(0x01,0x21); /* ICW4 */ eisa_out8(0x40,0x20); /* OCW2 */ /* slave pic */ eisa_out8(0x11,0xa0); /* ICW1 */ eisa_out8(0x08,0xa1); /* ICW2 */ eisa_out8(0x02,0xa1); /* ICW3 */ eisa_out8(0x01,0xa1); /* ICW4 */ eisa_out8(0x40,0xa0); /* OCW2 */ udelay(100); slave_mask = 0xff; master_mask = 0xfb; eisa_out8(slave_mask, 0xa1); /* OCW1 */ eisa_out8(master_mask, 0x21); /* OCW1 */ /* setup trig level */ EISA_DBG("EISA edge/level %04x\n", eisa_irq_level); eisa_out8(eisa_irq_level&0xff, 0x4d0); /* Set all irq's to edge */ eisa_out8((eisa_irq_level >> 8) & 0xff, 0x4d1); EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21)); EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1)); EISA_DBG("pic0 edge/level %02x\n", eisa_in8(0x4d0)); EISA_DBG("pic1 edge/level %02x\n", eisa_in8(0x4d1)); spin_unlock_irqrestore(&eisa_irq_lock, flags); } /* Device initialisation */ #define is_mongoose(dev) (dev->id.sversion == 0x00076) static int __init eisa_probe(struct parisc_device *dev) { int i, result; 
char *name = is_mongoose(dev) ? "Mongoose" : "Wax"; printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n", name, (unsigned long)dev->hpa.start); eisa_dev.hba.dev = dev; eisa_dev.hba.iommu = ccio_get_iommu(dev); eisa_dev.hba.lmmio_space.name = "EISA"; eisa_dev.hba.lmmio_space.start = F_EXTEND(0xfc000000); eisa_dev.hba.lmmio_space.end = F_EXTEND(0xffbfffff); eisa_dev.hba.lmmio_space.flags = IORESOURCE_MEM; result = ccio_request_resource(dev, &eisa_dev.hba.lmmio_space); if (result < 0) { printk(KERN_ERR "EISA: failed to claim EISA Bus address space!\n"); return result; } eisa_dev.hba.io_space.name = "EISA"; eisa_dev.hba.io_space.start = 0; eisa_dev.hba.io_space.end = 0xffff; eisa_dev.hba.lmmio_space.flags = IORESOURCE_IO; result = request_resource(&ioport_resource, &eisa_dev.hba.io_space); if (result < 0) { printk(KERN_ERR "EISA: failed to claim EISA Bus port space!\n"); return result; } pcibios_register_hba(&eisa_dev.hba); result = request_irq(dev->irq, eisa_irq, IRQF_SHARED, "EISA", &eisa_dev); if (result) { printk(KERN_ERR "EISA: request_irq failed!\n"); return result; } /* Reserve IRQ2 */ setup_irq(2, &irq2_action); for (i = 0; i < 16; i++) { irq_set_chip_and_handler(i, &eisa_interrupt_type, handle_simple_irq); } EISA_bus = 1; if (dev->num_addrs) { /* newer firmware hand out the eeprom address */ eisa_dev.eeprom_addr = dev->addr[0]; } else { /* old firmware, need to figure out the box */ if (is_mongoose(dev)) { eisa_dev.eeprom_addr = SNAKES_EEPROM_BASE_ADDR; } else { eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR; } } eisa_eeprom_addr = ioremap_nocache(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH); result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space, &eisa_dev.hba.lmmio_space); init_eisa_pic(); if (result >= 0) { /* FIXME : Don't enumerate the bus twice. 
*/ eisa_dev.root.dev = &dev->dev; dev_set_drvdata(&dev->dev, &eisa_dev.root); eisa_dev.root.bus_base_addr = 0; eisa_dev.root.res = &eisa_dev.hba.io_space; eisa_dev.root.slots = result; eisa_dev.root.dma_mask = 0xffffffff; /* wild guess */ if (eisa_root_register (&eisa_dev.root)) { printk(KERN_ERR "EISA: Failed to register EISA root\n"); return -1; } } return 0; } static const struct parisc_device_id eisa_tbl[] = { { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00076 }, /* Mongoose */ { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00090 }, /* Wax EISA */ { 0, } }; MODULE_DEVICE_TABLE(parisc, eisa_tbl); static struct parisc_driver eisa_driver = { .name = "eisa_ba", .id_table = eisa_tbl, .probe = eisa_probe, }; void __init eisa_init(void) { register_parisc_driver(&eisa_driver); } static unsigned int eisa_irq_configured; void eisa_make_irq_level(int num) { if (eisa_irq_configured& (1<<num)) { printk(KERN_WARNING "IRQ %d polarity configured twice (last to level)\n", num); } eisa_irq_level |= (1<<num); /* set the corresponding bit */ eisa_irq_configured |= (1<<num); /* set the corresponding bit */ } void eisa_make_irq_edge(int num) { if (eisa_irq_configured& (1<<num)) { printk(KERN_WARNING "IRQ %d polarity configured twice (last to edge)\n", num); } eisa_irq_level &= ~(1<<num); /* clear the corresponding bit */ eisa_irq_configured |= (1<<num); /* set the corresponding bit */ } static int __init eisa_irq_setup(char *str) { char *cur = str; int val; EISA_DBG("IRQ setup\n"); while (cur != NULL) { char *pe; val = (int) simple_strtoul(cur, &pe, 0); if (val > 15 || val < 0) { printk(KERN_ERR "eisa: EISA irq value are 0-15\n"); continue; } if (val == 2) { val = 9; } eisa_make_irq_edge(val); /* clear the corresponding bit */ EISA_DBG("setting IRQ %d to edge-triggered mode\n", val); if ((cur = strchr(cur, ','))) { cur++; } else { break; } } return 1; } __setup("eisa_irq_edge=", eisa_irq_setup);
gpl-2.0
popazerty/linux-sh4-2.6.32.y
arch/score/mm/pgtable.c
13709
1690
/* * arch/score/mm/pgtable-32.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Lennox Wu <lennox.wu@sunplusct.com> * Chen Liqin <liqin.chen@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/bootmem.h> #include <linux/init.h> #include <linux/pfn.h> #include <linux/mm.h> void pgd_init(unsigned long page) { unsigned long *p = (unsigned long *) page; int i; for (i = 0; i < USER_PTRS_PER_PGD; i += 8) { p[i + 0] = (unsigned long) invalid_pte_table; p[i + 1] = (unsigned long) invalid_pte_table; p[i + 2] = (unsigned long) invalid_pte_table; p[i + 3] = (unsigned long) invalid_pte_table; p[i + 4] = (unsigned long) invalid_pte_table; p[i + 5] = (unsigned long) invalid_pte_table; p[i + 6] = (unsigned long) invalid_pte_table; p[i + 7] = (unsigned long) invalid_pte_table; } } void __init pagetable_init(void) { /* Initialize the entire pgd. */ pgd_init((unsigned long)swapper_pg_dir); }
gpl-2.0
flexdroid/kernel
mm/memory.c
142
111176
/* * linux/mm/memory.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ /* * demand-loading started 01.12.91 - seems it is high on the list of * things wanted, and it should be easy to implement. - Linus */ /* * Ok, demand-loading was easy, shared pages a little bit tricker. Shared * pages started 02.12.91, seems to work. - Linus. * * Tested sharing by executing about 30 /bin/sh: under the old kernel it * would have taken more than the 6M I have free, but it worked well as * far as I could see. * * Also corrected some "invalidate()"s - I wasn't doing enough of them. */ /* * Real VM (paging to/from disk) started 18.12.91. Much more work and * thought has to go into this. Oh, well.. * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why. * Found it. Everything seems to work now. * 20.12.91 - Ok, making the swap-device changeable like the root. */ /* * 05.04.94 - Multi-page memory management added for v1.1. * Idea by Alex Bligh (alex@cconcepts.co.uk) * * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG * (Gerhard.Wichert@pdb.siemens.de) * * Aug/Sep 2004 Changed to four level page tables (Andi Kleen) */ #include <linux/kernel_stat.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/mman.h> #include <linux/swap.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/export.h> #include <linux/delayacct.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/writeback.h> #include <linux/memcontrol.h> #include <linux/mmu_notifier.h> #include <linux/kallsyms.h> #include <linux/swapops.h> #include <linux/elf.h> #include <linux/gfp.h> #include <asm/io.h> #include <asm/pgalloc.h> #include <asm/uaccess.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> #include "internal.h" #ifndef CONFIG_NEED_MULTIPLE_NODES /* use the per-pgdat data instead for discontigmem - mbligh */ unsigned long max_mapnr; struct page *mem_map; 
EXPORT_SYMBOL(max_mapnr); EXPORT_SYMBOL(mem_map); #endif unsigned long num_physpages; /* * A number of key systems in x86 including ioremap() rely on the assumption * that high_memory defines the upper bound on direct map memory, then end * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL * and ZONE_HIGHMEM. */ void * high_memory; EXPORT_SYMBOL(num_physpages); EXPORT_SYMBOL(high_memory); /* * Randomize the address space (stacks, mmaps, brk, etc.). * * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization, * as ancient (libc5 based) binaries can segfault. ) */ int randomize_va_space __read_mostly = #ifdef CONFIG_COMPAT_BRK 1; #else 2; #endif static int __init disable_randmaps(char *s) { randomize_va_space = 0; return 1; } __setup("norandmaps", disable_randmaps); unsigned long zero_pfn __read_mostly; unsigned long highest_memmap_pfn __read_mostly; /* * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() */ static int __init init_zero_pfn(void) { zero_pfn = page_to_pfn(ZERO_PAGE(0)); return 0; } core_initcall(init_zero_pfn); #if defined(SPLIT_RSS_COUNTING) void sync_mm_rss(struct mm_struct *mm) { int i; for (i = 0; i < NR_MM_COUNTERS; i++) { if (current->rss_stat.count[i]) { add_mm_counter(mm, i, current->rss_stat.count[i]); current->rss_stat.count[i] = 0; } } current->rss_stat.events = 0; } static void add_mm_counter_fast(struct mm_struct *mm, int member, int val) { struct task_struct *task = current; if (likely(task->mm == mm)) task->rss_stat.count[member] += val; else add_mm_counter(mm, member, val); } #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1) #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1) /* sync counter once per 64 page faults */ #define TASK_RSS_EVENTS_THRESH (64) static void check_sync_rss_stat(struct task_struct *task) { if (unlikely(task != current)) return; if 
(unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH)) sync_mm_rss(task->mm); } #else /* SPLIT_RSS_COUNTING */ #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member) #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member) static void check_sync_rss_stat(struct task_struct *task) { } #endif /* SPLIT_RSS_COUNTING */ #ifdef HAVE_GENERIC_MMU_GATHER static int tlb_next_batch(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; batch = tlb->active; if (batch->next) { tlb->active = batch->next; return 1; } batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); if (!batch) return 0; batch->next = NULL; batch->nr = 0; batch->max = MAX_GATHER_BATCH; tlb->active->next = batch; tlb->active = batch; return 1; } /* tlb_gather_mmu * Called to initialize an (on-stack) mmu_gather structure for page-table * tear-down from @mm. The @fullmm argument is used when @mm is without * users and we're going to destroy the full address space (exit/execve). */ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) { tlb->mm = mm; tlb->fullmm = fullmm; tlb->need_flush = 0; tlb->fast_mode = (num_possible_cpus() == 1); tlb->local.next = NULL; tlb->local.nr = 0; tlb->local.max = ARRAY_SIZE(tlb->__pages); tlb->active = &tlb->local; #ifdef CONFIG_HAVE_RCU_TABLE_FREE tlb->batch = NULL; #endif } void tlb_flush_mmu(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; if (!tlb->need_flush) return; tlb->need_flush = 0; tlb_flush(tlb); #ifdef CONFIG_HAVE_RCU_TABLE_FREE tlb_table_flush(tlb); #endif if (tlb_fast_mode(tlb)) return; for (batch = &tlb->local; batch; batch = batch->next) { free_pages_and_swap_cache(batch->pages, batch->nr); batch->nr = 0; } tlb->active = &tlb->local; } /* tlb_finish_mmu * Called at the end of the shootdown operation to free up any resources * that were required. 
*/ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { struct mmu_gather_batch *batch, *next; tlb_flush_mmu(tlb); /* keep the page table cache within bounds */ check_pgt_cache(); for (batch = tlb->local.next; batch; batch = next) { next = batch->next; free_pages((unsigned long)batch, 0); } tlb->local.next = NULL; } /* __tlb_remove_page * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while * handling the additional races in SMP caused by other CPUs caching valid * mappings in their TLBs. Returns the number of free page slots left. * When out of page slots we must call tlb_flush_mmu(). */ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { struct mmu_gather_batch *batch; VM_BUG_ON(!tlb->need_flush); if (tlb_fast_mode(tlb)) { free_page_and_swap_cache(page); return 1; /* avoid calling tlb_flush_mmu() */ } batch = tlb->active; batch->pages[batch->nr++] = page; if (batch->nr == batch->max) { if (!tlb_next_batch(tlb)) return 0; batch = tlb->active; } VM_BUG_ON(batch->nr > batch->max); return batch->max - batch->nr; } #endif /* HAVE_GENERIC_MMU_GATHER */ #ifdef CONFIG_HAVE_RCU_TABLE_FREE /* * See the comment near struct mmu_table_batch. */ static void tlb_remove_table_smp_sync(void *arg) { /* Simply deliver the interrupt */ } static void tlb_remove_table_one(void *table) { /* * This isn't an RCU grace period and hence the page-tables cannot be * assumed to be actually RCU-freed. * * It is however sufficient for software page-table walkers that rely on * IRQ disabling. See the comment near struct mmu_table_batch. 
*/ smp_call_function(tlb_remove_table_smp_sync, NULL, 1); __tlb_remove_table(table); } static void tlb_remove_table_rcu(struct rcu_head *head) { struct mmu_table_batch *batch; int i; batch = container_of(head, struct mmu_table_batch, rcu); for (i = 0; i < batch->nr; i++) __tlb_remove_table(batch->tables[i]); free_page((unsigned long)batch); } void tlb_table_flush(struct mmu_gather *tlb) { struct mmu_table_batch **batch = &tlb->batch; if (*batch) { call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); *batch = NULL; } } void tlb_remove_table(struct mmu_gather *tlb, void *table) { struct mmu_table_batch **batch = &tlb->batch; tlb->need_flush = 1; /* * When there's less then two users of this mm there cannot be a * concurrent page-table walk. */ if (atomic_read(&tlb->mm->mm_users) < 2) { __tlb_remove_table(table); return; } if (*batch == NULL) { *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); if (*batch == NULL) { tlb_remove_table_one(table); return; } (*batch)->nr = 0; } (*batch)->tables[(*batch)->nr++] = table; if ((*batch)->nr == MAX_TABLE_BATCH) tlb_table_flush(tlb); } #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ /* * If a p?d_bad entry is found while walking page tables, report * the error, before resetting entry to p?d_none. Usually (but * very seldom) called out from the p?d_none_or_clear_bad macros. */ void pgd_clear_bad(pgd_t *pgd) { pgd_ERROR(*pgd); pgd_clear(pgd); } void pud_clear_bad(pud_t *pud) { pud_ERROR(*pud); pud_clear(pud); } void pmd_clear_bad(pmd_t *pmd) { pmd_ERROR(*pmd); pmd_clear(pmd); } /* * Note: this doesn't free the actual pages themselves. That * has been handled earlier when unmapping all the memory regions. 
*/ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr) { pgtable_t token = pmd_pgtable(*pmd); pmd_clear(pmd); pte_free_tlb(tlb, token, addr); tlb->mm->nr_ptes--; } static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pmd_t *pmd; unsigned long next; unsigned long start; start = addr; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_none_or_clear_bad(pmd)) continue; free_pte_range(tlb, pmd, addr); } while (pmd++, addr = next, addr != end); start &= PUD_MASK; if (start < floor) return; if (ceiling) { ceiling &= PUD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pmd = pmd_offset(pud, start); pud_clear(pud); pmd_free_tlb(tlb, pmd, start); } static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pud_t *pud; unsigned long next; unsigned long start; start = addr; pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; free_pmd_range(tlb, pud, addr, next, floor, ceiling); } while (pud++, addr = next, addr != end); start &= PGDIR_MASK; if (start < floor) return; if (ceiling) { ceiling &= PGDIR_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pud = pud_offset(pgd, start); pgd_clear(pgd); pud_free_tlb(tlb, pud, start); } /* * This function frees user-level page tables of a process. * * Must be called with pagetable lock held. */ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pgd_t *pgd; unsigned long next; /* * The next few lines have given us lots of grief... * * Why are we testing PMD* at this top level? Because often * there will be no work to do at all, and we'd prefer not to * go all the way down to the bottom just to discover that. 
* * Why all these "- 1"s? Because 0 represents both the bottom * of the address space and the top of it (using -1 for the * top wouldn't help much: the masks would do the wrong thing). * The rule is that addr 0 and floor 0 refer to the bottom of * the address space, but end 0 and ceiling 0 refer to the top * Comparisons need to use "end - 1" and "ceiling - 1" (though * that end 0 case should be mythical). * * Wherever addr is brought up or ceiling brought down, we must * be careful to reject "the opposite 0" before it confuses the * subsequent tests. But what about where end is brought down * by PMD_SIZE below? no, end can't go down to 0 there. * * Whereas we round start (addr) and ceiling down, by different * masks at different levels, in order to test whether a table * now has no other vmas using it, so can be freed, we don't * bother to round floor or end up - the tests don't need that. */ addr &= PMD_MASK; if (addr < floor) { addr += PMD_SIZE; if (!addr) return; } if (ceiling) { ceiling &= PMD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) end -= PMD_SIZE; if (addr > end - 1) return; pgd = pgd_offset(tlb->mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; free_pud_range(tlb, pgd, addr, next, floor, ceiling); } while (pgd++, addr = next, addr != end); } void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling) { while (vma) { struct vm_area_struct *next = vma->vm_next; unsigned long addr = vma->vm_start; /* * Hide vma from rmap and truncate_pagecache before freeing * pgtables */ unlink_anon_vmas(vma); unlink_file_vma(vma); if (is_vm_hugetlb_page(vma)) { hugetlb_free_pgd_range(tlb, addr, vma->vm_end, floor, next? 
next->vm_start: ceiling); } else { /* * Optimization: gather nearby vmas into one call down */ while (next && next->vm_start <= vma->vm_end + PMD_SIZE && !is_vm_hugetlb_page(next)) { vma = next; next = vma->vm_next; unlink_anon_vmas(vma); unlink_file_vma(vma); } free_pgd_range(tlb, addr, vma->vm_end, floor, next? next->vm_start: ceiling); } vma = next; } } int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, unsigned long address) { pgtable_t new = pte_alloc_one(mm, address); int wait_split_huge_page; if (!new) return -ENOMEM; /* * Ensure all pte setup (eg. pte page lock and page clearing) are * visible before the pte is made visible to other CPUs by being * put into page tables. * * The other side of the story is the pointer chasing in the page * table walking code (when walking the page table without locking; * ie. most of the time). Fortunately, these data accesses consist * of a chain of data-dependent loads, meaning most CPUs (alpha * being the notable exception) will already guarantee loads are * seen in-order. See the alpha page table accessors for the * smp_read_barrier_depends() barriers in page table walking code. */ smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */ spin_lock(&mm->page_table_lock); wait_split_huge_page = 0; if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ mm->nr_ptes++; pmd_populate(mm, pmd, new); new = NULL; } else if (unlikely(pmd_trans_splitting(*pmd))) wait_split_huge_page = 1; spin_unlock(&mm->page_table_lock); if (new) pte_free(mm, new); if (wait_split_huge_page) wait_split_huge_page(vma->anon_vma, pmd); return 0; } int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) { pte_t *new = pte_alloc_one_kernel(&init_mm, address); if (!new) return -ENOMEM; smp_wmb(); /* See comment in __pte_alloc */ spin_lock(&init_mm.page_table_lock); if (likely(pmd_none(*pmd))) { /* Has another populated it ? 
*/ pmd_populate_kernel(&init_mm, pmd, new); new = NULL; } else VM_BUG_ON(pmd_trans_splitting(*pmd)); spin_unlock(&init_mm.page_table_lock); if (new) pte_free_kernel(&init_mm, new); return 0; } static inline void init_rss_vec(int *rss) { memset(rss, 0, sizeof(int) * NR_MM_COUNTERS); } static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss) { int i; if (current->mm == mm) sync_mm_rss(mm); for (i = 0; i < NR_MM_COUNTERS; i++) if (rss[i]) add_mm_counter(mm, i, rss[i]); } /* * This function is called to print an error when a bad pte * is found. For example, we might have a PFN-mapped pte in * a region that doesn't allow it. * * The calling function must still handle the error. */ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page) { pgd_t *pgd = pgd_offset(vma->vm_mm, addr); pud_t *pud = pud_offset(pgd, addr); pmd_t *pmd = pmd_offset(pud, addr); struct address_space *mapping; pgoff_t index; static unsigned long resume; static unsigned long nr_shown; static unsigned long nr_unshown; /* * Allow a burst of 60 reports, then keep quiet for that minute; * or allow a steady drip of one report per second. */ if (nr_shown == 60) { if (time_before(jiffies, resume)) { nr_unshown++; return; } if (nr_unshown) { printk(KERN_ALERT "BUG: Bad page map: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } nr_shown = 0; } if (nr_shown++ == 0) resume = jiffies + 60 * HZ; mapping = vma->vm_file ? 
	vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	printk(KERN_ALERT "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
		current->comm,
		(long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page);
	printk(KERN_ALERT "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
		(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	/*
	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
	 */
	if (vma->vm_ops)
		print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
				(unsigned long)vma->vm_ops->fault);
	if (vma->vm_file && vma->vm_file->f_op)
		print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
				(unsigned long)vma->vm_file->f_op->mmap);
	dump_stack();
	/* Mark the kernel tainted: the page tables are known-corrupt now. */
	add_taint(TAINT_BAD_PAGE);
}

/*
 * A mapping is copy-on-write when it is writable via MAYWRITE but not
 * actually shared: i.e. VM_MAYWRITE set and VM_SHARED clear.
 */
static inline int is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * Architectures may override these; the generic versions compare against
 * the single global zero_pfn set up in init_zero_pfn().
 */
#ifndef is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	return pfn == zero_pfn;
}
#endif

#ifndef my_zero_pfn
static inline unsigned long my_zero_pfn(unsigned long addr)
{
	return zero_pfn;
}
#endif

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
* * The way we recognize COWed pages within VM_PFNMAP mappings is through the * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit * set, and the vm_pgoff will point to the first PFN mapped: thus every special * mapping will always honor the rule * * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) * * And for normal mappings this is false. * * This restricts such mappings to be a linear translation from virtual address * to pfn. To get around this restriction, we allow arbitrary mappings so long * as the vma is not a COW mapping; in that case, we know that all ptes are * special (because none can have been COWed). * * * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP. * * VM_MIXEDMAP mappings can likewise contain memory with or without "struct * page" backing, however the difference is that _all_ pages with a struct * page (that is, those where pfn_valid is true) are refcounted and considered * normal pages by the VM. The disadvantage is that pages are refcounted * (which can be slower and simply not an option for some PFNMAP users). The * advantage is that we don't have to follow the strict linearity rule of * PFNMAP mappings in order to support COWable mappings. 
* */ #ifdef __HAVE_ARCH_PTE_SPECIAL # define HAVE_PTE_SPECIAL 1 #else # define HAVE_PTE_SPECIAL 0 #endif struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { unsigned long pfn = pte_pfn(pte); if (HAVE_PTE_SPECIAL) { if (likely(!pte_special(pte))) goto check_pfn; if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) return NULL; if (!is_zero_pfn(pfn)) print_bad_pte(vma, addr, pte, NULL); return NULL; } /* !HAVE_PTE_SPECIAL case follows: */ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { if (vma->vm_flags & VM_MIXEDMAP) { if (!pfn_valid(pfn)) return NULL; goto out; } else { unsigned long off; off = (addr - vma->vm_start) >> PAGE_SHIFT; if (pfn == vma->vm_pgoff + off) return NULL; if (!is_cow_mapping(vma->vm_flags)) return NULL; } } if (is_zero_pfn(pfn)) return NULL; check_pfn: if (unlikely(pfn > highest_memmap_pfn)) { print_bad_pte(vma, addr, pte, NULL); return NULL; } /* * NOTE! We still have PageReserved() pages in the page tables. * eg. VDSO mappings can cause them to exist. */ out: return pfn_to_page(pfn); } /* * copy one vm_area from one task to the other. Assumes the page tables * already present in the new task to be cleared in the whole range * covered by this vma. */ static inline unsigned long copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr, int *rss) { unsigned long vm_flags = vma->vm_flags; pte_t pte = *src_pte; struct page *page; /* pte contains position in swap or file, so copy. */ if (unlikely(!pte_present(pte))) { if (!pte_file(pte)) { swp_entry_t entry = pte_to_swp_entry(pte); if (swap_duplicate(entry) < 0) return entry.val; /* make sure dst_mm is on swapoff's mmlist. 
*/ if (unlikely(list_empty(&dst_mm->mmlist))) { spin_lock(&mmlist_lock); if (list_empty(&dst_mm->mmlist)) list_add(&dst_mm->mmlist, &src_mm->mmlist); spin_unlock(&mmlist_lock); } if (likely(!non_swap_entry(entry))) rss[MM_SWAPENTS]++; else if (is_migration_entry(entry)) { page = migration_entry_to_page(entry); if (PageAnon(page)) rss[MM_ANONPAGES]++; else rss[MM_FILEPAGES]++; if (is_write_migration_entry(entry) && is_cow_mapping(vm_flags)) { /* * COW mappings require pages in both * parent and child to be set to read. */ make_migration_entry_read(&entry); pte = swp_entry_to_pte(entry); set_pte_at(src_mm, addr, src_pte, pte); } } } goto out_set_pte; } /* * If it's a COW mapping, write protect it both * in the parent and the child */ if (is_cow_mapping(vm_flags)) { ptep_set_wrprotect(src_mm, addr, src_pte); pte = pte_wrprotect(pte); } /* * If it's a shared mapping, mark it clean in * the child */ if (vm_flags & VM_SHARED) pte = pte_mkclean(pte); pte = pte_mkold(pte); page = vm_normal_page(vma, addr, pte); if (page) { get_page(page); page_dup_rmap(page); if (PageAnon(page)) rss[MM_ANONPAGES]++; else rss[MM_FILEPAGES]++; } out_set_pte: set_pte_at(dst_mm, addr, dst_pte, pte); return 0; } int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) { pte_t *orig_src_pte, *orig_dst_pte; pte_t *src_pte, *dst_pte; spinlock_t *src_ptl, *dst_ptl; int progress = 0; int rss[NR_MM_COUNTERS]; swp_entry_t entry = (swp_entry_t){0}; again: init_rss_vec(rss); dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); if (!dst_pte) return -ENOMEM; src_pte = pte_offset_map(src_pmd, addr); src_ptl = pte_lockptr(src_mm, src_pmd); spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); orig_src_pte = src_pte; orig_dst_pte = dst_pte; arch_enter_lazy_mmu_mode(); do { /* * We are holding two locks at this point - either of them * could generate latencies in another task on another CPU. 
*/ if (progress >= 32) { progress = 0; if (need_resched() || spin_needbreak(src_ptl) || spin_needbreak(dst_ptl)) break; } if (pte_none(*src_pte)) { progress++; continue; } entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss); if (entry.val) break; progress += 8; } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); spin_unlock(src_ptl); pte_unmap(orig_src_pte); add_mm_rss_vec(dst_mm, rss); pte_unmap_unlock(orig_dst_pte, dst_ptl); cond_resched(); if (entry.val) { if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) return -ENOMEM; progress = 0; } if (addr != end) goto again; return 0; } static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, unsigned long addr, unsigned long end) { pmd_t *src_pmd, *dst_pmd; unsigned long next; dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); if (!dst_pmd) return -ENOMEM; src_pmd = pmd_offset(src_pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_trans_huge(*src_pmd)) { int err; VM_BUG_ON(next-addr != HPAGE_PMD_SIZE); err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd, addr, vma); if (err == -ENOMEM) return -ENOMEM; if (!err) continue; /* fall through */ } if (pmd_none_or_clear_bad(src_pmd)) continue; if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd, vma, addr, next)) return -ENOMEM; } while (dst_pmd++, src_pmd++, addr = next, addr != end); return 0; } static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) { pud_t *src_pud, *dst_pud; unsigned long next; dst_pud = pud_alloc(dst_mm, dst_pgd, addr); if (!dst_pud) return -ENOMEM; src_pud = pud_offset(src_pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(src_pud)) continue; if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud, vma, addr, next)) return -ENOMEM; } while (dst_pud++, 
src_pud++, addr = next, addr != end); return 0; } int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, struct vm_area_struct *vma) { pgd_t *src_pgd, *dst_pgd; unsigned long next; unsigned long addr = vma->vm_start; unsigned long end = vma->vm_end; int ret; /* * Don't copy ptes where a page fault will fill them correctly. * Fork becomes much lighter when there are big shared or private * readonly mappings. The tradeoff is that copy_page_range is more * efficient than faulting. */ if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) { if (!vma->anon_vma) return 0; } if (is_vm_hugetlb_page(vma)) return copy_hugetlb_page_range(dst_mm, src_mm, vma); if (unlikely(is_pfn_mapping(vma))) { /* * We do not free on error cases below as remove_vma * gets called on error from higher level routine */ ret = track_pfn_vma_copy(vma); if (ret) return ret; } /* * We need to invalidate the secondary MMU mappings only when * there could be a permission downgrade on the ptes of the * parent mm. And a permission downgrade will only happen if * is_cow_mapping() returns true. 
*/ if (is_cow_mapping(vma->vm_flags)) mmu_notifier_invalidate_range_start(src_mm, addr, end); ret = 0; dst_pgd = pgd_offset(dst_mm, addr); src_pgd = pgd_offset(src_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(src_pgd)) continue; if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, vma, addr, next))) { ret = -ENOMEM; break; } } while (dst_pgd++, src_pgd++, addr = next, addr != end); if (is_cow_mapping(vma->vm_flags)) mmu_notifier_invalidate_range_end(src_mm, vma->vm_start, end); return ret; } static unsigned long zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) { struct mm_struct *mm = tlb->mm; int force_flush = 0; int rss[NR_MM_COUNTERS]; spinlock_t *ptl; pte_t *start_pte; pte_t *pte; again: init_rss_vec(rss); start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); pte = start_pte; arch_enter_lazy_mmu_mode(); do { pte_t ptent = *pte; if (pte_none(ptent)) { continue; } if (pte_present(ptent)) { struct page *page; page = vm_normal_page(vma, addr, ptent); if (unlikely(details) && page) { /* * unmap_shared_mapping_pages() wants to * invalidate cache without truncating: * unmap shared but keep private pages. */ if (details->check_mapping && details->check_mapping != page->mapping) continue; /* * Each page->index must be checked when * invalidating or truncating nonlinear. 
*/ if (details->nonlinear_vma && (page->index < details->first_index || page->index > details->last_index)) continue; } ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); tlb_remove_tlb_entry(tlb, pte, addr); if (unlikely(!page)) continue; if (unlikely(details) && details->nonlinear_vma && linear_page_index(details->nonlinear_vma, addr) != page->index) set_pte_at(mm, addr, pte, pgoff_to_pte(page->index)); if (PageAnon(page)) rss[MM_ANONPAGES]--; else { if (pte_dirty(ptent)) set_page_dirty(page); if (pte_young(ptent) && likely(!VM_SequentialReadHint(vma))) mark_page_accessed(page); rss[MM_FILEPAGES]--; } page_remove_rmap(page); if (unlikely(page_mapcount(page) < 0)) print_bad_pte(vma, addr, ptent, page); force_flush = !__tlb_remove_page(tlb, page); if (force_flush) break; continue; } /* * If details->check_mapping, we leave swap entries; * if details->nonlinear_vma, we leave file entries. */ if (unlikely(details)) continue; if (pte_file(ptent)) { if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) print_bad_pte(vma, addr, ptent, NULL); } else { swp_entry_t entry = pte_to_swp_entry(ptent); if (!non_swap_entry(entry)) rss[MM_SWAPENTS]--; else if (is_migration_entry(entry)) { struct page *page; page = migration_entry_to_page(entry); if (PageAnon(page)) rss[MM_ANONPAGES]--; else rss[MM_FILEPAGES]--; } if (unlikely(!free_swap_and_cache(entry))) print_bad_pte(vma, addr, ptent, NULL); } pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); } while (pte++, addr += PAGE_SIZE, addr != end); add_mm_rss_vec(mm, rss); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(start_pte, ptl); /* * mmu_gather ran out of room to batch pages, we break out of * the PTE lock to avoid doing the potential expensive TLB invalidate * and page-free while holding it. 
*/ if (force_flush) { force_flush = 0; tlb_flush_mmu(tlb); if (addr != end) goto again; } return addr; } static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, struct zap_details *details) { pmd_t *pmd; unsigned long next; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd)) { if (next - addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) goto next; /* fall through */ } /* * Here there can be other concurrent MADV_DONTNEED or * trans huge page faults running, and if the pmd is * none or trans huge it can change under us. This is * because MADV_DONTNEED holds the mmap_sem in read * mode. */ if (pmd_none_or_trans_huge_or_clear_bad(pmd)) goto next; next = zap_pte_range(tlb, vma, pmd, addr, next, details); next: cond_resched(); } while (pmd++, addr = next, addr != end); return addr; } static inline unsigned long zap_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, struct zap_details *details) { pud_t *pud; unsigned long next; pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; next = zap_pmd_range(tlb, vma, pud, addr, next, details); } while (pud++, addr = next, addr != end); return addr; } static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details) { pgd_t *pgd; unsigned long next; if (details && !details->check_mapping && !details->nonlinear_vma) details = NULL; BUG_ON(addr >= end); mem_cgroup_uncharge_start(); tlb_start_vma(tlb, vma); pgd = pgd_offset(vma->vm_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; next = zap_pud_range(tlb, vma, pgd, addr, next, details); } while (pgd++, addr = 
next, addr != end); tlb_end_vma(tlb, vma); mem_cgroup_uncharge_end(); } static void unmap_single_vma(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, unsigned long *nr_accounted, struct zap_details *details) { unsigned long start = max(vma->vm_start, start_addr); unsigned long end; if (start >= vma->vm_end) return; end = min(vma->vm_end, end_addr); if (end <= vma->vm_start) return; if (vma->vm_flags & VM_ACCOUNT) *nr_accounted += (end - start) >> PAGE_SHIFT; if (unlikely(is_pfn_mapping(vma))) untrack_pfn_vma(vma, 0, 0); if (start != end) { if (unlikely(is_vm_hugetlb_page(vma))) { /* * It is undesirable to test vma->vm_file as it * should be non-null for valid hugetlb area. * However, vm_file will be NULL in the error * cleanup path of do_mmap_pgoff. When * hugetlbfs ->mmap method fails, * do_mmap_pgoff() nullifies vma->vm_file * before calling this function to clean up. * Since no pte has actually been setup, it is * safe to do nothing in this case. */ if (vma->vm_file) unmap_hugepage_range(vma, start, end, NULL); } else unmap_page_range(tlb, vma, start, end, details); } } /** * unmap_vmas - unmap a range of memory covered by a list of vma's * @tlb: address of the caller's struct mmu_gather * @vma: the starting vma * @start_addr: virtual address at which to start unmapping * @end_addr: virtual address at which to end unmapping * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here * @details: details of nonlinear truncation or shared cache invalidation * * Unmap all pages in the vma list. * * Only addresses between `start' and `end' will be unmapped. * * The VMA list must be sorted in ascending virtual address order. * * unmap_vmas() assumes that the caller will flush the whole unmapped address * range after unmap_vmas() returns. So the only responsibility here is to * ensure that any thus-far unmapped pages are flushed before unmap_vmas() * drops the lock and schedules. 
 */
void unmap_vmas(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;

	/* One notifier window spans the whole vma list walk. */
	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
				 details);
	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Caller must protect the VMA list
 */
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, 0);
	/* Record the RSS high-water mark before pages go away. */
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	tlb_finish_mmu(&tlb, address, end);
}

/**
 * zap_page_range_single - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * The range must fit into one VMA.
 */
static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, 0);
	update_hiwater_rss(mm);
	/* Single vma: issue the notifier window here, not via unmap_vmas(). */
	mmu_notifier_invalidate_range_start(mm, address, end);
	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
	mmu_notifier_invalidate_range_end(mm, address, end);
	tlb_finish_mmu(&tlb, address, end);
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 * Returns 0 if successful.
 */
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size)
{
	if (address < vma->vm_start || address + size > vma->vm_end ||
			!(vma->vm_flags & VM_PFNMAP))
		return -1;
	zap_page_range_single(vma, address, size, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);

/**
 * follow_page - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	/* Architecture-specific huge mappings are handled first. */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		goto no_page_table;
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto no_page_table;
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(mm, pmd);
			goto split_fallthrough;
		}
		/*
		 * Recheck under page_table_lock: the pmd can change
		 * (be split or zapped) until it is locked.
		 */
		spin_lock(&mm->page_table_lock);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(&mm->page_table_lock);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(mm, address,
							     pmd, flags);
				spin_unlock(&mm->page_table_lock);
				goto out;
			}
		} else
			spin_unlock(&mm->page_table_lock);
		/* fall through */
	}
split_fallthrough:
	if (unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

	pte = *ptep;
	if (!pte_present(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here and migration is
			 * blocked by the pte's page reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				mlock_vma_page(page);
			unlock_page(page);
		}
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return page;

no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
	    (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return page;
}

/*
 * True when @addr's page is the stack guard page at either end of @vma.
 */
static inline int stack_guard_page(struct vm_area_struct *vma,
				   unsigned long addr)
{
	return stack_guard_page_start(vma, addr) ||
	       stack_guard_page_end(vma, addr+PAGE_SIZE);
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*.
Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int nr_pages, unsigned int gup_flags,
		     struct page **pages, struct vm_area_struct **vmas,
		     int *nonblocking)
{
	int i;
	unsigned long vm_flags;

	if (nr_pages <= 0)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * Require read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (gup_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(mm, start)) {
			/* Gate page (e.g. vsyscall): no vma, walk by hand. */
			unsigned long pg = start & PAGE_MASK;
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;

			/* user gate pages are read-only */
			if (gup_flags & FOLL_WRITE)
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			VM_BUG_ON(pmd_trans_huge(*pmd));
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			vma = get_gate_vma(mm);
			if (pages) {
				struct page *page;

				page = vm_normal_page(vma, start, *pte);
				if (!page) {
					if (!(gup_flags & FOLL_DUMP) &&
					     is_zero_pfn(pte_pfn(*pte)))
						page = pte_page(*pte);
					else {
						pte_unmap(pte);
						return i ? : -EFAULT;
					}
				}
				pages[i] = page;
				get_page(page);
			}
			pte_unmap(pte);
			goto next_page;
		}

		/*
		 * Vendor extension: user-accessible timer pages live
		 * outside any vma and are resolved by a helper —
		 * NOTE(review): semantics of get_user_timer_page() are
		 * not visible from this file.
		 */
		if (use_user_accessible_timers()) {
			if (!vma && in_user_timers_area(mm, start)) {
				int goto_next_page = 0;
				int user_timer_ret = get_user_timer_page(vma,
					mm, start, gup_flags, pages, i,
					&goto_next_page);
				if (goto_next_page)
					goto next_page;
				else
					return user_timer_ret;
			}
		}

		if (!vma ||
		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			continue;
		}

		do {
			struct page *page;
			unsigned int foll_flags = gup_flags;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current)))
				return i ? i : -ERESTARTSYS;

			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;
				unsigned int fault_flags = 0;

				/* For mlock, just skip the stack guard page. */
				if (foll_flags & FOLL_MLOCK) {
					if (stack_guard_page(vma, start))
						goto next_page;
				}
				if (foll_flags & FOLL_WRITE)
					fault_flags |= FAULT_FLAG_WRITE;
				if (nonblocking)
					fault_flags |= FAULT_FLAG_ALLOW_RETRY;
				if (foll_flags & FOLL_NOWAIT)
					fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);

				ret = handle_mm_fault(mm, vma, start,
							fault_flags);

				if (ret & VM_FAULT_ERROR) {
					if (ret & VM_FAULT_OOM)
						return i ? i : -ENOMEM;
					if (ret & (VM_FAULT_HWPOISON |
						   VM_FAULT_HWPOISON_LARGE)) {
						if (i)
							return i;
						else if (gup_flags & FOLL_HWPOISON)
							return -EHWPOISON;
						else
							return -EFAULT;
					}
					if (ret & VM_FAULT_SIGBUS)
						return i ? i : -EFAULT;
					BUG();
				}

				if (tsk) {
					if (ret & VM_FAULT_MAJOR)
						tsk->maj_flt++;
					else
						tsk->min_flt++;
				}

				if (ret & VM_FAULT_RETRY) {
					/* mmap_sem was dropped; tell caller. */
					if (nonblocking)
						*nonblocking = 0;
					return i;
				}

				/*
				 * The VM_FAULT_WRITE bit tells us that
				 * do_wp_page has broken COW when necessary,
				 * even if maybe_mkwrite decided not to set
				 * pte_write. We can thus safely do subsequent
				 * page lookups as if they were reads. But only
				 * do so when looping for pte_write is futile:
				 * in some cases userspace may also be wanting
				 * to write to the gotten user page, which a
				 * read fault here might prevent (a readonly
				 * page might get reCOWed by userspace write).
				 */
				if ((ret & VM_FAULT_WRITE) &&
				    !(vma->vm_flags & VM_WRITE))
					foll_flags &= ~FOLL_WRITE;

				cond_resched();
			}
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
next_page:
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * handle_mm_fault() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This should be called with the mm_sem held for read.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}
	/* Mirror the major/minor fault accounting done by gup. */
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}

/*
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force write access even if user mapping is
 *		readonly. This will result in the page being COWed even
 *		in MAP_SHARED mappings. You do not want this.
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int nr_pages, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	/* Translate the legacy int arguments into FOLL_* flags. */
	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Allocate (if necessary) and map the pte for @addr, returning it locked.
 * *@ptl receives the pte lock; returns NULL on allocation failure.
 */
pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pgd_t * pgd = pgd_offset(mm, addr);
	pud_t * pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd_t * pmd = pmd_alloc(mm, pud, addr);
		if (pmd) {
			VM_BUG_ON(pmd_trans_huge(*pmd));
			return pte_alloc_map_lock(mm, pmd, addr, ptl);
		}
	}
	return NULL;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte;
	spinlock_t *ptl;

	/* Anonymous pages need rmap state this path cannot set up. */
	retval = -EINVAL;
	if (PageAnon(page))
		goto out;
	retval = -ENOMEM;
	flush_dcache_page(page);
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter_fast(mm, MM_FILEPAGES);
	page_add_file_rmap(page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));

	retval = 0;
	pte_unmap_unlock(pte, ptl);
	return retval;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_page - insert single page into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @page: source kernel page
 *
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
 * (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
 * that. Your vma protection will have to be set up correctly, which
 * means that if you want a shared writable mapping, you'd better
 * ask for a shared writable mapping!
 *
 * The page does not need to be reserved.
 */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (!page_count(page))
		return -EINVAL;
	vma->vm_flags |= VM_INSERTPAGE;
	return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);

/*
 * Install a special (no struct page) pte mapping @pfn at @addr.
 * Returns 0, -ENOMEM if the pte cannot be allocated, or -EBUSY if a
 * mapping already exists at @addr.
 */
static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte, entry;
	spinlock_t *ptl;

	retval = -ENOMEM;
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	entry = pte_mkspecial(pfn_pte(pfn, prot));
	set_pte_at(mm, addr, pte, entry);
	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */

	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_pfn - insert single pfn into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pfn: source kernel pfn
 *
 * Similar to vm_inert_page, this allows drivers to insert individual pages
 * they've allocated into a user vma. Same comments apply.
 *
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return NULL.
 *
 * vma cannot be a COW mapping.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	int ret;
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * Technically, architectures with pte_special can avoid all these
	 * restrictions (same for remap_pfn_range).  However we would like
	 * consistency in testing and feature parity among all, so we should
	 * try to keep these invariants in place for everybody.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
		return -EINVAL;

	ret = insert_pfn(vma, addr, pfn, pgprot);

	if (ret)
		untrack_pfn_vma(vma, pfn, PAGE_SIZE);

	return ret;
}
EXPORT_SYMBOL(vm_insert_pfn);

/*
 * Like vm_insert_pfn() but for VM_MIXEDMAP vmas: pfn_valid() pfns are
 * inserted as refcounted pages when pte_special is not available.
 */
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;

	/*
	 * If we don't have pte special, then we have to use the pfn_valid()
	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
	 * refcount the page if pfn_valid is true (hence insert_page rather
	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
	 * without pte special, it would there be refcounted as a normal page.
	 */
	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
		struct page *page;

		page = pfn_to_page(pfn);
		return insert_page(vma, addr, page, vma->vm_page_prot);
	}
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed.
 any references to nonexistent pages results
 * in null mappings (currently treated as "copy-on-access")
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	arch_enter_lazy_mmu_mode();
	do {
		/* Caller guarantees the range was unmapped beforehand. */
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

/*
 * Allocate pmds for [addr, end) under @pud and fill each with special
 * pfn ptes; @pfn is rebased so pfn + (addr >> PAGE_SHIFT) is the target.
 */
static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

/*
 * Allocate puds for [addr, end) under @pgd and recurse into
 * remap_pmd_range() with the rebased @pfn.
 */
static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 *  Note: this is only safe if the mm semaphore is held when called.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED is specified all over the place, because
	 *	in 2.4 it kept swapout's vma scan off this vma; but
	 *	in 2.6 the LRU scan won't even find its pages, so this
	 *	flag means no more than count its pages in reserved_vm,
	 *	and omit it from core dump, even when VM_IO turned off.
	 *   VM_PFNMAP tells the core MM that the base pages are just
	 *	raw PFN mappings, and do not have a "struct page" associated
	 *	with them.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
	 */
	if (addr == vma->vm_start && end == vma->vm_end) {
		vma->vm_pgoff = pfn;
		vma->vm_flags |= VM_PFN_AT_MMAP;
	} else if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
	if (err) {
		/*
		 * To indicate that track_pfn related cleanup is not
		 * needed from higher level routine calling unmap_vmas
		 */
		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
		vma->vm_flags &= ~VM_PFN_AT_MMAP;
		return -EINVAL;
	}

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	if (err)
		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));

	return err;
}
EXPORT_SYMBOL(remap_pfn_range);

/*
 * Call @fn on each pte in [addr, end) under @pmd, allocating the pte
 * page if needed.  Kernel mm (init_mm) ptes are not locked; user ptes
 * are walked under the pte lock.
 */
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pte_t *pte;
	int err;
	pgtable_t token;
	spinlock_t *uninitialized_var(ptl);

	pte = (mm == &init_mm) ?
		pte_alloc_kernel(pmd, addr) :
		pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;

	BUG_ON(pmd_huge(*pmd));

	arch_enter_lazy_mmu_mode();

	token = pmd_pgtable(*pmd);

	do {
		err = fn(pte++, token, addr, data);
		if (err)
			break;
	} while (addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();

	if (mm != &init_mm)
		pte_unmap_unlock(pte-1, ptl);
	return err;
}

/*
 * Allocate pmds for [addr, end) under @pud and recurse into
 * apply_to_pte_range() for each.
 */
static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pmd_t *pmd;
	unsigned long next;
	int err;

	BUG_ON(pud_huge(*pud));

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);
	return err;
}

/*
 * Allocate puds for [addr, end) under @pgd and recurse into
 * apply_to_pmd_range() for each.
 */
static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pud_t *pud;
	unsigned long next;
	int err;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);
	return err;
}

/*
 * Scan a region of virtual memory, filling in page tables as necessary
 * and calling a provided function on each leaf page table.
 */
int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
			unsigned long size, pte_fn_t fn, void *data)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + size;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
EXPORT_SYMBOL_GPL(apply_to_page_range);

/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically.  Before making any commitment, on
 * those architectures or configurations (e.g.
 i386 with PAE) which
 * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page can safely check later on).
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	/*
	 * Only when the pte is wider than a word can the non-atomic read
	 * have seen a torn value; recheck it under the pte lock then.
	 */
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}

/*
 * Copy the user page @src to @dst for COW; when @src is NULL (a PFN
 * mapping with no struct page) copy directly from the user address @va,
 * zero-filling on failure.
 */
static inline void cow_user_page(struct page *dst, struct page *src,
				 unsigned long va, struct vm_area_struct *vma)
{
	/*
	 * If the source page was a PFN mapping, we don't have
	 * a "struct page" for it. We do a best-effort copy by
	 * just copying from the original user address. If that
	 * fails, we just zero-fill it. Live with it.
	 */
	if (unlikely(!src)) {
		void *kaddr = kmap_atomic(dst);
		void __user *uaddr = (void __user *)(va & PAGE_MASK);

		/*
		 * This really shouldn't fail, because the page is there
		 * in the page tables. But it might just be unreadable,
		 * in which case we just give up and fill the result with
		 * zeroes.
		 */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
			clear_page(kaddr);
		kunmap_atomic(kaddr);
		flush_dcache_page(dst);
	} else
		copy_user_highpage(dst, src, va, vma);
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens.
This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), with pte both mapped and locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		spinlock_t *ptl, pte_t orig_pte)
	__releases(ptl)
{
	struct page *old_page, *new_page;
	pte_t entry;
	int ret = 0;
	int page_mkwrite = 0;
	struct page *dirty_page = NULL;

	old_page = vm_normal_page(vma, address, orig_pte);
	if (!old_page) {
		/*
		 * VM_MIXEDMAP !pfn_valid() case
		 *
		 * We should not cow pages in a shared writeable mapping.
		 * Just mark the pages writable as we can't do any dirty
		 * accounting on raw pfn maps.
		 */
		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
				     (VM_WRITE|VM_SHARED))
			goto reuse;
		goto gotten;
	}

	/*
	 * Take out anonymous pages first, anonymous shared vmas are
	 * not dirty accountable.
	 */
	if (PageAnon(old_page) && !PageKsm(old_page)) {
		if (!trylock_page(old_page)) {
			/* Dropped the pte lock to sleep on the page lock, so
			 * revalidate the pte afterwards. */
			page_cache_get(old_page);
			pte_unmap_unlock(page_table, ptl);
			lock_page(old_page);
			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
			if (!pte_same(*page_table, orig_pte)) {
				unlock_page(old_page);
				goto unlock;
			}
			page_cache_release(old_page);
		}
		if (reuse_swap_page(old_page)) {
			/*
			 * The page is all ours.  Move it to our anon_vma so
			 * the rmap code will not search our parent or siblings.
			 * Protected against the rmap code by the page lock.
			 */
			page_move_anon_rmap(old_page, vma, address);
			unlock_page(old_page);
			goto reuse;
		}
		unlock_page(old_page);
	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
					(VM_WRITE|VM_SHARED))) {
		/*
		 * Only catch write-faults on shared writable pages,
		 * read-only shared pages can get COWed by
		 * get_user_pages(.write=1, .force=1).
		 */
		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
			struct vm_fault vmf;
			int tmp;

			vmf.virtual_address = (void __user *)(address &
								PAGE_MASK);
			vmf.pgoff = old_page->index;
			vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
			vmf.page = old_page;

			/*
			 * Notify the address space that the page is about to
			 * become writable so that it can prohibit this or wait
			 * for the page to get into an appropriate state.
			 *
			 * We do this without the lock held, so that it can
			 * sleep if it needs to.
			 */
			page_cache_get(old_page);
			pte_unmap_unlock(page_table, ptl);

			tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
			if (unlikely(tmp &
					(VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
				ret = tmp;
				goto unwritable_page;
			}
			if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
				lock_page(old_page);
				if (!old_page->mapping) {
					ret = 0; /* retry the fault */
					unlock_page(old_page);
					goto unwritable_page;
				}
			} else
				VM_BUG_ON(!PageLocked(old_page));

			/*
			 * Since we dropped the lock we need to revalidate
			 * the PTE as someone else may have changed it.  If
			 * they did, we just return, as we can count on the
			 * MMU to tell us if they didn't also make it writable.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
			if (!pte_same(*page_table, orig_pte)) {
				unlock_page(old_page);
				goto unlock;
			}

			page_mkwrite = 1;
		}
		dirty_page = old_page;
		get_page(dirty_page);

reuse:
		/* Reuse the existing page in place: just make it young,
		 * dirty and (if allowed) writable. */
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = pte_mkyoung(orig_pte);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		if (ptep_set_access_flags(vma, address, page_table, entry, 1))
			update_mmu_cache(vma, address, page_table);
		pte_unmap_unlock(page_table, ptl);
		ret |= VM_FAULT_WRITE;

		if (!dirty_page)
			return ret;

		/*
		 * Yes, Virginia, this is actually required to prevent a race
		 * with clear_page_dirty_for_io() from clearing the page dirty
		 * bit after it clear all dirty ptes, but before a racing
		 * do_wp_page installs a dirty pte.
		 *
		 * __do_fault is protected similarly.
		 */
		if (!page_mkwrite) {
			wait_on_page_locked(dirty_page);
			set_page_dirty_balance(dirty_page, page_mkwrite);
		}
		put_page(dirty_page);
		if (page_mkwrite) {
			struct address_space *mapping = dirty_page->mapping;

			set_page_dirty(dirty_page);
			unlock_page(dirty_page);
			page_cache_release(dirty_page);
			if (mapping) {
				/*
				 * Some device drivers do not set page.mapping
				 * but still dirty their pages
				 */
				balance_dirty_pages_ratelimited(mapping);
			}
		}

		/* file_update_time outside page_lock */
		if (vma->vm_file)
			file_update_time(vma->vm_file);

		return ret;
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	page_cache_get(old_page);
gotten:
	pte_unmap_unlock(page_table, ptl);

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;

	if (is_zero_pfn(pte_pfn(orig_pte))) {
		new_page = alloc_zeroed_user_highpage_movable(vma, address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
		if (!new_page)
			goto oom;
		cow_user_page(new_page, old_page, address, vma);
	}
	__SetPageUptodate(new_page);

	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
		goto oom_free_new;

	/*
	 * Re-check the pte - we dropped the lock
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (likely(pte_same(*page_table, orig_pte))) {
		if (old_page) {
			/* COW of a file page: move the accounting from
			 * file-backed to anonymous. */
			if (!PageAnon(old_page)) {
				dec_mm_counter_fast(mm, MM_FILEPAGES);
				inc_mm_counter_fast(mm, MM_ANONPAGES);
			}
		} else
			inc_mm_counter_fast(mm, MM_ANONPAGES);
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		/*
		 * Clear the pte entry and flush it first, before updating the
		 * pte with the new entry. This will avoid a race condition
		 * seen in the presence of one thread doing SMC and another
		 * thread doing COW.
		 */
		ptep_clear_flush(vma, address, page_table);
		page_add_new_anon_rmap(new_page, vma, address);
		/*
		 * We call the notify macro here because, when using secondary
		 * mmu page tables (such as kvm shadow page tables), we want the
		 * new page to be mapped directly into the secondary page table.
		 */
		set_pte_at_notify(mm, address, page_table, entry);
		update_mmu_cache(vma, address, page_table);
		if (old_page) {
			/*
			 * Only after switching the pte to the new page may
			 * we remove the mapcount here. Otherwise another
			 * process may come and find the rmap count decremented
			 * before the pte is switched to the new page, and
			 * "reuse" the old page writing into it while our pte
			 * here still points into it and can be read by other
			 * threads.
			 *
			 * The critical issue is to order this
			 * page_remove_rmap with the ptep_clear_flush above.
			 * Those stores are ordered by (if nothing else,)
			 * the barrier present in the atomic_add_negative
			 * in page_remove_rmap.
			 *
			 * Then the TLB flush in ptep_clear_flush ensures that
			 * no process can access the old page before the
			 * decremented mapcount is visible. And the old page
			 * cannot be reused until after the decremented
			 * mapcount is visible. So transitively, TLBs to
			 * old page will be flushed before it can be reused.
			 */
			page_remove_rmap(old_page);
		}

		/* Free the old page.. */
		new_page = old_page;
		ret |= VM_FAULT_WRITE;
	} else
		mem_cgroup_uncharge_page(new_page);

	if (new_page)
		page_cache_release(new_page);
unlock:
	pte_unmap_unlock(page_table, ptl);
	if (old_page) {
		/*
		 * Don't let another task, with possibly unlocked vma,
		 * keep the mlocked page.
		 */
		if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
			lock_page(old_page);	/* LRU manipulation */
			munlock_vma_page(old_page);
			unlock_page(old_page);
		}
		page_cache_release(old_page);
	}
	return ret;
oom_free_new:
	page_cache_release(new_page);
oom:
	if (old_page) {
		/* Drop the extra reference (and page lock) taken on the
		 * page_mkwrite path, then the reference taken for the copy. */
		if (page_mkwrite) {
			unlock_page(old_page);
			page_cache_release(old_page);
		}
		page_cache_release(old_page);
	}
	return VM_FAULT_OOM;

unwritable_page:
	page_cache_release(old_page);
	return ret;
}

/* Zap a single-vma slice of a hole being punched in an address_space. */
static void unmap_mapping_range_vma(struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr,
		struct zap_details *details)
{
	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
}

/*
 * Walk the prio tree of linearly-mapped vmas and zap the part of each
 * vma that overlaps [first_index, last_index].
 */
static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	pgoff_t vba, vea, zba, zea;

	vma_prio_tree_foreach(vma, &iter, root,
			details->first_index, details->last_index) {

		vba = vma->vm_pgoff;
		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
		zba = details->first_index;
		if (zba < vba)
			zba = vba;
		zea = details->last_index;
		if (zea > vea)
			zea = vea;

		unmap_mapping_range_vma(vma,
			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
				details);
	}
}

static inline void unmap_mapping_range_list(struct list_head *head,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;

	/*
	 * In nonlinear VMAs there is no correspondence between virtual address
	 * offset and file offset.  So we must perform an exhaustive search
	 * across *all* the pages in each nonlinear VMA, not just the pages
	 * whose virtual address lies outside the file truncation point.
	 */
	list_for_each_entry(vma, head, shared.vm_set.list) {
		details->nonlinear_vma = vma;
		unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end,
					details);
	}
}

/**
 * unmap_mapping_range - unmap the portion of all mmaps in the specified
 * address_space corresponding to the specified page range in the underlying
 * file.
 * @mapping: the address space containing mmaps to be unmapped.
 * @holebegin: byte in first page to unmap, relative to the start of
 * the underlying file.  This will be rounded down to a PAGE_SIZE
 * boundary.  Note that this is different from truncate_pagecache(), which
 * must keep the partial page.  In contrast, we must get rid of
 * partial pages.
 * @holelen: size of prospective hole in bytes.  This will be rounded
 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
 * end of the file.
 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
 * but 0 when invalidating pagecache, don't throw away private data.
 */
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen,
		int even_cows)
{
	struct zap_details details;
	pgoff_t hba = holebegin >> PAGE_SHIFT;
	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* Check for overflow. */
	if (sizeof(holelen) > sizeof(hlen)) {
		long long holeend =
			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (holeend & ~(long long)ULONG_MAX)
			hlen = ULONG_MAX - hba + 1;
	}

	details.check_mapping = even_cows? NULL: mapping;
	details.nonlinear_vma = NULL;
	details.first_index = hba;
	details.last_index = hba + hlen - 1;
	if (details.last_index < details.first_index)
		details.last_index = ULONG_MAX;

	mutex_lock(&mapping->i_mmap_mutex);
	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
		unmap_mapping_range_tree(&mapping->i_mmap, &details);
	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
	mutex_unlock(&mapping->i_mmap_mutex);
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	spinlock_t *ptl;
	struct page *page, *swapcache = NULL;
	swp_entry_t entry;
	pte_t pte;
	int locked;
	struct mem_cgroup *ptr;
	int exclusive = 0;
	int ret = 0;

	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
		goto out;

	entry = pte_to_swp_entry(orig_pte);
	if (unlikely(non_swap_entry(entry))) {
		if (is_migration_entry(entry)) {
#ifdef CONFIG_CMA
			/*
			 * FIXME: mszyprow: cruel, brute-force method for
			 * letting cma/migration to finish it's job without
			 * stealing the lock migration_entry_wait() and creating
			 * a live-lock on the faulted page
			 * (page->_count == 2 migration failure issue)
			 */
			mdelay(10);
#endif
			migration_entry_wait(mm, pmd, address);
		} else if (is_hwpoison_entry(entry)) {
			ret = VM_FAULT_HWPOISON;
		} else {
			print_bad_pte(vma, address, orig_pte, NULL);
			ret = VM_FAULT_SIGBUS;
		}
		goto out;
	}
	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
	page = lookup_swap_cache(entry);
	if (!page) {
		grab_swap_token(mm); /* Contend for token _before_ read-in */
		page = swapin_readahead(entry,
					GFP_HIGHUSER_MOVABLE, vma, address);
		if (!page) {
			/*
			 * Back out if somebody else faulted in this pte
			 * while we released the pte lock.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
			if (likely(pte_same(*page_table, orig_pte)))
				ret = VM_FAULT_OOM;
			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
			goto unlock;
		}

		/* Had to read the page from swap area: Major fault */
		ret = VM_FAULT_MAJOR;
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(mm, PGMAJFAULT);
	} else if (PageHWPoison(page)) {
		/*
		 * hwpoisoned dirty swapcache pages are kept for killing
		 * owner processes (which may be unknown at hwpoison time)
		 */
		ret = VM_FAULT_HWPOISON;
		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
		goto out_release;
	}

	locked = lock_page_or_retry(page, mm, flags);
	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
	if (!locked) {
		ret |= VM_FAULT_RETRY;
		goto out_release;
	}

	/*
	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
	 * release the swapcache from under us.  The page pin, and pte_same
	 * test below, are not enough to exclude that.  Even if it is still
	 * swapcache, we need to check that the page's swap has not changed.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
		goto out_page;

	if (ksm_might_need_to_copy(page, vma, address)) {
		swapcache = page;
		page = ksm_does_need_to_copy(page, vma, address);

		if (unlikely(!page)) {
			ret = VM_FAULT_OOM;
			page = swapcache;
			swapcache = NULL;
			goto out_page;
		}
	}

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
		ret = VM_FAULT_OOM;
		goto out_page;
	}

	/*
	 * Back out if somebody else already faulted in this pte.
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (unlikely(!pte_same(*page_table, orig_pte)))
		goto out_nomap;

	if (unlikely(!PageUptodate(page))) {
		ret = VM_FAULT_SIGBUS;
		goto out_nomap;
	}

	/*
	 * The page isn't present yet, go ahead with the fault.
	 *
	 * Be careful about the sequence of operations here.
	 * To get its accounting right, reuse_swap_page() must be called
	 * while the page is counted on swap but not yet in mapcount i.e.
	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
	 * must be called after the swap_free(), or it will never succeed.
	 * Because delete_from_swap_page() may be called by reuse_swap_page(),
	 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
	 * in page->private. In this case, a record in swap_cgroup  is silently
	 * discarded at swap_free().
	 */

	inc_mm_counter_fast(mm, MM_ANONPAGES);
	dec_mm_counter_fast(mm, MM_SWAPENTS);
	pte = mk_pte(page, vma->vm_page_prot);
	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		flags &= ~FAULT_FLAG_WRITE;
		ret |= VM_FAULT_WRITE;
		exclusive = 1;
	}
	flush_icache_page(vma, page);
	set_pte_at(mm, address, page_table, pte);
	do_page_add_anon_rmap(page, vma, address, exclusive);
	/* It's better to call commit-charge after rmap is established */
	mem_cgroup_commit_charge_swapin(page, ptr);

	swap_free(entry);
	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
		try_to_free_swap(page);
	unlock_page(page);
	if (swapcache) {
		/*
		 * Hold the lock to avoid the swap entry to be reused
		 * until we take the PT lock for the pte_same() check
		 * (to avoid false positives from pte_same). For
		 * further safety release the lock after the swap_free
		 * so that the swap count won't change under a
		 * parallel locked swapcache.
		 */
		unlock_page(swapcache);
		page_cache_release(swapcache);
	}

	if (flags & FAULT_FLAG_WRITE) {
		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
		if (ret & VM_FAULT_ERROR)
			ret &= VM_FAULT_ERROR;
		goto out;
	}

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, page_table);
unlock:
	pte_unmap_unlock(page_table, ptl);
out:
	return ret;
out_nomap:
	mem_cgroup_cancel_charge_swapin(ptr);
	pte_unmap_unlock(page_table, ptl);
out_page:
	unlock_page(page);
out_release:
	page_cache_release(page);
	if (swapcache) {
		unlock_page(swapcache);
		page_cache_release(swapcache);
	}
	return ret;
}

/*
 * This is like a special single-page "expand_{down|up}wards()",
 * except we must first make sure that 'address{-|+}PAGE_SIZE'
 * doesn't hit another vma.
 */
static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		struct vm_area_struct *prev = vma->vm_prev;

		/*
		 * Is there a mapping abutting this one below?
		 *
		 * That's only ok if it's the same stack mapping
		 * that has gotten split..
		 */
		if (prev && prev->vm_end == address)
			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;

		expand_downwards(vma, address - PAGE_SIZE);
	}
	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
		struct vm_area_struct *next = vma->vm_next;

		/* As VM_GROWSDOWN but s/below/above/ */
		if (next && next->vm_start == address + PAGE_SIZE)
			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;

		expand_upwards(vma, address + PAGE_SIZE);
	}
	return 0;
}

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags)
{
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	pte_unmap(page_table);

	/* Check if we need to add a guard page to the stack */
	if (check_stack_guard_page(vma, address) < 0)
		return VM_FAULT_SIGBUS;

	/* Use the zero-page for reads */
	if (!(flags & FAULT_FLAG_WRITE)) {
		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
						vma->vm_page_prot));
		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
		if (!pte_none(*page_table))
			goto unlock;
		goto setpte;
	}

	/* Allocate our own private page. */
	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	page = alloc_zeroed_user_highpage_movable(vma, address);
	if (!page)
		goto oom;
	__SetPageUptodate(page);

	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
		goto oom_free_page;

	entry = mk_pte(page, vma->vm_page_prot);
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	/* Re-take the pte lock and make sure nobody beat us to it. */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte_none(*page_table))
		goto release;

	inc_mm_counter_fast(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, address);
setpte:
	set_pte_at(mm, address, page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, page_table);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return 0;
release:
	mem_cgroup_uncharge_page(page);
	page_cache_release(page);
	goto unlock;
oom_free_page:
	page_cache_release(page);
oom:
	return VM_FAULT_OOM;
}

/*
 * __do_fault() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
 * the next page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte neither mapped nor locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd,
		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
	pte_t *page_table;
	spinlock_t *ptl;
	struct page *page;
	struct page *cow_page;
	pte_t entry;
	int anon = 0;
	struct page *dirty_page = NULL;
	struct vm_fault vmf;
	int ret;
	int page_mkwrite = 0;

	/*
	 * If we do COW later, allocate page before taking lock_page()
	 * on the file cache page. This will reduce lock holding time.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {

		if (unlikely(anon_vma_prepare(vma)))
			return VM_FAULT_OOM;

		cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
		if (!cow_page)
			return VM_FAULT_OOM;

		if (mem_cgroup_newpage_charge(cow_page, mm, GFP_KERNEL)) {
			page_cache_release(cow_page);
			return VM_FAULT_OOM;
		}
	} else
		cow_page = NULL;

	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.page = NULL;

	ret = vma->vm_ops->fault(vma, &vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
			    VM_FAULT_RETRY)))
		goto uncharge_out;

	if (unlikely(PageHWPoison(vmf.page))) {
		if (ret & VM_FAULT_LOCKED)
			unlock_page(vmf.page);
		ret = VM_FAULT_HWPOISON;
		goto uncharge_out;
	}

	/*
	 * For consistency in subsequent calls, make the faulted page always
	 * locked.
	 */
	if (unlikely(!(ret & VM_FAULT_LOCKED)))
		lock_page(vmf.page);
	else
		VM_BUG_ON(!PageLocked(vmf.page));

	/*
	 * Should we do an early C-O-W break?
	 */
	page = vmf.page;
	if (flags & FAULT_FLAG_WRITE) {
		if (!(vma->vm_flags & VM_SHARED)) {
			/* Private mapping: COW into the pre-allocated page. */
			page = cow_page;
			anon = 1;
			copy_user_highpage(page, vmf.page, address, vma);
			__SetPageUptodate(page);
		} else {
			/*
			 * If the page will be shareable, see if the backing
			 * address space wants to know that the page is about
			 * to become writable
			 */
			if (vma->vm_ops->page_mkwrite) {
				int tmp;

				unlock_page(page);
				vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
				tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
				if (unlikely(tmp &
					  (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
					ret = tmp;
					goto unwritable_page;
				}
				if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
					lock_page(page);
					if (!page->mapping) {
						ret = 0; /* retry the fault */
						unlock_page(page);
						goto unwritable_page;
					}
				} else
					VM_BUG_ON(!PageLocked(page));
				page_mkwrite = 1;
			}
		}

	}

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);

	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if FAULT_FLAG_WRITE is set, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	/* Only go through if we didn't race with anybody else... */
	if (likely(pte_same(*page_table, orig_pte))) {
		flush_icache_page(vma, page);
		entry = mk_pte(page, vma->vm_page_prot);
		if (flags & FAULT_FLAG_WRITE)
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		if (anon) {
			inc_mm_counter_fast(mm, MM_ANONPAGES);
			page_add_new_anon_rmap(page, vma, address);
		} else {
			inc_mm_counter_fast(mm, MM_FILEPAGES);
			page_add_file_rmap(page);
			if (flags & FAULT_FLAG_WRITE) {
				dirty_page = page;
				get_page(dirty_page);
			}
		}
		set_pte_at(mm, address, page_table, entry);

		/* no need to invalidate: a not-present page won't be cached */
		update_mmu_cache(vma, address, page_table);
	} else {
		/* Lost the race: undo the charge and drop our references. */
		if (cow_page)
			mem_cgroup_uncharge_page(cow_page);
		if (anon)
			page_cache_release(page);
		else
			anon = 1; /* no anon but release faulted_page */
	}

	pte_unmap_unlock(page_table, ptl);

	if (dirty_page) {
		struct address_space *mapping = page->mapping;

		if (set_page_dirty(dirty_page))
			page_mkwrite = 1;
		unlock_page(dirty_page);
		put_page(dirty_page);
		if (page_mkwrite && mapping) {
			/*
			 * Some device drivers do not set page.mapping but still
			 * dirty their pages
			 */
			balance_dirty_pages_ratelimited(mapping);
		}

		/* file_update_time outside page_lock */
		if (vma->vm_file)
			file_update_time(vma->vm_file);
	} else {
		unlock_page(vmf.page);
		if (anon)
			page_cache_release(vmf.page);
	}

	return ret;

unwritable_page:
	page_cache_release(page);
	return ret;
uncharge_out:
	/* fs's fault handler get error */
	if (cow_page) {
		mem_cgroup_uncharge_page(cow_page);
		page_cache_release(cow_page);
	}
	return ret;
}

/* Fault on a linear (ordinary) file mapping: compute the file offset
 * from the virtual address and hand off to __do_fault(). */
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t pgoff = (((address & PAGE_MASK)
			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	pte_unmap(page_table);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}

/*
 * Fault of a previously existing named mapping. Repopulate the pte
 * from the encoded file_pte if possible. This enables swappable
 * nonlinear vmas.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t pgoff;

	flags |= FAULT_FLAG_NONLINEAR;

	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
		return 0;

	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
		/*
		 * Page table corrupted: show pte and kill process.
		 */
		print_bad_pte(vma, address, orig_pte, NULL);
		return VM_FAULT_SIGBUS;
	}

	/* The file offset is encoded in the pte itself for nonlinear vmas. */
	pgoff = pte_to_pgoff(orig_pte);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}

/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
int handle_pte_fault(struct mm_struct *mm,
		     struct vm_area_struct *vma, unsigned long address,
		     pte_t *pte, pmd_t *pmd, unsigned int flags)
{
	pte_t entry;
	spinlock_t *ptl;

	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry)) {
			/* No pte at all: either a file-backed fault (vma has
			 * a ->fault op) or a fresh anonymous page. */
			if (vma->vm_ops) {
				if (likely(vma->vm_ops->fault))
					return do_linear_fault(mm, vma, address,
						pte, pmd, flags, entry);
			}
			return do_anonymous_page(mm, vma, address,
						 pte, pmd, flags);
		}
		if (pte_file(entry))
			return do_nonlinear_fault(mm, vma, address,
					pte, pmd, flags, entry);
		return do_swap_page(mm, vma, address,
					pte, pmd, flags, entry);
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (unlikely(!pte_same(*pte, entry)))
		goto unlock;
	if (flags & FAULT_FLAG_WRITE) {
		if (!pte_write(entry))
			return do_wp_page(mm, vma, address,
					pte, pmd, ptl, entry);
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
		update_mmu_cache(vma, address, pte);
	} else {
		/*
		 * This is needed only for protection faults but the arch code
		 * is not yet telling us if this is a protection fault or not.
		 * This still avoids useless tlb flushes for .text page faults
		 * with threads.
		 */
		if (flags & FAULT_FLAG_WRITE)
			flush_tlb_fix_spurious_fault(vma, address);
	}
unlock:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	__set_current_state(TASK_RUNNING);

	count_vm_event(PGFAULT);
	mem_cgroup_count_vm_event(mm, PGFAULT);

	/* do counter updates before entering really critical section. */
	check_sync_rss_stat(current);

	if (unlikely(is_vm_hugetlb_page(vma)))
		return hugetlb_fault(mm, vma, address, flags);

	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		return VM_FAULT_OOM;
	pmd = pmd_alloc(mm, pud, address);
	if (!pmd)
		return VM_FAULT_OOM;
	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
		if (!vma->vm_ops)
			return do_huge_pmd_anonymous_page(mm, vma, address,
							  pmd, flags);
	} else {
		pmd_t orig_pmd = *pmd;
		barrier();
		if (pmd_trans_huge(orig_pmd)) {
			if (flags & FAULT_FLAG_WRITE &&
			    !pmd_write(orig_pmd) &&
			    !pmd_trans_splitting(orig_pmd))
				return do_huge_pmd_wp_page(mm, vma, address,
							   pmd, orig_pmd);
			return 0;
		}
	}

	/*
	 * Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd, if an huge pmd could
	 * materialize from under us from a different thread.
	 */
	if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
		return VM_FAULT_OOM;
	/* if an huge pmd materialized from under us just retry later */
	if (unlikely(pmd_trans_huge(*pmd)))
		return 0;
	/*
	 * A regular pmd is established and it can't morph into a huge pmd
	 * from under us anymore at this point because we hold the mmap_sem
	 * read mode and khugepaged takes it in write mode. So now it's
	 * safe to run pte_offset_map().
	 */
	pte = pte_offset_map(pmd, address);

	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

#ifndef __PAGETABLE_PUD_FOLDED
/*
 * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	pud_t *new = pud_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&mm->page_table_lock);
	if (pgd_present(*pgd))		/* Another has populated it */
		pud_free(mm, new);
	else
		pgd_populate(mm, pgd, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
	if (pud_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pud_populate(mm, pud, new);
#else
	if (pgd_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */

/*
 * Fault in (and thereby make present) the pages of the first vma covering
 * [addr, end), faulting with write intent where that would break COW.
 * Returns 0 on full success, -EFAULT if not all pages could be made
 * present, or a negative error from get_user_pages().
 */
int make_pages_present(unsigned long addr, unsigned long end)
{
	int ret, len, write;
	struct vm_area_struct * vma;

	vma = find_vma(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
	BUG_ON(addr >= end);
	BUG_ON(end > vma->vm_end);
	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
	ret = get_user_pages(current, current->mm, addr,
			len, write, 0, NULL, NULL);
	if (ret < 0)
		return ret;
	return ret == len ?
0 : -EFAULT;
}

#if !defined(__HAVE_ARCH_GATE_AREA)

#if defined(AT_SYSINFO_EHDR)
static struct vm_area_struct gate_vma;

/* Set up the single, mm-less vma describing the user-visible gate page. */
static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;

	return 0;
}
__initcall(gate_vma_init);
#endif

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef AT_SYSINFO_EHDR
	return &gate_vma;
#else
	return NULL;
#endif
}

/* Return 1 if addr lies inside the fixmap gate area (no mm required). */
int in_gate_area_no_mm(unsigned long addr)
{
#ifdef AT_SYSINFO_EHDR
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
#endif
	return 0;
}

#endif	/* __HAVE_ARCH_GATE_AREA */

/*
 * Walk the page tables down to the pte for address.  On success the pte
 * is returned mapped and locked (*ptlp held); the caller must
 * pte_unmap_unlock it.  Fails with -EINVAL on any missing/huge level or
 * a non-present pte.
 */
static int __follow_pte(struct mm_struct *mm, unsigned long address,
		pte_t **ptepp, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto out;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto out;

	pmd = pmd_offset(pud, address);
	VM_BUG_ON(pmd_trans_huge(*pmd));
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto out;

	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
	if (pmd_huge(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
	if (!ptep)
		goto out;
	if (!pte_present(*ptep))
		goto unlock;
	*ptepp = ptep;
	return 0;
unlock:
	pte_unmap_unlock(ptep, *ptlp);
out:
	return -EINVAL;
}

/* Sparse-friendly wrapper for __follow_pte: annotates the lock acquired
 * on success via __cond_lock. */
static inline int follow_pte(struct mm_struct *mm, unsigned long address,
			     pte_t **ptepp, spinlock_t **ptlp)
{
	int res;

	/* (void) is needed to make gcc happy */
	(void) __cond_lock(*ptlp,
			   !(res = __follow_pte(mm, address, ptepp, ptlp)));
	return res;
}

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
*/ int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn) { int ret = -EINVAL; spinlock_t *ptl; pte_t *ptep; if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) return ret; ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); if (ret) return ret; *pfn = pte_pfn(*ptep); pte_unmap_unlock(ptep, ptl); return 0; } EXPORT_SYMBOL(follow_pfn); #ifdef CONFIG_HAVE_IOREMAP_PROT int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys) { int ret = -EINVAL; pte_t *ptep, pte; spinlock_t *ptl; if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) goto out; if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) goto out; pte = *ptep; if ((flags & FOLL_WRITE) && !pte_write(pte)) goto unlock; *prot = pgprot_val(pte_pgprot(pte)); *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; ret = 0; unlock: pte_unmap_unlock(ptep, ptl); out: return ret; } int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { resource_size_t phys_addr; unsigned long prot = 0; void __iomem *maddr; int offset = addr & (PAGE_SIZE-1); if (follow_phys(vma, addr, write, &prot, &phys_addr)) return -EINVAL; maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); if (write) memcpy_toio(maddr + offset, buf, len); else memcpy_fromio(buf, maddr + offset, len); iounmap(maddr); return len; } #endif /* * Access another process' address space as given in mm. If non-NULL, use the * given task for page fault accounting. 
*/ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) { struct vm_area_struct *vma; void *old_buf = buf; down_read(&mm->mmap_sem); /* ignore errors, just check how much was successfully transferred */ while (len) { int bytes, ret, offset; void *maddr; struct page *page = NULL; ret = get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma); if (ret <= 0) { /* * Check if this is a VM_IO | VM_PFNMAP VMA, which * we can access using slightly different code. */ #ifdef CONFIG_HAVE_IOREMAP_PROT vma = find_vma(mm, addr); if (!vma || vma->vm_start > addr) break; if (vma->vm_ops && vma->vm_ops->access) ret = vma->vm_ops->access(vma, addr, buf, len, write); if (ret <= 0) #endif break; bytes = ret; } else { bytes = len; offset = addr & (PAGE_SIZE-1); if (bytes > PAGE_SIZE-offset) bytes = PAGE_SIZE-offset; maddr = kmap(page); if (write) { copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes); set_page_dirty_lock(page); } else { copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes); } kunmap(page); page_cache_release(page); } len -= bytes; buf += bytes; addr += bytes; } up_read(&mm->mmap_sem); return buf - old_buf; } /** * access_remote_vm - access another process' address space * @mm: the mm_struct of the target address space * @addr: start address to access * @buf: source or destination buffer * @len: number of bytes to transfer * @write: whether the access is a write * * The caller must hold a reference on @mm. */ int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) { return __access_remote_vm(NULL, mm, addr, buf, len, write); } /* * Access another process' address space. 
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 *
 * Returns the number of bytes transferred, or 0 if the task has no mm.
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, int write)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);	/* takes a reference; dropped via mmput() */
	if (!mm)
		return 0;

	ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
	mmput(mm);

	return ret;
}

/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * Do not print if we are in atomic
	 * contexts (in exception stacks, etc.):
	 */
	if (preempt_count())
		return;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		char *buf = (char *)__get_free_page(GFP_KERNEL);
		if (buf) {
			char *p, *s;

			p = d_path(&f->f_path, buf, PAGE_SIZE);
			if (IS_ERR(p))
				p = "?";
			s = strrchr(p, '/');
			if (s)
				p = s+1;	/* basename only */
			printk("%s%s[%lx+%lx]", prefix, p,
					vma->vm_start,
					vma->vm_end - vma->vm_start);
			free_page((unsigned long)buf);
		}
	}
	up_read(&current->mm->mmap_sem);
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Lockdep annotation: tell the lock validator that the caller may take a
 * page fault and hence acquire mmap_sem for reading.
 */
void might_fault(void)
{
	/*
	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
	 * holding the mmap_sem, this is safe because kernel memory doesn't
	 * get paged out, therefore we'll never actually fault, and the
	 * below annotations will generate false positives.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		return;

	might_sleep();
	/*
	 * it would be nicer only to annotate paths which are not under
	 * pagefault_disable, however that requires a larger audit and
	 * providing helpers like get_user_atomic.
	 */
	if (!in_atomic() && current->mm)
		might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
 * Clear a huge page too big to be contiguous in mem_map: walk the
 * subpages via mem_map_next() instead of plain pointer arithmetic.
 */
static void clear_gigantic_page(struct page *page,
				unsigned long addr,
				unsigned int pages_per_huge_page)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < pages_per_huge_page;
	     i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}

/* Zero an entire huge page, rescheduling between subpages as needed. */
void clear_huge_page(struct page *page,
		     unsigned long addr, unsigned int pages_per_huge_page)
{
	int i;

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, pages_per_huge_page);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

/* Gigantic-page variant of copy_user_huge_page(); see clear_gigantic_page. */
static void copy_user_gigantic_page(struct page *dst, struct page *src,
				    unsigned long addr,
				    struct vm_area_struct *vma,
				    unsigned int pages_per_huge_page)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page; ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

/* Copy an entire huge page (e.g. for COW), rescheduling between subpages. */
void copy_user_huge_page(struct page *dst, struct page *src,
			 unsigned long addr, struct vm_area_struct *vma,
			 unsigned int pages_per_huge_page)
{
	int i;

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		copy_user_gigantic_page(dst, src, addr, vma,
					pages_per_huge_page);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
gpl-2.0
SlimRoms/kernel_htc_flounder
drivers/iio/magnetometer/inv_compass/inv_ami306_trigger.c
398
2098
/* * Copyright (C) 2012 Invensense, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /** * @addtogroup DRIVERS * @brief Hardware drivers. * * @{ * @file inv_ami306_trigger.c * @brief Invensense implementation for AMI306 * @details This driver currently works for the AMI306 */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/sysfs.h> #include <linux/jiffies.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/kfifo.h> #include <linux/poll.h> #include <linux/miscdevice.h> #include <linux/spinlock.h> #include "iio.h" #include "sysfs.h" #include "trigger.h" #include "inv_ami306_iio.h" static const struct iio_trigger_ops inv_ami306_trigger_ops = { .owner = THIS_MODULE, }; int inv_ami306_probe_trigger(struct iio_dev *indio_dev) { int ret; struct inv_ami306_state_s *st = iio_priv(indio_dev); st->trig = iio_allocate_trigger("%s-dev%d", indio_dev->name, indio_dev->id); if (st->trig == NULL) { ret = -ENOMEM; goto error_ret; } /* select default trigger */ st->trig->dev.parent = &st->i2c->dev; st->trig->private_data = indio_dev; st->trig->ops = &inv_ami306_trigger_ops; ret = iio_trigger_register(st->trig); /* select default trigger */ indio_dev->trig = st->trig; if (ret) goto error_free_trig; return 0; error_free_trig: iio_free_trigger(st->trig); error_ret: return ret; } void inv_ami306_remove_trigger(struct iio_dev *indio_dev) { struct inv_ami306_state_s *st = iio_priv(indio_dev); iio_trigger_unregister(st->trig); iio_free_trigger(st->trig); } /** 
* @} */
gpl-2.0