Dataset schema (one record per source file):

  code       string, lengths 6 to 250k
  repo_name  string, lengths 5 to 70
  path       string, lengths 3 to 177
  language   string, 1 value
  license    string, 15 values
  size       int64, 6 to 250k
/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/libra_sdioif.h> #include <linux/delay.h> #include <linux/mmc/sdio.h> #include <linux/mmc/mmc.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> /* Libra SDIO function device */ static struct sdio_func *libra_sdio_func; static struct mmc_host *libra_mmc_host; static int libra_mmc_host_index; /* SDIO Card ID / Device ID */ static unsigned short libra_sdio_card_id; /* completion variables */ struct completion gCard_rem_event_var; EXPORT_SYMBOL(gCard_rem_event_var); struct completion gShutdown_event_var; EXPORT_SYMBOL(gShutdown_event_var); static suspend_handler_t *libra_suspend_hldr; static resume_handler_t *libra_resume_hldr; static notify_card_removal_t *libra_notify_card_removal_hdlr; static shutdown_handler_t *libra_sdio_shutdown_hdlr; int libra_enable_sdio_irq_in_chip(struct sdio_func *func, u8 enable) { unsigned char reg = 0; int err = 0; sdio_claim_host(func); /* Read the value into reg */ libra_sdiocmd52(func, SDIO_CCCR_IENx, &reg, 0, &err); if (err) printk(KERN_ERR "%s: Could not read SDIO_CCCR_IENx register " "err=%d\n", __func__, err); if (libra_mmc_host) { if (enable) { reg |= 1 << func->num; reg |= 1; } else { reg &= ~(1 << func->num); } libra_sdiocmd52(func, SDIO_CCCR_IENx, &reg, 1, &err); if (err) printk(KERN_ERR "%s: Could not enable/disable irq " "err=%d\n", __func__, err); } sdio_release_host(func); return err; } EXPORT_SYMBOL(libra_enable_sdio_irq_in_chip); /** * libra_sdio_configure() - Function to configure the SDIO device param * @libra_sdio_rxhandler Rx handler * @func_drv_fn Function driver function for special setup * @funcdrv_timeout Function Enable timeout * @blksize Block size * * Configure SDIO device, enable function and set block size */ int libra_sdio_configure(sdio_irq_handler_t libra_sdio_rxhandler, void (*func_drv_fn)(int *status), unsigned int funcdrv_timeout, unsigned int blksize) { int err_ret = 0; struct sdio_func *func = libra_sdio_func; if (libra_sdio_func == NULL) { printk(KERN_ERR "%s: Error SDIO card not detected\n", __func__); goto cfg_error; } sdio_claim_host(func); /* Currently block sizes are set here. */ func->max_blksize = blksize; if (sdio_set_block_size(func, blksize)) { printk(KERN_ERR "%s: Unable to set the block size.\n", __func__); sdio_release_host(func); goto cfg_error; } /* Function driver specific configuration. */ if (func_drv_fn) { (*func_drv_fn)(&err_ret); if (err_ret) { printk(KERN_ERR "%s: function driver provided configure function error=%d\n", __func__, err_ret); sdio_release_host(func); goto cfg_error; } } /* We set this based on the function card. 
*/ func->enable_timeout = funcdrv_timeout; err_ret = sdio_enable_func(func); if (err_ret != 0) { printk(KERN_ERR "%s: Unable to enable function %d\n", __func__, err_ret); sdio_release_host(func); goto cfg_error; } if (sdio_claim_irq(func, libra_sdio_rxhandler)) { sdio_disable_func(func); printk(KERN_ERR "%s: Unable to claim irq.\n", __func__); sdio_release_host(func); goto cfg_error; } libra_enable_sdio_irq_in_chip(func, 0); sdio_release_host(func); return 0; cfg_error: return -1; } EXPORT_SYMBOL(libra_sdio_configure); int libra_sdio_configure_suspend_resume( suspend_handler_t *libra_sdio_suspend_hdlr, resume_handler_t *libra_sdio_resume_hdlr) { libra_suspend_hldr = libra_sdio_suspend_hdlr; libra_resume_hldr = libra_sdio_resume_hdlr; return 0; } EXPORT_SYMBOL(libra_sdio_configure_suspend_resume); /* * libra_sdio_deconfigure() - Function to reset the SDIO device param */ void libra_sdio_deconfigure(struct sdio_func *func) { if (NULL == libra_sdio_func) return; sdio_claim_host(func); sdio_release_irq(func); sdio_disable_func(func); sdio_release_host(func); } EXPORT_SYMBOL(libra_sdio_deconfigure); int libra_enable_sdio_irq(struct sdio_func *func, u8 enable) { if (libra_mmc_host && libra_mmc_host->ops && libra_mmc_host->ops->enable_sdio_irq) { libra_mmc_host->ops->enable_sdio_irq(libra_mmc_host, enable); return 0; } printk(KERN_ERR "%s: Could not enable disable irq\n", __func__); return -EINVAL; } EXPORT_SYMBOL(libra_enable_sdio_irq); int libra_disable_sdio_irq_capability(struct sdio_func *func, u8 disable) { if (libra_mmc_host) { if (disable) libra_mmc_host->caps &= ~MMC_CAP_SDIO_IRQ; else libra_mmc_host->caps |= MMC_CAP_SDIO_IRQ; return 0; } printk(KERN_ERR "%s: Could not change sdio capabilities to polling\n", __func__); return -EINVAL; } EXPORT_SYMBOL(libra_disable_sdio_irq_capability); /* * libra_sdio_release_irq() - Function to release IRQ */ void libra_sdio_release_irq(struct sdio_func *func) { if (NULL == libra_sdio_func) return; sdio_release_irq(func); } EXPORT_SYMBOL(libra_sdio_release_irq); /* * libra_sdio_disable_func() - Function to disable sdio func */ void libra_sdio_disable_func(struct sdio_func *func) { if (NULL == libra_sdio_func) return; sdio_disable_func(func); } EXPORT_SYMBOL(libra_sdio_disable_func); /* * Return the SDIO Function device */ struct sdio_func *libra_getsdio_funcdev(void) { return libra_sdio_func; } EXPORT_SYMBOL(libra_getsdio_funcdev); /* * Set function driver as the private data for the function device */ void libra_sdio_setprivdata(struct sdio_func *sdio_func_dev, void *padapter) { if (NULL == libra_sdio_func) return; sdio_set_drvdata(sdio_func_dev, padapter); } EXPORT_SYMBOL(libra_sdio_setprivdata); /* * Return private data of the function device. */ void *libra_sdio_getprivdata(struct sdio_func *sdio_func_dev) { return sdio_get_drvdata(sdio_func_dev); } EXPORT_SYMBOL(libra_sdio_getprivdata); /* * Function driver claims the SDIO device */ void libra_claim_host(struct sdio_func *sdio_func_dev, pid_t *curr_claimed, pid_t current_pid, atomic_t *claim_count) { if (NULL == libra_sdio_func) return; if (*curr_claimed == current_pid) { atomic_inc(claim_count); return; } /* Go ahead and claim the host if not locked by anybody. 
*/ sdio_claim_host(sdio_func_dev); *curr_claimed = current_pid; atomic_inc(claim_count); } EXPORT_SYMBOL(libra_claim_host); /* * Function driver releases the SDIO device */ void libra_release_host(struct sdio_func *sdio_func_dev, pid_t *curr_claimed, pid_t current_pid, atomic_t *claim_count) { if (NULL == libra_sdio_func) return; if (*curr_claimed != current_pid) { /* Dont release */ return; } atomic_dec(claim_count); if (atomic_read(claim_count) == 0) { *curr_claimed = 0; sdio_release_host(sdio_func_dev); } } EXPORT_SYMBOL(libra_release_host); void libra_sdiocmd52(struct sdio_func *sdio_func_dev, unsigned int addr, u8 *byte_var, int write, int *err_ret) { if (write) sdio_writeb(sdio_func_dev, byte_var[0], addr, err_ret); else byte_var[0] = sdio_readb(sdio_func_dev, addr, err_ret); } EXPORT_SYMBOL(libra_sdiocmd52); u8 libra_sdio_readsb(struct sdio_func *func, void *dst, unsigned int addr, int count) { return sdio_readsb(func, dst, addr, count); } EXPORT_SYMBOL(libra_sdio_readsb); int libra_sdio_memcpy_fromio(struct sdio_func *func, void *dst, unsigned int addr, int count) { return sdio_memcpy_fromio(func, dst, addr, count); } EXPORT_SYMBOL(libra_sdio_memcpy_fromio); int libra_sdio_writesb(struct sdio_func *func, unsigned int addr, void *src, int count) { return sdio_writesb(func, addr, src, count); } EXPORT_SYMBOL(libra_sdio_writesb); int libra_sdio_memcpy_toio(struct sdio_func *func, unsigned int addr, void *src, int count) { return sdio_memcpy_toio(func, addr, src, count); } EXPORT_SYMBOL(libra_sdio_memcpy_toio); int libra_detect_card_change(void) { if (libra_mmc_host) { if (!strcmp(libra_mmc_host->class_dev.class->name, "mmc_host") && (libra_mmc_host_index == libra_mmc_host->index)) { mmc_detect_change(libra_mmc_host, 0); return 0; } } printk(KERN_ERR "%s: Could not trigger card change\n", __func__); return -EINVAL; } EXPORT_SYMBOL(libra_detect_card_change); int libra_sdio_enable_polling(void) { if (libra_mmc_host) { if (!strcmp(libra_mmc_host->class_dev.class->name, "mmc_host") && (libra_mmc_host_index == libra_mmc_host->index)) { libra_mmc_host->caps |= MMC_CAP_NEEDS_POLL; mmc_detect_change(libra_mmc_host, 0); return 0; } } printk(KERN_ERR "%s: Could not trigger SDIO scan\n", __func__); return -1; } EXPORT_SYMBOL(libra_sdio_enable_polling); void libra_sdio_set_clock(struct sdio_func *func, unsigned int clk_freq) { struct mmc_host *host = func->card->host; host->ios.clock = clk_freq; host->ops->set_ios(host, &host->ios); } EXPORT_SYMBOL(libra_sdio_set_clock); /* * API to get SDIO Device Card ID */ void libra_sdio_get_card_id(struct sdio_func *func, unsigned short *card_id) { if (card_id) *card_id = libra_sdio_card_id; } EXPORT_SYMBOL(libra_sdio_get_card_id); /* * SDIO Probe */ static int libra_sdio_probe(struct sdio_func *func, const struct sdio_device_id *sdio_dev_id) { libra_mmc_host = func->card->host; libra_mmc_host_index = libra_mmc_host->index; libra_sdio_func = func; libra_sdio_card_id = sdio_dev_id->device; printk(KERN_INFO "%s: success with block size of %d device_id=0x%x\n", __func__, func->cur_blksize, sdio_dev_id->device); /* Turn off SDIO polling from now on */ libra_mmc_host->caps &= ~MMC_CAP_NEEDS_POLL; return 0; } static void libra_sdio_remove(struct sdio_func *func) { if (libra_notify_card_removal_hdlr) libra_notify_card_removal_hdlr(); libra_sdio_func = NULL; printk(KERN_INFO "%s : Module removed.\n", __func__); } #ifdef CONFIG_PM static int libra_sdio_suspend(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); int ret = 0; ret = 
sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); if (ret) { printk(KERN_ERR "%s: Error Host doesn't support the keep power capability\n" , __func__); return ret; } if (libra_suspend_hldr) { /* Disable SDIO IRQ when driver is being suspended */ libra_enable_sdio_irq(func, 0); ret = libra_suspend_hldr(func); if (ret) { printk(KERN_ERR "%s: Libra driver is not able to suspend\n" , __func__); /* Error - Restore SDIO IRQ */ libra_enable_sdio_irq(func, 1); return ret; } } return sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); } static int libra_sdio_resume(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); if (libra_resume_hldr) { libra_resume_hldr(func); /* Restore SDIO IRQ */ libra_enable_sdio_irq(func, 1); } return 0; } #else #define libra_sdio_suspend 0 #define libra_sdio_resume 0 #endif static void libra_sdio_shutdown(struct device *dev) { if (libra_sdio_shutdown_hdlr) { libra_sdio_shutdown_hdlr(); printk(KERN_INFO "%s : Notified shutdown event to Libra driver.\n", __func__); } } int libra_sdio_register_shutdown_hdlr( shutdown_handler_t *libra_shutdown_hdlr) { libra_sdio_shutdown_hdlr = libra_shutdown_hdlr; return 0; } EXPORT_SYMBOL(libra_sdio_register_shutdown_hdlr); int libra_sdio_notify_card_removal( notify_card_removal_t *libra_sdio_notify_card_removal_hdlr) { libra_notify_card_removal_hdlr = libra_sdio_notify_card_removal_hdlr; return 0; } EXPORT_SYMBOL(libra_sdio_notify_card_removal); static struct sdio_device_id libra_sdioid[] = { {.class = 0, .vendor = LIBRA_MAN_ID, .device = LIBRA_REV_1_0_CARD_ID}, {.class = 0, .vendor = VOLANS_MAN_ID, .device = VOLANS_REV_2_0_CARD_ID}, {} }; static const struct dev_pm_ops libra_sdio_pm_ops = { .suspend = libra_sdio_suspend, .resume = libra_sdio_resume, }; static struct sdio_driver libra_sdiofn_driver = { .name = "libra_sdiofn", .id_table = libra_sdioid, .probe = libra_sdio_probe, .remove = libra_sdio_remove, .drv.pm = &libra_sdio_pm_ops, .drv.shutdown = libra_sdio_shutdown, }; static int __init libra_sdioif_init(void) { libra_sdio_func = NULL; libra_mmc_host = NULL; libra_mmc_host_index = -1; libra_suspend_hldr = NULL; libra_resume_hldr = NULL; libra_notify_card_removal_hdlr = NULL; libra_sdio_shutdown_hdlr = NULL; sdio_register_driver(&libra_sdiofn_driver); printk(KERN_INFO "%s: Loaded Successfully\n", __func__); return 0; } static void __exit libra_sdioif_exit(void) { unsigned int attempts = 0; if (!libra_detect_card_change()) { do { ++attempts; msleep(500); } while (libra_sdio_func != NULL && attempts < 3); } if (libra_sdio_func != NULL) printk(KERN_ERR "%s: Card removal not detected\n", __func__); sdio_unregister_driver(&libra_sdiofn_driver); libra_sdio_func = NULL; libra_mmc_host = NULL; libra_mmc_host_index = -1; printk(KERN_INFO "%s: Unloaded Successfully\n", __func__); } module_init(libra_sdioif_init); module_exit(libra_sdioif_exit); MODULE_LICENSE("GPL v2"); MODULE_VERSION("1.0"); MODULE_DESCRIPTION("WLAN SDIODriver");
repo_name: KlinkOnE/caf-port
path:      drivers/net/wireless/libra/libra_sdioif.c
language:  C
license:   gpl-2.0
size:      13,417
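Note on the record above: libra_enable_sdio_irq_in_chip() is a read-modify-write of the CCCR interrupt-enable byte over CMD52. Enabling sets the function's IENx bit plus the master-enable bit (bit 0); disabling clears only the function bit. A minimal userspace sketch of that masking, with the register modeled as a plain byte (the main() harness and names are illustrative, not part of the driver):

#include <stdio.h>

static unsigned char cccr_ienx;  /* stand-in for the SDIO_CCCR_IENx register */

static void set_func_irq(unsigned func_num, int enable)
{
    if (enable) {
        cccr_ienx |= 1u << func_num;    /* per-function interrupt enable */
        cccr_ienx |= 1u;                /* IENM master enable, bit 0 */
    } else {
        cccr_ienx &= ~(1u << func_num); /* master bit left set, as in the driver */
    }
}

int main(void)
{
    set_func_irq(1, 1);
    printf("after enable func 1:  0x%02x\n", (unsigned)cccr_ienx); /* 0x03 */
    set_func_irq(1, 0);
    printf("after disable func 1: 0x%02x\n", (unsigned)cccr_ienx); /* 0x01 */
    return 0;
}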
/* * Flash partitions described by the OF (or flattened) device tree * * Copyright (C) 2006 MontaVista Software Inc. * Author: Vitaly Wool <vwool@ru.mvista.com> * * Revised to handle newer style flash binding by: * Copyright (C) 2007 David Gibson, IBM Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/of.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> int __devinit of_mtd_parse_partitions(struct device *dev, struct mtd_info *mtd, struct device_node *node, struct mtd_partition **pparts) { const char *partname; struct device_node *pp; int nr_parts, i; /* First count the subnodes */ pp = NULL; nr_parts = 0; while ((pp = of_get_next_child(node, pp))) nr_parts++; if (nr_parts == 0) return 0; *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL); if (!*pparts) return -ENOMEM; pp = NULL; i = 0; while ((pp = of_get_next_child(node, pp))) { const u32 *reg; int len; reg = of_get_property(pp, "reg", &len); if (!reg || (len != 2 * sizeof(u32))) { of_node_put(pp); dev_err(dev, "Invalid 'reg' on %s\n", node->full_name); kfree(*pparts); *pparts = NULL; return -EINVAL; } (*pparts)[i].offset = reg[0]; (*pparts)[i].size = reg[1]; partname = of_get_property(pp, "label", &len); if (!partname) partname = of_get_property(pp, "name", &len); (*pparts)[i].name = (char *)partname; if (of_get_property(pp, "read-only", &len)) (*pparts)[i].mask_flags = MTD_WRITEABLE; i++; } return nr_parts; } EXPORT_SYMBOL(of_mtd_parse_partitions);
repo_name: jkoelndorfer/android-kernel-msm
path:      drivers/mtd/ofpart.c
language:  C
license:   gpl-2.0
size:      1,941
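The interesting part of of_mtd_parse_partitions() above is the per-subnode loop: each child's "reg" property supplies <offset size>, "label" (falling back to "name") supplies the partition name, and a "read-only" property sets MTD_WRITEABLE in mask_flags, which masks the writeable capability off. A standalone sketch with the device-tree calls replaced by a mock node array (MTD_WRITEABLE's value is taken from mtd-abi.h; the layout data is hypothetical):

#include <stdio.h>
#include <stdint.h>

#define MTD_WRITEABLE 0x400  /* flag value from mtd-abi.h */

struct mock_node { uint32_t reg[2]; const char *label; int read_only; };
struct part { uint64_t offset, size; const char *name; uint32_t mask_flags; };

int main(void)
{
    /* hypothetical flash layout standing in for the device-tree subnodes */
    static const struct mock_node nodes[] = {
        { { 0x000000, 0x040000 }, "u-boot", 1 },
        { { 0x040000, 0x1c0000 }, "kernel", 0 },
    };
    int n = (int)(sizeof nodes / sizeof nodes[0]);
    struct part parts[2];

    for (int i = 0; i < n; i++) {
        parts[i].offset     = nodes[i].reg[0];  /* reg = <offset size> */
        parts[i].size       = nodes[i].reg[1];
        parts[i].name       = nodes[i].label;   /* "label" else "name" property */
        parts[i].mask_flags = nodes[i].read_only ? MTD_WRITEABLE : 0;
        printf("%-8s offset=0x%06llx size=0x%06llx %s\n", parts[i].name,
               (unsigned long long)parts[i].offset,
               (unsigned long long)parts[i].size,
               (parts[i].mask_flags & MTD_WRITEABLE) ? "(ro)" : "(rw)");
    }
    return 0;
}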
/* $NoKeywords:$ */ /** * @file * * Config FCH HD Audio Controller * * * * @xrefitem bom "File Content Label" "Release Content" * @e project: AGESA * @e sub-project: FCH * @e \$Revision: 63425 $ @e \$Date: 2011-12-22 11:24:10 -0600 (Thu, 22 Dec 2011) $ * */ /* ***************************************************************************** * * Copyright 2008 - 2012 ADVANCED MICRO DEVICES, INC. All Rights Reserved. * * AMD is granting you permission to use this software (the Materials) * pursuant to the terms and conditions of your Software License Agreement * with AMD. This header does *NOT* give you permission to use the Materials * or any rights under AMD's intellectual property. Your use of any portion * of these Materials shall constitute your acceptance of those terms and * conditions. If you do not agree to the terms and conditions of the Software * License Agreement, please do not use any portion of these Materials. * * CONFIDENTIALITY: The Materials and all other information, identified as * confidential and provided to you by AMD shall be kept confidential in * accordance with the terms and conditions of the Software License Agreement. * * LIMITATION OF LIABILITY: THE MATERIALS AND ANY OTHER RELATED INFORMATION * PROVIDED TO YOU BY AMD ARE PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED * WARRANTY OF ANY KIND, INCLUDING BUT NOT LIMITED TO WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, TITLE, FITNESS FOR ANY PARTICULAR PURPOSE, * OR WARRANTIES ARISING FROM CONDUCT, COURSE OF DEALING, OR USAGE OF TRADE. * IN NO EVENT SHALL AMD OR ITS LICENSORS BE LIABLE FOR ANY DAMAGES WHATSOEVER * (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF PROFITS, BUSINESS * INTERRUPTION, OR LOSS OF INFORMATION) ARISING OUT OF AMD'S NEGLIGENCE, * GROSS NEGLIGENCE, THE USE OF OR INABILITY TO USE THE MATERIALS OR ANY OTHER * RELATED INFORMATION PROVIDED TO YOU BY AMD, EVEN IF AMD HAS BEEN ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. BECAUSE SOME JURISDICTIONS PROHIBIT THE * EXCLUSION OR LIMITATION OF LIABILITY FOR CONSEQUENTIAL OR INCIDENTAL DAMAGES, * THE ABOVE LIMITATION MAY NOT APPLY TO YOU. * * AMD does not assume any responsibility for any errors which may appear in * the Materials or any other related information provided to you by AMD, or * result from use of the Materials or any related information. * * You agree that you will not reverse engineer or decompile the Materials. * * NO SUPPORT OBLIGATION: AMD is not obligated to furnish, support, or make any * further information, software, technical information, know-how, or show-how * available to you. Additionally, AMD retains the right to modify the * Materials at any time, without notice, and is not obligated to provide such * modified Materials to you. * * U.S. GOVERNMENT RESTRICTED RIGHTS: The Materials are provided with * "RESTRICTED RIGHTS." Use, duplication, or disclosure by the Government is * subject to the restrictions as set forth in FAR 52.227-14 and * DFAR252.227-7013, et seq., or its successor. Use of the Materials by the * Government constitutes acknowledgement of AMD's proprietary rights in them. * * EXPORT ASSURANCE: You agree and certify that neither the Materials, nor any * direct product thereof will be exported directly or indirectly, into any * country prohibited by the United States Export Administration Act and the * regulations thereunder, without the required authorization from the U.S. * government nor will be used for any purpose prohibited by the same. 
**************************************************************************** */ #include "FchPlatform.h" #define FILECODE (0xB003) // // Declaration of local functions // VOID ConfigureAzaliaPinCmd ( IN FCH_DATA_BLOCK *FchDataPtr, IN UINT32 BAR0, IN UINT8 ChannelNum ); VOID ConfigureAzaliaSetConfigD4Dword ( IN CODEC_ENTRY *TempAzaliaCodecEntryPtr, IN UINT32 ChannelNumDword, IN UINT32 BAR0, IN AMD_CONFIG_PARAMS *StdHeader ); /** * FchInitMidAzalia - Config Azalia controller after PCI * emulation * * * * @param[in] FchDataPtr Fch configuration structure pointer. * */ VOID FchInitMidAzalia ( IN VOID *FchDataPtr ) { UINT8 Index; BOOLEAN EnableAzalia; UINT32 PinRouting; UINT8 ChannelNum; UINT8 AzaliaTempVariableByte; UINT16 AzaliaTempVariableWord; UINT32 BAR0; FCH_DATA_BLOCK *LocalCfgPtr; AMD_CONFIG_PARAMS *StdHeader; LocalCfgPtr = (FCH_DATA_BLOCK *) FchDataPtr; StdHeader = LocalCfgPtr->StdHeader; EnableAzalia = FALSE; ChannelNum = 0; AzaliaTempVariableByte = 0; AzaliaTempVariableWord = 0; BAR0 = 0; if ( LocalCfgPtr->Azalia.AzaliaEnable == AzDisable) { return; } else { RwPci ((((0x14<<3)+2) << 16) + 0x04, AccessWidth8, (UINT32)~BIT1, (UINT32)BIT1, StdHeader); if ( LocalCfgPtr->Azalia.AzaliaSsid != 0 ) { RwPci ((((0x14<<3)+2) << 16) + 0x2C, AccessWidth32, 0x00, LocalCfgPtr->Azalia.AzaliaSsid, StdHeader); } ReadPci ((((0x14<<3)+2) << 16) + 0x10, AccessWidth32, &BAR0, StdHeader); if ( BAR0 != 0 ) { if ( BAR0 != 0xFFFFFFFF ) { BAR0 &= ~(0x03FFF); EnableAzalia = TRUE; } } } if ( EnableAzalia ) { // // Get SDIN Configuration // if ( LocalCfgPtr->Azalia.AzaliaConfig.AzaliaSdin0 == 2 ) { RwMem (ACPI_MMIO_BASE + GPIO_BASE + FCH_GPIO_REG167, AccessWidth8, 0, 0x3E); RwMem (ACPI_MMIO_BASE + IOMUX_BASE + FCH_GPIO_REG167, AccessWidth8, 0, 0x00); } else { RwMem (ACPI_MMIO_BASE + GPIO_BASE + FCH_GPIO_REG167, AccessWidth8, 0, 0x0); RwMem (ACPI_MMIO_BASE + IOMUX_BASE + FCH_GPIO_REG167, AccessWidth8, 0, 0x01); } if ( LocalCfgPtr->Azalia.AzaliaConfig.AzaliaSdin1 == 2 ) { RwMem (ACPI_MMIO_BASE + GPIO_BASE + FCH_GPIO_REG168, AccessWidth8, 0, 0x3E); RwMem (ACPI_MMIO_BASE + IOMUX_BASE + FCH_GPIO_REG168, AccessWidth8, 0, 0x00); } else { RwMem (ACPI_MMIO_BASE + GPIO_BASE + FCH_GPIO_REG168, AccessWidth8, 0, 0x0); RwMem (ACPI_MMIO_BASE + IOMUX_BASE + FCH_GPIO_REG168, AccessWidth8, 0, 0x01); } if ( LocalCfgPtr->Azalia.AzaliaConfig.AzaliaSdin2 == 2 ) { RwMem (ACPI_MMIO_BASE + GPIO_BASE + FCH_GPIO_REG169, AccessWidth8, 0, 0x3E); RwMem (ACPI_MMIO_BASE + IOMUX_BASE + FCH_GPIO_REG169, AccessWidth8, 0, 0x00); } else { RwMem (ACPI_MMIO_BASE + GPIO_BASE + FCH_GPIO_REG169, AccessWidth8, 0, 0x0); RwMem (ACPI_MMIO_BASE + IOMUX_BASE + FCH_GPIO_REG169, AccessWidth8, 0, 0x01); } if ( LocalCfgPtr->Azalia.AzaliaConfig.AzaliaSdin3 == 2 ) { RwMem (ACPI_MMIO_BASE + GPIO_BASE + FCH_GPIO_REG170, AccessWidth8, 0, 0x3E); RwMem (ACPI_MMIO_BASE + IOMUX_BASE + FCH_GPIO_REG170, AccessWidth8, 0, 0x00); } else { RwMem (ACPI_MMIO_BASE + GPIO_BASE + FCH_GPIO_REG170, AccessWidth8, 0, 0x0); RwMem (ACPI_MMIO_BASE + IOMUX_BASE + FCH_GPIO_REG170, AccessWidth8, 0, 0x01); } Index = 11; do { ReadMem ( BAR0 + 0x08, AccessWidth8, &AzaliaTempVariableByte); AzaliaTempVariableByte |= BIT0; WriteMem (BAR0 + 0x08, AccessWidth8, &AzaliaTempVariableByte); FchStall (1000, StdHeader); ReadMem (BAR0 + 0x08, AccessWidth8, &AzaliaTempVariableByte); Index--; } while ((! 
(AzaliaTempVariableByte & BIT0)) && (Index > 0) ); if ( Index == 0 ) { return; } FchStall (1000, StdHeader); ReadMem ( BAR0 + 0x0E, AccessWidth16, &AzaliaTempVariableWord); if ( AzaliaTempVariableWord & 0x0F ) { // //at least one azalia codec found // //PinRouting = LocalCfgPtr->Azalia.AZALIA_CONFIG.AzaliaSdinPin; //new structure need make up PinRouting //need adjust later!!! // PinRouting = 0; PinRouting = (UINT32 )LocalCfgPtr->Azalia.AzaliaConfig.AzaliaSdin3; PinRouting <<= 8; PinRouting |= (UINT32 )LocalCfgPtr->Azalia.AzaliaConfig.AzaliaSdin2; PinRouting <<= 8; PinRouting |= (UINT32 )LocalCfgPtr->Azalia.AzaliaConfig.AzaliaSdin1; PinRouting <<= 8; PinRouting |= (UINT32 )LocalCfgPtr->Azalia.AzaliaConfig.AzaliaSdin0; do { if ( ( ! (PinRouting & BIT0) ) && (PinRouting & BIT1) ) { ConfigureAzaliaPinCmd (LocalCfgPtr, BAR0, ChannelNum); } PinRouting >>= 8; ChannelNum++; } while ( ChannelNum != 4 ); } else { // //No Azalia codec found // if ( LocalCfgPtr->Azalia.AzaliaEnable != AzEnable ) { EnableAzalia = FALSE; ///set flag to disable Azalia } } } if ( EnableAzalia ) { if ( LocalCfgPtr->Azalia.AzaliaSnoop == 1 ) { RwPci ((((0x14<<3)+2) << 16) + 0x42, AccessWidth8, 0xFF, BIT1 + BIT0, StdHeader); } } else { // //disable Azalia controller // RwPci ((((0x14<<3)+2) << 16) + 0x04, AccessWidth16, 0, 0, StdHeader); RwMem (ACPI_MMIO_BASE + PMIO_BASE + 0xEB , AccessWidth8, (UINT32)~BIT0, 0); RwMem (ACPI_MMIO_BASE + PMIO_BASE + 0xEB , AccessWidth8, (UINT32)~BIT0, 0); } } /** * Pin Config for ALC880, ALC882 and ALC883. * * * */ CODEC_ENTRY AzaliaCodecAlc882Table[] = { {0x14, 0x01014010}, {0x15, 0x01011012}, {0x16, 0x01016011}, {0x17, 0x01012014}, {0x18, 0x01A19030}, {0x19, 0x411111F0}, {0x1a, 0x01813080}, {0x1b, 0x411111F0}, {0x1C, 0x411111F0}, {0x1d, 0x411111F0}, {0x1e, 0x01441150}, {0x1f, 0x01C46160}, {0xff, 0xffffffff} }; /** * Pin Config for ALC0262. * * * */ CODEC_ENTRY AzaliaCodecAlc262Table[] = { {0x14, 0x01014010}, {0x15, 0x411111F0}, {0x16, 0x411111F0}, {0x18, 0x01A19830}, {0x19, 0x02A19C40}, {0x1a, 0x01813031}, {0x1b, 0x02014C20}, {0x1c, 0x411111F0}, {0x1d, 0x411111F0}, {0x1e, 0x0144111E}, {0x1f, 0x01C46150}, {0xff, 0xffffffff} }; /** * Pin Config for ALC0269. * * * */ CODEC_ENTRY AzaliaCodecAlc269Table[] = { {0x12, 0x99A308F0}, {0x14, 0x99130010}, {0x15, 0x0121101F}, {0x16, 0x99036120}, {0x18, 0x01A19850}, {0x19, 0x99A309F0}, {0x1a, 0x01813051}, {0x1b, 0x0181405F}, {0x1d, 0x40134601}, {0x1e, 0x01442130}, {0x11, 0x99430140}, {0x20, 0x0030FFFF}, {0xff, 0xffffffff} }; /** * Pin Config for ALC0861. * * * */ CODEC_ENTRY AzaliaCodecAlc861Table[] = { {0x01, 0x8086C601}, {0x0B, 0x01014110}, {0x0C, 0x01813140}, {0x0D, 0x01A19941}, {0x0E, 0x411111F0}, {0x0F, 0x02214420}, {0x10, 0x02A1994E}, {0x11, 0x99330142}, {0x12, 0x01451130}, {0x1F, 0x411111F0}, {0x20, 0x411111F0}, {0x23, 0x411111F0}, {0xff, 0xffffffff} }; /** * Pin Config for ALC0889. * * * */ CODEC_ENTRY AzaliaCodecAlc889Table[] = { {0x11, 0x411111F0}, {0x14, 0x01014010}, {0x15, 0x01011012}, {0x16, 0x01016011}, {0x17, 0x01013014}, {0x18, 0x01A19030}, {0x19, 0x411111F0}, {0x1a, 0x411111F0}, {0x1b, 0x411111F0}, {0x1C, 0x411111F0}, {0x1d, 0x411111F0}, {0x1e, 0x01442150}, {0x1f, 0x01C42160}, {0xff, 0xffffffff} }; /** * Pin Config for ADI1984. 
* * * */ CODEC_ENTRY AzaliaCodecAd1984Table[] = { {0x11, 0x0221401F}, {0x12, 0x90170110}, {0x13, 0x511301F0}, {0x14, 0x02A15020}, {0x15, 0x50A301F0}, {0x16, 0x593301F0}, {0x17, 0x55A601F0}, {0x18, 0x55A601F0}, {0x1A, 0x91F311F0}, {0x1B, 0x014511A0}, {0x1C, 0x599301F0}, {0xff, 0xffffffff} }; /** * FrontPanel Config table list * * * */ CODEC_ENTRY FrontPanelAzaliaCodecTableList[] = { {0x19, 0x02A19040}, {0x1b, 0x02214020}, {0xff, 0xffffffff} }; /** * Current HD Audio support codec list * * * */ CODEC_TBL_LIST AzaliaCodecTableList[] = { {0x010ec0880, &AzaliaCodecAlc882Table[0]}, {0x010ec0882, &AzaliaCodecAlc882Table[0]}, {0x010ec0883, &AzaliaCodecAlc882Table[0]}, {0x010ec0885, &AzaliaCodecAlc882Table[0]}, {0x010ec0889, &AzaliaCodecAlc889Table[0]}, {0x010ec0262, &AzaliaCodecAlc262Table[0]}, {0x010ec0269, &AzaliaCodecAlc269Table[0]}, {0x010ec0861, &AzaliaCodecAlc861Table[0]}, {0x011d41984, &AzaliaCodecAd1984Table[0]}, { (UINT32) 0x0FFFFFFFF, (CODEC_ENTRY*) (UINTN)0x0FFFFFFFF} }; /** * ConfigureAzaliaPinCmd - Configuration HD Audio PIN Command * * * @param[in] FchDataPtr Fch configuration structure pointer. * @param[in] BAR0 HD Audio BAR0 base address. * @param[in] ChannelNum Channel Number. * */ VOID ConfigureAzaliaPinCmd ( IN FCH_DATA_BLOCK *FchDataPtr, IN UINT32 BAR0, IN UINT8 ChannelNum ) { UINT32 AzaliaTempVariable; UINT32 ChannelNumDword; CODEC_TBL_LIST *TempAzaliaOemCodecTablePtr; CODEC_ENTRY *TempAzaliaCodecEntryPtr; if ( (FchDataPtr->Azalia.AzaliaPinCfg) != 1 ) { return; } ChannelNumDword = ChannelNum << 28; AzaliaTempVariable = 0xF0000; AzaliaTempVariable |= ChannelNumDword; WriteMem (BAR0 + 0x60, AccessWidth32, &AzaliaTempVariable); FchStall (600, FchDataPtr->StdHeader); ReadMem (BAR0 + 0x64, AccessWidth32, &AzaliaTempVariable); if ( ((FchDataPtr->Azalia.AzaliaOemCodecTablePtr) == NULL) || ((FchDataPtr->Azalia.AzaliaOemCodecTablePtr) == ((CODEC_TBL_LIST*) (UINTN)0xFFFFFFFF))) { TempAzaliaOemCodecTablePtr = (CODEC_TBL_LIST*) (&AzaliaCodecTableList[0]); } else { TempAzaliaOemCodecTablePtr = (CODEC_TBL_LIST*) FchDataPtr->Azalia.AzaliaOemCodecTablePtr; } while ( TempAzaliaOemCodecTablePtr->CodecId != 0xFFFFFFFF ) { if ( TempAzaliaOemCodecTablePtr->CodecId == AzaliaTempVariable ) { break; } else { ++TempAzaliaOemCodecTablePtr; } } if ( TempAzaliaOemCodecTablePtr->CodecId != 0xFFFFFFFF ) { TempAzaliaCodecEntryPtr = (CODEC_ENTRY*) TempAzaliaOemCodecTablePtr->CodecTablePtr; if ( ((FchDataPtr->Azalia.AzaliaOemCodecTablePtr) == NULL) || ((FchDataPtr->Azalia.AzaliaOemCodecTablePtr) == ((CODEC_TBL_LIST*) (UINTN)0xFFFFFFFF)) ) { TempAzaliaCodecEntryPtr = (CODEC_ENTRY*) (TempAzaliaCodecEntryPtr); } ConfigureAzaliaSetConfigD4Dword (TempAzaliaCodecEntryPtr, ChannelNumDword, BAR0, FchDataPtr->StdHeader); if ( FchDataPtr->Azalia.AzaliaFrontPanel != 1 ) { if ( (FchDataPtr->Azalia.AzaliaFrontPanel == 2) || (FchDataPtr->Azalia.FrontPanelDetected == 1) ) { if ( ((FchDataPtr->Azalia.AzaliaOemFpCodecTablePtr) == NULL) || ((FchDataPtr->Azalia.AzaliaOemFpCodecTablePtr) == (VOID*) (UINTN)0xFFFFFFFF) ) { TempAzaliaCodecEntryPtr = (CODEC_ENTRY*) (&FrontPanelAzaliaCodecTableList[0]); } else { TempAzaliaCodecEntryPtr = (CODEC_ENTRY*) FchDataPtr->Azalia.AzaliaOemFpCodecTablePtr; } ConfigureAzaliaSetConfigD4Dword (TempAzaliaCodecEntryPtr, ChannelNumDword, BAR0, FchDataPtr->StdHeader); } } } } /** * ConfigureAzaliaSetConfigD4Dword - Configuration HD Audio Codec table * * * @param[in] TempAzaliaCodecEntryPtr HD Audio Codec table structure pointer. * @param[in] ChannelNumDword HD Audio Channel Number. 
* @param[in] BAR0 HD Audio BAR0 base address. * @param[in] StdHeader * */ VOID ConfigureAzaliaSetConfigD4Dword ( IN CODEC_ENTRY *TempAzaliaCodecEntryPtr, IN UINT32 ChannelNumDword, IN UINT32 BAR0, IN AMD_CONFIG_PARAMS *StdHeader ) { UINT8 TempByte1; UINT8 TempByte2; UINT8 Index; UINT32 TempDword1; UINT32 TempDword2; TempDword1 = 0; TempDword2 = 0; while ( (TempAzaliaCodecEntryPtr->Nid) != 0xFF ) { TempByte1 = 0x20; if ( (TempAzaliaCodecEntryPtr->Nid) == 0x1 ) { TempByte1 = 0x24; } TempDword1 = TempAzaliaCodecEntryPtr->Nid; TempDword1 &= 0xff; TempDword1 <<= 20; TempDword1 |= ChannelNumDword; TempDword1 |= (0x700 << 8); for ( Index = 4; Index > 0; Index-- ) { do { ReadMem (BAR0 + 0x68, AccessWidth32, &TempDword2); } while ( (TempDword2 & BIT0) != 0 ); TempByte2 = (UINT8) (( (TempAzaliaCodecEntryPtr->Byte40) >> ((4 - Index) * 8 ) ) & 0xff); TempDword1 = (TempDword1 & 0xFFFF0000) + ((TempByte1 - Index) << 8) + TempByte2; WriteMem (BAR0 + 0x60, AccessWidth32, &TempDword1); FchStall (60, StdHeader); } ++TempAzaliaCodecEntryPtr; } }
repo_name: hustcalm/coreboot-hacking
path:      src/vendorcode/amd/agesa/f15tn/Proc/Fch/Azalia/AzaliaMid.c
language:  C
license:   gpl-2.0
size:      16,108
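Buried in the record above is a simple table-driven scheme: ConfigureAzaliaPinCmd() reads a codec ID back from the verb ring, scans a CODEC_TBL_LIST terminated by 0xFFFFFFFF for a matching table, then ConfigureAzaliaSetConfigD4Dword() walks that table's {NID, pin-config} pairs until the 0xFF terminator. A sketch of the lookup and walk with register writes replaced by printf (structures trimmed to the fields used; the sample codec ID matches the file's ALC262 entry):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef struct { uint8_t Nid; uint32_t Byte40; } CODEC_ENTRY;
typedef struct { uint32_t CodecId; const CODEC_ENTRY *CodecTablePtr; } CODEC_TBL_LIST;

/* two entries of the ALC262 table from the file, plus the 0xFF terminator */
static const CODEC_ENTRY Alc262[] = {
    { 0x14, 0x01014010 }, { 0x1f, 0x01C46150 }, { 0xff, 0xffffffff }
};
static const CODEC_TBL_LIST tables[] = {
    { 0x10ec0262, Alc262 },
    { 0xffffffff, NULL }   /* list terminator, as in AzaliaCodecTableList */
};

int main(void)
{
    uint32_t codec_id = 0x10ec0262;  /* would be read back from the verb ring */
    const CODEC_TBL_LIST *t = tables;

    while (t->CodecId != 0xffffffff && t->CodecId != codec_id)
        ++t;
    if (t->CodecId == 0xffffffff) {
        printf("codec 0x%08x: no table, keeping codec defaults\n",
               (unsigned)codec_id);
        return 0;
    }
    for (const CODEC_ENTRY *e = t->CodecTablePtr; e->Nid != 0xff; ++e)
        printf("NID 0x%02x <- pin config 0x%08x\n",
               (unsigned)e->Nid, (unsigned)e->Byte40);
    return 0;
}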
/* This project is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Deviation is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Deviation. If not, see <http://www.gnu.org/licenses/>. */ #include <libopencm3/cm3/systick.h> #include <libopencm3/stm32/timer.h> #include <libopencm3/stm32/usart.h> #include <libopencm3/stm32/pwr.h> #include <libopencm3/stm32/f2/rcc.h> #include <libopencm3/stm32/f2/rtc.h> #include <libopencm3/cm3/nvic.h> #include <libopencm3/stm32/iwdg.h> #include "common.h" #include "rtc.h" #include "../common/devo/devo.h" //The following is from an unreleased libopencm3 //We should remove it eventually #if 1 void iwdg_start(void); void iwdg_set_period_ms(u32 period); bool iwdg_reload_busy(void); bool iwdg_prescaler_busy(void); void iwdg_reset(void); #endif volatile u32 msecs; volatile u32 msecTimer1; volatile u32 msecTimer2; volatile u32 wdg_time; u16 (*timer_callback)(void); volatile u8 msec_callbacks; volatile u32 msec_cbtime[NUM_MSEC_CALLBACKS]; void CLOCK_Init() { /* 60MHz / 8 => 7500000 counts per second */ systick_set_clocksource(STK_CTRL_CLKSOURCE_AHB_DIV8); /* 7500000/7500 = 1000 overflows per second - every 1ms one interrupt */ systick_set_reload(7500); nvic_set_priority(NVIC_SYSTICK_IRQ, 0x0); //Highest priority /* We trigger exti2 right before the watchdog fires to do a stack dump */ nvic_set_priority(NVIC_EXTI2_IRQ, 0x01); //Highest priority systick_interrupt_enable(); msecs = 0; msec_callbacks = 0; /* Start counting. */ systick_counter_enable(); /* Setup timer for Transmitter */ timer_callback = NULL; /* Enable TIM5 clock. */ rcc_peripheral_enable_clock(&RCC_APB1ENR, RCC_APB1ENR_TIM5EN); /* Enable TIM5 interrupt. */ nvic_enable_irq(NVIC_TIM5_IRQ); nvic_set_priority(NVIC_TIM5_IRQ, 16); //High priority timer_disable_counter(TIM5); /* Reset TIM5 peripheral. */ timer_reset(TIM5); /* Timer global mode: * - No divider * - Alignment edge * - Direction up */ timer_set_mode(TIM5, TIM_CR1_CKD_CK_INT, TIM_CR1_CMS_EDGE, TIM_CR1_DIR_UP); /* timer updates each microsecond */ timer_set_prescaler(TIM5, 60 - 1); timer_set_period(TIM5, 65535); /* Disable preload. */ timer_disable_preload(TIM5); /* Continous mode. */ timer_continuous_mode(TIM5); /* Disable outputs. */ timer_disable_oc_output(TIM5, TIM_OC1); timer_disable_oc_output(TIM5, TIM_OC2); timer_disable_oc_output(TIM5, TIM_OC3); timer_disable_oc_output(TIM5, TIM_OC4); /* Enable CCP1 */ timer_disable_oc_clear(TIM5, TIM_OC1); timer_disable_oc_preload(TIM5, TIM_OC1); timer_set_oc_slow_mode(TIM5, TIM_OC1); timer_set_oc_mode(TIM5, TIM_OC1, TIM_OCM_FROZEN); /* Disable CCP1 interrupt. */ timer_disable_irq(TIM5, TIM_DIER_CC1IE); timer_enable_counter(TIM5); /* Enable EXTI1 interrupt. 
*/ /* We are enabling only the interrupt * We'll manually trigger this via set_pending_interrupt */ nvic_enable_irq(NVIC_EXTI1_IRQ); nvic_set_priority(NVIC_EXTI1_IRQ, 64); //Medium priority /* Enable DMA Channel1 with same priority as EXTI1 */ //FIXME //nvic_enable_irq(NVIC_DMA1_STREAM1_IRQ); //nvic_set_priority(NVIC_DMA1_STREAM1_IRQ, 65); //Medium priority /* wait for system to start up and stabilize */ while(msecs < 100) ; } void CLOCK_StartTimer(unsigned us, u16 (*cb)(void)) { if(! cb) return; timer_callback = cb; /* Counter enable. */ u16 t = timer_get_counter(TIM5); /* Set the capture compare value for OC1. */ timer_set_oc_value(TIM5, TIM_OC1, us + t); timer_clear_flag(TIM5, TIM_SR_CC1IF); timer_enable_irq(TIM5, TIM_DIER_CC1IE); } void CLOCK_StartWatchdog() { iwdg_set_period_ms(3000); iwdg_start(); wdg_time = msecs; nvic_clear_pending_irq(NVIC_EXTI2_IRQ); nvic_enable_irq(NVIC_EXTI2_IRQ); } void CLOCK_ResetWatchdog() { iwdg_reset(); wdg_time = msecs; } void CLOCK_StopTimer() { timer_disable_irq(TIM5, TIM_DIER_CC1IE); timer_callback = NULL; } void tim5_isr() { if(timer_callback) { #ifdef TIMING_DEBUG debug_timing(4, 0); #endif u16 us = timer_callback(); #ifdef TIMING_DEBUG debug_timing(4, 1); #endif timer_clear_flag(TIM5, TIM_SR_CC1IF); if (us) { timer_set_oc_value(TIM5, TIM_OC1, us + TIM_CCR1(TIM5)); return; } } CLOCK_StopTimer(); } u32 CLOCK_getms() { return msecs; } void CLOCK_SetMsecCallback(int cb, u32 msec) { msec_cbtime[cb] = msecs + msec; msec_callbacks |= (1 << cb); } void CLOCK_ClearMsecCallback(int cb) { msec_callbacks &= ~(1 << cb); } void exti1_isr() { //ADC_StartCapture(); //ADC completion will trigger update ADC_Filter(); medium_priority_cb(); } void sys_tick_handler(void) { msecs++; if(msecTimer1) msecTimer1--; if(msecTimer2) msecTimer2--; if(msecs - wdg_time > 2000) { nvic_set_pending_irq(NVIC_EXTI2_IRQ); return; } if(msec_callbacks & (1 << MEDIUM_PRIORITY)) { if (msecs == msec_cbtime[MEDIUM_PRIORITY]) { //medium priority tasks execute in interrupt and main loop context nvic_set_pending_irq(NVIC_EXTI1_IRQ); priority_ready |= 1 << MEDIUM_PRIORITY; msec_cbtime[MEDIUM_PRIORITY] = msecs + MEDIUM_PRIORITY_MSEC; } } if(msec_callbacks & (1 << LOW_PRIORITY)) { if (msecs == msec_cbtime[LOW_PRIORITY]) { //Low priority tasks execute in the main loop priority_ready |= 1 << LOW_PRIORITY; msec_cbtime[LOW_PRIORITY] = msecs + LOW_PRIORITY_MSEC; } } if(msec_callbacks & (1 << TIMER_SOUND)) { if (msecs == msec_cbtime[TIMER_SOUND]) { u16 ms = SOUND_Callback(); if(! 
ms) msec_callbacks &= ~(1 << TIMER_SOUND); else msec_cbtime[TIMER_SOUND] = msecs + ms; } } } // initialize RTC void RTC_Init() { rcc_peripheral_enable_clock(&RCC_APB1ENR, RCC_APB1ENR_PWREN); pwr_disable_backup_domain_write_protect(); rcc_osc_on(LSE); rcc_wait_for_osc_ready(LSE); RCC_BDCR |= RCC_BDCR_SRC_LSE; //Set source to LSE RCC_BDCR |= RCC_BDCR_RTCEN; //Enable RTC rtc_wait_for_synchro(); rtc_set_prescaler(255, 127); } static const u16 daysInYear[2][13] = { { 0,31,59,90,120,151,181,212,243,273,304,334,365}, { 0,31,60,91,121,152,182,213,244,274,305,335,366} }; // set date value (deviation epoch = seconds since 1.1.2012, 00:00:00) void RTC_SetValue(u32 value) { value += 4382 * 60 * 60 * 24; //convert date from 1.1.2012, 00:00:00 to 1.1.2000, 00:00:00 uint32_t date = 0, time = 0; const uint32_t SEC = value % 60; const uint32_t MIN = (value / 60) % 60; const uint32_t HR = (value / 60 / 60) % 24; uint32_t DAYS = (value / 60 / 60 / 24); uint32_t DAY = 0; uint32_t YEAR = (4*DAYS) / 1461; // = days/365.25 uint32_t LEAP = (YEAR % 4 == 0) ? 1 : 0; uint32_t WEEKDAY = DAYS / 7 + 7; //1/1/2000 was a Saturday uint32_t MONTH = 0; //Convert time to bcd time |= (SEC % 10) << 0; //seconds ones time |= (SEC / 10) << 4; //seconds tens time |= (MIN % 10) << 8; //minutes ones time |= (MIN / 10) << 12; //minutes tens time |= (HR % 10) << 16; //hours ones time |= (HR / 10) << 20; //hours tens //Convert date to bcd DAYS -= (uint32_t)(YEAR * 365 + YEAR / 4); DAYS -= (DAYS > daysInYear[LEAP][2]) ? 1 : 0; //leap year correction for RTC_STARTYEAR for (MONTH=0; MONTH<12; MONTH++) { if (DAYS < daysInYear[LEAP][MONTH + 1]) break; } DAY = DAYS - daysInYear[LEAP][MONTH]; date |= (DAY % 10) << 0; //date in ones date |= (DAY / 10) << 4; //date in tens date |= (MONTH % 10) << 8; //month in ones date |= (MONTH / 10) << 12; //month in tens date |= (WEEKDAY) << 13; //weekday date |= (YEAR % 10) << 16; //year in ones date |= (YEAR / 10) << 20; //year in tens //Unlock rtc_unlock(); //Enter Init mode RTC_ISR = 0xFFFFFFFF; for(int i = 0; i < 0x10000; i++) if((RTC_ISR & RTC_ISR_INITF) == 0) break; //SetDate RTC_DR = date; RTC_TR = time; // Exit Init mode RTC_ISR &= (uint32_t)~RTC_ISR_INIT; //Wait for synch rtc_wait_for_synchro(); //Lock rtc_lock(); } // get date value (deviation epoch = seconds since 1.1.2012, 00:00:00) u32 RTC_GetValue() { u32 value = 0; uint32_t time = RTC_TR; uint32_t date = RTC_DR; const uint32_t YEAR = (((date >> 20) & 0x0f) * 10) + ((date >> 16) & 0x0f) - 12; const uint32_t MONTH = (((date >> 12) & 0x01) * 10) + ((date >> 8) & 0x0f); const uint32_t DAY = (((date >> 4) & 0x03) * 10) + ((date >> 0) & 0x0f); const uint32_t HOUR = (((time >> 20) & 0x03) * 10) + ((time >> 16) & 0x0f); const uint32_t MIN = (((time >> 12) & 0x07) * 10) + ((time >> 8) & 0x0f); const uint32_t SEC = (((time >> 4) & 0x07) * 10) + ((time >> 0) & 0x0f); value += (DAY-1 + daysInYear[YEAR%4 == 0 ? 1 : 0][MONTH-1] + YEAR * 365 + YEAR/4 + ((YEAR != 0 && MONTH > 2) ? 
1 : 0)) * (60*60*24); value += HOUR*60*60 + MIN*60+SEC; return value; } void rtc_gettime(struct gtm * t) { uint32_t time = RTC_TR; uint32_t date = RTC_DR; const uint32_t YEAR = (((date >> 20) & 0x0f) * 10) + ((date >> 16) & 0x0f); const uint32_t MONTH = (((date >> 12) & 0x01) * 10) + ((date >> 8) & 0x0f); const uint32_t DAY = (((date >> 4) & 0x03) * 10) + ((date >> 0) & 0x0f); const uint32_t HOUR = (((time >> 20) & 0x03) * 10) + ((time >> 16) & 0x0f); const uint32_t MIN = (((time >> 12) & 0x07) * 10) + ((time >> 8) & 0x0f); const uint32_t SEC = (((time >> 4) & 0x07) * 10) + ((time >> 0) & 0x0f); t->tm_hour = HOUR; t->tm_min = MIN; t->tm_sec = SEC; t->tm_year = YEAR + 100; t->tm_mon = MONTH; t->tm_mday = DAY; }
repo_name: cctsao1008/deviation
path:      src/target/x9d/clock.c
language:  C
license:   gpl-3.0
size:      10,674
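RTC_SetValue()/RTC_GetValue() above spend most of their lines converting between a seconds-since-epoch count and the BCD layout of the STM32 RTC_TR/RTC_DR registers: each two-digit field is stored as separate ones and tens nibbles. A standalone round trip of the time packing, using the same shifts and masks as the file:

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_time(unsigned hr, unsigned min, unsigned sec)
{
    uint32_t t = 0;
    t |= (sec % 10) << 0;  t |= (sec / 10) << 4;   /* seconds ones/tens */
    t |= (min % 10) << 8;  t |= (min / 10) << 12;  /* minutes ones/tens */
    t |= (hr  % 10) << 16; t |= (hr  / 10) << 20;  /* hours ones/tens   */
    return t;
}

static void unpack_time(uint32_t t, unsigned *hr, unsigned *min, unsigned *sec)
{
    *hr  = ((t >> 20) & 0x3) * 10 + ((t >> 16) & 0xf);
    *min = ((t >> 12) & 0x7) * 10 + ((t >>  8) & 0xf);
    *sec = ((t >>  4) & 0x7) * 10 + ((t >>  0) & 0xf);
}

int main(void)
{
    unsigned h, m, s;
    uint32_t tr = pack_time(13, 37, 42);
    unpack_time(tr, &h, &m, &s);
    printf("RTC_TR=0x%06x -> %02u:%02u:%02u\n", (unsigned)tr, h, m, s);
    return 0;  /* prints RTC_TR=0x133742 -> 13:37:42 */
}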
#define PETSCMAT_DLL #include "src/mat/impls/aij/seq/aij.h" EXTERN PetscErrorCode Mat_CheckInode(Mat,PetscTruth); EXTERN_C_BEGIN EXTERN PetscErrorCode PETSCMAT_DLLEXPORT MatInodeAdjustForInodes_Inode(Mat,IS*,IS*); EXTERN PetscErrorCode PETSCMAT_DLLEXPORT MatInodeGetInodeSizes_Inode(Mat,PetscInt*,PetscInt*[],PetscInt*); EXTERN_C_END #undef __FUNCT__ #define __FUNCT__ "MatView_Inode" PetscErrorCode MatView_Inode(Mat A,PetscViewer viewer) { Mat_SeqAIJ *a=(Mat_SeqAIJ*)A->data; PetscErrorCode ierr; PetscTruth iascii; PetscViewerFormat format; PetscFunctionBegin; ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr); if (iascii) { ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr); if (format == PETSC_VIEWER_ASCII_INFO_DETAIL || format == PETSC_VIEWER_ASCII_INFO) { if (a->inode.size) { ierr = PetscViewerASCIIPrintf(viewer,"using I-node routines: found %D nodes, limit used is %D\n", a->inode.node_count,a->inode.limit);CHKERRQ(ierr); } else { ierr = PetscViewerASCIIPrintf(viewer,"not using I-node routines\n");CHKERRQ(ierr); } } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatAssemblyEnd_Inode" PetscErrorCode MatAssemblyEnd_Inode(Mat A, MatAssemblyType mode) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscErrorCode ierr; PetscTruth samestructure; PetscFunctionBegin; /* info.nz_unneeded of zero denotes no structural change was made to the matrix during Assembly */ samestructure = (PetscTruth)(!A->info.nz_unneeded); /* check for identical nodes. If found, use inode functions */ ierr = Mat_CheckInode(A,samestructure);CHKERRQ(ierr); a->inode.ibdiagvalid = PETSC_FALSE; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatDestroy_Inode" PetscErrorCode MatDestroy_Inode(Mat A) { PetscErrorCode ierr; Mat_SeqAIJ *a=(Mat_SeqAIJ*)A->data; PetscFunctionBegin; ierr = PetscFree(a->inode.size);CHKERRQ(ierr); ierr = PetscFree2(a->inode.ibdiag,a->inode.bdiag);CHKERRQ(ierr); ierr = PetscObjectComposeFunctionDynamic((PetscObject)A,"MatInodeAdjustForInodes_C","",PETSC_NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunctionDynamic((PetscObject)A,"MatInodeGetInodeSizes_C","",PETSC_NULL);CHKERRQ(ierr); PetscFunctionReturn(0); } /* MatCreate_Inode is not DLLEXPORTed because it is not a constructor for a complete type. */ /* It is also not registered as a type for use within MatSetType. */ /* It is intended as a helper for the MATSEQAIJ class, so classes which desire Inodes should */ /* inherit off of MATSEQAIJ instead by calling MatSetType(MATSEQAIJ) in their constructor. */ /* Maybe this is a bad idea. (?) 
*/ #undef __FUNCT__ #define __FUNCT__ "MatCreate_Inode" PetscErrorCode MatCreate_Inode(Mat B) { Mat_SeqAIJ *b=(Mat_SeqAIJ*)B->data; PetscErrorCode ierr; PetscTruth no_inode,no_unroll; PetscFunctionBegin; no_inode = PETSC_FALSE; no_unroll = PETSC_FALSE; b->inode.node_count = 0; b->inode.size = 0; b->inode.limit = 5; b->inode.max_limit = 5; b->inode.ibdiagvalid = PETSC_FALSE; b->inode.ibdiag = 0; b->inode.bdiag = 0; ierr = PetscOptionsBegin(((PetscObject)B)->comm,((PetscObject)B)->prefix,"Options for SEQAIJ matrix","Mat");CHKERRQ(ierr); ierr = PetscOptionsTruth("-mat_no_unroll","Do not optimize for inodes (slower)",PETSC_NULL,no_unroll,&no_unroll,PETSC_NULL);CHKERRQ(ierr); if (no_unroll) {ierr = PetscInfo(B,"Not using Inode routines due to -mat_no_unroll\n");CHKERRQ(ierr);} ierr = PetscOptionsTruth("-mat_no_inode","Do not optimize for inodes (slower)",PETSC_NULL,no_inode,&no_inode,PETSC_NULL);CHKERRQ(ierr); if (no_inode) {ierr = PetscInfo(B,"Not using Inode routines due to -mat_no_inode\n");CHKERRQ(ierr);} ierr = PetscOptionsInt("-mat_inode_limit","Do not use inodes larger then this value",PETSC_NULL,b->inode.limit,&b->inode.limit,PETSC_NULL);CHKERRQ(ierr); ierr = PetscOptionsEnd();CHKERRQ(ierr); b->inode.use = (PetscTruth)(!(no_unroll || no_inode)); if (b->inode.limit > b->inode.max_limit) b->inode.limit = b->inode.max_limit; ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatInodeAdjustForInodes_C", "MatInodeAdjustForInodes_Inode", MatInodeAdjustForInodes_Inode);CHKERRQ(ierr); ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatInodeGetInodeSizes_C", "MatInodeGetInodeSizes_Inode", MatInodeGetInodeSizes_Inode);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSetOption_Inode" PetscErrorCode MatSetOption_Inode(Mat A,MatOption op,PetscTruth flg) { Mat_SeqAIJ *a=(Mat_SeqAIJ*)A->data; PetscFunctionBegin; switch(op) { case MAT_USE_INODES: a->inode.use = flg; break; default: break; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatDuplicate_Inode" PetscErrorCode MatDuplicate_Inode(Mat A,MatDuplicateOption cpvalues,Mat *C) { Mat B=*C; Mat_SeqAIJ *c=(Mat_SeqAIJ*)B->data,*a=(Mat_SeqAIJ*)A->data; PetscErrorCode ierr; PetscInt m=A->rmap.n; PetscFunctionBegin; c->inode.use = a->inode.use; c->inode.limit = a->inode.limit; c->inode.max_limit = a->inode.max_limit; if (a->inode.size){ ierr = PetscMalloc((m+1)*sizeof(PetscInt),&c->inode.size);CHKERRQ(ierr); c->inode.node_count = a->inode.node_count; ierr = PetscMemcpy(c->inode.size,a->inode.size,(m+1)*sizeof(PetscInt));CHKERRQ(ierr); } else { c->inode.size = 0; c->inode.node_count = 0; } c->inode.ibdiagvalid = PETSC_FALSE; c->inode.ibdiag = 0; c->inode.bdiag = 0; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatILUDTFactor_Inode" PetscErrorCode MatILUDTFactor_Inode(Mat A,IS isrow,IS iscol,MatFactorInfo *info,Mat *fact) { PetscErrorCode ierr; PetscFunctionBegin; /* check for identical nodes. If found, use inode functions */ ierr = Mat_CheckInode(*fact,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatLUFactorSymbolic_Inode" PetscErrorCode MatLUFactorSymbolic_Inode(Mat A,IS isrow,IS iscol,MatFactorInfo *info,Mat *fact) { PetscErrorCode ierr; PetscFunctionBegin; /* check for identical nodes. 
If found, use inode functions */ ierr = Mat_CheckInode(*fact,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatILUFactorSymbolic_Inode" PetscErrorCode MatILUFactorSymbolic_Inode(Mat A,IS isrow,IS iscol,MatFactorInfo *info,Mat *fact) { PetscErrorCode ierr; PetscFunctionBegin; /* check for identical nodes. If found, use inode functions */ ierr = Mat_CheckInode(*fact,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); }
repo_name: realincubus/pluto_clang
path:      orio-0.1.0/testsuite/petsc/inode2.c
language:  C
license:   gpl-3.0
size:      7,068
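The "inode" idea the record above plugs into: group consecutive matrix rows whose nonzero columns are identical (up to inode.limit rows per group, default 5 here) so that kernels can be unrolled across the group; Mat_CheckInode() performs that detection on the Mat_SeqAIJ internals. A sketch of the detection over plain CSR arrays (hypothetical 4x4 example, not the PETSc API):

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* 4x4 CSR: rows 0-1 have columns {0,1}, rows 2-3 have columns {2,3} */
    int ai[] = { 0, 2, 4, 6, 8 };            /* row pointers   */
    int aj[] = { 0, 1, 0, 1, 2, 3, 2, 3 };   /* column indices */
    int m = 4, limit = 5;
    int inode_sizes[4], node_count = 0;

    for (int i = 0; i < m; ) {
        int len = ai[i + 1] - ai[i], blk = 1;
        /* extend the inode while the next row has the same column list */
        while (i + blk < m && blk < limit &&
               ai[i + blk + 1] - ai[i + blk] == len &&
               memcmp(aj + ai[i], aj + ai[i + blk], len * sizeof(int)) == 0)
            blk++;
        inode_sizes[node_count++] = blk;
        i += blk;
    }
    printf("found %d inodes:", node_count);
    for (int k = 0; k < node_count; k++)
        printf(" %d", inode_sizes[k]);
    printf("\n");  /* expected: found 2 inodes: 2 2 */
    return 0;
}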
/* pmlastmsg.c * This is a parser module specifically for those horrible * "<PRI>last message repeated n times" messages notoriously generated * by some syslog implementations. Note that this parser should be placed * on top of the parser stack -- it takes out only these messages and * leaves all others for processing by the other parsers. * * NOTE: read comments in module-template.h to understand how this file * works! * * File begun on 2010-07-13 by RGerhards * * Copyright 2014-2016 Rainer Gerhards and Adiscon GmbH. * * This file is part of rsyslog. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * -or- * see COPYING.ASL20 in the source distribution * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "config.h" #include "rsyslog.h" #include <stdlib.h> #include <string.h> #include <assert.h> #include <ctype.h> #include "conf.h" #include "syslogd-types.h" #include "template.h" #include "msg.h" #include "module-template.h" #include "glbl.h" #include "errmsg.h" #include "parser.h" #include "datetime.h" #include "unicode-helper.h" MODULE_TYPE_PARSER MODULE_TYPE_NOKEEP PARSER_NAME("rsyslog.lastline") /* internal structures */ DEF_PMOD_STATIC_DATA DEFobjCurrIf(errmsg) DEFobjCurrIf(glbl) DEFobjCurrIf(parser) DEFobjCurrIf(datetime) /* static data */ static int bParseHOSTNAMEandTAG; /* cache for the equally-named global param - performance enhancement */ BEGINisCompatibleWithFeature CODESTARTisCompatibleWithFeature if(eFeat == sFEATUREAutomaticSanitazion) iRet = RS_RET_OK; if(eFeat == sFEATUREAutomaticPRIParsing) iRet = RS_RET_OK; ENDisCompatibleWithFeature /* parse a legay-formatted syslog message. */ BEGINparse uchar *p2parse; int lenMsg; #define OpeningText "last message repeated " #define ClosingText " times" CODESTARTparse dbgprintf("Message will now be parsed by \"last message repated n times\" parser.\n"); assert(pMsg != NULL); assert(pMsg->pszRawMsg != NULL); lenMsg = pMsg->iLenRawMsg - pMsg->offAfterPRI; /* note: offAfterPRI is already the number of PRI chars (do not add one!) */ p2parse = pMsg->pszRawMsg + pMsg->offAfterPRI; /* point to start of text, after PRI */ /* check if this message is of the type we handle in this (very limited) parser */ /* first, we permit SP */ while(lenMsg && *p2parse == ' ') { --lenMsg; ++p2parse; } if((unsigned) lenMsg < sizeof(OpeningText)-1 + sizeof(ClosingText)-1 + 1) { /* too short, can not be "our" message */ ABORT_FINALIZE(RS_RET_COULD_NOT_PARSE); } if(strncasecmp((char*) p2parse, OpeningText, sizeof(OpeningText)-1) != 0) { /* wrong opening text */ ABORT_FINALIZE(RS_RET_COULD_NOT_PARSE); } lenMsg -= sizeof(OpeningText) - 1; p2parse += sizeof(OpeningText) - 1; /* now we need an integer --> digits */ while(lenMsg && isdigit(*p2parse)) { --lenMsg; ++p2parse; } if(lenMsg != sizeof(ClosingText)-1) { /* size must fit, else it is not "our" message... 
*/ ABORT_FINALIZE(RS_RET_COULD_NOT_PARSE); } if(strncasecmp((char*) p2parse, ClosingText, lenMsg) != 0) { /* wrong closing text */ ABORT_FINALIZE(RS_RET_COULD_NOT_PARSE); } /* OK, now we know we need to process this message, so we do that * (and it is fairly simple in our case...) */ DBGPRINTF("pmlastmsg detected a \"last message repeated n times\" message\n"); setProtocolVersion(pMsg, MSG_LEGACY_PROTOCOL); memcpy(&pMsg->tTIMESTAMP, &pMsg->tRcvdAt, sizeof(struct syslogTime)); MsgSetMSGoffs(pMsg, pMsg->offAfterPRI); /* we don't have a header! */ MsgSetTAG(pMsg, (uchar*)"", 0); finalize_it: ENDparse BEGINmodExit CODESTARTmodExit /* release what we no longer need */ objRelease(errmsg, CORE_COMPONENT); objRelease(glbl, CORE_COMPONENT); objRelease(parser, CORE_COMPONENT); objRelease(datetime, CORE_COMPONENT); ENDmodExit BEGINqueryEtryPt CODESTARTqueryEtryPt CODEqueryEtryPt_STD_PMOD_QUERIES CODEqueryEtryPt_IsCompatibleWithFeature_IF_OMOD_QUERIES ENDqueryEtryPt BEGINmodInit() CODESTARTmodInit *ipIFVersProvided = CURR_MOD_IF_VERSION; /* we only support the current interface specification */ CODEmodInit_QueryRegCFSLineHdlr CHKiRet(objUse(glbl, CORE_COMPONENT)); CHKiRet(objUse(errmsg, CORE_COMPONENT)); CHKiRet(objUse(parser, CORE_COMPONENT)); CHKiRet(objUse(datetime, CORE_COMPONENT)); dbgprintf("lastmsg parser init called, compiled with version %s\n", VERSION); bParseHOSTNAMEandTAG = glbl.GetParseHOSTNAMEandTAG(); /* cache value, is set only during rsyslogd option processing */ ENDmodInit /* vim:set ai: */
repo_name: aturetta/rsyslog
path:      plugins/pmlastmsg/pmlastmsg.c
language:  C
license:   gpl-3.0
size:      4,969
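The parser above accepts a message only if, after optional leading spaces, it consists of exactly OpeningText, a run of digits, and ClosingText, with the length bookkeeping guaranteeing the closing text accounts for the whole remainder. A standalone version of just that recognition (PRI handling and the rsyslog module plumbing omitted):

#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>

#define OPENING "last message repeated "
#define CLOSING " times"

static int is_repeat_msg(const char *p)
{
    size_t len = strlen(p);

    while (len && *p == ' ') { --len; ++p; }           /* permit leading SP */
    if (len < sizeof(OPENING) - 1 + sizeof(CLOSING) - 1 + 1)
        return 0;                                      /* too short to match */
    if (strncasecmp(p, OPENING, sizeof(OPENING) - 1) != 0)
        return 0;                                      /* wrong opening text */
    len -= sizeof(OPENING) - 1;
    p   += sizeof(OPENING) - 1;
    while (len && isdigit((unsigned char)*p)) { --len; ++p; }
    /* whatever remains must be exactly the closing text */
    return len == sizeof(CLOSING) - 1 &&
           strncasecmp(p, CLOSING, len) == 0;
}

int main(void)
{
    printf("%d\n", is_repeat_msg("last message repeated 5 times")); /* 1 */
    printf("%d\n", is_repeat_msg("last message repeated n times")); /* 0 */
    return 0;
}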
/* Copyright (C) 2015-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <nss.h> #include <string.h> const char __nss_invalid_field_characters[] = NSS_INVALID_FIELD_CHARACTERS; /* Check that VALUE is either NULL or a NUL-terminated string which does not contain characters not permitted in NSS database fields. */ _Bool internal_function __nss_valid_field (const char *value) { return value == NULL || strpbrk (value, __nss_invalid_field_characters) == NULL; }
repo_name: geminy/aidear
path:      oss/glibc/glibc-2.24/nss/valid_field.c
language:  C
license:   gpl-3.0
size:      1,201
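__nss_valid_field() above is a one-liner worth exercising: a field is valid if it is NULL or strpbrk() finds none of the forbidden characters in it. Glibc's nss.h defines NSS_INVALID_FIELD_CHARACTERS as ":\n" (colon and newline, the /etc/passwd-style record delimiters), which this sketch inlines:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static const char invalid_field_characters[] = ":\n";

static bool valid_field(const char *value)
{
    /* NULL is permitted; otherwise no delimiter character may appear */
    return value == NULL ||
           strpbrk(value, invalid_field_characters) == NULL;
}

int main(void)
{
    printf("%d\n", valid_field("alice"));   /* 1 */
    printf("%d\n", valid_field("al:ice"));  /* 0: embedded delimiter */
    printf("%d\n", valid_field(NULL));      /* 1: NULL is allowed */
    return 0;
}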
/* Copyright (c) 1998, 1999 Thai Open Source Software Center Ltd See the file copying.txt for copying permission. */ #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include "xmlrpc_config.h" #include "xmlparse.h" #include "codepage.h" #include "xmlfile.h" #include "xmltchar.h" #if MSVCRT #include <crtdbg.h> #endif /* This ensures proper sorting. */ #define NSSEP T('\001') static void characterData(void *userData, const XML_Char *s, int len) { FILE *fp = userData; for (; len > 0; --len, ++s) { switch (*s) { case T('&'): fputts(T("&amp;"), fp); break; case T('<'): fputts(T("&lt;"), fp); break; case T('>'): fputts(T("&gt;"), fp); break; #ifdef W3C14N case 13: fputts(T("&#xD;"), fp); break; #else case T('"'): fputts(T("&quot;"), fp); break; case 9: case 10: case 13: ftprintf(fp, T("&#%d;"), *s); break; #endif default: puttc(*s, fp); break; } } } static void attributeValue(FILE *fp, const XML_Char *s) { puttc(T('='), fp); puttc(T('"'), fp); for (;;) { switch (*s) { case 0: case NSSEP: puttc(T('"'), fp); return; case T('&'): fputts(T("&amp;"), fp); break; case T('<'): fputts(T("&lt;"), fp); break; case T('"'): fputts(T("&quot;"), fp); break; #ifdef W3C14N case 9: fputts(T("&#x9;"), fp); break; case 10: fputts(T("&#xA;"), fp); break; case 13: fputts(T("&#xD;"), fp); break; #else case T('>'): fputts(T("&gt;"), fp); break; case 9: case 10: case 13: ftprintf(fp, T("&#%d;"), *s); break; #endif default: puttc(*s, fp); break; } s++; } } /* Lexicographically comparing UTF-8 encoded attribute values, is equivalent to lexicographically comparing based on the character number. */ static int attcmp(const void *att1, const void *att2) { return tcscmp(*(const XML_Char **)att1, *(const XML_Char **)att2); } static void startElement(void *userData, const XML_Char *name, const XML_Char **atts) { int nAtts; const XML_Char **p; FILE *fp = userData; puttc(T('<'), fp); fputts(name, fp); p = atts; while (*p) ++p; nAtts = (p - atts) >> 1; if (nAtts > 1) qsort((void *)atts, nAtts, sizeof(XML_Char *) * 2, attcmp); while (*atts) { puttc(T(' '), fp); fputts(*atts++, fp); attributeValue(fp, *atts); atts++; } puttc(T('>'), fp); } static void endElement(void *userData, const XML_Char *name) { FILE *fp = userData; puttc(T('<'), fp); puttc(T('/'), fp); fputts(name, fp); puttc(T('>'), fp); } static int nsattcmp(const void *p1, const void *p2) { const XML_Char *att1 = *(const XML_Char **)p1; const XML_Char *att2 = *(const XML_Char **)p2; int sep1 = (tcsrchr(att1, NSSEP) != 0); int sep2 = (tcsrchr(att1, NSSEP) != 0); if (sep1 != sep2) return sep1 - sep2; return tcscmp(att1, att2); } static void startElementNS(void *userData, const XML_Char *name, const XML_Char **atts) { int nAtts; int nsi; const XML_Char **p; FILE *fp = userData; const XML_Char *sep; puttc(T('<'), fp); sep = tcsrchr(name, NSSEP); if (sep) { fputts(T("n1:"), fp); fputts(sep + 1, fp); fputts(T(" xmlns:n1"), fp); attributeValue(fp, name); nsi = 2; } else { fputts(name, fp); nsi = 1; } p = atts; while (*p) ++p; nAtts = (p - atts) >> 1; if (nAtts > 1) qsort((void *)atts, nAtts, sizeof(XML_Char *) * 2, nsattcmp); while (*atts) { name = *atts++; sep = tcsrchr(name, NSSEP); puttc(T(' '), fp); if (sep) { ftprintf(fp, T("n%d:"), nsi); fputts(sep + 1, fp); } else fputts(name, fp); attributeValue(fp, *atts); if (sep) { ftprintf(fp, T(" xmlns:n%d"), nsi++); attributeValue(fp, name); } atts++; } puttc(T('>'), fp); } static void endElementNS(void *userData, const XML_Char *name) { FILE *fp = userData; const XML_Char *sep; puttc(T('<'), fp); 
puttc(T('/'), fp); sep = tcsrchr(name, NSSEP); if (sep) { fputts(T("n1:"), fp); fputts(sep + 1, fp); } else fputts(name, fp); puttc(T('>'), fp); } #ifndef W3C14N static void processingInstruction(void *userData, const XML_Char *target, const XML_Char *data) { FILE *fp = userData; puttc(T('<'), fp); puttc(T('?'), fp); fputts(target, fp); puttc(T(' '), fp); fputts(data, fp); puttc(T('?'), fp); puttc(T('>'), fp); } #endif /* not W3C14N */ static void defaultCharacterData(XML_Parser parser, const XML_Char *s, int len) { XML_DefaultCurrent(parser); } static void defaultStartElement(XML_Parser parser, const XML_Char *name, const XML_Char **atts) { XML_DefaultCurrent(parser); } static void defaultEndElement(XML_Parser parser, const XML_Char *name) { XML_DefaultCurrent(parser); } static void defaultProcessingInstruction(XML_Parser parser, const XML_Char *target, const XML_Char *data) { XML_DefaultCurrent(parser); } static void nopCharacterData(XML_Parser parser, const XML_Char *s, int len) { } static void nopStartElement(XML_Parser parser, const XML_Char *name, const XML_Char **atts) { } static void nopEndElement(XML_Parser parser, const XML_Char *name) { } static void nopProcessingInstruction(XML_Parser parser, const XML_Char *target, const XML_Char *data) { } static void markup(XML_Parser parser, const XML_Char *s, int len) { FILE *fp = XML_GetUserData(parser); for (; len > 0; --len, ++s) puttc(*s, fp); } static void metaLocation(XML_Parser parser) { const XML_Char *uri = XML_GetBase(parser); if (uri) ftprintf(XML_GetUserData(parser), T(" uri=\"%s\""), uri); ftprintf(XML_GetUserData(parser), T(" byte=\"%ld\" nbytes=\"%d\" line=\"%d\" col=\"%d\""), XML_GetCurrentByteIndex(parser), XML_GetCurrentByteCount(parser), XML_GetCurrentLineNumber(parser), XML_GetCurrentColumnNumber(parser)); } static void metaStartDocument(XML_Parser parser) { fputts(T("<document>\n"), XML_GetUserData(parser)); } static void metaEndDocument(XML_Parser parser) { fputts(T("</document>\n"), XML_GetUserData(parser)); } static void metaStartElement(XML_Parser parser, const XML_Char *name, const XML_Char **atts) { FILE *fp = XML_GetUserData(parser); const XML_Char **specifiedAttsEnd = atts + XML_GetSpecifiedAttributeCount(parser); const XML_Char **idAttPtr; int idAttIndex = XML_GetIdAttributeIndex(parser); if (idAttIndex < 0) idAttPtr = 0; else idAttPtr = atts + idAttIndex; ftprintf(fp, T("<starttag name=\"%s\""), name); metaLocation(parser); if (*atts) { fputts(T(">\n"), fp); do { ftprintf(fp, T("<attribute name=\"%s\" value=\""), atts[0]); characterData(fp, atts[1], tcslen(atts[1])); if (atts >= specifiedAttsEnd) fputts(T("\" defaulted=\"yes\"/>\n"), fp); else if (atts == idAttPtr) fputts(T("\" id=\"yes\"/>\n"), fp); else fputts(T("\"/>\n"), fp); } while (*(atts += 2)); fputts(T("</starttag>\n"), fp); } else fputts(T("/>\n"), fp); } static void metaEndElement(XML_Parser parser, const XML_Char *name) { FILE *fp = XML_GetUserData(parser); ftprintf(fp, T("<endtag name=\"%s\""), name); metaLocation(parser); fputts(T("/>\n"), fp); } static void metaProcessingInstruction(XML_Parser parser, const XML_Char *target, const XML_Char *data) { FILE *fp = XML_GetUserData(parser); ftprintf(fp, T("<pi target=\"%s\" data=\""), target); characterData(fp, data, tcslen(data)); puttc(T('"'), fp); metaLocation(parser); fputts(T("/>\n"), fp); } static void metaComment(XML_Parser parser, const XML_Char *data) { FILE *fp = XML_GetUserData(parser); fputts(T("<comment data=\""), fp); characterData(fp, data, tcslen(data)); puttc(T('"'), fp); 
metaLocation(parser); fputts(T("/>\n"), fp); } static void metaStartCdataSection(XML_Parser parser) { FILE *fp = XML_GetUserData(parser); fputts(T("<startcdata"), fp); metaLocation(parser); fputts(T("/>\n"), fp); } static void metaEndCdataSection(XML_Parser parser) { FILE *fp = XML_GetUserData(parser); fputts(T("<endcdata"), fp); metaLocation(parser); fputts(T("/>\n"), fp); } static void metaCharacterData(XML_Parser parser, const XML_Char *s, int len) { FILE *fp = XML_GetUserData(parser); fputts(T("<chars str=\""), fp); characterData(fp, s, len); puttc(T('"'), fp); metaLocation(parser); fputts(T("/>\n"), fp); } static void metaStartDoctypeDecl(XML_Parser parser, const XML_Char *doctypeName) { FILE *fp = XML_GetUserData(parser); ftprintf(fp, T("<startdoctype name=\"%s\""), doctypeName); metaLocation(parser); fputts(T("/>\n"), fp); } static void metaEndDoctypeDecl(XML_Parser parser) { FILE *fp = XML_GetUserData(parser); fputts(T("<enddoctype"), fp); metaLocation(parser); fputts(T("/>\n"), fp); } static void metaUnparsedEntityDecl(XML_Parser parser, const XML_Char *entityName, const XML_Char *base, const XML_Char *systemId, const XML_Char *publicId, const XML_Char *notationName) { FILE *fp = XML_GetUserData(parser); ftprintf(fp, T("<entity name=\"%s\""), entityName); if (publicId) ftprintf(fp, T(" public=\"%s\""), publicId); fputts(T(" system=\""), fp); characterData(fp, systemId, tcslen(systemId)); puttc(T('"'), fp); ftprintf(fp, T(" notation=\"%s\""), notationName); metaLocation(parser); fputts(T("/>\n"), fp); } static void metaNotationDecl(XML_Parser parser, const XML_Char *notationName, const XML_Char *base, const XML_Char *systemId, const XML_Char *publicId) { FILE *fp = XML_GetUserData(parser); ftprintf(fp, T("<notation name=\"%s\""), notationName); if (publicId) ftprintf(fp, T(" public=\"%s\""), publicId); if (systemId) { fputts(T(" system=\""), fp); characterData(fp, systemId, tcslen(systemId)); puttc(T('"'), fp); } metaLocation(parser); fputts(T("/>\n"), fp); } static void metaExternalParsedEntityDecl(XML_Parser parser, const XML_Char *entityName, const XML_Char *base, const XML_Char *systemId, const XML_Char *publicId) { FILE *fp = XML_GetUserData(parser); ftprintf(fp, T("<entity name=\"%s\""), entityName); if (publicId) ftprintf(fp, T(" public=\"%s\""), publicId); fputts(T(" system=\""), fp); characterData(fp, systemId, tcslen(systemId)); puttc(T('"'), fp); metaLocation(parser); fputts(T("/>\n"), fp); } static void metaInternalParsedEntityDecl(XML_Parser parser, const XML_Char *entityName, const XML_Char *text, int textLen) { FILE *fp = XML_GetUserData(parser); ftprintf(fp, T("<entity name=\"%s\""), entityName); metaLocation(parser); puttc(T('>'), fp); characterData(fp, text, textLen); fputts(T("</entity/>\n"), fp); } static void metaStartNamespaceDecl(XML_Parser parser, const XML_Char *prefix, const XML_Char *uri) { FILE *fp = XML_GetUserData(parser); fputts(T("<startns"), fp); if (prefix) ftprintf(fp, T(" prefix=\"%s\""), prefix); if (uri) { fputts(T(" ns=\""), fp); characterData(fp, uri, tcslen(uri)); fputts(T("\"/>\n"), fp); } else fputts(T("/>\n"), fp); } static void metaEndNamespaceDecl(XML_Parser parser, const XML_Char *prefix) { FILE *fp = XML_GetUserData(parser); if (!prefix) fputts(T("<endns/>\n"), fp); else ftprintf(fp, T("<endns prefix=\"%s\"/>\n"), prefix); } static int unknownEncodingConvert(void *data, const char *p) { return codepageConvert(*(int *)data, p); } static int unknownEncoding(void *userData, const XML_Char *name, XML_Encoding *info) { int cp; static const 
XML_Char prefixL[] = T("windows-"); static const XML_Char prefixU[] = T("WINDOWS-"); int i; for (i = 0; prefixU[i]; i++) if (name[i] != prefixU[i] && name[i] != prefixL[i]) return 0; cp = 0; for (; name[i]; i++) { static const XML_Char digits[] = T("0123456789"); const XML_Char *s = tcschr(digits, name[i]); if (!s) return 0; cp *= 10; cp += s - digits; if (cp >= 0x10000) return 0; } if (!codepageMap(cp, info->map)) return 0; info->convert = unknownEncodingConvert; /* We could just cast the code page integer to a void *, and avoid the use of release. */ info->release = free; info->data = malloc(sizeof(int)); if (!info->data) return 0; *(int *)info->data = cp; return 1; } static int notStandalone(void *userData) { return 0; } static void usage(const XML_Char *prog) { ftprintf(stderr, T("usage: %s [-n] [-p] [-r] [-s] [-w] [-x] [-d output-dir] [-e encoding] file ...\n"), prog); exit(1); } int tmain(int argc, XML_Char **argv) { int i, j; const XML_Char *outputDir = 0; const XML_Char *encoding = 0; unsigned processFlags = XML_MAP_FILE; int windowsCodePages = 0; int outputType = 0; int useNamespaces = 0; int requireStandalone = 0; int paramEntityParsing = XML_PARAM_ENTITY_PARSING_NEVER; #if MSVCRT _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF|_CRTDBG_LEAK_CHECK_DF); #endif i = 1; j = 0; while (i < argc) { if (j == 0) { if (argv[i][0] != T('-')) break; if (argv[i][1] == T('-') && argv[i][2] == T('\0')) { i++; break; } j++; } switch (argv[i][j]) { case T('r'): processFlags &= ~XML_MAP_FILE; j++; break; case T('s'): requireStandalone = 1; j++; break; case T('n'): useNamespaces = 1; j++; break; case T('p'): paramEntityParsing = XML_PARAM_ENTITY_PARSING_ALWAYS; /* fall through */ case T('x'): processFlags |= XML_EXTERNAL_ENTITIES; j++; break; case T('w'): windowsCodePages = 1; j++; break; case T('m'): outputType = 'm'; j++; break; case T('c'): outputType = 'c'; useNamespaces = 0; j++; break; case T('t'): outputType = 't'; j++; break; case T('d'): if (argv[i][j + 1] == T('\0')) { if (++i == argc) usage(argv[0]); outputDir = argv[i]; } else outputDir = argv[i] + j + 1; i++; j = 0; break; case T('e'): if (argv[i][j + 1] == T('\0')) { if (++i == argc) usage(argv[0]); encoding = argv[i]; } else encoding = argv[i] + j + 1; i++; j = 0; break; case T('\0'): if (j > 1) { i++; j = 0; break; } /* fall through */ default: usage(argv[0]); } } if (i == argc) usage(argv[0]); for (; i < argc; i++) { FILE *fp = 0; XML_Char *outName = 0; int result; XML_Parser parser; if (useNamespaces) parser = XML_ParserCreateNS(encoding, NSSEP); else parser = XML_ParserCreate(encoding); if (requireStandalone) XML_SetNotStandaloneHandler(parser, notStandalone); XML_SetParamEntityParsing(parser, paramEntityParsing); if (outputType == 't') { /* This is for doing timings; this gives a more realistic estimate of the parsing time. 
*/ outputDir = 0; XML_SetElementHandler(parser, nopStartElement, nopEndElement); XML_SetCharacterDataHandler(parser, nopCharacterData); XML_SetProcessingInstructionHandler(parser, nopProcessingInstruction); } else if (outputDir) { const XML_Char *file = argv[i]; if (tcsrchr(file, T('/'))) file = tcsrchr(file, T('/')) + 1; #if MSVCRT if (tcsrchr(file, T('\\'))) file = tcsrchr(file, T('\\')) + 1; #endif outName = malloc((tcslen(outputDir) + tcslen(file) + 2) * sizeof(XML_Char)); tcscpy(outName, outputDir); tcscat(outName, T("/")); tcscat(outName, file); fp = tfopen(outName, T("wb")); if (!fp) { tperror(outName); exit(1); } setvbuf(fp, NULL, _IOFBF, 16384); #ifdef XML_UNICODE puttc(0xFEFF, fp); #endif XML_SetUserData(parser, fp); switch (outputType) { case 'm': XML_UseParserAsHandlerArg(parser); XML_SetElementHandler(parser, metaStartElement, metaEndElement); XML_SetProcessingInstructionHandler(parser, metaProcessingInstruction); XML_SetCommentHandler(parser, metaComment); XML_SetCdataSectionHandler(parser, metaStartCdataSection, metaEndCdataSection); XML_SetCharacterDataHandler(parser, metaCharacterData); XML_SetDoctypeDeclHandler(parser, metaStartDoctypeDecl, metaEndDoctypeDecl); XML_SetUnparsedEntityDeclHandler(parser, metaUnparsedEntityDecl); XML_SetNotationDeclHandler(parser, metaNotationDecl); XML_SetExternalParsedEntityDeclHandler(parser, metaExternalParsedEntityDecl); XML_SetInternalParsedEntityDeclHandler(parser, metaInternalParsedEntityDecl); XML_SetNamespaceDeclHandler(parser, metaStartNamespaceDecl, metaEndNamespaceDecl); metaStartDocument(parser); break; case 'c': XML_UseParserAsHandlerArg(parser); XML_SetDefaultHandler(parser, markup); XML_SetElementHandler(parser, defaultStartElement, defaultEndElement); XML_SetCharacterDataHandler(parser, defaultCharacterData); XML_SetProcessingInstructionHandler(parser, defaultProcessingInstruction); break; default: if (useNamespaces) XML_SetElementHandler(parser, startElementNS, endElementNS); else XML_SetElementHandler(parser, startElement, endElement); XML_SetCharacterDataHandler(parser, characterData); #ifndef W3C14N XML_SetProcessingInstructionHandler(parser, processingInstruction); #endif /* not W3C14N */ break; } } if (windowsCodePages) XML_SetUnknownEncodingHandler(parser, unknownEncoding, 0); result = XML_ProcessFile(parser, argv[i], processFlags); if (outputDir) { if (outputType == 'm') metaEndDocument(parser); fclose(fp); if (!result) tremove(outName); free(outName); } XML_ParserFree(parser); } return 0; }
JMSDOnline/QuickBox
xmlrpc-c_1-39-07/lib/expat/xmlwf/xmlwf.c
C
gpl-3.0
17,924
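For orientation, the xmlwf tool above is built around expat's handler-registration pattern (XML_ParserCreate, XML_SetElementHandler, XML_Parse). Below is a minimal, self-contained sketch of that same pattern; it assumes the standalone <expat.h> API rather than the bundled xmlparse.h header, and the handler names (dump_start, dump_end) and the inline document string are illustrative only, not part of xmlwf.

/* Minimal sketch of the expat callback pattern used by xmlwf above.
 * Assumes the standalone expat library (<expat.h>); handler names and
 * the inline document are illustrative. */
#include <expat.h>
#include <stdio.h>
#include <string.h>

static void XMLCALL dump_start(void *userData, const XML_Char *name,
                               const XML_Char **atts)
{
    (void)atts;
    fprintf((FILE *)userData, "start: %s\n", name);
}

static void XMLCALL dump_end(void *userData, const XML_Char *name)
{
    fprintf((FILE *)userData, "end:   %s\n", name);
}

int main(void)
{
    const char doc[] = "<root><child attr=\"1\"/></root>";
    XML_Parser parser = XML_ParserCreate(NULL);

    XML_SetUserData(parser, stdout);
    XML_SetElementHandler(parser, dump_start, dump_end);

    /* Feed the whole document in one buffer; xmlwf instead drives the
     * parser through XML_ProcessFile from its xmlfile helper. */
    if (XML_Parse(parser, doc, (int)strlen(doc), 1) == XML_STATUS_ERROR)
        fprintf(stderr, "parse error: %s\n",
                XML_ErrorString(XML_GetErrorCode(parser)));

    XML_ParserFree(parser);
    return 0;
}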
/* * Doubly linked list construction and deletion. */ #include <stdlib.h> #include <verifier-builtins.h> int main() { struct T { struct T* next; struct T* prev; int data; }; struct T* x = NULL; struct T* y = NULL; x = malloc(sizeof(struct T)); x->next = NULL; x->prev = NULL; while (__VERIFIER_nondet_int()) { y = malloc(sizeof(struct T)); y->next = x; x->prev = y; y->prev = NULL; x = y; } __VERIFIER_plot("test-f0028-fixpoint"); while (x) { y = x->next; free(x); x = y; } return 0; }
kdudka/predator
tests/forester/dll.c
C
gpl-3.0
532
/* Unix SMB/CIFS implementation. Filename utility functions. Copyright (C) Tim Prouty 2009 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "includes.h" /** * XXX: This is temporary and there should be no callers of this outside of * this file once smb_filename is plumbed through all path based operations. * The one legitimate caller currently is smb_fname_str_dbg(), which this * could be made static for. */ NTSTATUS get_full_smb_filename(TALLOC_CTX *ctx, const struct smb_filename *smb_fname, char **full_name) { if (smb_fname->stream_name) { /* stream_name must always be NULL if there is no stream. */ SMB_ASSERT(smb_fname->stream_name[0] != '\0'); *full_name = talloc_asprintf(ctx, "%s%s", smb_fname->base_name, smb_fname->stream_name); } else { *full_name = talloc_strdup(ctx, smb_fname->base_name); } if (!*full_name) { return NT_STATUS_NO_MEMORY; } return NT_STATUS_OK; } /** * There are actually legitimate callers of this such as functions that * enumerate streams using the vfs_streaminfo interface and then want to * operate on each stream. */ struct smb_filename *synthetic_smb_fname(TALLOC_CTX *mem_ctx, const char *base_name, const char *stream_name, const SMB_STRUCT_STAT *psbuf, uint32_t flags) { struct smb_filename smb_fname_loc = { 0, }; /* Setup the base_name/stream_name. */ smb_fname_loc.base_name = discard_const_p(char, base_name); smb_fname_loc.stream_name = discard_const_p(char, stream_name); smb_fname_loc.flags = flags; /* Copy the psbuf if one was given. */ if (psbuf) smb_fname_loc.st = *psbuf; /* Let cp_smb_filename() do the heavy lifting. */ return cp_smb_filename(mem_ctx, &smb_fname_loc); } /** * Utility function used by VFS calls that must *NOT* operate * on a stream filename, only the base_name. */ struct smb_filename *cp_smb_filename_nostream(TALLOC_CTX *mem_ctx, const struct smb_filename *smb_fname_in) { struct smb_filename *smb_fname = cp_smb_filename(mem_ctx, smb_fname_in); if (smb_fname == NULL) { return NULL; } TALLOC_FREE(smb_fname->stream_name); return smb_fname; } /** * There are a few legitimate users of this. */ struct smb_filename *synthetic_smb_fname_split(TALLOC_CTX *ctx, const char *fname, bool posix_path) { char *stream_name = NULL; char *base_name = NULL; struct smb_filename *ret; bool ok; if (posix_path) { /* No stream name looked for. 
*/ return synthetic_smb_fname(ctx, fname, NULL, NULL, SMB_FILENAME_POSIX_PATH); } ok = split_stream_filename(ctx, fname, &base_name, &stream_name); if (!ok) { return NULL; } ret = synthetic_smb_fname(ctx, base_name, stream_name, NULL, 0); TALLOC_FREE(base_name); TALLOC_FREE(stream_name); return ret; } /** * Return a string using the talloc_tos() */ const char *smb_fname_str_dbg(const struct smb_filename *smb_fname) { char *fname = NULL; NTSTATUS status; if (smb_fname == NULL) { return ""; } status = get_full_smb_filename(talloc_tos(), smb_fname, &fname); if (!NT_STATUS_IS_OK(status)) { return ""; } return fname; } /** * Return a debug string of the path name of an fsp using the talloc_tos(). */ const char *fsp_str_dbg(const struct files_struct *fsp) { return smb_fname_str_dbg(fsp->fsp_name); } /** * Create a debug string for the fnum of an fsp. * * This is allocated to talloc_tos() or a string constant * in certain corner cases. The returned string should * hence not be free'd directly but only via the talloc stack. */ const char *fsp_fnum_dbg(const struct files_struct *fsp) { char *str; if (fsp == NULL) { return "fnum [fsp is NULL]"; } if (fsp->fnum == FNUM_FIELD_INVALID) { return "fnum [invalid value]"; } str = talloc_asprintf(talloc_tos(), "fnum %llu", (unsigned long long)fsp->fnum); if (str == NULL) { DEBUG(1, ("%s: talloc_asprintf failed\n", __FUNCTION__)); return "fnum [talloc failed!]"; } return str; } struct smb_filename *cp_smb_filename(TALLOC_CTX *mem_ctx, const struct smb_filename *in) { struct smb_filename *out; size_t base_len = 0; size_t stream_len = 0; size_t lcomp_len = 0; int num = 0; /* stream_name must always be NULL if there is no stream. */ if (in->stream_name) { SMB_ASSERT(in->stream_name[0] != '\0'); } if (in->base_name != NULL) { base_len = strlen(in->base_name) + 1; num += 1; } if (in->stream_name != NULL) { stream_len = strlen(in->stream_name) + 1; num += 1; } if (in->original_lcomp != NULL) { lcomp_len = strlen(in->original_lcomp) + 1; num += 1; } out = talloc_pooled_object(mem_ctx, struct smb_filename, num, stream_len + base_len + lcomp_len); if (out == NULL) { return NULL; } ZERO_STRUCTP(out); /* * The following allocations cannot fail as we * pre-allocated space for them in the out pooled * object. */ if (in->base_name != NULL) { out->base_name = talloc_memdup( out, in->base_name, base_len); talloc_set_name_const(out->base_name, out->base_name); } if (in->stream_name != NULL) { out->stream_name = talloc_memdup( out, in->stream_name, stream_len); talloc_set_name_const(out->stream_name, out->stream_name); } if (in->original_lcomp != NULL) { out->original_lcomp = talloc_memdup( out, in->original_lcomp, lcomp_len); talloc_set_name_const(out->original_lcomp, out->original_lcomp); } out->flags = in->flags; out->st = in->st; return out; } /**************************************************************************** Simple check to determine if the filename is a stream. ***************************************************************************/ bool is_ntfs_stream_smb_fname(const struct smb_filename *smb_fname) { /* stream_name must always be NULL if there is no stream. 
*/ if (smb_fname->stream_name) { SMB_ASSERT(smb_fname->stream_name[0] != '\0'); } if (smb_fname->flags & SMB_FILENAME_POSIX_PATH) { return false; } return smb_fname->stream_name != NULL; } /**************************************************************************** Returns true if the filename's stream == "::$DATA" ***************************************************************************/ bool is_ntfs_default_stream_smb_fname(const struct smb_filename *smb_fname) { if (!is_ntfs_stream_smb_fname(smb_fname)) { return false; } return strcasecmp_m(smb_fname->stream_name, "::$DATA") == 0; } /**************************************************************************** Filter out Windows invalid EA names (list probed from Windows 2012). ****************************************************************************/ static char bad_ea_name_chars[] = "\"*+,/:;<=>?[\\]|"; bool is_invalid_windows_ea_name(const char *name) { int i; /* EA name is pulled as ascii so we can examine individual bytes here. */ for (i = 0; name[i] != 0; i++) { int val = (name[i] & 0xff); if (val < ' ' || strchr(bad_ea_name_chars, val)) { return true; } } return false; } bool ea_list_has_invalid_name(struct ea_list *ea_list) { for (;ea_list; ea_list = ea_list->next) { if (is_invalid_windows_ea_name(ea_list->ea.name)) { return true; } } return false; } /**************************************************************************** Split an incoming name into tallocd filename and stream components. Returns true on success, false on out of memory. ****************************************************************************/ bool split_stream_filename(TALLOC_CTX *ctx, const char *filename_in, char **filename_out, char **streamname_out) { const char *stream_name = NULL; char *stream_out = NULL; char *file_out = NULL; stream_name = strchr_m(filename_in, ':'); if (stream_name) { stream_out = talloc_strdup(ctx, stream_name); if (stream_out == NULL) { return false; } file_out = talloc_strndup(ctx, filename_in, PTR_DIFF(stream_name, filename_in)); } else { file_out = talloc_strdup(ctx, filename_in); } if (file_out == NULL) { TALLOC_FREE(stream_out); return false; } if (filename_out) { *filename_out = file_out; } if (streamname_out) { *streamname_out = stream_out; } return true; }
sathieu/samba
source3/lib/filename_util.c
C
gpl-3.0
8,836
/* Cubesat Space Protocol - A small network-layer protocol designed for Cubesats Copyright (C) 2012 Gomspace ApS (http://www.gomspace.com) Copyright (C) 2012 AAUSAT3 Project (http://aausat3.space.aau.dk) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* Inspired by c-pthread-queue by Matthew Dickinson http://code.google.com/p/c-pthread-queue/ */ #include <pthread.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <stdint.h> #include <mach/clock.h> #include <mach/mach.h> /* CSP includes */ #include "pthread_queue.h" pthread_queue_t * pthread_queue_create(int length, size_t item_size) { pthread_queue_t * q = malloc(sizeof(pthread_queue_t)); if (q != NULL) { q->buffer = malloc(length*item_size); if (q->buffer != NULL) { q->size = length; q->item_size = item_size; q->items = 0; q->in = 0; q->out = 0; if (pthread_mutex_init(&(q->mutex), NULL) || pthread_cond_init(&(q->cond_full), NULL) || pthread_cond_init(&(q->cond_empty), NULL)) { free(q->buffer); free(q); q = NULL; } } else { free(q); q = NULL; } } return q; } void pthread_queue_delete(pthread_queue_t * q) { if (q == NULL) return; free(q->buffer); free(q); return; } int pthread_queue_enqueue(pthread_queue_t * queue, void * value, uint32_t timeout) { int ret; /* Calculate timeout */ struct timespec ts; clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts.tv_sec = mts.tv_sec; ts.tv_nsec = mts.tv_nsec; uint32_t sec = timeout / 1000; uint32_t nsec = (timeout - 1000 * sec) * 1000000; ts.tv_sec += sec; if (ts.tv_nsec + nsec > 1000000000) ts.tv_sec++; ts.tv_nsec = (ts.tv_nsec + nsec) % 1000000000; /* Get queue lock */ pthread_mutex_lock(&(queue->mutex)); while (queue->items == queue->size) { ret = pthread_cond_timedwait(&(queue->cond_full), &(queue->mutex), &ts); if (ret != 0) { pthread_mutex_unlock(&(queue->mutex)); return PTHREAD_QUEUE_FULL; } } /* Coby object from input buffer */ memcpy(queue->buffer+(queue->in * queue->item_size), value, queue->item_size); queue->items++; queue->in = (queue->in + 1) % queue->size; pthread_mutex_unlock(&(queue->mutex)); /* Nofify blocked threads */ pthread_cond_broadcast(&(queue->cond_empty)); return PTHREAD_QUEUE_OK; } int pthread_queue_dequeue(pthread_queue_t * queue, void * buf, uint32_t timeout) { int ret; /* Calculate timeout */ struct timespec ts; clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts.tv_sec = mts.tv_sec; ts.tv_nsec = mts.tv_nsec; uint32_t sec = timeout / 1000; uint32_t nsec = (timeout - 1000 * sec) * 1000000; ts.tv_sec += sec; if (ts.tv_nsec + nsec > 1000000000) ts.tv_sec++; ts.tv_nsec = (ts.tv_nsec + nsec) % 1000000000; /* Get queue lock */ 
pthread_mutex_lock(&(queue->mutex)); while (queue->items == 0) { ret = pthread_cond_timedwait(&(queue->cond_empty), &(queue->mutex), &ts); if (ret != 0) { pthread_mutex_unlock(&(queue->mutex)); return PTHREAD_QUEUE_EMPTY; } } /* Copy object to output buffer */ memcpy(buf, queue->buffer+(queue->out * queue->item_size), queue->item_size); queue->items--; queue->out = (queue->out + 1) % queue->size; pthread_mutex_unlock(&(queue->mutex)); /* Notify blocked threads */ pthread_cond_broadcast(&(queue->cond_full)); return PTHREAD_QUEUE_OK; } int pthread_queue_items(pthread_queue_t * queue) { pthread_mutex_lock(&(queue->mutex)); int items = queue->items; pthread_mutex_unlock(&(queue->mutex)); return items; }
marshall/libcsp
src/arch/macosx/pthread_queue.c
C
lgpl-2.1
4,452
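A brief usage sketch of the bounded blocking queue implemented above. It assumes pthread_queue.h declares the functions and the PTHREAD_QUEUE_OK / PTHREAD_QUEUE_EMPTY return codes used by the implementation; the queue length, item values, and 100 ms timeouts are illustrative.

/* Illustrative use of the pthread_queue API above: a bounded queue of
 * ints, filled and drained from a single thread. Timeouts are in ms. */
#include <stdio.h>
#include "pthread_queue.h"

int main(void)
{
    pthread_queue_t *q = pthread_queue_create(4, sizeof(int));
    if (q == NULL)
        return 1;

    for (int i = 0; i < 4; i++) {
        int value = i * 10;
        if (pthread_queue_enqueue(q, &value, 100) != PTHREAD_QUEUE_OK)
            printf("enqueue %d timed out\n", i);
    }

    printf("items queued: %d\n", pthread_queue_items(q));

    /* Dequeue until the 100 ms timeout reports the queue empty. */
    int out;
    while (pthread_queue_dequeue(q, &out, 100) == PTHREAD_QUEUE_OK)
        printf("dequeued %d\n", out);

    pthread_queue_delete(q);
    return 0;
}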
#include "clar_libgit2.h" #include "git2/merge.h" #include "buffer.h" #include "merge.h" #include "../merge_helpers.h" #include "posix.h" #define TEST_REPO_PATH "merge-resolve" #define MERGE_BRANCH_OID "7cb63eed597130ba4abb87b3e544b85021905520" #define AUTOMERGEABLE_MERGED_FILE \ "this file is changed in master\n" \ "this file is automergeable\n" \ "this file is automergeable\n" \ "this file is automergeable\n" \ "this file is automergeable\n" \ "this file is automergeable\n" \ "this file is automergeable\n" \ "this file is automergeable\n" \ "this file is changed in branch\n" #define CHANGED_IN_BRANCH_FILE \ "changed in branch\n" static git_repository *repo; static git_index *repo_index; static char *unaffected[][4] = { { "added-in-master.txt", NULL }, { "changed-in-master.txt", NULL }, { "unchanged.txt", NULL }, { "added-in-master.txt", "changed-in-master.txt", NULL }, { "added-in-master.txt", "unchanged.txt", NULL }, { "changed-in-master.txt", "unchanged.txt", NULL }, { "added-in-master.txt", "changed-in-master.txt", "unchanged.txt", NULL }, { "new_file.txt", NULL }, { "new_file.txt", "unchanged.txt", NULL }, { NULL }, }; static char *affected[][5] = { { "automergeable.txt", NULL }, { "changed-in-branch.txt", NULL }, { "conflicting.txt", NULL }, { "removed-in-branch.txt", NULL }, { "automergeable.txt", "changed-in-branch.txt", NULL }, { "automergeable.txt", "conflicting.txt", NULL }, { "automergeable.txt", "removed-in-branch.txt", NULL }, { "changed-in-branch.txt", "conflicting.txt", NULL }, { "changed-in-branch.txt", "removed-in-branch.txt", NULL }, { "conflicting.txt", "removed-in-branch.txt", NULL }, { "automergeable.txt", "changed-in-branch.txt", "conflicting.txt", NULL }, { "automergeable.txt", "changed-in-branch.txt", "removed-in-branch.txt", NULL }, { "automergeable.txt", "conflicting.txt", "removed-in-branch.txt", NULL }, { "changed-in-branch.txt", "conflicting.txt", "removed-in-branch.txt", NULL }, { "automergeable.txt", "changed-in-branch.txt", "conflicting.txt", "removed-in-branch.txt", NULL }, { NULL }, }; static char *result_contents[4][6] = { { "automergeable.txt", AUTOMERGEABLE_MERGED_FILE, NULL, NULL }, { "changed-in-branch.txt", CHANGED_IN_BRANCH_FILE, NULL, NULL }, { "automergeable.txt", AUTOMERGEABLE_MERGED_FILE, "changed-in-branch.txt", CHANGED_IN_BRANCH_FILE, NULL, NULL }, { NULL } }; void test_merge_workdir_dirty__initialize(void) { repo = cl_git_sandbox_init(TEST_REPO_PATH); git_repository_index(&repo_index, repo); } void test_merge_workdir_dirty__cleanup(void) { git_index_free(repo_index); cl_git_sandbox_cleanup(); } static void set_core_autocrlf_to(git_repository *repo, bool value) { git_config *cfg; cl_git_pass(git_repository_config(&cfg, repo)); cl_git_pass(git_config_set_bool(cfg, "core.autocrlf", value)); git_config_free(cfg); } static int merge_branch(void) { git_oid their_oids[1]; git_annotated_commit *their_head; git_merge_options merge_opts = GIT_MERGE_OPTIONS_INIT; git_checkout_options checkout_opts = GIT_CHECKOUT_OPTIONS_INIT; int error; cl_git_pass(git_oid_fromstr(&their_oids[0], MERGE_BRANCH_OID)); cl_git_pass(git_annotated_commit_lookup(&their_head, repo, &their_oids[0])); checkout_opts.checkout_strategy = GIT_CHECKOUT_SAFE; error = git_merge(repo, (const git_annotated_commit **)&their_head, 1, &merge_opts, &checkout_opts); git_annotated_commit_free(their_head); return error; } static void write_files(char *files[]) { char *filename; git_buf path = GIT_BUF_INIT, content = GIT_BUF_INIT; size_t i; for (i = 0, filename = files[i]; filename; filename = 
files[++i]) { git_buf_clear(&path); git_buf_clear(&content); git_buf_printf(&path, "%s/%s", TEST_REPO_PATH, filename); git_buf_printf(&content, "This is a dirty file in the working directory!\n\n" "It will not be staged! Its filename is %s.\n", filename); cl_git_mkfile(path.ptr, content.ptr); } git_buf_free(&path); git_buf_free(&content); } static void hack_index(char *files[]) { char *filename; struct stat statbuf; git_buf path = GIT_BUF_INIT; git_index_entry *entry; size_t i; /* Update the index to suggest that checkout placed these files on * disk, keeping the object id but updating the cache, which will * emulate a Git implementation's different filter. */ for (i = 0, filename = files[i]; filename; filename = files[++i]) { git_buf_clear(&path); cl_assert(entry = (git_index_entry *) git_index_get_bypath(repo_index, filename, 0)); cl_git_pass(git_buf_printf(&path, "%s/%s", TEST_REPO_PATH, filename)); cl_git_pass(p_stat(path.ptr, &statbuf)); entry->ctime.seconds = (git_time_t)statbuf.st_ctime; entry->ctime.nanoseconds = 0; entry->mtime.seconds = (git_time_t)statbuf.st_mtime; entry->mtime.nanoseconds = 0; entry->dev = statbuf.st_dev; entry->ino = statbuf.st_ino; entry->uid = statbuf.st_uid; entry->gid = statbuf.st_gid; entry->file_size = statbuf.st_size; } git_buf_free(&path); } static void stage_random_files(char *files[]) { char *filename; size_t i; write_files(files); for (i = 0, filename = files[i]; filename; filename = files[++i]) cl_git_pass(git_index_add_bypath(repo_index, filename)); } static void stage_content(char *content[]) { git_reference *head; git_object *head_object; git_buf path = GIT_BUF_INIT; char *filename, *text; size_t i; cl_git_pass(git_repository_head(&head, repo)); cl_git_pass(git_reference_peel(&head_object, head, GIT_OBJ_COMMIT)); cl_git_pass(git_reset(repo, head_object, GIT_RESET_HARD, NULL)); for (i = 0, filename = content[i], text = content[++i]; filename && text; filename = content[++i], text = content[++i]) { git_buf_clear(&path); cl_git_pass(git_buf_printf(&path, "%s/%s", TEST_REPO_PATH, filename)); cl_git_mkfile(path.ptr, text); cl_git_pass(git_index_add_bypath(repo_index, filename)); } git_object_free(head_object); git_reference_free(head); git_buf_free(&path); } static int merge_dirty_files(char *dirty_files[]) { git_reference *head; git_object *head_object; int error; cl_git_pass(git_repository_head(&head, repo)); cl_git_pass(git_reference_peel(&head_object, head, GIT_OBJ_COMMIT)); cl_git_pass(git_reset(repo, head_object, GIT_RESET_HARD, NULL)); write_files(dirty_files); error = merge_branch(); git_object_free(head_object); git_reference_free(head); return error; } static int merge_differently_filtered_files(char *files[]) { git_reference *head; git_object *head_object; int error; cl_git_pass(git_repository_head(&head, repo)); cl_git_pass(git_reference_peel(&head_object, head, GIT_OBJ_COMMIT)); cl_git_pass(git_reset(repo, head_object, GIT_RESET_HARD, NULL)); write_files(files); hack_index(files); cl_git_pass(git_index_write(repo_index)); error = merge_branch(); git_object_free(head_object); git_reference_free(head); return error; } static int merge_staged_files(char *staged_files[]) { stage_random_files(staged_files); return merge_branch(); } void test_merge_workdir_dirty__unaffected_dirty_files_allowed(void) { char **files; size_t i; for (i = 0, files = unaffected[i]; files[0]; files = unaffected[++i]) cl_git_pass(merge_dirty_files(files)); } void test_merge_workdir_dirty__unstaged_deletes_maintained(void) { git_reference *head; git_object 
*head_object; cl_git_pass(git_repository_head(&head, repo)); cl_git_pass(git_reference_peel(&head_object, head, GIT_OBJ_COMMIT)); cl_git_pass(git_reset(repo, head_object, GIT_RESET_HARD, NULL)); cl_git_pass(p_unlink("merge-resolve/unchanged.txt")); cl_git_pass(merge_branch()); git_object_free(head_object); git_reference_free(head); } void test_merge_workdir_dirty__affected_dirty_files_disallowed(void) { char **files; size_t i; for (i = 0, files = affected[i]; files[0]; files = affected[++i]) cl_git_fail(merge_dirty_files(files)); } void test_merge_workdir_dirty__staged_files_in_index_disallowed(void) { char **files; size_t i; for (i = 0, files = unaffected[i]; files[0]; files = unaffected[++i]) cl_git_fail(merge_staged_files(files)); for (i = 0, files = affected[i]; files[0]; files = affected[++i]) cl_git_fail(merge_staged_files(files)); } void test_merge_workdir_dirty__identical_staged_files_allowed(void) { char **content; size_t i; set_core_autocrlf_to(repo, false); for (i = 0, content = result_contents[i]; content[0]; content = result_contents[++i]) { stage_content(content); git_index_write(repo_index); cl_git_pass(merge_branch()); } } void test_merge_workdir_dirty__honors_cache(void) { char **files; size_t i; for (i = 0, files = affected[i]; files[0]; files = affected[++i]) cl_git_pass(merge_differently_filtered_files(files)); }
rcorre/libgit2
tests/merge/workdir/dirty.c
C
lgpl-2.1
8,749
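Outside the clar test harness, the merge_branch() sequence exercised by the tests above reduces to a handful of libgit2 calls. The sketch below shows that sequence under stated assumptions: the function name merge_commit_into_head and the repo_path / oid_str parameters are placeholders, error handling is reduced to early exits, and it targets the same libgit2 vintage as the test (git_annotated_commit API).

/* Minimal sketch of the merge sequence used by the tests above,
 * without the clar fixtures. Repository path and commit id are
 * caller-supplied placeholders. */
#include <git2.h>

int merge_commit_into_head(const char *repo_path, const char *oid_str)
{
    git_repository *repo = NULL;
    git_annotated_commit *their_head = NULL;
    git_oid oid;
    git_merge_options merge_opts = GIT_MERGE_OPTIONS_INIT;
    git_checkout_options checkout_opts = GIT_CHECKOUT_OPTIONS_INIT;
    int error;

    checkout_opts.checkout_strategy = GIT_CHECKOUT_SAFE;

    git_libgit2_init();

    if ((error = git_repository_open(&repo, repo_path)) < 0 ||
        (error = git_oid_fromstr(&oid, oid_str)) < 0 ||
        (error = git_annotated_commit_lookup(&their_head, repo, &oid)) < 0)
        goto done;

    /* Like merge_branch() above: merge one annotated commit into HEAD,
     * leaving the result in the index/workdir for the caller to commit. */
    error = git_merge(repo, (const git_annotated_commit **)&their_head, 1,
                      &merge_opts, &checkout_opts);

done:
    git_annotated_commit_free(their_head);
    git_repository_free(repo);
    git_libgit2_shutdown();
    return error;
}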
/** @defgroup crc_file CRC @ingroup STM32F4xx @brief <b>libopencm3 STM32F4xx CRC</b> @version 1.0.0 @date 15 October 2012 LGPL License Terms @ref lgpl_license */ /* * This file is part of the libopencm3 project. * * This library is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library. If not, see <http://www.gnu.org/licenses/>. */ #include <libopencm3/stm32/crc.h> #include <libopencm3/stm32/common/crc_common_all.h>
mrnuke/libopencm3
lib/stm32/f4/crc.c
C
lgpl-3.0
982
// RUN: %clang_cc1 -triple armv7-apple-darwin9 -emit-llvm -o - %s | FileCheck %s // This isn't really testing anything ARM-specific; it's just a convenient // 32-bit platform. #define SWIFTCALL __attribute__((swiftcall)) #define OUT __attribute__((swift_indirect_result)) #define ERROR __attribute__((swift_error_result)) #define CONTEXT __attribute__((swift_context)) /*****************************************************************************/ /****************************** PARAMETER ABIS *******************************/ /*****************************************************************************/ SWIFTCALL void indirect_result_1(OUT int *arg0, OUT float *arg1) {} // CHECK-LABEL: define {{.*}} void @indirect_result_1(i32* noalias sret align 4 dereferenceable(4){{.*}}, float* noalias align 4 dereferenceable(4){{.*}}) // TODO: maybe this shouldn't suppress sret. SWIFTCALL int indirect_result_2(OUT int *arg0, OUT float *arg1) { __builtin_unreachable(); } // CHECK-LABEL: define {{.*}} i32 @indirect_result_2(i32* noalias align 4 dereferenceable(4){{.*}}, float* noalias align 4 dereferenceable(4){{.*}}) typedef struct { char array[1024]; } struct_reallybig; SWIFTCALL struct_reallybig indirect_result_3(OUT int *arg0, OUT float *arg1) { __builtin_unreachable(); } // CHECK-LABEL: define {{.*}} void @indirect_result_3({{.*}}* noalias sret {{.*}}, i32* noalias align 4 dereferenceable(4){{.*}}, float* noalias align 4 dereferenceable(4){{.*}}) SWIFTCALL void context_1(CONTEXT void *self) {} // CHECK-LABEL: define {{.*}} void @context_1(i8* swiftself SWIFTCALL void context_2(void *arg0, CONTEXT void *self) {} // CHECK-LABEL: define {{.*}} void @context_2(i8*{{.*}}, i8* swiftself SWIFTCALL void context_error_1(CONTEXT int *self, ERROR float **error) {} // CHECK-LABEL: define {{.*}} void @context_error_1(i32* swiftself{{.*}}, float** swifterror) // CHECK: [[TEMP:%.*]] = alloca float*, align 4 // CHECK: [[T0:%.*]] = load float*, float** [[ERRORARG:%.*]], align 4 // CHECK: store float* [[T0]], float** [[TEMP]], align 4 // CHECK: [[T0:%.*]] = load float*, float** [[TEMP]], align 4 // CHECK: store float* [[T0]], float** [[ERRORARG]], align 4 void test_context_error_1() { int x; float *error; context_error_1(&x, &error); } // CHECK-LABEL: define void @test_context_error_1() // CHECK: [[X:%.*]] = alloca i32, align 4 // CHECK: [[ERROR:%.*]] = alloca float*, align 4 // CHECK: [[TEMP:%.*]] = alloca swifterror float*, align 4 // CHECK: [[T0:%.*]] = load float*, float** [[ERROR]], align 4 // CHECK: store float* [[T0]], float** [[TEMP]], align 4 // CHECK: call [[SWIFTCC:swiftcc]] void @context_error_1(i32* swiftself [[X]], float** swifterror [[TEMP]]) // CHECK: [[T0:%.*]] = load float*, float** [[TEMP]], align 4 // CHECK: store float* [[T0]], float** [[ERROR]], align 4 SWIFTCALL void context_error_2(short s, CONTEXT int *self, ERROR float **error) {} // CHECK-LABEL: define {{.*}} void @context_error_2(i16{{.*}}, i32* swiftself{{.*}}, float** swifterror) /*****************************************************************************/ /********************************** LOWERING *********************************/ /*****************************************************************************/ typedef float float4 __attribute__((ext_vector_type(4))); typedef float float8 __attribute__((ext_vector_type(8))); typedef double double2 __attribute__((ext_vector_type(2))); typedef double double4 __attribute__((ext_vector_type(4))); typedef int int3 __attribute__((ext_vector_type(3))); typedef int int4 
__attribute__((ext_vector_type(4))); typedef int int5 __attribute__((ext_vector_type(5))); typedef int int8 __attribute__((ext_vector_type(8))); #define TEST(TYPE) \ SWIFTCALL TYPE return_##TYPE(void) { \ TYPE result = {}; \ return result; \ } \ SWIFTCALL void take_##TYPE(TYPE v) { \ } \ void test_##TYPE() { \ take_##TYPE(return_##TYPE()); \ } /*****************************************************************************/ /*********************************** STRUCTS *********************************/ /*****************************************************************************/ typedef struct { } struct_empty; TEST(struct_empty); // CHECK-LABEL: define {{.*}} @return_struct_empty() // CHECK: ret void // CHECK-LABEL: define {{.*}} @take_struct_empty() // CHECK: ret void typedef struct { int x; char c0; char c1; float f0; float f1; } struct_1; TEST(struct_1); // CHECK-LABEL: define {{.*}} @return_struct_1() // CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align 4 // CHECK: [[VAR:%.*]] = alloca [[REC]], align 4 // CHECK: @llvm.memset // CHECK: @llvm.memcpy // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i16, \[2 x i8\], float, float }]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load i16, i16* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3 // CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4 // CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align // CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ i32, i16, float, float }]] undef, i32 [[FIRST]], 0 // CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i16 [[SECOND]], 1 // CHECK: [[T2:%.*]] = insertvalue [[UAGG]] [[T1]], float [[THIRD]], 2 // CHECK: [[T3:%.*]] = insertvalue [[UAGG]] [[T2]], float [[FOURTH]], 3 // CHECK: ret [[UAGG]] [[T3]] // CHECK-LABEL: define {{.*}} @take_struct_1(i32, i16, float, float) // CHECK: [[V:%.*]] = alloca [[REC]], align 4 // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: store i32 %0, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: store i16 %1, i16* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3 // CHECK: store float %2, float* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4 // CHECK: store float %3, float* [[T0]], align 4 // CHECK: ret void // CHECK-LABEL: define void @test_struct_1() // CHECK: [[TMP:%.*]] = alloca [[REC]], align 4 // CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_struct_1() // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0 // CHECK: store i32 [[T1]], i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1 // CHECK: store i16 [[T1]], i16* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* 
[[CAST_TMP]], i32 0, i32 3 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 2 // CHECK: store float [[T1]], float* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 3 // CHECK: store float [[T1]], float* [[T0]], align 4 // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load i16, i16* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3 // CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4 // CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align 4 // CHECK: call [[SWIFTCC]] void @take_struct_1(i32 [[FIRST]], i16 [[SECOND]], float [[THIRD]], float [[FOURTH]]) // CHECK: ret void typedef struct { int x; char c0; __attribute__((aligned(2))) char c1; float f0; float f1; } struct_2; TEST(struct_2); // CHECK-LABEL: define {{.*}} @return_struct_2() // CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align 4 // CHECK: [[VAR:%.*]] = alloca [[REC]], align 4 // CHECK: @llvm.memcpy // CHECK: @llvm.memcpy // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i32, float, float }]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2 // CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3 // CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align // CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ i32, i32, float, float }]] undef, i32 [[FIRST]], 0 // CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i32 [[SECOND]], 1 // CHECK: [[T2:%.*]] = insertvalue [[UAGG]] [[T1]], float [[THIRD]], 2 // CHECK: [[T3:%.*]] = insertvalue [[UAGG]] [[T2]], float [[FOURTH]], 3 // CHECK: ret [[UAGG]] [[T3]] // CHECK-LABEL: define {{.*}} @take_struct_2(i32, i32, float, float) // CHECK: [[V:%.*]] = alloca [[REC]], align 4 // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: store i32 %0, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: store i32 %1, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2 // CHECK: store float %2, float* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3 // CHECK: store float %3, float* [[T0]], align 4 // CHECK: ret void // CHECK-LABEL: define void @test_struct_2() // CHECK: [[TMP:%.*]] = alloca [[REC]], align 4 // CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_struct_2() // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // 
CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0 // CHECK: store i32 [[T1]], i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1 // CHECK: store i32 [[T1]], i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 2 // CHECK: store float [[T1]], float* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 3 // CHECK: store float [[T1]], float* [[T0]], align 4 // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2 // CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3 // CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align 4 // CHECK: call [[SWIFTCC]] void @take_struct_2(i32 [[FIRST]], i32 [[SECOND]], float [[THIRD]], float [[FOURTH]]) // CHECK: ret void // There's no way to put a field randomly in the middle of an otherwise // empty storage unit in C, so that case has to be tested in C++, which // can use empty structs to introduce arbitrary padding. (In C, they end up // with size 0 and so don't affect layout.) // Misaligned data rule. typedef struct { char c0; __attribute__((packed)) float f; } struct_misaligned_1; TEST(struct_misaligned_1) // CHECK-LABEL: define {{.*}} @return_struct_misaligned_1() // CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align // CHECK: [[VAR:%.*]] = alloca [[REC]], align // CHECK: @llvm.memset // CHECK: @llvm.memcpy // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i8 }]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load i8, i8* [[T0]], align // CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ i32, i8 }]] undef, i32 [[FIRST]], 0 // CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i8 [[SECOND]], 1 // CHECK: ret [[UAGG]] [[T1]] // CHECK-LABEL: define {{.*}} @take_struct_misaligned_1(i32, i8) // CHECK: [[V:%.*]] = alloca [[REC]], align // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: store i32 %0, i32* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: store i8 %1, i8* [[T0]], align // CHECK: ret void // Too many scalars. typedef struct { int x[5]; } struct_big_1; TEST(struct_big_1) // CHECK-LABEL: define {{.*}} void @return_struct_big_1({{.*}} noalias sret // Should not be byval. 
// CHECK-LABEL: define {{.*}} void @take_struct_big_1({{.*}}*{{( %.*)?}}) /*****************************************************************************/ /********************************* TYPE MERGING ******************************/ /*****************************************************************************/ typedef union { float f; double d; } union_het_fp; TEST(union_het_fp) // CHECK-LABEL: define {{.*}} @return_union_het_fp() // CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align 4 // CHECK: [[VAR:%.*]] = alloca [[REC]], align 4 // CHECK: @llvm.memcpy // CHECK: @llvm.memcpy // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i32 }]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ i32, i32 }]] undef, i32 [[FIRST]], 0 // CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i32 [[SECOND]], 1 // CHECK: ret [[UAGG]] [[T1]] // CHECK-LABEL: define {{.*}} @take_union_het_fp(i32, i32) // CHECK: [[V:%.*]] = alloca [[REC]], align 4 // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: store i32 %0, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: store i32 %1, i32* [[T0]], align 4 // CHECK: ret void // CHECK-LABEL: define void @test_union_het_fp() // CHECK: [[TMP:%.*]] = alloca [[REC]], align 4 // CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_union_het_fp() // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0 // CHECK: store i32 [[T1]], i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1 // CHECK: store i32 [[T1]], i32* [[T0]], align 4 // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4 // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align 4 // CHECK: call [[SWIFTCC]] void @take_union_het_fp(i32 [[FIRST]], i32 [[SECOND]]) // CHECK: ret void typedef union { float f1; float f2; } union_hom_fp; TEST(union_hom_fp) // CHECK-LABEL: define void @test_union_hom_fp() // CHECK: [[TMP:%.*]] = alloca [[REC:%.*]], align 4 // CHECK: [[CALL:%.*]] = call [[SWIFTCC]] float @return_union_hom_fp() // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG:{ float }]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: store float [[CALL]], float* [[T0]], align 4 // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load float, float* [[T0]], align 4 // CHECK: call [[SWIFTCC]] void @take_union_hom_fp(float [[FIRST]]) // CHECK: ret void typedef union { float f1; float4 fv2; } union_hom_fp_partial; TEST(union_hom_fp_partial) 
// CHECK-LABEL: define void @test_union_hom_fp_partial() // CHECK: [[TMP:%.*]] = alloca [[REC:%.*]], align 16 // CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG:{ float, float, float, float }]] @return_union_hom_fp_partial() // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG:{ float, float, float, float }]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0 // CHECK: store float [[T1]], float* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1 // CHECK: store float [[T1]], float* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 2 // CHECK: store float [[T1]], float* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 3 // CHECK: store float [[T1]], float* [[T0]], align // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load float, float* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load float, float* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2 // CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3 // CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align // CHECK: call [[SWIFTCC]] void @take_union_hom_fp_partial(float [[FIRST]], float [[SECOND]], float [[THIRD]], float [[FOURTH]]) // CHECK: ret void typedef union { struct { int x, y; } f1; float4 fv2; } union_het_fpv_partial; TEST(union_het_fpv_partial) // CHECK-LABEL: define void @test_union_het_fpv_partial() // CHECK: [[TMP:%.*]] = alloca [[REC:%.*]], align 16 // CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG:{ i32, i32, float, float }]] @return_union_het_fpv_partial() // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG:{ i32, i32, float, float }]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0 // CHECK: store i32 [[T1]], i32* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1 // CHECK: store i32 [[T1]], i32* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 2 // CHECK: store float [[T1]], float* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 3 // CHECK: store float [[T1]], float* [[T0]], align // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], 
[[AGG]]* [[CAST_TMP]], i32 0, i32 2 // CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3 // CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align // CHECK: call [[SWIFTCC]] void @take_union_het_fpv_partial(i32 [[FIRST]], i32 [[SECOND]], float [[THIRD]], float [[FOURTH]]) // CHECK: ret void /*****************************************************************************/ /****************************** VECTOR LEGALIZATION **************************/ /*****************************************************************************/ TEST(int4) // CHECK-LABEL: define {{.*}} <4 x i32> @return_int4() // CHECK-LABEL: define {{.*}} @take_int4(<4 x i32> TEST(int8) // CHECK-LABEL: define {{.*}} @return_int8() // CHECK: [[RET:%.*]] = alloca [[REC:<8 x i32>]], align 32 // CHECK: [[VAR:%.*]] = alloca [[REC]], align // CHECK: store // CHECK: load // CHECK: store // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ <4 x i32>, <4 x i32> }]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align // CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ <4 x i32>, <4 x i32> }]] undef, <4 x i32> [[FIRST]], 0 // CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], <4 x i32> [[SECOND]], 1 // CHECK: ret [[UAGG]] [[T1]] // CHECK-LABEL: define {{.*}} @take_int8(<4 x i32>, <4 x i32>) // CHECK: [[V:%.*]] = alloca [[REC]], align // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: store <4 x i32> %0, <4 x i32>* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: store <4 x i32> %1, <4 x i32>* [[T0]], align // CHECK: ret void // CHECK-LABEL: define void @test_int8() // CHECK: [[TMP1:%.*]] = alloca [[REC]], align // CHECK: [[TMP2:%.*]] = alloca [[REC]], align // CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_int8() // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP1]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0 // CHECK: store <4 x i32> [[T1]], <4 x i32>* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1 // CHECK: store <4 x i32> [[T1]], <4 x i32>* [[T0]], align // CHECK: [[V:%.*]] = load [[REC]], [[REC]]* [[TMP1]], align // CHECK: store [[REC]] [[V]], [[REC]]* [[TMP2]], align // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP2]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align // CHECK: call [[SWIFTCC]] void @take_int8(<4 x i32> [[FIRST]], <4 x i32> [[SECOND]]) // CHECK: ret void TEST(int5) // CHECK-LABEL: define {{.*}} @return_int5() // CHECK: [[RET:%.*]] = alloca [[REC:<5 x i32>]], align 32 // CHECK: [[VAR:%.*]] = alloca [[REC]], align // CHECK: store // CHECK: load // CHECK: store // 
CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ <4 x i32>, i32 }]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align // CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ <4 x i32>, i32 }]] undef, <4 x i32> [[FIRST]], 0 // CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i32 [[SECOND]], 1 // CHECK: ret [[UAGG]] [[T1]] // CHECK-LABEL: define {{.*}} @take_int5(<4 x i32>, i32) // CHECK: [[V:%.*]] = alloca [[REC]], align // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: store <4 x i32> %0, <4 x i32>* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: store i32 %1, i32* [[T0]], align // CHECK: ret void // CHECK-LABEL: define void @test_int5() // CHECK: [[TMP1:%.*]] = alloca [[REC]], align // CHECK: [[TMP2:%.*]] = alloca [[REC]], align // CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_int5() // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP1]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0 // CHECK: store <4 x i32> [[T1]], <4 x i32>* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1 // CHECK: store i32 [[T1]], i32* [[T0]], align // CHECK: [[V:%.*]] = load [[REC]], [[REC]]* [[TMP1]], align // CHECK: store [[REC]] [[V]], [[REC]]* [[TMP2]], align // CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP2]] to [[AGG]]* // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0 // CHECK: [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align // CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1 // CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align // CHECK: call [[SWIFTCC]] void @take_int5(<4 x i32> [[FIRST]], i32 [[SECOND]]) // CHECK: ret void typedef struct { int x; int3 v __attribute__((packed)); } misaligned_int3; TEST(misaligned_int3) // CHECK-LABEL: define {{.*}} @take_misaligned_int3(i32, i32, i32, i32)
cd80/UtilizedLLVM
tools/clang/test/CodeGen/arm-swiftcall.c
C
unlicense
28,014
/* * Copyright (c) 2013-2021, Pelion and affiliates. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "nsconfig.h" #include <string.h> #include <ns_types.h> #include <ns_trace.h> #include "eventOS_event.h" #include "eventOS_scheduler.h" #include "eventOS_event_timer.h" #include "nsdynmemLIB.h" #include "ns_list.h" #include "randLIB.h" #include "socket_api.h" #include "net_interface.h" #include "common_functions.h" #include "libDHCPv6/libDHCPv6.h" #include "NWK_INTERFACE/Include/protocol.h" // just for protocol_core_monotonic_time #include "Common_Protocols/ip.h" #include "dhcp_service_api.h" #ifdef HAVE_DHCPV6 #define TRACE_GROUP "dhcp" #define MAX_SERVERS 20 /* Fixed-point randomisation limits for randlib_randomise_base() - RFC 3315 * says RAND is uniformly distributed between -0.1 and +0.1 */ #define RAND1_LOW 0x7333 // 1 - 0.1; minimum for "1+RAND" #define RAND1_HIGH 0x8CCD // 1 + 0.1; maximum for "1+RAND" typedef struct { dhcp_service_receive_req_cb *recv_req_cb; uint16_t instance_id; int8_t interface_id; dhcp_instance_type_e instance_type; ns_list_link_t link; } server_instance_t; typedef NS_LIST_HEAD(server_instance_t, link) server_instance_list_t; typedef struct { uint16_t instance_id; int8_t interface_id; uint8_t server_address[16]; bool relay_activated; bool add_interface_id_option; ns_list_link_t link; } relay_instance_t; typedef NS_LIST_HEAD(relay_instance_t, link) relay_instance_list_t; typedef struct { ns_address_t addr; dhcp_service_receive_resp_cb *recv_resp_cb; uint16_t instance_id; int8_t interface_id; int8_t socket; uint8_t options; void *client_obj_ptr; uint32_t msg_tr_id; uint32_t message_tr_id; uint32_t transmit_time; uint32_t first_transmit_time; uint16_t delayed_tx; uint16_t timeout; uint16_t timeout_init; uint16_t timeout_max; uint8_t retrans_max; uint8_t retrans; uint8_t *msg_ptr; uint16_t msg_len; uint8_t *relay_start; uint8_t *opt_interface_id; uint16_t opt_interface_id_length; ns_list_link_t link; } msg_tr_t; typedef NS_LIST_HEAD(msg_tr_t, link) tr_list_t; typedef struct { dhcp_relay_neighbour_cb *recv_notify_cb; int8_t interface_id; ns_list_link_t link; } relay_notify_t; typedef NS_LIST_HEAD(relay_notify_t, link) relay_notify_list_t; typedef struct { ns_address_t src_address; server_instance_list_t srv_list; relay_instance_list_t relay_list; relay_notify_list_t notify_list; tr_list_t tr_list; int8_t dhcp_server_socket; int8_t dhcp_client_socket; int8_t dhcp_relay_socket; int8_t dhcpv6_socket_service_tasklet; } dhcp_service_class_t; #define DHCPV6_SOCKET_SERVICE_TASKLET_INIT 1 #define DHCPV6_SOCKET_SERVICE_TIMER 2 #define DHCPV6_SOCKET_SERVICE_TIMER_ID 1 #define DHCPV6_SOCKET_TIMER_UPDATE_PERIOD_IN_MS 100 dhcp_service_class_t *dhcp_service = NULL; static bool dhcpv6_socket_timeout_timer_active = false; void dhcp_service_send_message(msg_tr_t *msg_tr_ptr); void DHCPv6_socket_service_tasklet(arm_event_s *event) { if (event->event_type == DHCPV6_SOCKET_SERVICE_TASKLET_INIT) { //We should define peridiocally timer service!! 
eventOS_event_timer_request(DHCPV6_SOCKET_SERVICE_TIMER_ID, DHCPV6_SOCKET_SERVICE_TIMER, dhcp_service->dhcpv6_socket_service_tasklet, DHCPV6_SOCKET_TIMER_UPDATE_PERIOD_IN_MS); dhcpv6_socket_timeout_timer_active = true; } else if (event->event_type == DHCPV6_SOCKET_SERVICE_TIMER) { if (dhcp_service_timer_tick(1)) { dhcpv6_socket_timeout_timer_active = true; eventOS_event_timer_request(DHCPV6_SOCKET_SERVICE_TIMER_ID, DHCPV6_SOCKET_SERVICE_TIMER, dhcp_service->dhcpv6_socket_service_tasklet, DHCPV6_SOCKET_TIMER_UPDATE_PERIOD_IN_MS); } else { dhcpv6_socket_timeout_timer_active = false; } } } bool dhcp_service_allocate(void) { bool retVal = false; if (dhcp_service == NULL) { dhcp_service = ns_dyn_mem_alloc(sizeof(dhcp_service_class_t)); if (dhcp_service) { ns_list_init(&dhcp_service->srv_list); ns_list_init(&dhcp_service->relay_list); ns_list_init(&dhcp_service->notify_list); ns_list_init(&dhcp_service->tr_list); dhcp_service->dhcp_client_socket = -1; dhcp_service->dhcp_server_socket = -1; dhcp_service->dhcp_relay_socket = -1; dhcp_service->dhcpv6_socket_service_tasklet = eventOS_event_handler_create(DHCPv6_socket_service_tasklet, DHCPV6_SOCKET_SERVICE_TASKLET_INIT); if (dhcp_service->dhcpv6_socket_service_tasklet < 0) { ns_dyn_mem_free(dhcp_service); dhcp_service = NULL; } else { retVal = true; } } } else { retVal = true; } return retVal; } /*Subclass instances*/ msg_tr_t *dhcp_tr_find(uint32_t msg_tr_id) { msg_tr_t *result = NULL; ns_list_foreach(msg_tr_t, cur_ptr, &dhcp_service->tr_list) { if (cur_ptr->msg_tr_id == msg_tr_id) { result = cur_ptr; } } return result; } msg_tr_t *dhcp_tr_create(void) { uint32_t tr_id; msg_tr_t *msg_ptr = NULL; msg_ptr = ns_dyn_mem_temporary_alloc(sizeof(msg_tr_t)); if (msg_ptr == NULL) { return NULL; } memset(msg_ptr, 0, sizeof(msg_tr_t)); msg_ptr->msg_ptr = NULL; msg_ptr->recv_resp_cb = NULL; tr_id = randLIB_get_32bit() & 0xffffff;// 24 bits for random // Ensure a unique non-zero transaction id for each transaction while (tr_id == 0 || dhcp_tr_find(tr_id) != NULL) { tr_id = (tr_id + 1) & 0xffffff; } msg_ptr->msg_tr_id = tr_id; ns_list_add_to_start(&dhcp_service->tr_list, msg_ptr); return msg_ptr; } void dhcp_tr_delete(msg_tr_t *msg_ptr) { if (msg_ptr != NULL) { ns_list_remove(&dhcp_service->tr_list, msg_ptr); ns_dyn_mem_free(msg_ptr->msg_ptr); ns_dyn_mem_free(msg_ptr); } return; } void dhcp_tr_set_retry_timers(msg_tr_t *msg_ptr, uint8_t msg_type) { if (msg_ptr != NULL) { if (msg_type == DHCPV6_SOLICATION_TYPE) { msg_ptr->timeout_init = SOL_TIMEOUT; msg_ptr->timeout_max = SOL_MAX_RT; msg_ptr->retrans_max = 0; } else if (msg_type == DHCPV6_RENEW_TYPE) { msg_ptr->timeout_init = REN_TIMEOUT; msg_ptr->timeout_max = REN_MAX_RT; msg_ptr->retrans_max = 0; } else if (msg_type == DHCPV6_LEASEQUERY_TYPE) { msg_ptr->timeout_init = LQ_TIMEOUT; msg_ptr->timeout_max = LQ_MAX_RT; msg_ptr->retrans_max = LQ_MAX_RC; } else { msg_ptr->timeout_init = REL_TIMEOUT; msg_ptr->timeout_max = 0; msg_ptr->retrans_max = REL_MAX_RC; } // Convert from seconds to 1/10s ticks, with initial randomisation factor msg_ptr->timeout_init = randLIB_randomise_base(msg_ptr->timeout_init * 10, RAND1_LOW, RAND1_HIGH); msg_ptr->timeout_max *= 10; msg_ptr->timeout = msg_ptr->timeout_init; if (!dhcpv6_socket_timeout_timer_active) { eventOS_event_timer_request(DHCPV6_SOCKET_SERVICE_TIMER_ID, DHCPV6_SOCKET_SERVICE_TIMER, dhcp_service->dhcpv6_socket_service_tasklet, DHCPV6_SOCKET_TIMER_UPDATE_PERIOD_IN_MS); dhcpv6_socket_timeout_timer_active = true; } } return; } server_instance_t 
*dhcp_service_client_find(uint16_t instance_id) { server_instance_t *result = NULL; ns_list_foreach(server_instance_t, cur_ptr, &dhcp_service->srv_list) { if (cur_ptr->instance_id == instance_id) { result = cur_ptr; } } return result; } static uint16_t dhcp_service_relay_interface_get(int8_t interface_id) { ns_list_foreach(server_instance_t, cur_ptr, &dhcp_service->srv_list) { if (cur_ptr->interface_id == interface_id && cur_ptr->instance_type == DHCP_INTANCE_RELAY_AGENT) { return cur_ptr->instance_id; } } return 0; } static relay_notify_t *dhcp_service_notify_find(int8_t interface_id) { relay_notify_t *result = NULL; ns_list_foreach(relay_notify_t, cur_ptr, &dhcp_service->notify_list) { if (cur_ptr->interface_id == interface_id) { result = cur_ptr; } } return result; } static relay_instance_t *dhcp_service_relay_find(uint16_t instance_id) { relay_instance_t *result = NULL; ns_list_foreach(relay_instance_t, cur_ptr, &dhcp_service->relay_list) { if (cur_ptr->instance_id == instance_id) { result = cur_ptr; } } return result; } static relay_instance_t *dhcp_service_relay_interface(int8_t interface_id) { relay_instance_t *result = NULL; ns_list_foreach(relay_instance_t, cur_ptr, &dhcp_service->relay_list) { if (cur_ptr->interface_id == interface_id) { result = cur_ptr; } } return result; } void recv_dhcp_server_msg(void *cb_res) { socket_callback_t *sckt_data; server_instance_t *srv_ptr = NULL; msg_tr_t *msg_tr_ptr; uint8_t *msg_ptr, *allocated_ptr; uint16_t msg_len; dhcpv6_relay_msg_t relay_msg; sckt_data = cb_res; if (sckt_data->event_type != SOCKET_DATA || sckt_data->d_len < 4) { return; } relay_notify_t *neigh_notify = NULL; tr_debug("dhcp Server recv request"); msg_tr_ptr = dhcp_tr_create(); msg_ptr = ns_dyn_mem_temporary_alloc(sckt_data->d_len); allocated_ptr = msg_ptr; if (msg_ptr == NULL || msg_tr_ptr == NULL) { // read actual message tr_error("Out of resources"); goto cleanup; } msg_len = socket_read(sckt_data->socket_id, &msg_tr_ptr->addr, msg_ptr, sckt_data->d_len); uint8_t msg_type = *msg_ptr; if (msg_type == DHCPV6_RELAY_FORWARD) { if (!libdhcpv6_relay_msg_read(msg_ptr, msg_len, &relay_msg)) { tr_error("Relay forward not correct"); goto cleanup; } //Update Source and data msg_tr_ptr->relay_start = msg_ptr; msg_tr_ptr->opt_interface_id = relay_msg.relay_interface_id.msg_ptr; msg_tr_ptr->opt_interface_id_length = relay_msg.relay_interface_id.len; memcpy(msg_tr_ptr->addr.address, relay_msg.peer_address, 16); msg_ptr = relay_msg.relay_options.msg_ptr; msg_len = relay_msg.relay_options.len; msg_type = *msg_ptr; } else if (msg_type == DHCPV6_RELAY_REPLY) { tr_error("Relay reply drop at server"); goto cleanup; } else { //Search only for direct messages here neigh_notify = dhcp_service_notify_find(sckt_data->interface_id); } //TODO use real function from lib also call validity check msg_tr_ptr->message_tr_id = common_read_24_bit(&msg_ptr[1]); if (0 != libdhcpv6_message_malformed_check(msg_ptr, msg_len)) { tr_error("Malformed packet"); goto cleanup; } if (neigh_notify && neigh_notify->recv_notify_cb) { neigh_notify->recv_notify_cb(sckt_data->interface_id, msg_tr_ptr->addr.address); } msg_tr_ptr->socket = sckt_data->socket_id; // call all receivers until found. 
ns_list_foreach(server_instance_t, cur_ptr, &dhcp_service->srv_list) { if (cur_ptr->interface_id == sckt_data->interface_id && cur_ptr->recv_req_cb != NULL) { msg_tr_ptr->instance_id = cur_ptr->instance_id; msg_tr_ptr->interface_id = sckt_data->interface_id; if ((RET_MSG_ACCEPTED == cur_ptr->recv_req_cb(cur_ptr->instance_id, msg_tr_ptr->msg_tr_id, msg_type, msg_ptr + 4, msg_len - 4))) { // should not modify pointers but library requires. msg_tr_ptr = NULL; srv_ptr = cur_ptr; break; } } } cleanup: dhcp_tr_delete(msg_tr_ptr); ns_dyn_mem_free(allocated_ptr); if (srv_ptr == NULL) { //no owner found tr_warn("No handler for this message found"); } return; } void recv_dhcp_relay_msg(void *cb_res) { socket_callback_t *sckt_data; uint16_t msg_len; sckt_data = cb_res; if (sckt_data->event_type != SOCKET_DATA || sckt_data->d_len < 4) { return; } protocol_interface_info_entry_t *interface_ptr = protocol_stack_interface_info_get_by_id(sckt_data->interface_id); relay_instance_t *relay_srv = dhcp_service_relay_interface(sckt_data->interface_id); if (!interface_ptr || !relay_srv || !relay_srv->relay_activated) { return; } ns_address_t src_address; //Relay vector added space for relay frame + Interface ID uint8_t relay_frame[DHCPV6_RELAY_LENGTH + 4 + 5]; uint8_t *socket_data = ns_dyn_mem_temporary_alloc(sckt_data->d_len); if (socket_data == NULL) { // read actual message tr_error("Out of resources"); goto cleanup; } ns_msghdr_t msghdr; ns_iovec_t msg_data; msg_data.iov_base = socket_data; msg_data.iov_len = sckt_data->d_len; //Set messages name buffer msghdr.msg_name = &src_address; msghdr.msg_namelen = sizeof(src_address); msghdr.msg_iov = &msg_data; msghdr.msg_iovlen = 1; msghdr.msg_control = NULL; msghdr.msg_controllen = 0; msg_len = socket_recvmsg(sckt_data->socket_id, &msghdr, NS_MSG_LEGACY0); tr_debug("dhcp Relay recv msg"); //Parse type uint8_t msg_type = *socket_data; int16_t tc = 0; if (msg_type == DHCPV6_RELAY_FORWARD) { tr_error("Drop not supported DHCPv6 forward at Agent"); goto cleanup; } else if (msg_type == DHCPV6_RELAY_REPLY) { //Parse and validate Relay dhcpv6_relay_msg_t relay_msg; if (!libdhcpv6_relay_msg_read(socket_data, msg_len, &relay_msg)) { tr_error("Not valid relay"); goto cleanup; } if (0 != libdhcpv6_message_malformed_check(relay_msg.relay_options.msg_ptr, relay_msg.relay_options.len)) { tr_error("Malformed packet"); goto cleanup; } //Copy DST address memcpy(src_address.address, relay_msg.peer_address, 16); src_address.type = ADDRESS_IPV6; src_address.identifier = DHCPV6_CLIENT_PORT; msghdr.msg_iov = &msg_data; msghdr.msg_iovlen = 1; msg_data.iov_base = relay_msg.relay_options.msg_ptr; msg_data.iov_len = relay_msg.relay_options.len; tr_debug("Forward Original relay msg to client"); } else { if (0 != libdhcpv6_message_malformed_check(socket_data, msg_len)) { tr_error("Malformed packet"); goto cleanup; } uint8_t gp_address[16]; //Get blobal address from interface if (addr_interface_select_source(interface_ptr, gp_address, relay_srv->server_address, 0) != 0) { // No global prefix available tr_error("No GP address"); goto cleanup; } ns_iovec_t msg_iov[2]; uint8_t *ptr = relay_frame; //Build //ADD relay frame vector front of original data msghdr.msg_iov = &msg_iov[0]; msg_iov[0].iov_base = relay_frame; msghdr.msg_iovlen = 2; //SET Original Data msg_iov[1].iov_base = socket_data; msg_iov[1].iov_len = msg_len; ptr = libdhcpv6_dhcp_relay_msg_write(ptr, DHCPV6_RELAY_FORWARD, 0, src_address.address, gp_address); if (relay_srv->add_interface_id_option) { ptr = 
libdhcpv6_option_interface_id_write(ptr, sckt_data->interface_id); } ptr = libdhcpv6_dhcp_option_header_write(ptr, DHCPV6_OPTION_RELAY, msg_len); //Update length of relay vector msg_iov[0].iov_len = ptr - relay_frame; //Update Neighbour table if necessary relay_notify_t *neigh_notify = dhcp_service_notify_find(sckt_data->interface_id); if (neigh_notify && neigh_notify->recv_notify_cb) { neigh_notify->recv_notify_cb(sckt_data->interface_id, src_address.address); } //Copy DST address memcpy(src_address.address, relay_srv->server_address, 16); src_address.type = ADDRESS_IPV6; src_address.identifier = DHCPV6_SERVER_PORT; tr_debug("Forward Client msg to server"); tc = IP_DSCP_CS6 << IP_TCLASS_DSCP_SHIFT; } socket_setsockopt(sckt_data->socket_id, SOCKET_IPPROTO_IPV6, SOCKET_IPV6_TCLASS, &tc, sizeof(tc)); socket_sendmsg(sckt_data->socket_id, &msghdr, NS_MSG_LEGACY0); cleanup: ns_dyn_mem_free(socket_data); return; } void recv_dhcp_client_msg(void *cb_res) { ns_address_t address; socket_callback_t *sckt_data; msg_tr_t *msg_tr_ptr = NULL; uint8_t *msg_ptr = NULL; int16_t msg_len = 0; uint_fast24_t tr_id = 0; int retVal = RET_MSG_ACCEPTED; sckt_data = cb_res; if (sckt_data->event_type != SOCKET_DATA || sckt_data->d_len < 4) { return; } tr_debug("dhcp recv response message"); // read actual message msg_ptr = ns_dyn_mem_temporary_alloc(sckt_data->d_len); if (msg_ptr == NULL) { tr_error("Out of memory"); goto cleanup; } msg_len = socket_read(sckt_data->socket_id, &address, msg_ptr, sckt_data->d_len); tr_id = common_read_24_bit(&msg_ptr[1]); msg_tr_ptr = dhcp_tr_find(tr_id); if (msg_tr_ptr == NULL) { tr_error("invalid tr id"); goto cleanup; } if (0 != libdhcpv6_message_malformed_check(msg_ptr, msg_len)) { msg_tr_ptr->recv_resp_cb(msg_tr_ptr->instance_id, msg_tr_ptr->client_obj_ptr, 0, NULL, 0); tr_error("Malformed packet"); goto cleanup; } // read msg tr id from message and find transaction. 
and then instance // TODO use real function from dhcp lib if (msg_tr_ptr != NULL && msg_tr_ptr->recv_resp_cb) { // call receive callback should not modify pointers but library requires retVal = msg_tr_ptr->recv_resp_cb(msg_tr_ptr->instance_id, msg_tr_ptr->client_obj_ptr, *msg_ptr, msg_ptr + 4, msg_len - 4); } else { tr_error("no receiver for this message found"); } cleanup: ns_dyn_mem_free(msg_ptr); if (retVal != RET_MSG_WAIT_ANOTHER) { //Transaction is not killed yet dhcp_tr_delete(dhcp_tr_find(tr_id)); } return ; } uint16_t dhcp_service_init(int8_t interface_id, dhcp_instance_type_e instance_type, dhcp_service_receive_req_cb *receive_req_cb) { uint16_t id = 1; server_instance_t *srv_ptr; if (!dhcp_service_allocate()) { tr_error("dhcp Sockets data base alloc fail"); return 0; } if (instance_type == DHCP_INSTANCE_SERVER && dhcp_service->dhcp_server_socket < 0) { if (dhcp_service->dhcp_relay_socket >= 0) { tr_error("dhcp Server socket can't open because Agent open already"); } dhcp_service->dhcp_server_socket = socket_open(SOCKET_UDP, DHCPV6_SERVER_PORT, recv_dhcp_server_msg); } if (instance_type == DHCP_INTANCE_RELAY_AGENT && dhcp_service->dhcp_relay_socket < 0) { if (dhcp_service->dhcp_server_socket >= 0) { tr_error("dhcp Relay agent can't open because server open already"); } dhcp_service->dhcp_relay_socket = socket_open(SOCKET_UDP, DHCPV6_SERVER_PORT, recv_dhcp_relay_msg); } if (instance_type == DHCP_INSTANCE_CLIENT && dhcp_service->dhcp_client_socket < 0) { dhcp_service->dhcp_client_socket = socket_open(SOCKET_UDP, DHCPV6_CLIENT_PORT, recv_dhcp_client_msg); } if (instance_type == DHCP_INSTANCE_SERVER && dhcp_service->dhcp_server_socket < 0) { tr_error("No sockets available for DHCP server"); return 0; } if (instance_type == DHCP_INSTANCE_CLIENT && dhcp_service->dhcp_client_socket < 0) { tr_error("No sockets available for DHCP client"); return 0; } if (instance_type == DHCP_INTANCE_RELAY_AGENT) { if (dhcp_service->dhcp_relay_socket < 0) { tr_error("No sockets available for DHCP server"); } uint16_t temp_id = dhcp_service_relay_interface_get(interface_id); if (temp_id) { return temp_id; } } for (; id < MAX_SERVERS; id++) { if (dhcp_service_client_find(id) == NULL) { break; } } srv_ptr = ns_dyn_mem_alloc(sizeof(server_instance_t)); if (id == MAX_SERVERS || srv_ptr == NULL) { tr_error("Out of server instances"); ns_dyn_mem_free(srv_ptr); return 0; } if (instance_type == DHCP_INTANCE_RELAY_AGENT) { //Allocate Realay Agent relay_instance_t *relay_srv = ns_dyn_mem_alloc(sizeof(relay_instance_t)); if (!relay_srv) { tr_error("Out of realy instances"); ns_dyn_mem_free(srv_ptr); return 0; } ns_list_add_to_start(&dhcp_service->relay_list, relay_srv); relay_srv->instance_id = id; relay_srv->interface_id = interface_id; relay_srv->relay_activated = false; relay_srv->add_interface_id_option = false; } ns_list_add_to_start(&dhcp_service->srv_list, srv_ptr); srv_ptr->instance_id = id; srv_ptr->instance_type = instance_type; srv_ptr->interface_id = interface_id; srv_ptr->recv_req_cb = receive_req_cb; return srv_ptr->instance_id; } void dhcp_service_relay_instance_enable(uint16_t instance, uint8_t *server_address) { relay_instance_t *relay_srv = dhcp_service_relay_find(instance); if (relay_srv) { relay_srv->relay_activated = true; memcpy(relay_srv->server_address, server_address, 16); } } void dhcp_service_relay_interface_id_option_enable(uint16_t instance, bool enable) { relay_instance_t *relay_srv = dhcp_service_relay_find(instance); if (relay_srv) { relay_srv->add_interface_id_option = enable; } } 
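/*
 * Usage sketch (illustrative, not part of the module): registering a client
 * instance and issuing one request. The example_* names are hypothetical and
 * the callback signature is inferred from the recv_resp_cb call sites above;
 * dhcp_service_init, dhcp_service_send_req, DHCP_INSTANCE_CLIENT and
 * RET_MSG_ACCEPTED are the real names used in this file. Note that the
 * service takes ownership of msg_ptr (it is freed with the transaction) and
 * overwrites bytes 1..3 of the message with the transaction id.
 */
static int example_resp_cb(uint16_t instance_id, void *ptr,
                           uint8_t msg_type, uint8_t *msg, uint16_t msg_len)
{
    (void)instance_id; (void)ptr; (void)msg_type; (void)msg; (void)msg_len;
    return RET_MSG_ACCEPTED; /* lets the service delete the transaction */
}

static void example_start(int8_t interface_id, const uint8_t server[16])
{
    uint16_t instance = dhcp_service_init(interface_id, DHCP_INSTANCE_CLIENT, NULL);
    uint8_t *msg = ns_dyn_mem_temporary_alloc(64); /* ownership moves to the service */
    if (!instance || !msg) {
        ns_dyn_mem_free(msg);
        return;
    }
    memset(msg, 0, 64);
    msg[0] = DHCPV6_SOLICATION_TYPE; /* type byte; tr-id is written into [1..3] */
    (void)dhcp_service_send_req(instance, 0, NULL, server, msg, 64,
                                example_resp_cb, 0);
}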
uint8_t *dhcp_service_relay_global_addres_get(uint16_t instance) { relay_instance_t *relay_srv = dhcp_service_relay_find(instance); if (!relay_srv || !relay_srv->relay_activated) { return NULL; } return relay_srv->server_address; } void dhcp_service_delete(uint16_t instance) { server_instance_t *srv_ptr; if (dhcp_service == NULL) { return; } srv_ptr = dhcp_service_client_find(instance); //TODO delete all transactions if (srv_ptr != NULL) { ns_list_remove(&dhcp_service->srv_list, srv_ptr); if (srv_ptr->instance_type == DHCP_INTANCE_RELAY_AGENT) { //Free relay service relay_instance_t *relay = dhcp_service_relay_find(instance); if (relay) { ns_list_remove(&dhcp_service->relay_list, relay); ns_dyn_mem_free(relay); } } ns_dyn_mem_free(srv_ptr); } ns_list_foreach_safe(msg_tr_t, cur_ptr, &dhcp_service->tr_list) { if (cur_ptr->instance_id == instance) { dhcp_tr_delete(cur_ptr); } } int8_t server_instances = 0, client_instances = 0, relay_instances = 0; ns_list_foreach(server_instance_t, srv, &dhcp_service->srv_list) { if (srv->instance_type == DHCP_INSTANCE_SERVER) { ++server_instances; } else if (srv->instance_type == DHCP_INSTANCE_CLIENT) { ++client_instances; } else if (srv->instance_type == DHCP_INTANCE_RELAY_AGENT) { ++relay_instances; } } if ((server_instances == 0 && relay_instances == 0) && dhcp_service->dhcp_server_socket > -1) { socket_close(dhcp_service->dhcp_server_socket); dhcp_service->dhcp_server_socket = -1; } if (client_instances == 0 && dhcp_service->dhcp_client_socket > -1) { socket_close(dhcp_service->dhcp_client_socket); dhcp_service->dhcp_client_socket = -1; } return; } int dhcp_service_send_resp(uint32_t msg_tr_id, uint8_t options, uint8_t *msg_ptr, uint16_t msg_len) { tr_debug("Send DHCPv6 response"); msg_tr_t *msg_tr_ptr; server_instance_t *srv_instance; msg_tr_ptr = dhcp_tr_find(msg_tr_id); if (msg_tr_ptr == NULL) { tr_error("msg_tr_id not found"); return -1; } srv_instance = dhcp_service_client_find(msg_tr_ptr->instance_id); if (srv_instance == NULL) { tr_error("Srv Instance not found"); return -1; } ns_dyn_mem_free(msg_tr_ptr->msg_ptr); msg_tr_ptr->msg_ptr = msg_ptr; msg_tr_ptr->msg_len = msg_len; msg_tr_ptr->options = options; // set the received transaction id to message. 
common_write_24_bit(msg_tr_ptr->message_tr_id, &msg_tr_ptr->msg_ptr[1]); dhcp_service_send_message(msg_tr_ptr); msg_tr_ptr->msg_ptr = NULL; // pointer is the responsibility of client dhcp_tr_delete(msg_tr_ptr); return 0; } uint32_t dhcp_service_send_req(uint16_t instance_id, uint8_t options, void *ptr, const uint8_t addr[static 16], uint8_t *msg_ptr, uint16_t msg_len, dhcp_service_receive_resp_cb *receive_resp_cb, uint16_t delay_tx) { tr_debug("Send DHCPv6 request"); msg_tr_t *msg_tr_ptr; server_instance_t *srv_ptr; srv_ptr = dhcp_service_client_find(instance_id); msg_tr_ptr = dhcp_tr_create(); if (msg_tr_ptr == NULL || srv_ptr == NULL || msg_ptr == NULL || receive_resp_cb == NULL || msg_len < 5) { tr_error("Request sending failed"); return 0; } msg_tr_ptr->msg_ptr = msg_ptr; msg_tr_ptr->msg_len = msg_len; msg_tr_ptr->options = options; msg_tr_ptr->client_obj_ptr = ptr; memcpy(msg_tr_ptr->addr.address, addr, 16); msg_tr_ptr->addr.identifier = DHCPV6_SERVER_PORT; msg_tr_ptr->addr.type = ADDRESS_IPV6; msg_tr_ptr->interface_id = srv_ptr->interface_id; msg_tr_ptr->instance_id = instance_id; msg_tr_ptr->socket = dhcp_service->dhcp_client_socket; msg_tr_ptr->recv_resp_cb = receive_resp_cb; msg_tr_ptr->delayed_tx = delay_tx; msg_tr_ptr->first_transmit_time = 0; msg_tr_ptr->transmit_time = 0; dhcp_tr_set_retry_timers(msg_tr_ptr, msg_tr_ptr->msg_ptr[0]); common_write_24_bit(msg_tr_ptr->msg_tr_id, &msg_tr_ptr->msg_ptr[1]); dhcp_service_send_message(msg_tr_ptr); return msg_tr_ptr->msg_tr_id; } void dhcp_service_set_retry_timers(uint32_t msg_tr_id, uint16_t timeout_init, uint16_t timeout_max, uint8_t retrans_max) { msg_tr_t *msg_tr_ptr; msg_tr_ptr = dhcp_tr_find(msg_tr_id); if (msg_tr_ptr != NULL) { msg_tr_ptr->timeout_init = randLIB_randomise_base(timeout_init * 10, RAND1_LOW, RAND1_HIGH); msg_tr_ptr->timeout = msg_tr_ptr->timeout_init; msg_tr_ptr->timeout_max = timeout_max * 10; msg_tr_ptr->retrans_max = retrans_max; } return; } void dhcp_service_update_server_address(uint32_t msg_tr_id, uint8_t *server_address) { msg_tr_t *msg_tr_ptr; msg_tr_ptr = dhcp_tr_find(msg_tr_id); if (msg_tr_ptr != NULL) { memcpy(msg_tr_ptr->addr.address, server_address, 16); } } uint32_t dhcp_service_rtt_get(uint32_t msg_tr_id) { msg_tr_t *msg_tr_ptr = dhcp_tr_find(msg_tr_id); if (msg_tr_ptr && msg_tr_ptr->transmit_time) { return protocol_core_monotonic_time - msg_tr_ptr->transmit_time; } return 0; } void dhcp_service_req_remove(uint32_t msg_tr_id) { if (dhcp_service) { dhcp_tr_delete(dhcp_tr_find(msg_tr_id)); } return; } void dhcp_service_req_remove_all(void *msg_class_ptr) { if (dhcp_service) { ns_list_foreach_safe(msg_tr_t, cur_ptr, &dhcp_service->tr_list) { if (cur_ptr->client_obj_ptr == msg_class_ptr) { dhcp_tr_delete(cur_ptr); } } } } void dhcp_service_send_message(msg_tr_t *msg_tr_ptr) { int8_t retval; int16_t multicast_hop_limit = -1; const uint32_t address_pref = SOCKET_IPV6_PREFER_SRC_6LOWPAN_SHORT; dhcp_options_msg_t elapsed_time; if (msg_tr_ptr->first_transmit_time && libdhcpv6_message_option_discover((msg_tr_ptr->msg_ptr + 4), (msg_tr_ptr->msg_len - 4), DHCPV6_ELAPSED_TIME_OPTION, &elapsed_time) == 0 && elapsed_time.len == 2) { uint32_t t = protocol_core_monotonic_time - msg_tr_ptr->first_transmit_time; // time in 1/10s ticks uint16_t cs; if (t > 0xffff / 10) { cs = 0xffff; } else { cs = (uint16_t) t * 10; } common_write_16_bit(cs, elapsed_time.msg_ptr); } if ((msg_tr_ptr->options & TX_OPT_USE_SHORT_ADDR) == TX_OPT_USE_SHORT_ADDR) { socket_setsockopt(msg_tr_ptr->socket, SOCKET_IPPROTO_IPV6, 
SOCKET_IPV6_ADDR_PREFERENCES, &address_pref, sizeof address_pref); } if ((msg_tr_ptr->options & TX_OPT_MULTICAST_HOP_LIMIT_64) == TX_OPT_MULTICAST_HOP_LIMIT_64) { multicast_hop_limit = 64; } socket_setsockopt(msg_tr_ptr->socket, SOCKET_IPPROTO_IPV6, SOCKET_IPV6_MULTICAST_HOPS, &multicast_hop_limit, sizeof multicast_hop_limit); socket_setsockopt(msg_tr_ptr->socket, SOCKET_IPPROTO_IPV6, SOCKET_INTERFACE_SELECT, &msg_tr_ptr->interface_id, sizeof(int8_t)); if (msg_tr_ptr->relay_start) { //Build Relay Reply only server do this int16_t tc = IP_DSCP_CS6 << IP_TCLASS_DSCP_SHIFT; socket_setsockopt(msg_tr_ptr->socket, SOCKET_IPPROTO_IPV6, SOCKET_IPV6_TCLASS, &tc, sizeof(tc)); ns_iovec_t data_vector[4]; uint8_t relay_header[4]; libdhcpv6_dhcp_option_header_write(relay_header, DHCPV6_OPTION_RELAY, msg_tr_ptr->msg_len); ns_msghdr_t msghdr; msghdr.msg_iovlen = 0; memcpy(msg_tr_ptr->addr.address, msg_tr_ptr->relay_start + 2, 16); msg_tr_ptr->addr.identifier = DHCPV6_SERVER_PORT; //SET IOV vectors //Relay Reply data_vector[msghdr.msg_iovlen].iov_base = (void *) msg_tr_ptr->relay_start; data_vector[msghdr.msg_iovlen].iov_len = DHCPV6_RELAY_LENGTH; msghdr.msg_iovlen++; if (msg_tr_ptr->opt_interface_id) { data_vector[msghdr.msg_iovlen].iov_base = (void *)(msg_tr_ptr->opt_interface_id - 4); data_vector[msghdr.msg_iovlen].iov_len = msg_tr_ptr->opt_interface_id_length + 4; msghdr.msg_iovlen++; } //Relay reply header data_vector[msghdr.msg_iovlen].iov_base = (void *) relay_header; data_vector[msghdr.msg_iovlen].iov_len = 4; msghdr.msg_iovlen++; //DHCPV normal message vector data_vector[msghdr.msg_iovlen].iov_base = (void *) msg_tr_ptr->msg_ptr; data_vector[msghdr.msg_iovlen].iov_len = msg_tr_ptr->msg_len; msghdr.msg_iovlen++; //Set message name msghdr.msg_name = (void *) &msg_tr_ptr->addr; msghdr.msg_namelen = sizeof(ns_address_t); msghdr.msg_iov = &data_vector[0]; //No ancillary data msghdr.msg_control = NULL; msghdr.msg_controllen = 0; uint8_t *ptr = msg_tr_ptr->relay_start; *ptr = DHCPV6_RELAY_REPLY; if (msg_tr_ptr->delayed_tx) { retval = 0; } else { retval = socket_sendmsg(msg_tr_ptr->socket, &msghdr, NS_MSG_LEGACY0); } } else { if (msg_tr_ptr->delayed_tx) { retval = 0; } else { int16_t tc = 0; socket_setsockopt(msg_tr_ptr->socket, SOCKET_IPPROTO_IPV6, SOCKET_IPV6_TCLASS, &tc, sizeof(tc)); retval = socket_sendto(msg_tr_ptr->socket, &msg_tr_ptr->addr, msg_tr_ptr->msg_ptr, msg_tr_ptr->msg_len); msg_tr_ptr->transmit_time = protocol_core_monotonic_time ? protocol_core_monotonic_time : 1; if (msg_tr_ptr->first_transmit_time == 0 && retval == 0) { //Mark first pushed message timestamp msg_tr_ptr->first_transmit_time = protocol_core_monotonic_time ? protocol_core_monotonic_time : 1; } } } if (retval != 0) { tr_warn("dhcp service socket_sendto fails: %i", retval); } else { tr_info("dhcp service socket_sendto %s", trace_ipv6(msg_tr_ptr->addr.address)); } } bool dhcp_service_timer_tick(uint16_t ticks) { bool activeTimerNeed = false; ns_list_foreach_safe(msg_tr_t, cur_ptr, &dhcp_service->tr_list) { if (cur_ptr->delayed_tx) { activeTimerNeed = true; if (cur_ptr->delayed_tx <= ticks) { cur_ptr->delayed_tx = 0; dhcp_service_send_message(cur_ptr); } else { cur_ptr->delayed_tx -= ticks; } continue; } if (cur_ptr->timeout == 0) { continue; } if (cur_ptr->timeout <= ticks) { activeTimerNeed = true; cur_ptr->retrans++; if (cur_ptr->retrans_max != 0 && cur_ptr->retrans >= cur_ptr->retrans_max) { // retransmission count exceeded. 
cur_ptr->recv_resp_cb(cur_ptr->instance_id, cur_ptr->client_obj_ptr, 0, NULL, 0); dhcp_tr_delete(cur_ptr); continue; } dhcp_service_send_message(cur_ptr); // RFC 3315 says: // RT = 2*RTprev + RAND*RTprev, // We calculate this as // RT = RTprev + (1+RAND)*RTprev cur_ptr->timeout = cur_ptr->timeout_init + randLIB_randomise_base(cur_ptr->timeout_init, RAND1_LOW, RAND1_HIGH); // Catch 16-bit integer overflow if (cur_ptr->timeout < cur_ptr->timeout_init) { cur_ptr->timeout = 0xFFFF; } // Check against MRT if (cur_ptr->timeout_max != 0 && cur_ptr->timeout > cur_ptr->timeout_max) { cur_ptr->timeout = randLIB_randomise_base(cur_ptr->timeout_max, RAND1_LOW, RAND1_HIGH); } cur_ptr->timeout_init = cur_ptr->timeout; } else { cur_ptr->timeout -= ticks; activeTimerNeed = true; } } return activeTimerNeed; } int dhcp_service_link_local_rx_cb_set(int8_t interface_id, dhcp_relay_neighbour_cb *notify_cb) { if (dhcp_service == NULL) { return -1; } relay_notify_t *notify_srv = dhcp_service_notify_find(interface_id); if (notify_srv) { notify_srv->recv_notify_cb = notify_cb; return 0; } notify_srv = ns_dyn_mem_alloc(sizeof(relay_notify_t)); if (!notify_srv) { return -1; } ns_list_add_to_start(&dhcp_service->notify_list, notify_srv); notify_srv->recv_notify_cb = notify_cb; notify_srv->interface_id = interface_id; return 0; } #else uint16_t dhcp_service_init(int8_t interface_id, dhcp_instance_type_e instance_type, dhcp_service_receive_req_cb *receive_req_cb) { (void)interface_id; (void)instance_type; (void)receive_req_cb; return 0; } void dhcp_service_delete(uint16_t instance) { (void)instance; } void dhcp_service_relay_instance_enable(uint16_t instance, uint8_t *server_address) { (void)instance; (void)server_address; } void dhcp_service_relay_interface_id_option_enable(uint16_t instance, bool enable) { (void)instance; (void)enable; } int dhcp_service_send_resp(uint32_t msg_tr_id, uint8_t options, uint8_t *msg_ptr, uint16_t msg_len) { (void)msg_tr_id; (void)options; (void)msg_ptr; (void)msg_len; return -1; } uint32_t dhcp_service_send_req(uint16_t instance_id, uint8_t options, void *ptr, const uint8_t addr[static 16], uint8_t *msg_ptr, uint16_t msg_len, dhcp_service_receive_resp_cb *receive_resp_cb, uint16_t delay_tx) { (void)instance_id; (void)options; (void)ptr; (void)addr; (void)msg_ptr; (void)msg_len; (void)receive_resp_cb; (void)delay_tx; return 0; } void dhcp_service_set_retry_timers(uint32_t msg_tr_id, uint16_t timeout_init, uint16_t timeout_max, uint8_t retrans_max) { (void)msg_tr_id; (void)timeout_init; (void)timeout_max; (void)retrans_max; } void dhcp_service_req_remove(uint32_t msg_tr_id) { (void)msg_tr_id; } bool dhcp_service_timer_tick(uint16_t ticks) { (void)ticks; return false; } void dhcp_service_req_remove_all(void *msg_class_ptr) { (void)msg_class_ptr; } int dhcp_service_link_local_rx_cb_set(int8_t interface_id, dhcp_relay_neighbour_cb *notify_cb) { (void) interface_id; (void) notify_cb; return -1; } #endif
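/*
 * Worked example of the retransmission maths in dhcp_service_timer_tick()
 * above (illustrative, not part of the module): RAND1_LOW and RAND1_HIGH are
 * 1.15 fixed point, with 0x8000 == 1.0, so 0x7333 ~= 0.9 and 0x8CCD ~= 1.1,
 * bounding the RFC 3315 factor (1+RAND). Assuming randLIB_randomise_base(b,
 * low, high) computes b * f >> 15 for a uniform f in [low, high], one
 * retransmission step is:
 */
static uint16_t example_next_rt(uint16_t rt_prev)
{
    /* RT = 2*RTprev + RAND*RTprev, computed as RTprev + (1+RAND)*RTprev */
    uint32_t rt = rt_prev + randLIB_randomise_base(rt_prev, RAND1_LOW, RAND1_HIGH);
    return rt > 0xFFFF ? 0xFFFF : (uint16_t)rt; /* clamp 16-bit overflow */
}
/* e.g. rt_prev = 100 ticks -> next RT lands in roughly [190, 210] ticks */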
adfernandes/mbed
connectivity/nanostack/sal-stack-nanostack/source/libDHCPv6/dhcp_service_api.c
C
apache-2.0
36,946
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <axutil_dll_desc.h> #include <axutil_class_loader.h> struct axutil_dll_desc { axis2_char_t *dll_name; axis2_char_t *path_qualified_dll_name; axis2_dll_type_t dll_type; int load_options; AXIS2_DLHANDLER dl_handler; CREATE_FUNCT create_funct; DELETE_FUNCT delete_funct; AXIS2_TIME_T timestamp; axutil_error_codes_t error_code; }; AXIS2_EXTERN axutil_dll_desc_t *AXIS2_CALL axutil_dll_desc_create( const axutil_env_t *env) { axutil_dll_desc_t *dll_desc = NULL; AXIS2_ENV_CHECK(env, NULL); dll_desc = (axutil_dll_desc_t *)AXIS2_MALLOC(env->allocator, sizeof(axutil_dll_desc_t)); if(!dll_desc) { AXIS2_ERROR_SET(env->error, AXIS2_ERROR_NO_MEMORY, AXIS2_FAILURE); AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI, "Out of memory"); return NULL; } dll_desc->dll_name = NULL; dll_desc->path_qualified_dll_name = NULL; dll_desc->dll_type = 0; dll_desc->load_options = 0; dll_desc->dl_handler = NULL; dll_desc->create_funct = NULL; dll_desc->delete_funct = NULL; dll_desc->timestamp = 0; dll_desc->error_code = AXIS2_ERROR_NONE; return dll_desc; } AXIS2_EXTERN void AXIS2_CALL axutil_dll_desc_free( axutil_dll_desc_t *dll_desc, const axutil_env_t *env) { AXIS2_ENV_CHECK(env, AXIS2_FAILURE); if(dll_desc->dl_handler) { axutil_class_loader_delete_dll(env, dll_desc); } if(dll_desc->dll_name) { AXIS2_FREE(env->allocator, dll_desc->dll_name); dll_desc->dll_name = NULL; } if(dll_desc->path_qualified_dll_name) { AXIS2_FREE(env->allocator, dll_desc->path_qualified_dll_name); dll_desc->path_qualified_dll_name = NULL; } if(dll_desc) { AXIS2_FREE(env->allocator, dll_desc); } return; } AXIS2_EXTERN void AXIS2_CALL axutil_dll_desc_free_void_arg( void *dll_desc, const axutil_env_t *env) { axutil_dll_desc_t *dll_desc_l = NULL; AXIS2_ENV_CHECK(env, AXIS2_FAILURE); dll_desc_l = (axutil_dll_desc_t *)dll_desc; axutil_dll_desc_free(dll_desc_l, env); return; } AXIS2_EXTERN axis2_status_t AXIS2_CALL axutil_dll_desc_set_name( axutil_dll_desc_t *dll_desc, const axutil_env_t *env, axis2_char_t *name) { AXIS2_ENV_CHECK(env, AXIS2_FAILURE); AXIS2_PARAM_CHECK(env->error, name, AXIS2_FAILURE); if(dll_desc->path_qualified_dll_name) { AXIS2_FREE(env->allocator, dll_desc->path_qualified_dll_name); dll_desc->path_qualified_dll_name = NULL; } dll_desc->path_qualified_dll_name = axutil_strdup(env, name); if(!dll_desc->path_qualified_dll_name) { return AXIS2_FAILURE; } return AXIS2_SUCCESS; } AXIS2_EXTERN axis2_char_t *AXIS2_CALL axutil_dll_desc_get_name( axutil_dll_desc_t *dll_desc, const axutil_env_t *env) { return dll_desc->path_qualified_dll_name; } AXIS2_EXTERN axis2_status_t AXIS2_CALL axutil_dll_desc_set_load_options( axutil_dll_desc_t *dll_desc, const axutil_env_t *env, int options) { AXIS2_ENV_CHECK(env, AXIS2_FAILURE); dll_desc->load_options = options; return AXIS2_SUCCESS; } 
AXIS2_EXTERN axis2_status_t AXIS2_CALL axutil_dll_desc_set_type( axutil_dll_desc_t *dll_desc, const axutil_env_t *env, axis2_dll_type_t type) { AXIS2_ENV_CHECK(env, AXIS2_FAILURE); dll_desc->dll_type = type; return AXIS2_SUCCESS; } AXIS2_EXTERN axis2_dll_type_t AXIS2_CALL axutil_dll_desc_get_type( axutil_dll_desc_t *dll_desc, const axutil_env_t *env) { return dll_desc->dll_type; } AXIS2_EXTERN int AXIS2_CALL axutil_dll_desc_get_load_options( axutil_dll_desc_t *dll_desc, const axutil_env_t *env) { return dll_desc->load_options; } AXIS2_EXTERN axis2_status_t AXIS2_CALL axutil_dll_desc_set_dl_handler( axutil_dll_desc_t *dll_desc, const axutil_env_t *env, AXIS2_DLHANDLER dl_handler) { AXIS2_ENV_CHECK(env, AXIS2_FAILURE); AXIS2_PARAM_CHECK(env->error, dl_handler, AXIS2_FAILURE); if(dll_desc->dl_handler) { AXIS2_FREE(env->allocator, dll_desc->dl_handler); } dll_desc->dl_handler = dl_handler; return AXIS2_SUCCESS; } AXIS2_EXTERN AXIS2_DLHANDLER AXIS2_CALL axutil_dll_desc_get_dl_handler( axutil_dll_desc_t *dll_desc, const axutil_env_t *env) { return dll_desc->dl_handler; } AXIS2_EXTERN axis2_status_t AXIS2_CALL axutil_dll_desc_set_create_funct( axutil_dll_desc_t *dll_desc, const axutil_env_t *env, CREATE_FUNCT funct) { AXIS2_ENV_CHECK(env, AXIS2_FAILURE); dll_desc->create_funct = funct; return AXIS2_SUCCESS; } AXIS2_EXTERN CREATE_FUNCT AXIS2_CALL axutil_dll_desc_get_create_funct( axutil_dll_desc_t *dll_desc, const axutil_env_t *env) { return dll_desc->create_funct; } AXIS2_EXTERN axis2_status_t AXIS2_CALL axutil_dll_desc_set_delete_funct( axutil_dll_desc_t *dll_desc, const axutil_env_t *env, DELETE_FUNCT funct) { AXIS2_ENV_CHECK(env, AXIS2_FAILURE); dll_desc->delete_funct = funct; return AXIS2_SUCCESS; } AXIS2_EXTERN DELETE_FUNCT AXIS2_CALL axutil_dll_desc_get_delete_funct( axutil_dll_desc_t *dll_desc, const axutil_env_t *env) { return dll_desc->delete_funct; } AXIS2_EXTERN axis2_status_t AXIS2_CALL axutil_dll_desc_set_timestamp( axutil_dll_desc_t *dll_desc, const axutil_env_t *env, AXIS2_TIME_T timestamp) { AXIS2_ENV_CHECK(env, AXIS2_FAILURE); dll_desc->timestamp = timestamp; return AXIS2_SUCCESS; } AXIS2_EXTERN AXIS2_TIME_T AXIS2_CALL axutil_dll_desc_get_timestamp( axutil_dll_desc_t *dll_desc, const axutil_env_t *env) { return dll_desc->timestamp; } AXIS2_EXTERN axis2_status_t AXIS2_CALL axutil_dll_desc_set_error_code( axutil_dll_desc_t *dll_desc, const axutil_env_t *env, axutil_error_codes_t error_code) { AXIS2_ENV_CHECK(env, AXIS2_FAILURE); dll_desc->error_code = error_code; return AXIS2_SUCCESS; } AXIS2_EXTERN axutil_error_codes_t AXIS2_CALL axutil_dll_desc_get_error_code( axutil_dll_desc_t *dll_desc, const axutil_env_t *env) { return dll_desc->error_code; } AXIS2_EXTERN axis2_char_t *AXIS2_CALL axutil_dll_desc_create_platform_specific_dll_name( axutil_dll_desc_t *dll_desc, const axutil_env_t *env, const axis2_char_t *class_name) { axis2_char_t *temp_name = NULL; AXIS2_ENV_CHECK(env, NULL); /* allow config to give a literal lib name since it may want a * versioned lib like "libfoo.so.0" */ if (axutil_strstr(class_name, AXIS2_LIB_SUFFIX)) { /* assume the class_name is the literal lib file name */ dll_desc->dll_name = axutil_strdup(env,class_name); return dll_desc->dll_name; } temp_name = axutil_stracat(env, AXIS2_LIB_PREFIX, class_name); dll_desc->dll_name = axutil_stracat(env, temp_name, AXIS2_LIB_SUFFIX); AXIS2_FREE(env->allocator, temp_name); return dll_desc->dll_name; }
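/*
 * Usage sketch (illustrative): how the name helper above behaves. The values
 * of AXIS2_LIB_PREFIX and AXIS2_LIB_SUFFIX are platform macros defined
 * elsewhere in axis2c; "lib" and ".so" are assumed here for a typical Linux
 * build.
 */
void example_dll_name(const axutil_env_t *env)
{
    axutil_dll_desc_t *desc = axutil_dll_desc_create(env);
    if (!desc)
        return;
    /* "foo" contains no lib suffix, so the result is prefix + name + suffix,
     * i.e. "libfoo.so" on the assumed platform */
    axutil_dll_desc_create_platform_specific_dll_name(desc, env, "foo");
    /* a literal, possibly versioned name such as "libbar.so.0" already
     * contains AXIS2_LIB_SUFFIX and would be duplicated verbatim instead */
    axutil_dll_desc_free(desc, env);
}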
techhead/wsf_php_dist
wsf_c/axis2c/util/src/dll_desc.c
C
apache-2.0
7,773
/*++

Copyright (c) 2004, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

Module Name:

  EfiShell.c

Abstract:

  FFS Filename for EFI Shell

--*/

#include "Tiano.h"
#include EFI_GUID_DEFINITION (EfiShell)

EFI_GUID gEfiShellFileGuid     = EFI_SHELL_FILE_GUID;
EFI_GUID gEfiMiniShellFileGuid = EFI_MINI_SHELL_FILE_GUID;

EFI_GUID_STRING (&gEfiShellFileGuid, "EfiShell", "Efi Shell FFS file name GUID")
EFI_GUID_STRING (&gEfiMiniShellFileGuid, "EfiMiniShell", "Efi Mini-Shell FFS file name GUID")
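/*
 * For reference (standard UEFI layout, not defined in this file): the two
 * globals above have the usual 128-bit GUID shape, and EFI_SHELL_FILE_GUID
 * expands to a brace initializer of this form in the matching header.
 */
typedef struct {
  UINT32  Data1;
  UINT16  Data2;
  UINT16  Data3;
  UINT8   Data4[8];
} EXAMPLE_EFI_GUID;   /* mirrors the real EFI_GUID typedef */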
google/google-ctf
third_party/edk2/EdkCompatibilityPkg/Foundation/Guid/EfiShell/EfiShell.c
C
apache-2.0
1,146
/* FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd. All rights reserved VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION. This file is part of the FreeRTOS distribution. FreeRTOS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License (version 2) as published by the Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception. *************************************************************************** >>! NOTE: The modification to the GPL is included to allow you to !<< >>! distribute a combined work that includes FreeRTOS without being !<< >>! obliged to provide the source code for proprietary components !<< >>! outside of the FreeRTOS kernel. !<< *************************************************************************** FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. Full license text is available on the following link: http://www.freertos.org/a00114.html *************************************************************************** * * * FreeRTOS provides completely free yet professionally developed, * * robust, strictly quality controlled, supported, and cross * * platform software that is more than just the market leader, it * * is the industry's de facto standard. * * * * Help yourself get started quickly while simultaneously helping * * to support the FreeRTOS project by purchasing a FreeRTOS * * tutorial book, reference manual, or both: * * http://www.FreeRTOS.org/Documentation * * * *************************************************************************** http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading the FAQ page "My application does not run, what could be wrong?". Have you defined configASSERT()? http://www.FreeRTOS.org/support - In return for receiving this top quality embedded software for free we request you assist our global community by participating in the support forum. http://www.FreeRTOS.org/training - Investing in training allows your team to be as productive as possible as early as possible. Now you can receive FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers Ltd, and the world's leading authority on the world's leading RTOS. http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products, including FreeRTOS+Trace - an indispensable productivity tool, a DOS compatible FAT file system, and our tiny thread aware UDP/IP stack. http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate. Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS. http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS licenses offer ticketed support, indemnification and commercial middleware. http://www.SafeRTOS.com - High Integrity Systems also provide a safety engineered and independently SIL3 certified version for use in safety and mission critical applications that require provable dependability. 1 tab == 4 spaces! */ /* * Interrupt service routines that cannot nest have no special requirements and * can be written as per the compiler documentation. However interrupts written * in this manner will utilise the stack of whichever task was interrupts, * rather than the system stack, necessitating that adequate stack space be * allocated to each created task. 
It is therefore not recommended to write * interrupt service routines in this manner. * * Interrupts service routines that can nest require a simple assembly wrapper. * This file is provided as a example of how this is done. * * The example in this file creates a single task. The task blocks on a * semaphore which is periodically 'given' from a timer interrupt. The assembly * wrapper for the interrupt is implemented in ISRTriggeredTask_isr.S. The * C function called by the assembly wrapper is implemented in this file. * * The task toggle LED mainISR_TRIGGERED_LED each time it is unblocked by the * interrupt. */ /* Standard includes. */ #include <stdio.h> /* Scheduler includes. */ #include "FreeRTOS.h" #include "task.h" #include "semphr.h" /* Standard demo includes. */ #include "ParTest.h" /*-----------------------------------------------------------*/ /* The LED controlled by the ISR triggered task. */ #define mainISR_TRIGGERED_LED ( 1 ) /* Constants used to configure T5. */ #define mainT5PRESCALAR ( 6 ) #define mainT5_SEMAPHORE_RATE ( 31250 ) /*-----------------------------------------------------------*/ /* * The task that is periodically triggered by an interrupt, as described at the * top of this file. */ static void prvISRTriggeredTask( void* pvParameters ); /* * Configures the T5 timer peripheral to generate the interrupts that unblock * the task implemented by the prvISRTriggeredTask() function. */ static void prvSetupT5( void ); /* The timer 5 interrupt handler. As this interrupt uses the FreeRTOS assembly entry point the IPL setting in the following function prototype has no effect. */ void __attribute__( (interrupt(IPL3AUTO), vector(_TIMER_5_VECTOR))) vT5InterruptWrapper( void ); /*-----------------------------------------------------------*/ /* The semaphore given by the T5 interrupt to unblock the task implemented by the prvISRTriggeredTask() function. */ static SemaphoreHandle_t xBlockSemaphore = NULL; /*-----------------------------------------------------------*/ void vStartISRTriggeredTask( void ) { /* Create the task described at the top of this file. The timer is configured by the task itself. */ xTaskCreate( prvISRTriggeredTask, /* The function that implements the task. */ "ISRt", /* Text name to help debugging - not used by the kernel. */ configMINIMAL_STACK_SIZE, /* The size of the stack to allocate to the task - defined in words, not bytes. */ NULL, /* The parameter to pass into the task. Not used in this case. */ configMAX_PRIORITIES - 1, /* The priority at which the task is created. */ NULL ); /* Used to pass a handle to the created task out of the function. Not used in this case. */ } /*-----------------------------------------------------------*/ void vT5InterruptHandler( void ) { portBASE_TYPE xHigherPriorityTaskWoken = pdFALSE; /* This function is the handler for the peripheral timer interrupt. The interrupt is initially signalled in a separate assembly file which switches to the system stack and then calls this function. It gives a semaphore which signals the prvISRBlockTask */ /* Give the semaphore. If giving the semaphore causes the task to leave the Blocked state, and the priority of the task is higher than the priority of the interrupted task, then xHigherPriorityTaskWoken will be set to pdTRUE inside the xSemaphoreGiveFromISR() function. xHigherPriorityTaskWoken is later passed into portEND_SWITCHING_ISR(), where a context switch is requested if it is pdTRUE. The context switch ensures the interrupt returns directly to the unblocked task. 
*/ xSemaphoreGiveFromISR( xBlockSemaphore, &xHigherPriorityTaskWoken ); /* Clear the interrupt */ IFS0CLR = _IFS0_T5IF_MASK; /* See comment above the call to xSemaphoreGiveFromISR(). */ portEND_SWITCHING_ISR( xHigherPriorityTaskWoken ); } /*-----------------------------------------------------------*/ static void prvISRTriggeredTask( void* pvParameters ) { /* Avoid compiler warnings. */ ( void ) pvParameters; /* Create the semaphore used to signal this task */ xBlockSemaphore = xSemaphoreCreateBinary(); /* Configure the timer to generate the interrupts. */ prvSetupT5(); for( ;; ) { /* Block on the binary semaphore given by the T5 interrupt. */ xSemaphoreTake( xBlockSemaphore, portMAX_DELAY ); /* Toggle the LED. */ vParTestToggleLED( mainISR_TRIGGERED_LED ); } } /*-----------------------------------------------------------*/ static void prvSetupT5( void ) { /* Set up timer 5 to generate an interrupt every 50 ms */ T5CON = 0; TMR5 = 0; T5CONbits.TCKPS = mainT5PRESCALAR; PR5 = mainT5_SEMAPHORE_RATE; /* Setup timer 5 interrupt priority to be the maximum from which interrupt safe FreeRTOS API functions can be called. Interrupt safe FreeRTOS API functions are those that end "FromISR". */ IPC6bits.T5IP = configMAX_SYSCALL_INTERRUPT_PRIORITY; /* Clear the interrupt as a starting condition. */ IFS0bits.T5IF = 0; /* Enable the interrupt. */ IEC0bits.T5IE = 1; /* Start the timer. */ T5CONbits.TON = 1; }
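/*
 * Worked numbers for prvSetupT5() above. The peripheral bus clock feeding
 * Timer 5 is configured outside this file; assuming the common 40 MHz demo
 * setting, TCKPS = 6 selects a 1:64 prescaler on PIC32 Timer 5, the timer
 * counts at 40 MHz / 64 = 625 kHz, and PR5 = 31250 gives
 * 31250 / 625000 = 50 ms between interrupts, matching the "every 50 ms"
 * comment. A different peripheral clock scales the period accordingly.
 */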
PUCSIE-embedded-course/stm32f4-examples
firmware/freertos/semaphore/lib/FreeRTOSV8.2.3/FreeRTOS/Demo/PIC32MZ_MPLAB/ISRTriggeredTask.c
C
mit
9,510
/** * Copyright (c) 2014,2015 NetEase, Inc. and other Pomelo contributors * MIT Licensed. */ #include <assert.h> #include <string.h> #include <pomelo_trans.h> #include "pc_lib.h" #include "pc_pomelo_i.h" void pc_trans_fire_event(pc_client_t* client, int ev_type, const char* arg1, const char* arg2) { int pending = 0; if (!client) { pc_lib_log(PC_LOG_ERROR, "pc_client_fire_event - client is null"); return ; } if (client->config.enable_polling) { pending = 1; } pc__trans_fire_event(client, ev_type, arg1, arg2, pending); } void pc__trans_fire_event(pc_client_t* client, int ev_type, const char* arg1, const char* arg2, int pending) { QUEUE* q; pc_ev_handler_t* handler; pc_event_t* ev; int i; if (ev_type >= PC_EV_COUNT || ev_type < 0) { pc_lib_log(PC_LOG_ERROR, "pc__transport_fire_event - error event type"); return; } if (ev_type == PC_EV_USER_DEFINED_PUSH && (!arg1 || !arg2)) { pc_lib_log(PC_LOG_ERROR, "pc__transport_fire_event - push msg but without a route or msg"); return; } if (ev_type == PC_EV_CONNECT_ERROR || ev_type == PC_EV_UNEXPECTED_DISCONNECT || ev_type == PC_EV_PROTO_ERROR || ev_type == PC_EV_CONNECT_FAILED) { if (!arg1) { pc_lib_log(PC_LOG_ERROR, "pc__transport_fire_event - event should be with a reason description"); return ; } } if (pending) { assert(client->config.enable_polling); pc_lib_log(PC_LOG_INFO, "pc__trans_fire_event - add pending event: %s", pc_client_ev_str(ev_type)); pc_mutex_lock(&client->event_mutex); ev = NULL; for (i = 0; i < PC_PRE_ALLOC_EVENT_SLOT_COUNT; ++i) { if (PC_PRE_ALLOC_IS_IDLE(client->pending_events[i].type)) { ev = &client->pending_events[i]; PC_PRE_ALLOC_SET_BUSY(ev->type); break; } } if (!ev) { ev = (pc_event_t* )pc_lib_malloc(sizeof(pc_event_t)); memset(ev, 0, sizeof(pc_event_t)); ev->type = PC_DYN_ALLOC; } PC_EV_SET_NET_EVENT(ev->type); QUEUE_INIT(&ev->queue); QUEUE_INSERT_TAIL(&client->pending_ev_queue, &ev->queue); ev->data.ev.ev_type = ev_type; if (arg1) { ev->data.ev.arg1 = pc_lib_strdup(arg1); } else { ev->data.ev.arg1 = NULL; } if (arg2) { ev->data.ev.arg2 = pc_lib_strdup(arg2); } else { ev->data.ev.arg2 = NULL; } pc_mutex_unlock(&client->event_mutex); return ; } pc_lib_log(PC_LOG_INFO, "pc__trans_fire_event - fire event: %s, arg1: %s, arg2: %s", pc_client_ev_str(ev_type), arg1 ? arg1 : "", arg2 ? 
arg2 : ""); pc_mutex_lock(&client->state_mutex); switch(ev_type) { case PC_EV_CONNECTED: assert(client->state == PC_ST_CONNECTING); client->state = PC_ST_CONNECTED; break; case PC_EV_CONNECT_ERROR: assert(client->state == PC_ST_CONNECTING || client->state == PC_ST_DISCONNECTING); break; case PC_EV_CONNECT_FAILED: assert(client->state == PC_ST_CONNECTING || client->state == PC_ST_DISCONNECTING); client->state = PC_ST_INITED; break; case PC_EV_DISCONNECT: assert(client->state == PC_ST_DISCONNECTING); client->state = PC_ST_INITED; break; case PC_EV_KICKED_BY_SERVER: assert(client->state == PC_ST_CONNECTED || client->state == PC_ST_DISCONNECTING); client->state = PC_ST_INITED; break; case PC_EV_UNEXPECTED_DISCONNECT: case PC_EV_PROTO_ERROR: assert(client->state == PC_ST_CONNECTING || client->state == PC_ST_CONNECTED || client->state == PC_ST_DISCONNECTING); client->state = PC_ST_CONNECTING; break; case PC_EV_USER_DEFINED_PUSH: /* do nothing here */ break; default: /* never run to here */ pc_lib_log(PC_LOG_ERROR, "pc__trans_fire_event - unknown network event: %d", ev_type); } pc_mutex_unlock(&client->state_mutex); /* invoke handler */ pc_mutex_lock(&client->handler_mutex); QUEUE_FOREACH(q, &client->ev_handlers) { handler = QUEUE_DATA(q, pc_ev_handler_t, queue); assert(handler && handler->cb); handler->cb(client, ev_type, handler->ex_data, arg1, arg2); } pc_mutex_unlock(&client->handler_mutex); } void pc_trans_sent(pc_client_t* client, unsigned int seq_num, int rc) { int pending = 0; if (!client) { pc_lib_log(PC_LOG_ERROR, "pc_trans_sent - client is null"); return ; } if (client->config.enable_polling) { pending = 1; } pc__trans_sent(client, seq_num, rc, pending); } void pc__trans_sent(pc_client_t* client, unsigned int seq_num, int rc, int pending) { QUEUE* q; pc_notify_t* notify; pc_notify_t* target; pc_event_t* ev; int i; if (pending) { pc_mutex_lock(&client->event_mutex); pc_lib_log(PC_LOG_INFO, "pc__trans_sent - add pending sent event, seq_num: %u, rc: %s", seq_num, pc_client_rc_str(rc)); ev = NULL; for (i = 0; i < PC_PRE_ALLOC_EVENT_SLOT_COUNT; ++i) { if (PC_PRE_ALLOC_IS_IDLE(client->pending_events[i].type)) { ev = &client->pending_events[i]; PC_PRE_ALLOC_SET_BUSY(ev->type); break; } } if (!ev) { ev = (pc_event_t* )pc_lib_malloc(sizeof(pc_event_t)); memset(ev, 0, sizeof(pc_event_t)); ev->type = PC_DYN_ALLOC; } QUEUE_INIT(&ev->queue); PC_EV_SET_NOTIFY_SENT(ev->type); ev->data.notify.seq_num = seq_num; ev->data.notify.rc = rc; QUEUE_INSERT_TAIL(&client->pending_ev_queue, &ev->queue); pc_mutex_unlock(&client->event_mutex); return ; } /* callback immediately */ pc_mutex_lock(&client->notify_mutex); target = NULL; QUEUE_FOREACH(q, &client->notify_queue) { notify = (pc_notify_t* )QUEUE_DATA(q, pc_common_req_t, queue); if (notify->base.seq_num == seq_num) { pc_lib_log(PC_LOG_INFO, "pc__trans_sent - fire sent event, seq_num: %u, rc: %s", seq_num, pc_client_rc_str(rc)); target = notify; QUEUE_REMOVE(q); QUEUE_INIT(q); break; } } pc_mutex_unlock(&client->notify_mutex); if (target) { target->cb(target, rc); pc_lib_free((char*)target->base.msg); pc_lib_free((char*)target->base.route); target->base.msg = NULL; target->base.route = NULL; if (PC_IS_PRE_ALLOC(target->base.type)) { pc_mutex_lock(&client->notify_mutex); PC_PRE_ALLOC_SET_IDLE(target->base.type); pc_mutex_unlock(&client->notify_mutex); } else { pc_lib_free(target); } } else { pc_lib_log(PC_LOG_ERROR, "pc__trans_sent - no pending notify found" " when transport has sent it, seq num: %u", seq_num); } } void pc_trans_resp(pc_client_t* client, 
unsigned int req_id, int rc, const char* resp) { int pending = 0; if (!client) { pc_lib_log(PC_LOG_ERROR, "pc_trans_resp - client is null"); return ; } if (client->config.enable_polling) { pending = 1; } pc__trans_resp(client, req_id, rc, resp, pending); } void pc__trans_resp(pc_client_t* client, unsigned int req_id, int rc, const char* resp, int pending) { QUEUE* q; pc_request_t* req; pc_event_t* ev; pc_request_t* target; int i; if (pending) { pc_mutex_lock(&client->event_mutex); pc_lib_log(PC_LOG_INFO, "pc__trans_resp - add pending resp event, req_id: %u, rc: %s", req_id, pc_client_rc_str(rc)); ev = NULL; for (i = 0; i < PC_PRE_ALLOC_EVENT_SLOT_COUNT; ++i) { if (PC_PRE_ALLOC_IS_IDLE(client->pending_events[i].type)) { ev = &client->pending_events[i]; PC_PRE_ALLOC_SET_BUSY(ev->type); break; } } if (!ev) { ev = (pc_event_t* )pc_lib_malloc(sizeof(pc_event_t)); memset(ev, 0, sizeof(pc_event_t)); ev->type = PC_DYN_ALLOC; } PC_EV_SET_RESP(ev->type); QUEUE_INIT(&ev->queue); ev->data.req.req_id = req_id; ev->data.req.rc = rc; ev->data.req.resp = pc_lib_strdup(resp); QUEUE_INSERT_TAIL(&client->pending_ev_queue, &ev->queue); pc_mutex_unlock(&client->event_mutex); return ; } /* invoke callback immediately */ target = NULL; pc_mutex_lock(&client->req_mutex); QUEUE_FOREACH(q, &client->req_queue) { req= (pc_request_t* )QUEUE_DATA(q, pc_common_req_t, queue); if (req->req_id == req_id) { pc_lib_log(PC_LOG_INFO, "pc__trans_resp - fire resp event, req_id: %u, rc: %s", req_id, pc_client_rc_str(rc)); target = req; QUEUE_REMOVE(q); QUEUE_INIT(q); break; } } pc_mutex_unlock(&client->req_mutex); if (target) { target->cb(target, rc, resp); pc_lib_free((char*)target->base.msg); pc_lib_free((char*)target->base.route); target->base.msg = NULL; target->base.route = NULL; if (PC_IS_PRE_ALLOC(target->base.type)) { pc_mutex_lock(&client->req_mutex); PC_PRE_ALLOC_SET_IDLE(target->base.type); pc_mutex_unlock(&client->req_mutex); } else { pc_lib_free(target); } } else { pc_lib_log(PC_LOG_ERROR, "pc__trans_resp - no pending request found when" " get a response, req id: %u", req_id); } }
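/*
 * Handler sketch matching the cb invocation in pc__trans_fire_event() above.
 * Registration is assumed to go through the public pc_client_add_ev_handler
 * API (declared in the pomelo client headers, not in this file); the
 * example_* name is hypothetical.
 */
static void example_on_event(pc_client_t *client, int ev_type, void *ex_data,
                             const char *arg1, const char *arg2)
{
    (void)client; (void)ex_data;
    if (ev_type == PC_EV_USER_DEFINED_PUSH) {
        /* for pushes, arg1 carries the route and arg2 the message body,
         * per the checks in pc__trans_fire_event() */
        pc_lib_log(PC_LOG_INFO, "push on route %s: %s", arg1, arg2 ? arg2 : "");
    }
}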
hitstanley/libpomelo2
src/pc_trans.c
C
mit
10,100
/* * Block driver for media (i.e., flash cards) * * Copyright 2002 Hewlett-Packard Company * Copyright 2005-2008 Pierre Ossman * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is * preserved in its entirety in all copies and derived works. * * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS * FITNESS FOR ANY PARTICULAR PURPOSE. * * Many thanks to Alessandro Rubini and Jonathan Corbet! * * Author: Andrew Christian * 28 May 2002 */ #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/hdreg.h> #include <linux/kdev_t.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> #include <linux/delay.h> #include <linux/capability.h> #include <linux/compat.h> #include <linux/mmc/ioctl.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> #include <asm/uaccess.h> #include "queue.h" MODULE_ALIAS("mmc:block"); #ifdef MODULE_PARAM_PREFIX #undef MODULE_PARAM_PREFIX #endif #define MODULE_PARAM_PREFIX "mmcblk." #define INAND_CMD38_ARG_EXT_CSD 113 #define INAND_CMD38_ARG_ERASE 0x00 #define INAND_CMD38_ARG_TRIM 0x01 #define INAND_CMD38_ARG_SECERASE 0x80 #define INAND_CMD38_ARG_SECTRIM1 0x81 #define INAND_CMD38_ARG_SECTRIM2 0x88 #define MMC_BLK_TIMEOUT_MS (30 * 1000) /* 30 sec timeout */ #define MMC_SANITIZE_REQ_TIMEOUT 240000 /* msec */ #define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ (req->cmd_flags & REQ_META)) && \ (rq_data_dir(req) == WRITE)) #define PACKED_CMD_VER 0x01 #define PACKED_CMD_WR 0x02 #define PACKED_TRIGGER_MAX_ELEMENTS 5000 #define MMC_BLK_UPDATE_STOP_REASON(stats, reason) \ do { \ if (stats->enabled) \ stats->pack_stop_reason[reason]++; \ } while (0) #define PCKD_TRGR_INIT_MEAN_POTEN 17 #define PCKD_TRGR_POTEN_LOWER_BOUND 5 #define PCKD_TRGR_URGENT_PENALTY 2 #define PCKD_TRGR_LOWER_BOUND 5 #define PCKD_TRGR_PRECISION_MULTIPLIER 100 static DEFINE_MUTEX(block_mutex); /* * The defaults come from config options but can be overriden by module * or bootarg options. */ static int perdev_minors = CONFIG_MMC_BLOCK_MINORS; /* * We've only got one major, so number of mmcblk devices is * limited to 256 / number of minors per device. */ static int max_devices; /* 256 minors, so at most 256 separate devices */ static DECLARE_BITMAP(dev_use, 256); static DECLARE_BITMAP(name_use, 256); /* * There is one mmc_blk_data per slot. */ struct mmc_blk_data { spinlock_t lock; struct gendisk *disk; struct mmc_queue queue; struct list_head part; unsigned int flags; #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ unsigned int usage; unsigned int read_only; unsigned int part_type; unsigned int name_idx; unsigned int reset_done; #define MMC_BLK_READ BIT(0) #define MMC_BLK_WRITE BIT(1) #define MMC_BLK_DISCARD BIT(2) #define MMC_BLK_SECDISCARD BIT(3) /* * Only set in main mmc_blk_data associated * with mmc_card with mmc_set_drvdata, and keeps * track of the current selected device partition. 
*/ unsigned int part_curr; struct device_attribute force_ro; struct device_attribute power_ro_lock; struct device_attribute num_wr_reqs_to_start_packing; struct device_attribute bkops_check_threshold; struct device_attribute no_pack_for_random; int area_type; }; static DEFINE_MUTEX(open_lock); enum { MMC_PACKED_N_IDX = -1, MMC_PACKED_N_ZERO, MMC_PACKED_N_SINGLE, }; module_param(perdev_minors, int, 0444); MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq) { mqrq->packed_cmd = MMC_PACKED_NONE; mqrq->packed_num = MMC_PACKED_N_ZERO; } static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) { struct mmc_blk_data *md; mutex_lock(&open_lock); md = disk->private_data; if (md && md->usage == 0) md = NULL; if (md) md->usage++; mutex_unlock(&open_lock); return md; } static inline int mmc_get_devidx(struct gendisk *disk) { int devidx = disk->first_minor / perdev_minors; return devidx; } static void mmc_blk_put(struct mmc_blk_data *md) { mutex_lock(&open_lock); md->usage--; if (md->usage == 0) { int devidx = mmc_get_devidx(md->disk); blk_cleanup_queue(md->queue.queue); __clear_bit(devidx, dev_use); put_disk(md->disk); kfree(md); } mutex_unlock(&open_lock); } static ssize_t power_ro_lock_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); struct mmc_card *card = md->queue.card; int locked = 0; if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) locked = 2; else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) locked = 1; ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); return ret; } static ssize_t power_ro_lock_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; struct mmc_blk_data *md, *part_md; struct mmc_card *card; unsigned long set; if (kstrtoul(buf, 0, &set)) return -EINVAL; if (set != 1) return count; md = mmc_blk_get(dev_to_disk(dev)); card = md->queue.card; mmc_claim_host(card->host); ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, card->ext_csd.boot_ro_lock | EXT_CSD_BOOT_WP_B_PWR_WP_EN, card->ext_csd.part_time); if (ret) pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret); else card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; mmc_release_host(card->host); if (!ret) { pr_info("%s: Locking boot partition ro until next power on\n", md->disk->disk_name); set_disk_ro(md->disk, 1); list_for_each_entry(part_md, &md->part, part) if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); set_disk_ro(part_md->disk, 1); } } mmc_blk_put(md); return count; } static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); ret = snprintf(buf, PAGE_SIZE, "%d", get_disk_ro(dev_to_disk(dev)) ^ md->read_only); mmc_blk_put(md); return ret; } static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; char *end; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); unsigned long set = simple_strtoul(buf, &end, 0); if (end == buf) { ret = -EINVAL; goto out; } set_disk_ro(dev_to_disk(dev), set || md->read_only); ret = count; out: mmc_blk_put(md); return ret; } static ssize_t num_wr_reqs_to_start_packing_show(struct device *dev, struct device_attribute *attr, 
char *buf) { struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); int num_wr_reqs_to_start_packing; int ret; num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing; ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing); mmc_blk_put(md); return ret; } static ssize_t num_wr_reqs_to_start_packing_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int value; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); sscanf(buf, "%d", &value); if (value >= 0) md->queue.num_wr_reqs_to_start_packing = value; mmc_blk_put(md); return count; } static ssize_t bkops_check_threshold_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); struct mmc_card *card = md->queue.card; int ret; if (!card) ret = -EINVAL; else ret = snprintf(buf, PAGE_SIZE, "%d\n", card->bkops_info.size_percentage_to_queue_delayed_work); mmc_blk_put(md); return ret; } static ssize_t bkops_check_threshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int value; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); struct mmc_card *card = md->queue.card; unsigned int card_size; int ret = count; if (!card) { ret = -EINVAL; goto exit; } sscanf(buf, "%d", &value); if ((value <= 0) || (value >= 100)) { ret = -EINVAL; goto exit; } card_size = (unsigned int)get_capacity(md->disk); if (card_size <= 0) { ret = -EINVAL; goto exit; } card->bkops_info.size_percentage_to_queue_delayed_work = value; card->bkops_info.min_sectors_to_queue_delayed_work = (card_size * value) / 100; pr_debug("%s: size_percentage = %d, min_sectors = %d", mmc_hostname(card->host), card->bkops_info.size_percentage_to_queue_delayed_work, card->bkops_info.min_sectors_to_queue_delayed_work); exit: mmc_blk_put(md); return count; } static ssize_t no_pack_for_random_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); int ret; ret = snprintf(buf, PAGE_SIZE, "%d\n", md->queue.no_pack_for_random); mmc_blk_put(md); return ret; } static ssize_t no_pack_for_random_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int value; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); struct mmc_card *card = md->queue.card; int ret = count; if (!card) { ret = -EINVAL; goto exit; } sscanf(buf, "%d", &value); if (value < 0) { pr_err("%s: value %d is not valid. old value remains = %d", mmc_hostname(card->host), value, md->queue.no_pack_for_random); ret = -EINVAL; goto exit; } md->queue.no_pack_for_random = (value > 0) ? 
true : false; pr_debug("%s: no_pack_for_random: new value = %d", mmc_hostname(card->host), md->queue.no_pack_for_random); exit: mmc_blk_put(md); return ret; } static int mmc_blk_open(struct block_device *bdev, fmode_t mode) { struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); int ret = -ENXIO; mutex_lock(&block_mutex); if (md) { if (md->usage == 2) check_disk_change(bdev); ret = 0; if ((mode & FMODE_WRITE) && md->read_only) { mmc_blk_put(md); ret = -EROFS; } } mutex_unlock(&block_mutex); return ret; } static int mmc_blk_release(struct gendisk *disk, fmode_t mode) { struct mmc_blk_data *md = disk->private_data; mutex_lock(&block_mutex); mmc_blk_put(md); mutex_unlock(&block_mutex); return 0; } static int mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) { geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); geo->heads = 4; geo->sectors = 16; return 0; } struct mmc_blk_ioc_data { struct mmc_ioc_cmd ic; unsigned char *buf; u64 buf_bytes; }; static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( struct mmc_ioc_cmd __user *user) { struct mmc_blk_ioc_data *idata; int err; idata = kzalloc(sizeof(*idata), GFP_KERNEL); if (!idata) { err = -ENOMEM; goto out; } if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { err = -EFAULT; goto idata_err; } idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { err = -EOVERFLOW; goto idata_err; } if (!idata->buf_bytes) return idata; idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); if (!idata->buf) { err = -ENOMEM; goto idata_err; } if (copy_from_user(idata->buf, (void __user *)(unsigned long) idata->ic.data_ptr, idata->buf_bytes)) { err = -EFAULT; goto copy_err; } return idata; copy_err: kfree(idata->buf); idata_err: kfree(idata); out: return ERR_PTR(err); } struct scatterlist *mmc_blk_get_sg(struct mmc_card *card, unsigned char *buf, int *sg_len, int size) { struct scatterlist *sg; struct scatterlist *sl; int total_sec_cnt, sec_cnt; int max_seg_size, len; total_sec_cnt = size; max_seg_size = card->host->max_seg_size; len = (size - 1 + max_seg_size) / max_seg_size; sl = kmalloc(sizeof(struct scatterlist) * len, GFP_KERNEL); if (!sl) { return NULL; } sg = (struct scatterlist *)sl; sg_init_table(sg, len); while (total_sec_cnt) { if (total_sec_cnt < max_seg_size) sec_cnt = total_sec_cnt; else sec_cnt = max_seg_size; sg_set_page(sg, virt_to_page(buf), sec_cnt, offset_in_page(buf)); buf = buf + sec_cnt; total_sec_cnt = total_sec_cnt - sec_cnt; if (total_sec_cnt == 0) break; sg = sg_next(sg); } if (sg) sg_mark_end(sg); *sg_len = len; return sl; } static int mmc_blk_ioctl_cmd(struct block_device *bdev, struct mmc_ioc_cmd __user *ic_ptr) { struct mmc_blk_ioc_data *idata; struct mmc_blk_data *md; struct mmc_card *card; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct mmc_request mrq = {NULL}; struct scatterlist *sg = 0; int err = 0; /* * The caller must have CAP_SYS_RAWIO, and must be calling this on the * whole block device, not on a partition. This prevents overspray * between sibling partitions. 
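 * (Illustrative note, not in the original comment: a userspace caller therefore needs CAP_SYS_RAWIO and must issue the MMC_IOC_CMD ioctl against the whole device node, e.g. /dev/mmcblk0 rather than /dev/mmcblk0p1.)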
*/ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) return -EPERM; idata = mmc_blk_ioctl_copy_from_user(ic_ptr); if (IS_ERR(idata)) return PTR_ERR(idata); md = mmc_blk_get(bdev->bd_disk); if (!md) { err = -EINVAL; goto blk_err; } card = md->queue.card; if (IS_ERR(card)) { err = PTR_ERR(card); goto cmd_done; } cmd.opcode = idata->ic.opcode; cmd.arg = idata->ic.arg; cmd.flags = idata->ic.flags; if (idata->buf_bytes) { int len; data.blksz = idata->ic.blksz; data.blocks = idata->ic.blocks; sg = mmc_blk_get_sg(card, idata->buf, &len, idata->buf_bytes); data.sg = sg; data.sg_len = len; if (idata->ic.write_flag) data.flags = MMC_DATA_WRITE; else data.flags = MMC_DATA_READ; /* data.flags must already be set before doing this. */ mmc_set_data_timeout(&data, card); /* Allow overriding the timeout_ns for empirical tuning. */ if (idata->ic.data_timeout_ns) data.timeout_ns = idata->ic.data_timeout_ns; if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { /* * Pretend this is a data transfer and rely on the * host driver to compute timeout. When all host * drivers support cmd.cmd_timeout for R1B, this * can be changed to: * * mrq.data = NULL; * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; */ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; } mrq.data = &data; } mrq.cmd = &cmd; mmc_claim_host(card->host); if (idata->ic.is_acmd) { err = mmc_app_cmd(card->host, card); if (err) goto cmd_rel_host; } mmc_wait_for_req(card->host, &mrq); if (cmd.error) { dev_err(mmc_dev(card->host), "%s: cmd error %d\n", __func__, cmd.error); err = cmd.error; goto cmd_rel_host; } if (data.error) { dev_err(mmc_dev(card->host), "%s: data error %d\n", __func__, data.error); err = data.error; goto cmd_rel_host; } /* * According to the SD specs, some commands require a delay after * issuing the command. 
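 * (The bounds come from the user-supplied postsleep_min_us/postsleep_max_us fields; usleep_range() below gives the scheduler room to coalesce the wakeup.)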
*/ if (idata->ic.postsleep_min_us) usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) { err = -EFAULT; goto cmd_rel_host; } if (!idata->ic.write_flag) { if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr, idata->buf, idata->buf_bytes)) { err = -EFAULT; goto cmd_rel_host; } } cmd_rel_host: mmc_release_host(card->host); cmd_done: mmc_blk_put(md); if (sg) kfree(sg); blk_err: kfree(idata->buf); kfree(idata); return err; } static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { int ret = -EINVAL; if (cmd == MMC_IOC_CMD) ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); return ret; } #ifdef CONFIG_COMPAT static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); } #endif static const struct block_device_operations mmc_bdops = { .open = mmc_blk_open, .release = mmc_blk_release, .getgeo = mmc_blk_getgeo, .owner = THIS_MODULE, .ioctl = mmc_blk_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mmc_blk_compat_ioctl, #endif }; static inline int mmc_blk_part_switch(struct mmc_card *card, struct mmc_blk_data *md) { int ret; struct mmc_blk_data *main_md = mmc_get_drvdata(card); if (main_md->part_curr == md->part_type) return 0; if (mmc_card_mmc(card)) { u8 part_config = card->ext_csd.part_config; part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; part_config |= md->part_type; ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, part_config, card->ext_csd.part_time); if (ret) return ret; card->ext_csd.part_config = part_config; } main_md->part_curr = md->part_type; return 0; } static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) { int err; u32 result; __be32 *blocks; struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct scatterlist sg; cmd.opcode = MMC_APP_CMD; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) return (u32)-1; if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) return (u32)-1; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = 4; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; mmc_set_data_timeout(&data, card); mrq.cmd = &cmd; mrq.data = &data; blocks = kmalloc(4, GFP_KERNEL); if (!blocks) return (u32)-1; sg_init_one(&sg, blocks, 4); mmc_wait_for_req(card->host, &mrq); result = ntohl(*blocks); kfree(blocks); if (cmd.error || data.error) result = (u32)-1; return result; } static int send_stop(struct mmc_card *card, u32 *status) { struct mmc_command cmd = {0}; int err; cmd.opcode = MMC_STOP_TRANSMISSION; cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 5); if (err == 0) *status = cmd.resp[0]; return err; } static int get_card_status(struct mmc_card *card, u32 *status, int retries) { struct mmc_command cmd = {0}; int err; cmd.opcode = MMC_SEND_STATUS; if (!mmc_host_is_spi(card->host)) cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, retries); if (err == 0) *status = cmd.resp[0]; return err; } #define ERR_NOMEDIUM 3 #define ERR_RETRY 2 #define ERR_ABORT 1 #define ERR_CONTINUE 0 static int 
mmc_blk_cmd_error(struct request *req, const char *name, int error, bool status_valid, u32 status) { switch (error) { case -EILSEQ: /* response crc error, retry the r/w cmd */ pr_err("%s: %s sending %s command, card status %#x\n", req->rq_disk->disk_name, "response CRC error", name, status); return ERR_RETRY; case -ETIMEDOUT: pr_err("%s: %s sending %s command, card status %#x\n", req->rq_disk->disk_name, "timed out", name, status); /* If the status cmd initially failed, retry the r/w cmd */ if (!status_valid) { pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name); return ERR_RETRY; } /* * If it was a r/w cmd crc error, or illegal command * (eg, issued in wrong state) then retry - we should * have corrected the state problem above. */ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) { pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name); return ERR_RETRY; } /* Otherwise abort the command */ pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name); return ERR_ABORT; default: /* We don't understand the error code the driver gave us */ pr_err("%s: unknown error %d sending read/write command, card status %#x\n", req->rq_disk->disk_name, error, status); return ERR_ABORT; } } /* * Initial r/w and stop cmd error recovery. * We don't know whether the card received the r/w cmd or not, so try to * restore things back to a sane state. Essentially, we do this as follows: * - Obtain card status. If the first attempt to obtain card status fails, * the status word will reflect the failed status cmd, not the failed * r/w cmd. If we fail to obtain card status, it suggests we can no * longer communicate with the card. * - Check the card state. If the card received the cmd but there was a * transient problem with the response, it might still be in a data transfer * mode. Try to send it a stop command. If this fails, we can't recover. * - If the r/w cmd failed due to a response CRC error, it was probably * transient, so retry the cmd. * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry. * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or * illegal cmd, retry. * Otherwise we don't understand what happened, so abort. */ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, struct mmc_blk_request *brq, int *ecc_err, int *gen_err) { bool prev_cmd_status_valid = true; u32 status, stop_status = 0; int err, retry; if (mmc_card_removed(card)) return ERR_NOMEDIUM; /* * Try to get card status which indicates both the card state * and why there was no response. If the first attempt fails, * we can't be sure the returned status is for the r/w command. */ for (retry = 2; retry >= 0; retry--) { err = get_card_status(card, &status, 0); if (!err) break; prev_cmd_status_valid = false; pr_err("%s: error %d sending status command, %sing\n", req->rq_disk->disk_name, err, retry ? "retry" : "abort"); } /* We couldn't get a response from the card. Give up. 
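 * (mmc_detect_card_removed() below distinguishes a physically removed card, ERR_NOMEDIUM, from one that is present but unresponsive, ERR_ABORT.)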
*/ if (err) { /* Check if the card is removed */ if (mmc_detect_card_removed(card->host)) return ERR_NOMEDIUM; return ERR_ABORT; } /* Flag ECC errors */ if ((status & R1_CARD_ECC_FAILED) || (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) *ecc_err = 1; /* Flag General errors */ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) if ((status & R1_ERROR) || (brq->stop.resp[0] & R1_ERROR)) { pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n", req->rq_disk->disk_name, __func__, brq->stop.resp[0], status); *gen_err = 1; } /* * Check the current card state. If it is in some data transfer * mode, tell it to stop (and hopefully transition back to TRAN.) */ if (R1_CURRENT_STATE(status) == R1_STATE_DATA || R1_CURRENT_STATE(status) == R1_STATE_RCV) { err = send_stop(card, &stop_status); if (err) pr_err("%s: error %d sending stop command\n", req->rq_disk->disk_name, err); /* * If the stop cmd also timed out, the card is probably * not present, so abort. Other errors are bad news too. */ if (err) return ERR_ABORT; if (stop_status & R1_CARD_ECC_FAILED) *ecc_err = 1; if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) if (stop_status & R1_ERROR) { pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", req->rq_disk->disk_name, __func__, stop_status); *gen_err = 1; } } /* Check for set block count errors */ if (brq->sbc.error) return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error, prev_cmd_status_valid, status); /* Check for r/w command errors */ if (brq->cmd.error) return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, prev_cmd_status_valid, status); /* Data errors */ if (!brq->stop.error) return ERR_CONTINUE; /* Now for stop errors. These aren't fatal to the transfer. */ pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", req->rq_disk->disk_name, brq->stop.error, brq->cmd.resp[0], status); /* * Substitute in our own stop status as this will give the error * state which happened during the execution of the r/w command. */ if (stop_status) { brq->stop.resp[0] = stop_status; brq->stop.error = 0; } return ERR_CONTINUE; } static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, int type) { int err; if (md->reset_done & type) return -EEXIST; md->reset_done |= type; err = mmc_hw_reset(host); /* Ensure we switch back to the correct partition */ if (err != -EOPNOTSUPP) { struct mmc_blk_data *main_md = mmc_get_drvdata(host->card); int part_err; main_md->part_curr = main_md->part_type; part_err = mmc_blk_part_switch(host->card, md); if (part_err) { /* * We have failed to get back into the correct * partition, so we need to abort the whole request. 
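 * (Returning -ENODEV here makes the MMC_BLK_DATA_ERR path in mmc_blk_issue_rw_rq() abort instead of retrying.)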
*/ return -ENODEV; } } return err; } static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) { md->reset_done &= ~type; } static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; unsigned int from, nr, arg; int err = 0, type = MMC_BLK_DISCARD; if (!mmc_can_erase(card)) { err = -EOPNOTSUPP; goto out; } from = blk_rq_pos(req); nr = blk_rq_sectors(req); if (card->ext_csd.bkops_en) card->bkops_info.sectors_changed += blk_rq_sectors(req); if (mmc_can_discard(card)) arg = MMC_DISCARD_ARG; else if (mmc_can_trim(card)) arg = MMC_TRIM_ARG; else arg = MMC_ERASE_ARG; retry: if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, arg == MMC_TRIM_ARG ? INAND_CMD38_ARG_TRIM : INAND_CMD38_ARG_ERASE, 0); if (err) goto out; } err = mmc_erase(card, from, nr, arg); out: if (err == -EIO && !mmc_blk_reset(md, card->host, type)) goto retry; if (!err) mmc_blk_reset_success(md, type); blk_end_request(req, err, blk_rq_bytes(req)); return err ? 0 : 1; } static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; unsigned int from, nr, arg; int err = 0, type = MMC_BLK_SECDISCARD; if (!(mmc_can_secure_erase_trim(card))) { err = -EOPNOTSUPP; goto out; } from = blk_rq_pos(req); nr = blk_rq_sectors(req); if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) arg = MMC_SECURE_TRIM1_ARG; else arg = MMC_SECURE_ERASE_ARG; retry: if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, arg == MMC_SECURE_TRIM1_ARG ? INAND_CMD38_ARG_SECTRIM1 : INAND_CMD38_ARG_SECERASE, 0); if (err) goto out_retry; } err = mmc_erase(card, from, nr, arg); if (err == -EIO) goto out_retry; if (err) goto out; if (arg == MMC_SECURE_TRIM1_ARG) { if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, INAND_CMD38_ARG_SECTRIM2, 0); if (err) goto out_retry; } err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); if (err == -EIO) goto out_retry; if (err) goto out; } out_retry: if (err && !mmc_blk_reset(md, card->host, type)) goto retry; if (!err) mmc_blk_reset_success(md, type); out: blk_end_request(req, err, blk_rq_bytes(req)); return err ? 0 : 1; } static int mmc_blk_issue_sanitize_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; int err = 0; BUG_ON(!card); BUG_ON(!card->host); if (!(mmc_can_sanitize(card) && (card->host->caps2 & MMC_CAP2_SANITIZE))) { pr_warning("%s: %s - SANITIZE is not supported\n", mmc_hostname(card->host), __func__); err = -EOPNOTSUPP; goto out; } pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", mmc_hostname(card->host), __func__); err = mmc_switch_ignore_timeout(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START, 1, MMC_SANITIZE_REQ_TIMEOUT); if (err) pr_err("%s: %s - mmc_switch() with " "EXT_CSD_SANITIZE_START failed. err=%d\n", mmc_hostname(card->host), __func__, err); pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), __func__); out: blk_end_request(req, err, blk_rq_bytes(req)); return err ? 
0 : 1; } static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; int ret = 0; ret = mmc_flush_cache(card); if (ret) ret = -EIO; blk_end_request_all(req, ret); return ret ? 0 : 1; } /* * Reformat current write as a reliable write, supporting * both legacy and the enhanced reliable write MMC cards. * In each transfer we'll handle only as much as a single * reliable write can handle, thus finish the request in * partial completions. */ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, struct mmc_card *card, struct request *req) { if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { /* Legacy mode imposes restrictions on transfers. */ if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors)) brq->data.blocks = 1; if (brq->data.blocks > card->ext_csd.rel_sectors) brq->data.blocks = card->ext_csd.rel_sectors; else if (brq->data.blocks < card->ext_csd.rel_sectors) brq->data.blocks = 1; } } #define CMD_ERRORS \ (R1_OUT_OF_RANGE | /* Command argument out of range */ \ R1_ADDRESS_ERROR | /* Misaligned address */ \ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ R1_WP_VIOLATION | /* Tried to write to protected block */ \ R1_CC_ERROR | /* Card controller error */ \ R1_ERROR) /* General/unknown error */ static int mmc_blk_err_check(struct mmc_card *card, struct mmc_async_req *areq) { struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, mmc_active); struct mmc_blk_request *brq = &mq_mrq->brq; struct request *req = mq_mrq->req; int ecc_err = 0, gen_err = 0; /* * sbc.error indicates a problem with the set block count * command. No data will have been transferred. * * cmd.error indicates a problem with the r/w command. No * data will have been transferred. * * stop.error indicates a problem with the stop command. Data * may have been transferred, or may still be transferring. */ if (brq->sbc.error || brq->cmd.error || brq->stop.error || brq->data.error) { switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { case ERR_RETRY: return MMC_BLK_RETRY; case ERR_ABORT: return MMC_BLK_ABORT; case ERR_NOMEDIUM: return MMC_BLK_NOMEDIUM; case ERR_CONTINUE: break; } } /* * Check for errors relating to the execution of the * initial command - such as address errors. No data * has been transferred. */ if (brq->cmd.resp[0] & CMD_ERRORS) { pr_err("%s: r/w command failed, status = %#x\n", req->rq_disk->disk_name, brq->cmd.resp[0]); return MMC_BLK_ABORT; } /* * Everything else is either success, or a data error of some * kind. If it was a write, we may have transitioned to * program mode, which we have to wait for it to complete. */ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { u32 status; unsigned long timeout; /* Check stop command response */ if (brq->stop.resp[0] & R1_ERROR) { pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", req->rq_disk->disk_name, __func__, brq->stop.resp[0]); gen_err = 1; } timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS); do { int err = get_card_status(card, &status, 5); if (err) { pr_err("%s: error %d requesting status\n", req->rq_disk->disk_name, err); return MMC_BLK_CMD_ERR; } if (status & R1_ERROR) { pr_err("%s: %s: general error sending status command, card status %#x\n", req->rq_disk->disk_name, __func__, status); gen_err = 1; } /* Timeout if the device never becomes ready for data * and never leaves the program state. 
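 * (The do/while loop below polls CMD13 via get_card_status() until R1_READY_FOR_DATA is set and the card has left R1_STATE_PRG.)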
*/ if (time_after(jiffies, timeout)) { pr_err("%s: Card stuck in programming state!"\ " %s %s\n", mmc_hostname(card->host), req->rq_disk->disk_name, __func__); return MMC_BLK_CMD_ERR; } /* * Some cards mishandle the status bits, * so make sure to check both the busy * indication and the card state. */ } while (!(status & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(status) == R1_STATE_PRG)); } /* if a general error occurs, retry the write operation. */ if (gen_err) { pr_warn("%s: retrying write for general error\n", req->rq_disk->disk_name); return MMC_BLK_RETRY; } if (brq->data.error) { pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", req->rq_disk->disk_name, brq->data.error, (unsigned)blk_rq_pos(req), (unsigned)blk_rq_sectors(req), brq->cmd.resp[0], brq->stop.resp[0]); if (rq_data_dir(req) == READ) { if (ecc_err) return MMC_BLK_ECC_ERR; return MMC_BLK_DATA_ERR; } else { return MMC_BLK_CMD_ERR; } } if (!brq->data.bytes_xfered) return MMC_BLK_RETRY; if (mq_mrq->packed_cmd != MMC_PACKED_NONE) { if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) return MMC_BLK_PARTIAL; else return MMC_BLK_SUCCESS; } if (blk_rq_bytes(req) != brq->data.bytes_xfered) return MMC_BLK_PARTIAL; return MMC_BLK_SUCCESS; } /* * mmc_blk_reinsert_req() - re-insert request back to the scheduler * @areq: request to re-insert. * * Request may be packed or single. When reinsertion fails, the request will be * requeued to the dispatch queue. */ static void mmc_blk_reinsert_req(struct mmc_async_req *areq) { struct request *prq; int ret = 0; struct mmc_queue_req *mq_rq; struct request_queue *q; mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); q = mq_rq->req->q; if (mq_rq->packed_cmd != MMC_PACKED_NONE) { while (!list_empty(&mq_rq->packed_list)) { /* return requests in reverse order */ prq = list_entry_rq(mq_rq->packed_list.prev); list_del_init(&prq->queuelist); spin_lock_irq(q->queue_lock); ret = blk_reinsert_request(q, prq); if (ret) { blk_requeue_request(q, prq); spin_unlock_irq(q->queue_lock); goto reinsert_error; } spin_unlock_irq(q->queue_lock); } } else { spin_lock_irq(q->queue_lock); ret = blk_reinsert_request(q, mq_rq->req); if (ret) blk_requeue_request(q, mq_rq->req); spin_unlock_irq(q->queue_lock); } return; reinsert_error: pr_err("%s: blk_reinsert_request() failed (%d)", mq_rq->req->rq_disk->disk_name, ret); /* * -EIO will be reported for this request and the rest of packed_list. * An urgent request will proceed anyway; it is the upper layer's * responsibility to re-send the failed requests. */ while (!list_empty(&mq_rq->packed_list)) { prq = list_entry_rq(mq_rq->packed_list.next); list_del_init(&prq->queuelist); spin_lock_irq(q->queue_lock); blk_requeue_request(q, prq); spin_unlock_irq(q->queue_lock); } } /* * mmc_blk_update_interrupted_req() - update the stopped request's state * @card: the MMC card associated with the request. * @areq: interrupted async request. * * Get the stopped request's state from the card and update the successfully * done part of the request by setting packed_fail_idx. The packed_fail_idx is * the index of the first uncompleted request in the packed request list; for a * non-packed request packed_fail_idx remains unchanged. 
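 * The number of correctly programmed sectors is read back from the card's EXT_CSD (EXT_CSD_CORRECTLY_PRG_SECTORS_NUM) and walked against the packed list to find the first failed request.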
* * Returns: MMC_BLK_SUCCESS for success, MMC_BLK_ABORT otherwise */ static int mmc_blk_update_interrupted_req(struct mmc_card *card, struct mmc_async_req *areq) { int ret = MMC_BLK_SUCCESS; u8 *ext_csd; int correctly_done; struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); struct request *prq; u8 req_index = 0; if (mq_rq->packed_cmd == MMC_PACKED_NONE) return MMC_BLK_SUCCESS; ext_csd = kmalloc(512, GFP_KERNEL); if (!ext_csd) return MMC_BLK_ABORT; /* get the number of correctly programmed sectors from the card */ ret = mmc_send_ext_csd(card, ext_csd); if (ret) { pr_err("%s: error %d reading ext_csd\n", mmc_hostname(card->host), ret); ret = MMC_BLK_ABORT; goto exit; } correctly_done = card->ext_csd.data_sector_size * (ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 0] << 0 | ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 1] << 8 | ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 2] << 16 | ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 3] << 24); list_for_each_entry(prq, &mq_rq->packed_list, queuelist) { if ((correctly_done - (int)blk_rq_bytes(prq)) < 0) { /* prq was not successful */ mq_rq->packed_fail_idx = req_index; break; } correctly_done -= blk_rq_bytes(prq); req_index++; } exit: kfree(ext_csd); return ret; } static int mmc_blk_packed_err_check(struct mmc_card *card, struct mmc_async_req *areq) { struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); struct request *req = mq_rq->req; int err, check, status; u8 ext_csd[512]; mq_rq->packed_retries--; check = mmc_blk_err_check(card, areq); err = get_card_status(card, &status, 0); if (err) { pr_err("%s: error %d sending status command\n", req->rq_disk->disk_name, err); return MMC_BLK_ABORT; } if (status & R1_EXCEPTION_EVENT) { err = mmc_send_ext_csd(card, ext_csd); if (err) { pr_err("%s: error %d sending ext_csd\n", req->rq_disk->disk_name, err); return MMC_BLK_ABORT; } if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & EXT_CSD_PACKED_FAILURE) && (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & EXT_CSD_PACKED_GENERIC_ERROR)) { if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & EXT_CSD_PACKED_INDEXED_ERROR) { mq_rq->packed_fail_idx = ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1; return MMC_BLK_PARTIAL; } } } return check; } static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, int disable_multi, struct mmc_queue *mq) { u32 readcmd, writecmd; struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mqrq->req; struct mmc_blk_data *md = mq->data; bool do_data_tag; /* * Reliable writes are used to implement Forced Unit Access and * REQ_META accesses, and are supported only on MMCs. * * XXX: this really needs a good explanation of why REQ_META * is treated specially. */ bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || (req->cmd_flags & REQ_META)) && (rq_data_dir(req) == WRITE) && (md->flags & MMC_BLK_REL_WR); memset(brq, 0, sizeof(struct mmc_blk_request)); brq->mrq.cmd = &brq->cmd; brq->mrq.data = &brq->data; brq->cmd.arg = blk_rq_pos(req); if (!mmc_card_blockaddr(card)) brq->cmd.arg <<= 9; brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; brq->data.blksz = 512; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; brq->data.blocks = blk_rq_sectors(req); brq->data.fault_injected = false; /* * The block layer doesn't support all sector count * restrictions, so we need to be prepared for too big * requests. 
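 * (The clamps below cap brq->data.blocks at the host's max_blk_count and drop to single-block transfers after an error or on hosts with MMC_CAP2_NO_MULTI_READ.)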
*/ if (brq->data.blocks > card->host->max_blk_count) brq->data.blocks = card->host->max_blk_count; if (brq->data.blocks > 1) { /* * After a read error, we redo the request one sector * at a time in order to accurately determine which * sectors can be read successfully. */ if (disable_multi) brq->data.blocks = 1; /* Some controllers can't do multiblock reads due to hw bugs */ if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ && rq_data_dir(req) == READ) brq->data.blocks = 1; } if (brq->data.blocks > 1 || do_rel_wr) { /* SPI multiblock writes terminate using a special * token, not a STOP_TRANSMISSION request. */ if (!mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) brq->mrq.stop = &brq->stop; readcmd = MMC_READ_MULTIPLE_BLOCK; writecmd = MMC_WRITE_MULTIPLE_BLOCK; } else { brq->mrq.stop = NULL; readcmd = MMC_READ_SINGLE_BLOCK; writecmd = MMC_WRITE_BLOCK; } if (rq_data_dir(req) == READ) { brq->cmd.opcode = readcmd; brq->data.flags |= MMC_DATA_READ; } else { brq->cmd.opcode = writecmd; brq->data.flags |= MMC_DATA_WRITE; } if (do_rel_wr) mmc_apply_rel_rw(brq, card, req); /* * Data tag is used only during writing meta data to speed * up write and any subsequent read of this meta data */ do_data_tag = (card->ext_csd.data_tag_unit_size) && (req->cmd_flags & REQ_META) && (rq_data_dir(req) == WRITE) && ((brq->data.blocks * brq->data.blksz) >= card->ext_csd.data_tag_unit_size); /* * Pre-defined multi-block transfers are preferable to * open ended-ones (and necessary for reliable writes). * However, it is not sufficient to just send CMD23, * and avoid the final CMD12, as on an error condition * CMD12 (stop) needs to be sent anyway. This, coupled * with Auto-CMD23 enhancements provided by some * hosts, means that the complexity of dealing * with this is best left to the host. If CMD23 is * supported by card and host, we'll fill sbc in and let * the host deal with handling it correctly. This means * that for hosts that don't expose MMC_CAP_CMD23, no * change of behavior will be observed. * * N.B: Some MMC cards experience perf degradation. * We'll avoid using CMD23-bounded multiblock writes for * these, while retaining features like reliable writes. */ if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || do_data_tag)) { brq->sbc.opcode = MMC_SET_BLOCK_COUNT; brq->sbc.arg = brq->data.blocks | (do_rel_wr ? (1 << 31) : 0) | (do_data_tag ? (1 << 29) : 0); brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; brq->mrq.sbc = &brq->sbc; } mmc_set_data_timeout(&brq->data, card); brq->data.sg = mqrq->sg; brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); /* * Adjust the sg list so it is the same size as the * request. */ if (brq->data.blocks != blk_rq_sectors(req)) { int i, data_size = brq->data.blocks << 9; struct scatterlist *sg; for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { data_size -= sg->length; if (data_size <= 0) { sg->length += data_size; i++; break; } } brq->data.sg_len = i; } mqrq->mmc_active.mrq = &brq->mrq; mqrq->mmc_active.cmd_flags = req->cmd_flags; mqrq->mmc_active.err_check = mmc_blk_err_check; mqrq->mmc_active.reinsert_req = mmc_blk_reinsert_req; mqrq->mmc_active.update_interrupted_req = mmc_blk_update_interrupted_req; mmc_queue_bounce_pre(mqrq); } /** * mmc_blk_disable_wr_packing() - disables packing mode * @mq: MMC queue. 
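 * Clears wr_packing_enabled and resets the potential packed write counter.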
* */ void mmc_blk_disable_wr_packing(struct mmc_queue *mq) { if (mq) { mq->wr_packing_enabled = false; mq->num_of_potential_packed_wr_reqs = 0; } } EXPORT_SYMBOL(mmc_blk_disable_wr_packing); static int get_packed_trigger(int potential, struct mmc_card *card, struct request *req, int curr_trigger) { static int num_mean_elements = 1; static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN; unsigned int trigger = curr_trigger; unsigned int pckd_trgr_upper_bound = card->ext_csd.max_packed_writes; /* scale down the upper bound to 75% */ pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4; /* * since the most common calls for this function are with small * potential write values and since we don't want these calls to affect * the packed trigger, set a lower bound and ignore calls with * potential lower than that bound */ if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND) return trigger; /* * this is to prevent integer overflow in the following calculation: * once every PACKED_TRIGGER_MAX_ELEMENTS reset the algorithm */ if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) { num_mean_elements = 1; mean_potential = PCKD_TRGR_INIT_MEAN_POTEN; } /* * get next mean value based on previous mean value and current * potential packed writes. Calculation is as follows: * mean_pot[i+1] = * ((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1) */ mean_potential *= num_mean_elements; /* * add num_mean_elements so that the division of two integers doesn't * lower mean_potential too much */ if (potential > mean_potential) mean_potential += num_mean_elements; mean_potential += potential; /* this is for gaining more precision when dividing two integers */ mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER; /* this completes the mean calculation */ mean_potential /= ++num_mean_elements; mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER; /* * if current potential packed writes is greater than the mean potential * then the heuristic is that the following workload will contain many * write requests, therefore we lower the packed trigger. In the * opposite case we want to increase the trigger in order to get less * packing events. */ if (potential >= mean_potential) trigger = (trigger <= PCKD_TRGR_LOWER_BOUND) ? PCKD_TRGR_LOWER_BOUND : trigger - 1; else trigger = (trigger >= pckd_trgr_upper_bound) ? pckd_trgr_upper_bound : trigger + 1; /* * an urgent read request indicates a packed list being interrupted * by this read, therefore we aim for less packing, hence the trigger * gets increased */ if (req && (req->cmd_flags & REQ_URGENT) && (rq_data_dir(req) == READ)) trigger += PCKD_TRGR_URGENT_PENALTY; return trigger; } static void mmc_blk_write_packing_control(struct mmc_queue *mq, struct request *req) { struct mmc_host *host = mq->card->host; int data_dir; if (!(host->caps2 & MMC_CAP2_PACKED_WR)) return; /* * In case the packing control is not supported by the host, it should * not have an effect on the write packing. 
Therefore we have to enable * the write packing unconditionally. */ if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) { mq->wr_packing_enabled = true; return; } if (!req || (req && (req->cmd_flags & REQ_FLUSH))) { if (mq->num_of_potential_packed_wr_reqs > mq->num_wr_reqs_to_start_packing) mq->wr_packing_enabled = true; mq->num_wr_reqs_to_start_packing = get_packed_trigger(mq->num_of_potential_packed_wr_reqs, mq->card, req, mq->num_wr_reqs_to_start_packing); mq->num_of_potential_packed_wr_reqs = 0; return; } data_dir = rq_data_dir(req); if (data_dir == READ) { mmc_blk_disable_wr_packing(mq); mq->num_wr_reqs_to_start_packing = get_packed_trigger(mq->num_of_potential_packed_wr_reqs, mq->card, req, mq->num_wr_reqs_to_start_packing); mq->num_of_potential_packed_wr_reqs = 0; mq->wr_packing_enabled = false; return; } else if (data_dir == WRITE) { mq->num_of_potential_packed_wr_reqs++; } if (mq->num_of_potential_packed_wr_reqs > mq->num_wr_reqs_to_start_packing) mq->wr_packing_enabled = true; } struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card) { if (!card) return NULL; return &card->wr_pack_stats; } EXPORT_SYMBOL(mmc_blk_get_packed_statistics); void mmc_blk_init_packed_statistics(struct mmc_card *card) { int max_num_of_packed_reqs = 0; if (!card || !card->wr_pack_stats.packing_events) return; max_num_of_packed_reqs = card->ext_csd.max_packed_writes; spin_lock(&card->wr_pack_stats.lock); memset(card->wr_pack_stats.packing_events, 0, (max_num_of_packed_reqs + 1) * sizeof(*card->wr_pack_stats.packing_events)); memset(&card->wr_pack_stats.pack_stop_reason, 0, sizeof(card->wr_pack_stats.pack_stop_reason)); card->wr_pack_stats.enabled = true; spin_unlock(&card->wr_pack_stats.lock); } EXPORT_SYMBOL(mmc_blk_init_packed_statistics); void print_mmc_packing_stats(struct mmc_card *card) { int i; int max_num_of_packed_reqs = 0; if ((!card) || (!card->wr_pack_stats.packing_events)) return; max_num_of_packed_reqs = card->ext_csd.max_packed_writes; spin_lock(&card->wr_pack_stats.lock); pr_info("%s: write packing statistics:\n", mmc_hostname(card->host)); for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) { if (card->wr_pack_stats.packing_events[i] != 0) pr_info("%s: Packed %d reqs - %d times\n", mmc_hostname(card->host), i, card->wr_pack_stats.packing_events[i]); } pr_info("%s: stopped packing due to the following reasons:\n", mmc_hostname(card->host)); if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) pr_info("%s: %d times: exceeding max num of segments\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]); if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]) pr_info("%s: %d times: exceeding the max num of sectors\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]); if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]) pr_info("%s: %d times: wrong data direction\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]); if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]) pr_info("%s: %d times: flush or discard\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]); if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]) pr_info("%s: %d times: empty queue\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]); if (card->wr_pack_stats.pack_stop_reason[REL_WRITE]) pr_info("%s: %d times: rel write\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[REL_WRITE]); if (card->wr_pack_stats.pack_stop_reason[THRESHOLD]) pr_info("%s: 
%d times: Threshold\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[THRESHOLD]); spin_unlock(&card->wr_pack_stats.lock); } EXPORT_SYMBOL(print_mmc_packing_stats); static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) { struct request_queue *q = mq->queue; struct mmc_card *card = mq->card; struct request *cur = req, *next = NULL; struct mmc_blk_data *md = mq->data; bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN; unsigned int req_sectors = 0, phys_segments = 0; unsigned int max_blk_count, max_phys_segs; u8 put_back = 0; u8 max_packed_rw = 0; u8 reqs = 0; struct mmc_wr_pack_stats *stats = &card->wr_pack_stats; mmc_blk_clear_packed(mq->mqrq_cur); if (!(md->flags & MMC_BLK_CMD23) || !card->ext_csd.packed_event_en) goto no_packed; if (!mq->wr_packing_enabled) goto no_packed; if ((rq_data_dir(cur) == WRITE) && (card->host->caps2 & MMC_CAP2_PACKED_WR)) max_packed_rw = card->ext_csd.max_packed_writes; if (max_packed_rw == 0) goto no_packed; if (mmc_req_rel_wr(cur) && (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) goto no_packed; if (mmc_large_sec(card) && !IS_ALIGNED(blk_rq_sectors(cur), 8)) goto no_packed; max_blk_count = min(card->host->max_blk_count, card->host->max_req_size >> 9); if (unlikely(max_blk_count > 0xffff)) max_blk_count = 0xffff; max_phys_segs = queue_max_segments(q); req_sectors += blk_rq_sectors(cur); phys_segments += cur->nr_phys_segments; if (rq_data_dir(cur) == WRITE) { req_sectors++; phys_segments++; } spin_lock(&stats->lock); while (reqs < max_packed_rw - 1) { /* Stop packing while we are still inside the adaptive nopacked_period */ if ((card->host->caps2 & MMC_CAP2_ADAPT_PACKED) && time_is_after_jiffies(mq->nopacked_period)) break; spin_lock_irq(q->queue_lock); next = blk_fetch_request(q); spin_unlock_irq(q->queue_lock); if (!next) { MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE); break; } if (mmc_large_sec(card) && !IS_ALIGNED(blk_rq_sectors(next), 8)) { MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN); put_back = 1; break; } if (next->cmd_flags & REQ_DISCARD || next->cmd_flags & REQ_FLUSH) { MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD); put_back = 1; break; } if (rq_data_dir(cur) != rq_data_dir(next)) { MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR); put_back = 1; break; } if (mmc_req_rel_wr(next) && (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) { MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE); put_back = 1; break; } req_sectors += blk_rq_sectors(next); if (req_sectors > max_blk_count) { if (stats->enabled) stats->pack_stop_reason[EXCEEDS_SECTORS]++; put_back = 1; break; } phys_segments += next->nr_phys_segments; if (phys_segments > max_phys_segs) { MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS); put_back = 1; break; } if (mq->no_pack_for_random) { if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) != blk_rq_pos(next)) { MMC_BLK_UPDATE_STOP_REASON(stats, RANDOM); put_back = 1; break; } } if (rq_data_dir(next) == WRITE) { mq->num_of_potential_packed_wr_reqs++; if (card->ext_csd.bkops_en) card->bkops_info.sectors_changed += blk_rq_sectors(next); } list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list); cur = next; reqs++; } if (put_back) { spin_lock_irq(q->queue_lock); blk_requeue_request(q, next); spin_unlock_irq(q->queue_lock); } if (stats->enabled) { if (reqs + 1 <= card->ext_csd.max_packed_writes) stats->packing_events[reqs + 1]++; if (reqs + 1 == max_packed_rw) MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD); } spin_unlock(&stats->lock); if (reqs > 0) { list_add(&req->queuelist, &mq->mqrq_cur->packed_list); 
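/* reqs so far counts only the chained requests; ++reqs below also accounts for the leading request that heads the packed list */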
mq->mqrq_cur->packed_num = ++reqs; mq->mqrq_cur->packed_retries = reqs; return reqs; } no_packed: mmc_blk_clear_packed(mq->mqrq_cur); return 0; } static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, struct mmc_queue *mq) { struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mqrq->req; struct request *prq; struct mmc_blk_data *md = mq->data; bool do_rel_wr, do_data_tag; u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr; u8 i = 1; mqrq->packed_cmd = MMC_PACKED_WRITE; mqrq->packed_blocks = 0; mqrq->packed_fail_idx = MMC_PACKED_N_IDX; memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr)); packed_cmd_hdr[0] = (mqrq->packed_num << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER; /* * Argument for each entry of packed group */ list_for_each_entry(prq, &mqrq->packed_list, queuelist) { do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); do_data_tag = (card->ext_csd.data_tag_unit_size) && (prq->cmd_flags & REQ_META) && (rq_data_dir(prq) == WRITE) && ((brq->data.blocks * brq->data.blksz) >= card->ext_csd.data_tag_unit_size); /* Argument of CMD23 */ packed_cmd_hdr[(i * 2)] = (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | blk_rq_sectors(prq); /* Argument of CMD18 or CMD25 */ packed_cmd_hdr[((i * 2)) + 1] = mmc_card_blockaddr(card) ? blk_rq_pos(prq) : blk_rq_pos(prq) << 9; mqrq->packed_blocks += blk_rq_sectors(prq); i++; } memset(brq, 0, sizeof(struct mmc_blk_request)); brq->mrq.cmd = &brq->cmd; brq->mrq.data = &brq->data; brq->mrq.sbc = &brq->sbc; brq->mrq.stop = &brq->stop; brq->sbc.opcode = MMC_SET_BLOCK_COUNT; brq->sbc.arg = MMC_CMD23_ARG_PACKED | (mqrq->packed_blocks + 1); brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; brq->cmd.arg = blk_rq_pos(req); if (!mmc_card_blockaddr(card)) brq->cmd.arg <<= 9; brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; brq->data.blksz = 512; brq->data.blocks = mqrq->packed_blocks + 1; brq->data.flags |= MMC_DATA_WRITE; brq->data.fault_injected = false; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; mmc_set_data_timeout(&brq->data, card); brq->data.sg = mqrq->sg; brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); mqrq->mmc_active.mrq = &brq->mrq; mqrq->mmc_active.cmd_flags = req->cmd_flags; /* * This is intended for packed commands tests usage - in case these * functions are not in use the respective pointers are NULL */ if (mq->err_check_fn) mqrq->mmc_active.err_check = mq->err_check_fn; else mqrq->mmc_active.err_check = mmc_blk_packed_err_check; if (mq->packed_test_fn) mq->packed_test_fn(mq->queue, mqrq); mqrq->mmc_active.reinsert_req = mmc_blk_reinsert_req; mqrq->mmc_active.update_interrupted_req = mmc_blk_update_interrupted_req; mmc_queue_bounce_pre(mqrq); } static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, struct mmc_blk_request *brq, struct request *req, int ret) { struct mmc_queue_req *mq_rq; mq_rq = container_of(brq, struct mmc_queue_req, brq); /* * If this is an SD card and we're writing, we can first * mark the known good sectors as ok. * * If the card is not SD, we can still ok written sectors * as reported by the controller (which might be less than * the real number of written sectors, but never more). 
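 * (For SD the count is queried with ACMD22, SEND_NUM_WR_BLKS, via mmc_sd_num_wr_blocks() above; fault-injected transfers fall back to bytes_xfered.)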
*/ if (mmc_card_sd(card)) { u32 blocks; if (!brq->data.fault_injected) { blocks = mmc_sd_num_wr_blocks(card); if (blocks != (u32)-1) ret = blk_end_request(req, 0, blocks << 9); } else ret = blk_end_request(req, 0, brq->data.bytes_xfered); } else { if (mq_rq->packed_cmd == MMC_PACKED_NONE) ret = blk_end_request(req, 0, brq->data.bytes_xfered); } return ret; } static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq) { struct request *prq; int idx = mq_rq->packed_fail_idx, i = 0; int ret = 0; while (!list_empty(&mq_rq->packed_list)) { prq = list_entry_rq(mq_rq->packed_list.next); if (idx == i) { /* retry from error index */ mq_rq->packed_num -= idx; mq_rq->req = prq; ret = 1; if (mq_rq->packed_num == MMC_PACKED_N_SINGLE) { list_del_init(&prq->queuelist); mmc_blk_clear_packed(mq_rq); } return ret; } list_del_init(&prq->queuelist); blk_end_request(prq, 0, blk_rq_bytes(prq)); i++; } mmc_blk_clear_packed(mq_rq); return ret; } static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq) { struct request *prq; while (!list_empty(&mq_rq->packed_list)) { prq = list_entry_rq(mq_rq->packed_list.next); list_del_init(&prq->queuelist); blk_end_request(prq, -EIO, blk_rq_bytes(prq)); } mmc_blk_clear_packed(mq_rq); } static void mmc_blk_revert_packed_req(struct mmc_queue *mq, struct mmc_queue_req *mq_rq) { struct request *prq; struct request_queue *q = mq->queue; while (!list_empty(&mq_rq->packed_list)) { prq = list_entry_rq(mq_rq->packed_list.prev); if (prq->queuelist.prev != &mq_rq->packed_list) { list_del_init(&prq->queuelist); spin_lock_irq(q->queue_lock); blk_requeue_request(mq->queue, prq); spin_unlock_irq(q->queue_lock); } else { list_del_init(&prq->queuelist); } } mmc_blk_clear_packed(mq_rq); } static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; struct mmc_blk_request *brq = &mq->mqrq_cur->brq; int ret = 1, disable_multi = 0, retry = 0, type; enum mmc_blk_status status; struct mmc_queue_req *mq_rq; struct request *req; struct mmc_async_req *areq; const u8 packed_num = 2; u8 reqs = 0; if (!rqc && !mq->mqrq_prev->req) return 0; if (rqc) { if ((card->ext_csd.bkops_en) && (rq_data_dir(rqc) == WRITE)) card->bkops_info.sectors_changed += blk_rq_sectors(rqc); reqs = mmc_blk_prep_packed_list(mq, rqc); } do { if (rqc) { if (reqs >= packed_num) mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, card, mq); else mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); areq = &mq->mqrq_cur->mmc_active; } else areq = NULL; areq = mmc_start_req(card->host, areq, (int *) &status); if (!areq) { if (status == MMC_BLK_NEW_REQUEST) mq->flags |= MMC_QUEUE_NEW_REQUEST; return 0; } mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); brq = &mq_rq->brq; req = mq_rq->req; type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; mmc_queue_bounce_post(mq_rq); switch (status) { case MMC_BLK_URGENT: if (mq_rq->packed_cmd != MMC_PACKED_NONE) { /* complete successfully transmitted part */ if (mmc_blk_end_packed_req(mq_rq)) /* process for not transmitted part */ mmc_blk_reinsert_req(areq); } else { mmc_blk_reinsert_req(areq); } mq->flags |= MMC_QUEUE_URGENT_REQUEST; ret = 0; break; case MMC_BLK_URGENT_DONE: case MMC_BLK_SUCCESS: case MMC_BLK_PARTIAL: /* * A block was successfully transferred. 
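 * (blk_end_request() returns non-zero while the request still has bytes outstanding; the bottom of the retry loop then re-prepares and resends the remainder.)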
*/ mmc_blk_reset_success(md, type); if (mq_rq->packed_cmd != MMC_PACKED_NONE) { ret = mmc_blk_end_packed_req(mq_rq); break; } else { ret = blk_end_request(req, 0, brq->data.bytes_xfered); } /* * If the blk_end_request function returns non-zero even * though all data has been transferred and no errors * were returned by the host controller, it's a bug. */ if (status == MMC_BLK_SUCCESS && ret) { pr_err("%s BUG rq_tot %d d_xfer %d\n", __func__, blk_rq_bytes(req), brq->data.bytes_xfered); rqc = NULL; goto cmd_abort; } break; case MMC_BLK_CMD_ERR: ret = mmc_blk_cmd_err(md, card, brq, req, ret); if (!mmc_blk_reset(md, card->host, type)) break; goto cmd_abort; case MMC_BLK_RETRY: if (retry++ < 5) break; /* Fall through */ case MMC_BLK_ABORT: if (!mmc_blk_reset(md, card->host, type)) break; goto cmd_abort; case MMC_BLK_DATA_ERR: { int err; err = mmc_blk_reset(md, card->host, type); if (!err) break; if (err == -ENODEV || mq_rq->packed_cmd != MMC_PACKED_NONE) goto cmd_abort; /* Fall through */ } case MMC_BLK_ECC_ERR: if (brq->data.blocks > 1) { /* Redo read one sector at a time */ pr_warning("%s: retrying using single block read\n", req->rq_disk->disk_name); disable_multi = 1; break; } /* * After an error, we redo I/O one sector at a * time, so we only reach here after trying to * read a single sector. */ ret = blk_end_request(req, -EIO, brq->data.blksz); if (!ret) goto start_new_req; break; case MMC_BLK_NOMEDIUM: goto cmd_abort; default: pr_err("%s:%s: Unhandled return value (%d)", req->rq_disk->disk_name, __func__, status); goto cmd_abort; } if (ret) { if (mq_rq->packed_cmd == MMC_PACKED_NONE) { /* * In case of an incomplete request * prepare it again and resend. */ mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); mmc_start_req(card->host, &mq_rq->mmc_active, NULL); } else { if (!mq_rq->packed_retries) goto cmd_abort; mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq); mmc_start_req(card->host, &mq_rq->mmc_active, NULL); } } } while (ret); return 1; cmd_abort: if (mq_rq->packed_cmd == MMC_PACKED_NONE) { if (mmc_card_removed(card)) req->cmd_flags |= REQ_QUIET; while (ret) ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); } else { mmc_blk_abort_packed_req(mq_rq); } start_new_req: if (rqc) { /* * If the current request is packed, it needs to be put back. 
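 * (mmc_blk_revert_packed_req() walks the packed list and requeues each request back to the block layer before the single-request prep below.)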
*/ if (mq->mqrq_cur->packed_cmd != MMC_PACKED_NONE) mmc_blk_revert_packed_req(mq, mq->mqrq_cur); mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); } return 0; } static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) { int ret; struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME if (mmc_bus_needs_resume(card->host)) { mmc_resume_bus(card->host); mmc_blk_set_blksize(md, card); } #endif if (req && !mq->mqrq_prev->req) { /* claim host only for the first request */ mmc_claim_host(card->host); if (card->ext_csd.bkops_en) mmc_stop_bkops(card); } ret = mmc_blk_part_switch(card, md); if (ret) { if (req) { blk_end_request_all(req, -EIO); } ret = 0; goto out; } mmc_blk_write_packing_control(mq, req); mq->flags &= ~MMC_QUEUE_NEW_REQUEST; mq->flags &= ~MMC_QUEUE_URGENT_REQUEST; if (req && req->cmd_flags & REQ_SANITIZE) { /* complete ongoing async transfer before issuing sanitize */ if (card->host && card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); ret = mmc_blk_issue_sanitize_rq(mq, req); } else if (req && req->cmd_flags & REQ_DISCARD) { /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); if (req->cmd_flags & REQ_SECURE) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); } else if (req && req->cmd_flags & REQ_FLUSH) { /* complete ongoing async transfer before issuing flush */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); ret = mmc_blk_issue_flush(mq, req); } else { ret = mmc_blk_issue_rw_rq(mq, req); } out: /* * packet burst is over, when one of the following occurs: * - no more requests and new request notification is not in progress * - urgent notification in progress and current request is not urgent * (all existing requests completed or reinserted to the block layer) */ if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || ((mq->flags & MMC_QUEUE_URGENT_REQUEST) && !(mq->mqrq_cur->req->cmd_flags & REQ_URGENT))) { if (mmc_card_need_bkops(card)) mmc_start_bkops(card, false); /* release host only when there are no more requests */ mmc_release_host(card->host); } return ret; } static inline int mmc_blk_readonly(struct mmc_card *card) { return mmc_card_readonly(card) || !(card->csd.cmdclass & CCC_BLOCK_WRITE); } static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, struct device *parent, sector_t size, bool default_ro, const char *subname, int area_type) { struct mmc_blk_data *md; int devidx, ret; unsigned int percentage = BKOPS_SIZE_PERCENTAGE_TO_QUEUE_DELAYED_WORK; devidx = find_first_zero_bit(dev_use, max_devices); if (devidx >= max_devices) return ERR_PTR(-ENOSPC); __set_bit(devidx, dev_use); md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); if (!md) { ret = -ENOMEM; goto out; } /* * !subname implies we are creating main mmc_blk_data that will be * associated with mmc_card with mmc_set_drvdata. Due to device * partitions, devidx will not coincide with a per-physical card * index anymore so we keep track of a name index. */ if (!subname) { md->name_idx = find_first_zero_bit(name_use, max_devices); __set_bit(md->name_idx, name_use); } else md->name_idx = ((struct mmc_blk_data *) dev_to_disk(parent)->private_data)->name_idx; md->area_type = area_type; /* * Set the read-only status based on the supported commands * and the write protect switch. 
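 * (mmc_blk_readonly() above reports RO when the card is permanently read-only or when CCC_BLOCK_WRITE is missing from the CSD command classes.)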
*/ md->read_only = mmc_blk_readonly(card); md->disk = alloc_disk(perdev_minors); if (md->disk == NULL) { ret = -ENOMEM; goto err_kfree; } spin_lock_init(&md->lock); INIT_LIST_HEAD(&md->part); md->usage = 1; ret = mmc_init_queue(&md->queue, card, &md->lock, subname); if (ret) goto err_putdisk; md->queue.issue_fn = mmc_blk_issue_rq; md->queue.data = md; md->disk->major = MMC_BLOCK_MAJOR; md->disk->first_minor = devidx * perdev_minors; md->disk->fops = &mmc_bdops; md->disk->private_data = md; md->disk->queue = md->queue.queue; md->disk->driverfs_dev = parent; set_disk_ro(md->disk, md->read_only || default_ro); md->disk->flags = GENHD_FL_EXT_DEVT; /* * As discussed on lkml, GENHD_FL_REMOVABLE should: * * - be set for removable media with permanent block devices * - be unset for removable block devices with permanent media * * Since MMC block devices clearly fall under the second * case, we do not set GENHD_FL_REMOVABLE. Userspace * should use the block device creation/destruction hotplug * messages to tell when the card is present. */ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), "mmcblk%d%s", md->name_idx, subname ? subname : ""); blk_queue_logical_block_size(md->queue.queue, 512); set_capacity(md->disk, size); card->bkops_info.size_percentage_to_queue_delayed_work = percentage; card->bkops_info.min_sectors_to_queue_delayed_work = ((unsigned int)size * percentage) / 100; if (mmc_host_cmd23(card->host)) { if (mmc_card_mmc(card) || (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT && mmc_sd_card_uhs(card))) md->flags |= MMC_BLK_CMD23; } if (mmc_card_mmc(card) && md->flags & MMC_BLK_CMD23 && ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || card->ext_csd.rel_sectors)) { md->flags |= MMC_BLK_REL_WR; blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); } return md; err_putdisk: put_disk(md->disk); err_kfree: kfree(md); out: return ERR_PTR(ret); } static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) { sector_t size; struct mmc_blk_data *md; if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { /* * The EXT_CSD sector count is in number of 512 byte * sectors. */ size = card->ext_csd.sectors; } else { /* * The CSD capacity field is in units of read_blkbits. * set_capacity takes units of 512 bytes. */ size = card->csd.capacity << (card->csd.read_blkbits - 9); } md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL, MMC_BLK_DATA_AREA_MAIN); return md; } static int mmc_blk_alloc_part(struct mmc_card *card, struct mmc_blk_data *md, unsigned int part_type, sector_t size, bool default_ro, const char *subname, int area_type) { char cap_str[10]; struct mmc_blk_data *part_md; part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, subname, area_type); if (IS_ERR(part_md)) return PTR_ERR(part_md); part_md->part_type = part_type; list_add(&part_md->part, &md->part); string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s partition %u %s\n", part_md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), part_md->part_type, cap_str); return 0; } /* MMC Physical partitions consist of two boot partitions and * up to four general purpose partitions. * For each partition enabled in EXT_CSD a block device will be allocated * to provide access to the partition. 
*/ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) { int idx, ret = 0; if (!mmc_card_mmc(card)) return 0; for (idx = 0; idx < card->nr_parts; idx++) { if (card->part[idx].size) { ret = mmc_blk_alloc_part(card, md, card->part[idx].part_cfg, card->part[idx].size >> 9, card->part[idx].force_ro, card->part[idx].name, card->part[idx].area_type); if (ret) return ret; } } return ret; } static void mmc_blk_remove_req(struct mmc_blk_data *md) { struct mmc_card *card; if (md) { card = md->queue.card; device_remove_file(disk_to_dev(md->disk), &md->num_wr_reqs_to_start_packing); if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && card->ext_csd.boot_ro_lockable) device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); /* Stop new requests from getting into the queue */ del_gendisk(md->disk); } /* Then flush out any already in there */ mmc_cleanup_queue(&md->queue); mmc_blk_put(md); } } static void mmc_blk_remove_parts(struct mmc_card *card, struct mmc_blk_data *md) { struct list_head *pos, *q; struct mmc_blk_data *part_md; __clear_bit(md->name_idx, name_use); list_for_each_safe(pos, q, &md->part) { part_md = list_entry(pos, struct mmc_blk_data, part); list_del(pos); mmc_blk_remove_req(part_md); } } static int mmc_add_disk(struct mmc_blk_data *md) { int ret; struct mmc_card *card = md->queue.card; add_disk(md->disk); md->force_ro.show = force_ro_show; md->force_ro.store = force_ro_store; sysfs_attr_init(&md->force_ro.attr); md->force_ro.attr.name = "force_ro"; md->force_ro.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); if (ret) goto force_ro_fail; if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && card->ext_csd.boot_ro_lockable) { umode_t mode; if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) mode = S_IRUGO; else mode = S_IRUGO | S_IWUSR; md->power_ro_lock.show = power_ro_lock_show; md->power_ro_lock.store = power_ro_lock_store; sysfs_attr_init(&md->power_ro_lock.attr); md->power_ro_lock.attr.mode = mode; md->power_ro_lock.attr.name = "ro_lock_until_next_power_on"; ret = device_create_file(disk_to_dev(md->disk), &md->power_ro_lock); if (ret) goto power_ro_lock_fail; } md->num_wr_reqs_to_start_packing.show = num_wr_reqs_to_start_packing_show; md->num_wr_reqs_to_start_packing.store = num_wr_reqs_to_start_packing_store; sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr); md->num_wr_reqs_to_start_packing.attr.name = "num_wr_reqs_to_start_packing"; md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(disk_to_dev(md->disk), &md->num_wr_reqs_to_start_packing); if (ret) goto num_wr_reqs_to_start_packing_fail; md->bkops_check_threshold.show = bkops_check_threshold_show; md->bkops_check_threshold.store = bkops_check_threshold_store; sysfs_attr_init(&md->bkops_check_threshold.attr); md->bkops_check_threshold.attr.name = "bkops_check_threshold"; md->bkops_check_threshold.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(disk_to_dev(md->disk), &md->bkops_check_threshold); if (ret) goto bkops_check_threshold_fails; md->no_pack_for_random.show = no_pack_for_random_show; md->no_pack_for_random.store = no_pack_for_random_store; sysfs_attr_init(&md->no_pack_for_random.attr); md->no_pack_for_random.attr.name = "no_pack_for_random"; md->no_pack_for_random.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(disk_to_dev(md->disk), &md->no_pack_for_random); if (ret) goto 
no_pack_for_random_fails; return ret; no_pack_for_random_fails: device_remove_file(disk_to_dev(md->disk), &md->bkops_check_threshold); bkops_check_threshold_fails: device_remove_file(disk_to_dev(md->disk), &md->num_wr_reqs_to_start_packing); num_wr_reqs_to_start_packing_fail: device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); power_ro_lock_fail: device_remove_file(disk_to_dev(md->disk), &md->force_ro); force_ro_fail: del_gendisk(md->disk); return ret; } #define CID_MANFID_SANDISK 0x2 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 #define CID_MANFID_SAMSUNG 0x15 static const struct mmc_fixup blk_fixups[] = { MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), /* * Some MMC cards experience performance degradation with CMD23 * instead of CMD12-bounded multiblock transfers. For now we'll * black list what's bad... * - Certain Toshiba cards. * * N.B. This doesn't affect SD cards. */ MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), /* * Some Micron MMC cards needs longer data read timeout than * indicated in CSD. */ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), /* Some TLC movinand cards needs Sync operation for performance*/ MMC_FIXUP("S5U00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("J5U00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("J5U00B", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("J5U00A", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("L7U00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("N5U00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("K5U00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("K5U00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("K7U00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("M4G1YC", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("M8G1WA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("MAG2WA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), MMC_FIXUP("MBG4WA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_MOVINAND_TLC), /* Some INAND MCP devices advertise incorrect timeout values */ MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_INAND_DATA_TIMEOUT), END_FIXUP }; static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md, *part_md; char cap_str[10]; /* * Check that the card supports the command class(es) we need. 
*/ if (!(card->csd.cmdclass & CCC_BLOCK_READ)) return -ENODEV; md = mmc_blk_alloc(card); if (IS_ERR(md)) return PTR_ERR(md); string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s %s %s\n", md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), cap_str, md->read_only ? "(ro)" : ""); if (mmc_blk_alloc_parts(card, md)) goto out; mmc_set_drvdata(card, md); mmc_fixup_device(card, blk_fixups); #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME mmc_set_bus_resume_policy(card->host, 1); #endif if (mmc_add_disk(md)) goto out; list_for_each_entry(part_md, &md->part, part) { if (mmc_add_disk(part_md)) goto out; } return 0; out: mmc_blk_remove_parts(card, md); mmc_blk_remove_req(md); return 0; } static void mmc_blk_remove(struct mmc_card *card) { struct mmc_blk_data *md = mmc_get_drvdata(card); mmc_blk_remove_parts(card, md); mmc_claim_host(card->host); mmc_blk_part_switch(card, md); mmc_release_host(card->host); mmc_blk_remove_req(md); mmc_set_drvdata(card, NULL); #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME mmc_set_bus_resume_policy(card->host, 0); #endif } #ifdef CONFIG_PM static int mmc_blk_suspend(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); int rc = 0; if (md) { rc = mmc_queue_suspend(&md->queue); if (rc) goto out; list_for_each_entry(part_md, &md->part, part) { rc = mmc_queue_suspend(&part_md->queue); if (rc) goto out_resume; } } goto out; out_resume: mmc_queue_resume(&md->queue); list_for_each_entry(part_md, &md->part, part) { mmc_queue_resume(&part_md->queue); } out: return rc; } static int mmc_blk_resume(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { /* * Resume involves the card going into idle state, * so current partition is always the main one. */ md->part_curr = md->part_type; mmc_queue_resume(&md->queue); list_for_each_entry(part_md, &md->part, part) { mmc_queue_resume(&part_md->queue); } } return 0; } #else #define mmc_blk_suspend NULL #define mmc_blk_resume NULL #endif static struct mmc_driver mmc_driver = { .drv = { .name = "mmcblk", }, .probe = mmc_blk_probe, .remove = mmc_blk_remove, .suspend = mmc_blk_suspend, .resume = mmc_blk_resume, }; static int __init mmc_blk_init(void) { int res; if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) pr_info("mmcblk: using %d minors per device\n", perdev_minors); max_devices = 256 / perdev_minors; res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); if (res) goto out; res = mmc_register_driver(&mmc_driver); if (res) goto out2; return 0; out2: unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); out: return res; } static void __exit mmc_blk_exit(void) { mmc_unregister_driver(&mmc_driver); unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); } module_init(mmc_blk_init); module_exit(mmc_blk_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
mahound/Cyanogenmod_kernel_samsung_loganreltexx
drivers/mmc/card/block.c
C
gpl-2.0
81,469
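The mmc_add_disk() routine in the block.c sample above registers a chain of sysfs attributes and, when any device_create_file() call fails, unwinds the ones already created through a ladder of goto labels in reverse order. Below is a minimal, self-contained sketch of that unwind idiom only; the create_*/remove_* helpers are hypothetical stand-ins and not part of the kernel driver.

#include <stdio.h>

/*
 * Hypothetical setup/teardown pairs standing in for the
 * device_create_file()/device_remove_file() calls in mmc_add_disk().
 * Each create_*() returns 0 on success.
 */
static int create_a(void) { puts("create a"); return 0; }
static int create_b(void) { puts("create b"); return 0; }
static int create_c(void) { puts("create c"); return -1; /* simulated failure */ }
static void remove_a(void) { puts("remove a"); }
static void remove_b(void) { puts("remove b"); }

/*
 * Acquire resources in order; on failure, fall through the labels so that
 * everything acquired so far is released in reverse order.
 */
static int add_disk_attrs(void)
{
	int ret;

	ret = create_a();
	if (ret)
		goto a_fail;
	ret = create_b();
	if (ret)
		goto b_fail;
	ret = create_c();
	if (ret)
		goto c_fail;
	return 0;

c_fail:
	remove_b();
b_fail:
	remove_a();
a_fail:
	return ret;
}

int main(void)
{
	return add_disk_attrs() ? 1 : 0;
}

The same shape scales to any number of resources: each new acquisition adds one label, and a failure at step N falls through the labels that release steps N-1 down to 1.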
/* * drivers/media/video/msm/ov5642_reg_globaloptics.c * * Refer to drivers/media/video/msm/mt9d112_reg.c * For IC OV5642 of Module GLOBALOPTICS: 5.0Mp 1/4-Inch System-On-A-Chip (SOC) CMOS Digital Image Sensor * * Copyright (C) 2009-2010 ZTE Corporation. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * */ #include <linux/delay.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <media/msm_camera.h> #include <mach/gpio.h> #include "mt9d113.h" #define MT9D113_SENSOR_PROBE_INIT #ifdef MT9D113_SENSOR_PROBE_INIT #define MT9D113_PROBE_WORKQUEUE #endif #if defined(MT9D113_PROBE_WORKQUEUE) #include <linux/workqueue.h> static struct platform_device *pdev_wq = NULL; static struct workqueue_struct *mt9d113_wq = NULL; static void mt9d113_workqueue(struct work_struct *work); static DECLARE_WORK(mt9d113_cb_work, mt9d113_workqueue); #endif #define MT9D113_CAMIO_MCLK 24000000 #define MT9D113_I2C_BOARD_NAME "mt9d113" #define MT9D113_I2C_BUS_ID (0) #define MT9D113_SLAVE_WR_ADDR 0x78 #define MT9D113_SLAVE_RD_ADDR 0x79 #define REG_MT9D113_MODEL_ID 0x0000 #define MT9D113_MODEL_ID 0x2580 #define REG_MT9D113_MODEL_ID_SUB 0x31FE #define MT9D113_MODEL_ID_SUB 0x0003 #define REG_MT9D113_STANDBY_CONTROL 0x0018 #if defined(CONFIG_MACH_R750) || defined(CONFIG_MACH_JOE) #define MT9D113_GPIO_SWITCH_CTL 39 #define MT9D113_GPIO_SWITCH_VAL 0 #else #undef MT9D113_GPIO_SWITCH_CTL #undef MT9D113_GPIO_SWITCH_VAL #endif struct mt9d113_work_t { struct work_struct work; }; struct mt9d113_ctrl_t { const struct msm_camera_sensor_info *sensordata; }; static struct mt9d113_work_t *mt9d113_sensorw = NULL; static struct i2c_client *mt9d113_client = NULL; static struct mt9d113_ctrl_t *mt9d113_ctrl = NULL; static uint16_t model_id; DECLARE_MUTEX(mt9d113_sem); static struct wake_lock mt9d113_wake_lock; static int mt9d113_sensor_init(const struct msm_camera_sensor_info *data); static int mt9d113_sensor_config(void __user *argp); static int mt9d113_sensor_release(void); static int mt9d113_sensor_release_internal(void); static int32_t mt9d113_i2c_add_driver(void); static void mt9d113_i2c_del_driver(void); extern int32_t msm_camera_power_backend(enum msm_camera_pwr_mode_t pwr_mode); extern int msm_camera_clk_switch(const struct msm_camera_sensor_info *data, uint32_t gpio_switch, uint32_t switch_val); #ifdef CONFIG_ZTE_PLATFORM #ifdef CONFIG_ZTE_FTM_FLAG_SUPPORT extern int zte_get_ftm_flag(void); #endif #endif static inline void mt9d113_init_suspend(void) { CDBG("%s: entry\n", __func__); wake_lock_init(&mt9d113_wake_lock, WAKE_LOCK_IDLE, "mt9d113"); } static inline void mt9d113_deinit_suspend(void) { CDBG("%s: entry\n", __func__); wake_lock_destroy(&mt9d113_wake_lock); } static inline void mt9d113_prevent_suspend(void) { CDBG("%s: entry\n", __func__); wake_lock(&mt9d113_wake_lock); } static inline void mt9d113_allow_suspend(void) { CDBG("%s: entry\n", __func__); wake_unlock(&mt9d113_wake_lock); } static int mt9d113_hard_standby(const struct msm_camera_sensor_info *dev, uint32_t on) { int rc; CDBG("%s: entry\n", __func__); rc = gpio_request(dev->sensor_pwd, "mt9d113"); if (0 == rc) { rc 
= gpio_direction_output(dev->sensor_pwd, on); mdelay(10); } gpio_free(dev->sensor_pwd); return rc; } static int mt9d113_hard_reset(const struct msm_camera_sensor_info *dev) { int rc = 0; CDBG("%s: entry\n", __func__); rc = gpio_request(dev->sensor_reset, "mt9d113"); if (0 == rc) { rc = gpio_direction_output(dev->sensor_reset, 1); mdelay(10); rc = gpio_direction_output(dev->sensor_reset, 0); mdelay(10); rc = gpio_direction_output(dev->sensor_reset, 1); mdelay(10); } gpio_free(dev->sensor_reset); return rc; } static int32_t mt9d113_i2c_txdata(unsigned short saddr, unsigned char *txdata, int length) { struct i2c_msg msg[] = { { .addr = saddr, .flags = 0, .len = length, .buf = txdata, }, }; if (i2c_transfer(mt9d113_client->adapter, msg, 1) < 0) { CCRT("%s: failed!\n", __func__); return -EIO; } return 0; } static int32_t mt9d113_i2c_write(unsigned short saddr, unsigned short waddr, unsigned short wdata, enum mt9d113_width_t width) { int32_t rc = -EFAULT; unsigned char buf[4]; memset(buf, 0, sizeof(buf)); switch (width) { case WORD_LEN: { buf[0] = (waddr & 0xFF00) >> 8; buf[1] = (waddr & 0x00FF); buf[2] = (wdata & 0xFF00) >> 8; buf[3] = (wdata & 0x00FF); rc = mt9d113_i2c_txdata(saddr, buf, 4); } break; case BYTE_LEN: { buf[0] = waddr; buf[1] = wdata; rc = mt9d113_i2c_txdata(saddr, buf, 2); } break; default: { rc = -EFAULT; } break; } if (rc < 0) { CCRT("%s: waddr = 0x%x, wdata = 0x%x, failed!\n", __func__, waddr, wdata); } return rc; } static int32_t mt9d113_i2c_write_table(struct mt9d113_i2c_reg_conf const *reg_conf_tbl, int len) { uint32_t i; int32_t rc = 0; #ifdef MT9D113_SENSOR_PROBE_INIT for (i = 0; i < len; i++) { rc = mt9d113_i2c_write(mt9d113_client->addr, reg_conf_tbl[i].waddr, reg_conf_tbl[i].wdata, reg_conf_tbl[i].width); if (rc < 0) { break; } if (reg_conf_tbl[i].mdelay_time != 0) { mdelay(reg_conf_tbl[i].mdelay_time); } if (0x00 == (!(i | 0xFFFFFFE0) && 0x0F)) { mdelay(1); } } #else if(reg_conf_tbl == mt9d113_regs.prevsnap_tbl) { for (i = 0; i < len; i++) { rc = mt9d113_i2c_write(mt9d113_client->addr, reg_conf_tbl[i].waddr, reg_conf_tbl[i].wdata, reg_conf_tbl[i].width); if (rc < 0) { break; } if (reg_conf_tbl[i].mdelay_time != 0) { mdelay(reg_conf_tbl[i].mdelay_time); } if ((i < (len >> 6)) && (0x00 == (!(i | 0xFFFFFFE0) && 0x0F))) { mdelay(1); } } } else { for (i = 0; i < len; i++) { rc = mt9d113_i2c_write(mt9d113_client->addr, reg_conf_tbl[i].waddr, reg_conf_tbl[i].wdata, reg_conf_tbl[i].width); if (rc < 0) { break; } if (reg_conf_tbl[i].mdelay_time != 0) { mdelay(reg_conf_tbl[i].mdelay_time); } } } #endif return rc; } static int mt9d113_i2c_rxdata(unsigned short saddr, unsigned char *rxdata, int length) { struct i2c_msg msgs[] = { { .addr = saddr, .flags = 0, .len = 2, .buf = rxdata, }, { .addr = saddr, .flags = I2C_M_RD, .len = length, .buf = rxdata, }, }; if (i2c_transfer(mt9d113_client->adapter, msgs, 2) < 0) { CCRT("%s: failed!\n", __func__); return -EIO; } return 0; } static int32_t mt9d113_i2c_read(unsigned short saddr, unsigned short raddr, unsigned short *rdata, enum mt9d113_width_t width) { int32_t rc = 0; unsigned char buf[4]; if (!rdata) { CCRT("%s: rdata points to NULL!\n", __func__); return -EIO; } memset(buf, 0, sizeof(buf)); switch (width) { case WORD_LEN: { buf[0] = (raddr & 0xFF00) >> 8; buf[1] = (raddr & 0x00FF); rc = mt9d113_i2c_rxdata(saddr, buf, 2); if (rc < 0) { return rc; } *rdata = buf[0] << 8 | buf[1]; } break; default: { rc = -EFAULT; } break; } if (rc < 0) { CCRT("%s: failed!\n", __func__); } return rc; } static int32_t __attribute__((unused)) 
mt9d113_af_trigger(void) { CDBG("%s: not supported!\n", __func__); return 0; } static int32_t mt9d113_set_wb(int8_t wb_mode) { int32_t rc = 0; CDBG("%s: entry: wb_mode=%d\n", __func__, wb_mode); switch (wb_mode) { case CAMERA_WB_MODE_AWB: { rc = mt9d113_i2c_write_table(mt9d113_regs.wb_auto_tbl, mt9d113_regs.wb_auto_tbl_sz); } break; case CAMERA_WB_MODE_SUNLIGHT: { rc = mt9d113_i2c_write_table(mt9d113_regs.wb_daylight_tbl, mt9d113_regs.wb_daylight_tbl_sz); } break; case CAMERA_WB_MODE_INCANDESCENT: { rc = mt9d113_i2c_write_table(mt9d113_regs.wb_incandescent_tbl, mt9d113_regs.wb_incandescent_tbl_sz); } break; case CAMERA_WB_MODE_FLUORESCENT: { rc = mt9d113_i2c_write_table(mt9d113_regs.wb_flourescant_tbl, mt9d113_regs.wb_flourescant_tbl_sz); } break; case CAMERA_WB_MODE_CLOUDY: { rc = mt9d113_i2c_write_table(mt9d113_regs.wb_cloudy_tbl, mt9d113_regs.wb_cloudy_tbl_sz); } break; default: { CCRT("%s: parameter error!\n", __func__); rc = -EFAULT; } } mdelay(100); return rc; } static int32_t mt9d113_set_contrast(int8_t contrast) { int32_t rc = 0; CDBG("%s: entry: contrast=%d\n", __func__, contrast); switch (contrast) { case CAMERA_CONTRAST_0: { rc = mt9d113_i2c_write_table(mt9d113_regs.contrast_tbl[0], mt9d113_regs.contrast_tbl_sz[0]); } break; case CAMERA_CONTRAST_1: { rc = mt9d113_i2c_write_table(mt9d113_regs.contrast_tbl[1], mt9d113_regs.contrast_tbl_sz[1]); } break; case CAMERA_CONTRAST_2: { rc = mt9d113_i2c_write_table(mt9d113_regs.contrast_tbl[2], mt9d113_regs.contrast_tbl_sz[2]); } break; case CAMERA_CONTRAST_3: { rc = mt9d113_i2c_write_table(mt9d113_regs.contrast_tbl[3], mt9d113_regs.contrast_tbl_sz[3]); } break; case CAMERA_CONTRAST_4: { rc = mt9d113_i2c_write_table(mt9d113_regs.contrast_tbl[4], mt9d113_regs.contrast_tbl_sz[4]); } break; default: { CCRT("%s: parameter error!\n", __func__); rc = -EFAULT; } } mdelay(100); return rc; } static int32_t mt9d113_set_brightness(int8_t brightness) { int32_t rc = 0; CCRT("%s: entry: brightness=%d\n", __func__, brightness); switch (brightness) { case CAMERA_BRIGHTNESS_0: { rc = mt9d113_i2c_write_table(mt9d113_regs.brightness_tbl[0], mt9d113_regs.brightness_tbl_sz[0]); } break; case CAMERA_BRIGHTNESS_1: { rc = mt9d113_i2c_write_table(mt9d113_regs.brightness_tbl[1], mt9d113_regs.brightness_tbl_sz[1]); } break; case CAMERA_BRIGHTNESS_2: { rc = mt9d113_i2c_write_table(mt9d113_regs.brightness_tbl[2], mt9d113_regs.brightness_tbl_sz[2]); } break; case CAMERA_BRIGHTNESS_3: { rc = mt9d113_i2c_write_table(mt9d113_regs.brightness_tbl[3], mt9d113_regs.brightness_tbl_sz[3]); } break; case CAMERA_BRIGHTNESS_4: { rc = mt9d113_i2c_write_table(mt9d113_regs.brightness_tbl[4], mt9d113_regs.brightness_tbl_sz[4]); } break; case CAMERA_BRIGHTNESS_5: { rc = mt9d113_i2c_write_table(mt9d113_regs.brightness_tbl[5], mt9d113_regs.brightness_tbl_sz[5]); } break; case CAMERA_BRIGHTNESS_6: { rc = mt9d113_i2c_write_table(mt9d113_regs.brightness_tbl[6], mt9d113_regs.brightness_tbl_sz[6]); } break; default: { CCRT("%s: parameter error!\n", __func__); rc = -EFAULT; } } return rc; } static int32_t mt9d113_set_saturation(int8_t saturation) { int32_t rc = 0; CCRT("%s: entry: saturation=%d\n", __func__, saturation); switch (saturation) { case CAMERA_SATURATION_0: { rc = mt9d113_i2c_write_table(mt9d113_regs.saturation_tbl[0], mt9d113_regs.saturation_tbl_sz[0]); } break; case CAMERA_SATURATION_1: { rc = mt9d113_i2c_write_table(mt9d113_regs.saturation_tbl[1], mt9d113_regs.saturation_tbl_sz[1]); } break; case CAMERA_SATURATION_2: { rc = mt9d113_i2c_write_table(mt9d113_regs.saturation_tbl[2], 
mt9d113_regs.saturation_tbl_sz[2]); } break; case CAMERA_SATURATION_3: { rc = mt9d113_i2c_write_table(mt9d113_regs.saturation_tbl[3], mt9d113_regs.saturation_tbl_sz[3]); } break; case CAMERA_SATURATION_4: { rc = mt9d113_i2c_write_table(mt9d113_regs.saturation_tbl[4], mt9d113_regs.saturation_tbl_sz[4]); } break; default: { CCRT("%s: parameter error!\n", __func__); rc = -EFAULT; } } mdelay(100); return rc; } static int32_t mt9d113_set_sharpness(int8_t sharpness) { int32_t rc = 0; CDBG("%s: entry: sharpness=%d\n", __func__, sharpness); switch (sharpness) { case CAMERA_SHARPNESS_0: { rc = mt9d113_i2c_write_table(mt9d113_regs.sharpness_tbl[0], mt9d113_regs.sharpness_tbl_sz[0]); } break; case CAMERA_SHARPNESS_1: { rc = mt9d113_i2c_write_table(mt9d113_regs.sharpness_tbl[1], mt9d113_regs.sharpness_tbl_sz[1]); } break; case CAMERA_SHARPNESS_2: { rc = mt9d113_i2c_write_table(mt9d113_regs.sharpness_tbl[2], mt9d113_regs.sharpness_tbl_sz[2]); } break; case CAMERA_SHARPNESS_3: { rc = mt9d113_i2c_write_table(mt9d113_regs.sharpness_tbl[3], mt9d113_regs.sharpness_tbl_sz[3]); } break; case CAMERA_SHARPNESS_4: { rc = mt9d113_i2c_write_table(mt9d113_regs.sharpness_tbl[4], mt9d113_regs.sharpness_tbl_sz[4]); } break; default: { CCRT("%s: parameter error!\n", __func__); rc = -EFAULT; } } return rc; } static int32_t mt9d113_set_iso(int8_t iso_val) { int32_t rc = 0; CDBG("%s: entry: iso_val=%d\n", __func__, iso_val); switch (iso_val) { case CAMERA_ISO_SET_AUTO: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA20D, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0020, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA20E, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0090, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0006, WORD_LEN); if (rc < 0) { return rc; } mdelay(200); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA20D, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0020, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA20E, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0090, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; case CAMERA_ISO_SET_HJR: { CCRT("%s: not supported!\n", __func__); rc = -EFAULT; } break; case CAMERA_ISO_SET_100: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA20D, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0020, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA20E, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0028, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; case CAMERA_ISO_SET_200: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA20D, WORD_LEN); if (rc < 0) { return rc; } rc = 
mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0040, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA20E, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0048, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; case CAMERA_ISO_SET_400: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA20D, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0050, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA20E, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0080, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; case CAMERA_ISO_SET_800: { CCRT("%s: not supported!\n", __func__); rc = -EFAULT; } break; default: { CCRT("%s: parameter error!\n", __func__); rc = -EFAULT; } } mdelay(100); return rc; } static int32_t mt9d113_set_antibanding(int8_t antibanding) { int32_t rc = 0; CDBG("%s: entry: antibanding=%d\n", __func__, antibanding); switch (antibanding) { case CAMERA_ANTIBANDING_SET_OFF: { CCRT("%s: CAMERA_ANTIBANDING_SET_OFF NOT supported!\n", __func__); } break; case CAMERA_ANTIBANDING_SET_60HZ: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA118, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA11E, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA124, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA12A, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA404, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x00A0, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; case CAMERA_ANTIBANDING_SET_50HZ: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA118, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA11E, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA124, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA12A, WORD_LEN); if (rc < 0) { return rc; } rc = 
mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA404, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x00E0, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; case CAMERA_ANTIBANDING_SET_AUTO: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA118, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0001, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA11E, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0001, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA124, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0000, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA12A, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0001, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; default: { CCRT("%s: parameter error!\n", __func__); rc = -EFAULT; } } mdelay(100); return rc; } static int32_t __attribute__((unused))mt9d113_set_lensshading(int8_t lensshading) { #if 0 int32_t rc = 0; uint16_t brightness_lev = 0; CDBG("%s: entry: lensshading=%d\n", __func__, lensshading); if (0 == lensshading) { CCRT("%s: lens shading is disabled!\n", __func__); return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098E, 0x3835, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_read(mt9d113_client->addr, 0x0990, &brightness_lev, WORD_LEN); if (rc < 0) { return rc; } if (brightness_lev < 5) { rc = mt9d113_i2c_write_table(mt9d113_regs.lens_for_outdoor_tbl, mt9d113_regs.lens_for_outdoor_tbl_sz); if (rc < 0) { return rc; } } else { rc = mt9d113_i2c_write_table(mt9d113_regs.lens_for_indoor_tbl, mt9d113_regs.lens_for_indoor_tbl_sz); if (rc < 0) { return rc; } } return rc; #else return 0; #endif } static long mt9d113_set_exposure_compensation(int8_t exposure) { long rc = 0; CDBG("%s: entry: exposure=%d\n", __func__, exposure); switch(exposure) { case CAMERA_EXPOSURE_0: { } break; case CAMERA_EXPOSURE_1: { } break; case CAMERA_EXPOSURE_2: { } break; case CAMERA_EXPOSURE_3: { } break; case CAMERA_EXPOSURE_4: { } break; default: { CCRT("%s: parameter error!\n", __func__); return -EFAULT; } } return rc; } static long mt9d113_reg_init(void) { long rc; CDBG("%s: entry\n", __func__); rc = mt9d113_i2c_write_table(mt9d113_regs.prevsnap_tbl, mt9d113_regs.prevsnap_tbl_sz); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, REG_MT9D113_STANDBY_CONTROL, 0x0028, WORD_LEN); if (rc < 0) { return rc; } mdelay(10); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0006, WORD_LEN); if (rc < 0) { return rc; } mdelay(300); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 
0x0005, WORD_LEN); if (rc < 0) { return rc; } return 0; } static long mt9d113_set_sensor_mode(int32_t mode) { long rc = 0; CDBG("%s: entry\n", __func__); switch (mode) { case SENSOR_PREVIEW_MODE: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA115, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0000, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0001, WORD_LEN); if (rc < 0) { return rc; } mdelay(80); } break; case SENSOR_SNAPSHOT_MODE: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA115, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); if (rc < 0) { return rc; } } break; default: { return -EFAULT; } } return 0; } static long mt9d113_set_effect(int32_t mode, int32_t effect) { uint16_t __attribute__((unused)) reg_addr; uint16_t __attribute__((unused)) reg_val; long rc = 0; switch (mode) { case SENSOR_PREVIEW_MODE: { } break; case SENSOR_SNAPSHOT_MODE: { } break; default: { } break; } switch (effect) { case CAMERA_EFFECT_OFF: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0x2759, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x6440, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0x275B, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x6440, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; case CAMERA_EFFECT_MONO: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0x2759, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x6441, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0x275B, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x6441, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; case CAMERA_EFFECT_NEGATIVE: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0x2759, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x6443, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0x275B, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x6443, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; case CAMERA_EFFECT_SOLARIZE: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0x2759, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x6444, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 
0x098C, 0x275B, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x6444, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; case CAMERA_EFFECT_SEPIA: { rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0x2763, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0xB023, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0x2759, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x6442, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0x275B, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x6442, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) { return rc; } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0005, WORD_LEN); if (rc < 0) { return rc; } } break; default: { return -EFAULT; } } mdelay(100); return rc; } static long mt9d113_power_up(void) { CDBG("%s: not supported!\n", __func__); return 0; } static long mt9d113_power_down(void) { CDBG("%s: not supported!\n", __func__); return 0; } #if 0 static int mt9d113_power_shutdown(uint32_t on) { int rc; CDBG("%s: entry\n", __func__); rc = gpio_request(MT9D113_GPIO_SHUTDOWN_CTL, "mt9d113"); if (0 == rc) { rc = gpio_direction_output(MT9D113_GPIO_SHUTDOWN_CTL, on); mdelay(1); } gpio_free(MT9D113_GPIO_SHUTDOWN_CTL); return rc; } #endif #if !defined(CONFIG_SENSOR_ADAPTER) static int mt9d113_sensor_init_probe(const struct msm_camera_sensor_info *data) { uint32_t switch_on; int rc = 0; CDBG("%s: entry\n", __func__); #if 0 switch_on = 0; rc = mt9d113_power_shutdown(switch_on); if (rc < 0) { CCRT("enter/quit lowest-power mode failed!\n"); goto init_probe_fail; } #endif switch_on = 0; rc = mt9d113_hard_standby(data, switch_on); if (rc < 0) { CCRT("set standby failed!\n"); goto init_probe_fail; } rc = mt9d113_hard_reset(data); if (rc < 0) { CCRT("hard reset failed!\n"); goto init_probe_fail; } model_id = 0x0000; rc = mt9d113_i2c_read(mt9d113_client->addr, REG_MT9D113_MODEL_ID, &model_id, WORD_LEN); if (rc < 0) { goto init_probe_fail; } CDBG("%s: model_id = 0x%x\n", __func__, model_id); #ifdef CONFIG_SENSOR_INFO msm_sensorinfo_set_sensor_id(model_id); #else #endif if (model_id != MT9D113_MODEL_ID) { rc = -EFAULT; goto init_probe_fail; } rc = mt9d113_i2c_write_table(mt9d113_regs.pll_tbl, mt9d113_regs.pll_tbl_sz); if (rc < 0) { return rc; } model_id = 0x0000; rc = mt9d113_i2c_read(mt9d113_client->addr, REG_MT9D113_MODEL_ID_SUB, &model_id, WORD_LEN); if (rc < 0) { goto init_probe_fail; } CDBG("%s: model_id_sub = 0x%x\n", __func__, model_id); #ifdef CONFIG_SENSOR_INFO msm_sensorinfo_set_sensor_id(model_id); #else #endif if (model_id != MT9D113_MODEL_ID_SUB) { rc = -EFAULT; goto init_probe_fail; } rc = mt9d113_reg_init(); if (rc < 0) { goto init_probe_fail; } return rc; init_probe_fail: CCRT("%s: rc = %d, failed!\n", __func__, rc); return rc; } #else static int mt9d113_sensor_i2c_probe_on(void) { int rc; struct i2c_board_info info; struct i2c_adapter *adapter; struct i2c_client *client; rc = mt9d113_i2c_add_driver(); if (rc < 0) { CCRT("%s: add i2c driver failed!\n", __func__); return rc; } memset(&info, 0, sizeof(struct i2c_board_info)); 
info.addr = MT9D113_SLAVE_WR_ADDR >> 1; strlcpy(info.type, MT9D113_I2C_BOARD_NAME, I2C_NAME_SIZE); adapter = i2c_get_adapter(MT9D113_I2C_BUS_ID); if (!adapter) { CCRT("%s: get i2c adapter failed!\n", __func__); goto i2c_probe_failed; } client = i2c_new_device(adapter, &info); i2c_put_adapter(adapter); if (!client) { CCRT("%s: add i2c device failed!\n", __func__); goto i2c_probe_failed; } mt9d113_client = client; return 0; i2c_probe_failed: mt9d113_i2c_del_driver(); return -ENODEV; } static void mt9d113_sensor_i2c_probe_off(void) { i2c_unregister_device(mt9d113_client); mt9d113_i2c_del_driver(); } static int mt9d113_sensor_dev_probe(const struct msm_camera_sensor_info *pinfo) { int rc; rc = msm_camera_power_backend(MSM_CAMERA_PWRUP_MODE); if (rc < 0) { CCRT("%s: camera_power_backend failed!\n", __func__); return rc; } #if defined(CONFIG_MACH_R750) || defined(CONFIG_MACH_JOE) rc = msm_camera_clk_switch(pinfo, MT9D113_GPIO_SWITCH_CTL, MT9D113_GPIO_SWITCH_VAL); if (rc < 0) { CCRT("%s: camera_clk_switch failed!\n", __func__); return rc;; } #else #endif msm_camio_clk_rate_set(MT9D113_CAMIO_MCLK); mdelay(5); rc = mt9d113_hard_standby(pinfo, 0); if (rc < 0) { CCRT("set standby failed!\n"); return rc; } rc = mt9d113_hard_reset(pinfo); if (rc < 0) { CCRT("hard reset failed!\n"); return rc; } model_id = 0x0000; rc = mt9d113_i2c_read(mt9d113_client->addr, REG_MT9D113_MODEL_ID, &model_id, WORD_LEN); if (rc < 0) { return rc; } CDBG("%s: model_id = 0x%x\n", __func__, model_id); #ifdef CONFIG_SENSOR_INFO msm_sensorinfo_set_sensor_id(model_id); #else #endif if (model_id != MT9D113_MODEL_ID) { return -EFAULT; } rc = mt9d113_i2c_write_table(mt9d113_regs.pll_tbl, mt9d113_regs.pll_tbl_sz); if (rc < 0) { return rc; } model_id = 0x0000; rc = mt9d113_i2c_read(mt9d113_client->addr, REG_MT9D113_MODEL_ID_SUB, &model_id, WORD_LEN); if (rc < 0) { return rc; } CDBG("%s: model_id_sub = 0x%x\n", __func__, model_id); #ifdef CONFIG_SENSOR_INFO msm_sensorinfo_set_sensor_id(model_id); #else #endif if (model_id != MT9D113_MODEL_ID_SUB) { return -EFAULT; } return 0; } #endif static int mt9d113_sensor_probe_init(const struct msm_camera_sensor_info *data) { int rc; CDBG("%s: entry\n", __func__); if (!data || strcmp(data->sensor_name, "mt9d113")) { CCRT("%s: invalid parameters!\n", __func__); rc = -ENODEV; goto probe_init_fail; } mt9d113_ctrl = kzalloc(sizeof(struct mt9d113_ctrl_t), GFP_KERNEL); if (!mt9d113_ctrl) { CCRT("%s: kzalloc failed!\n", __func__); rc = -ENOMEM; goto probe_init_fail; } mt9d113_ctrl->sensordata = data; #if !defined(CONFIG_SENSOR_ADAPTER) rc = msm_camera_power_backend(MSM_CAMERA_PWRUP_MODE); if (rc < 0) { CCRT("%s: camera_power_backend failed!\n", __func__); goto probe_init_fail; } #if defined(CONFIG_MACH_R750) || defined(CONFIG_MACH_JOE) rc = msm_camera_clk_switch(mt9d113_ctrl->sensordata, MT9D113_GPIO_SWITCH_CTL, MT9D113_GPIO_SWITCH_VAL); if (rc < 0) { CCRT("%s: camera_clk_switch failed!\n", __func__); goto probe_init_fail; } #else #endif msm_camio_clk_rate_set(MT9D113_CAMIO_MCLK); mdelay(5); rc = mt9d113_sensor_init_probe(mt9d113_ctrl->sensordata); if (rc < 0) { CCRT("%s: sensor_init_probe failed!\n", __func__); goto probe_init_fail; } #else rc = mt9d113_sensor_dev_probe(mt9d113_ctrl->sensordata); if (rc < 0) { CCRT("%s: mt9d113_sensor_dev_probe failed!\n", __func__); goto probe_init_fail; } rc = mt9d113_reg_init(); if (rc < 0) { CCRT("%s: mt9d113_reg_init failed!\n", __func__); goto probe_init_fail; } #endif return 0; probe_init_fail: msm_camera_power_backend(MSM_CAMERA_PWRDWN_MODE); if(mt9d113_ctrl) { 
kfree(mt9d113_ctrl); } return rc; } #ifdef MT9D113_SENSOR_PROBE_INIT static int mt9d113_sensor_init(const struct msm_camera_sensor_info *data) { uint32_t switch_on; int rc; CDBG("%s: entry\n", __func__); if ((NULL == data) || strcmp(data->sensor_name, "mt9d113") || strcmp(mt9d113_ctrl->sensordata->sensor_name, "mt9d113")) { CCRT("%s: data is NULL, or sensor_name is not equal to mt9d113!\n", __func__); rc = -ENODEV; goto sensor_init_fail; } rc = msm_camera_power_backend(MSM_CAMERA_NORMAL_MODE); if (rc < 0) { CCRT("%s: camera_power_backend failed!\n", __func__); goto sensor_init_fail; } msm_camio_clk_rate_set(MT9D113_CAMIO_MCLK); mdelay(5); msm_camio_camif_pad_reg_reset(); mdelay(10); switch_on = 0; rc = mt9d113_hard_standby(mt9d113_ctrl->sensordata, switch_on); if (rc < 0) { CCRT("set standby failed!\n"); goto sensor_init_fail; } mdelay(10); return 0; sensor_init_fail: return rc; } #else static int mt9d113_sensor_init(const struct msm_camera_sensor_info *data) { int rc; rc = mt9d113_sensor_probe_init(data); return rc; } #endif static int mt9d113_sensor_config(void __user *argp) { struct sensor_cfg_data cfg_data; long rc = 0; CDBG("%s: entry\n", __func__); if (copy_from_user(&cfg_data, (void *)argp, sizeof(struct sensor_cfg_data))) { CCRT("%s: copy_from_user failed!\n", __func__); return -EFAULT; } CDBG("%s: cfgtype = %d, mode = %d\n", __func__, cfg_data.cfgtype, cfg_data.mode); switch (cfg_data.cfgtype) { case CFG_SET_MODE: { rc = mt9d113_set_sensor_mode(cfg_data.mode); } break; case CFG_SET_EFFECT: { rc = mt9d113_set_effect(cfg_data.mode, cfg_data.cfg.effect); } break; case CFG_PWR_UP: { rc = mt9d113_power_up(); } break; case CFG_PWR_DOWN: { rc = mt9d113_power_down(); } break; case CFG_SET_WB: { rc = mt9d113_set_wb(cfg_data.cfg.wb_mode); } break; case CFG_SET_AF: { rc = 0; } break; case CFG_SET_ISO: { rc = mt9d113_set_iso(cfg_data.cfg.iso_val); } break; case CFG_SET_ANTIBANDING: { rc = mt9d113_set_antibanding(cfg_data.cfg.antibanding); } break; case CFG_SET_BRIGHTNESS: { rc = mt9d113_set_brightness(cfg_data.cfg.brightness); } break; case CFG_SET_SATURATION: { rc = mt9d113_set_saturation(cfg_data.cfg.saturation); } break; case CFG_SET_CONTRAST: { rc = mt9d113_set_contrast(cfg_data.cfg.contrast); } break; case CFG_SET_SHARPNESS: { rc = mt9d113_set_sharpness(cfg_data.cfg.sharpness); } break; case CFG_SET_LENS_SHADING: { rc = 0; } break; case CFG_SET_EXPOSURE_COMPENSATION: { rc = mt9d113_set_exposure_compensation(cfg_data.cfg.exposure); } break; default: { rc = -EFAULT; } break; } mt9d113_prevent_suspend(); return rc; } #ifdef MT9D113_SENSOR_PROBE_INIT static int mt9d113_sensor_release_internal(void) { int rc; uint32_t switch_on; CDBG("%s: entry\n", __func__); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0028, 0x0000, WORD_LEN); if (rc < 0) { return rc; } mdelay(1); switch_on = 1; rc = mt9d113_hard_standby(mt9d113_ctrl->sensordata, switch_on); if (rc < 0) { return rc; } mdelay(200); rc = msm_camera_power_backend(MSM_CAMERA_STANDBY_MODE); if (rc < 0) { return rc; } return 0; } #else static int mt9d113_sensor_release_internal(void) { int rc; rc = msm_camera_power_backend(MSM_CAMERA_PWRDWN_MODE); kfree(mt9d113_ctrl); return rc; } #endif static int mt9d113_sensor_release(void) { int rc; rc = mt9d113_sensor_release_internal(); mt9d113_allow_suspend(); return rc; } static int mt9d113_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; CDBG("%s: entry\n", __func__); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { rc = -ENOTSUPP; goto probe_failure; } 
mt9d113_sensorw = kzalloc(sizeof(struct mt9d113_work_t), GFP_KERNEL); if (!mt9d113_sensorw) { rc = -ENOMEM; goto probe_failure; } i2c_set_clientdata(client, mt9d113_sensorw); mt9d113_client = client; return 0; probe_failure: kfree(mt9d113_sensorw); mt9d113_sensorw = NULL; CCRT("%s: rc = %d, failed!\n", __func__, rc); return rc; } static int __exit mt9d113_i2c_remove(struct i2c_client *client) { struct mt9d113_work_t *sensorw = i2c_get_clientdata(client); CDBG("%s: entry\n", __func__); free_irq(client->irq, sensorw); kfree(sensorw); mt9d113_deinit_suspend(); mt9d113_client = NULL; mt9d113_sensorw = NULL; return 0; } static const struct i2c_device_id mt9d113_id[] = { { "mt9d113", 0}, { }, }; static struct i2c_driver mt9d113_driver = { .id_table = mt9d113_id, .probe = mt9d113_i2c_probe, .remove = __exit_p(mt9d113_i2c_remove), .driver = { .name = MT9D113_I2C_BOARD_NAME, }, }; static int32_t mt9d113_i2c_add_driver(void) { int32_t rc = 0; rc = i2c_add_driver(&mt9d113_driver); if (IS_ERR_VALUE(rc)) { goto init_failure; } return rc; init_failure: CCRT("%s: rc = %d, failed!\n", __func__, rc); return rc; } static void mt9d113_i2c_del_driver(void) { i2c_del_driver(&mt9d113_driver); } void mt9d113_exit(void) { CDBG("%s: entry\n", __func__); mt9d113_i2c_del_driver(); } int mt9d113_sensor_probe(const struct msm_camera_sensor_info *info, struct msm_sensor_ctrl *s) { int rc; CDBG("%s: entry\n", __func__); #if !defined(CONFIG_SENSOR_ADAPTER) rc = mt9d113_i2c_add_driver(); if (rc < 0) { goto probe_failed; } #else #endif #ifdef MT9D113_SENSOR_PROBE_INIT rc = mt9d113_sensor_probe_init(info); if (rc < 0) { CCRT("%s: mt9d113_sensor_probe_init failed!\n", __func__); goto probe_failed; } rc = mt9d113_sensor_release_internal(); if (rc < 0) { CCRT("%s: mt9d113_sensor_release failed!\n", __func__); goto probe_failed; } #endif mt9d113_init_suspend(); s->s_init = mt9d113_sensor_init; s->s_config = mt9d113_sensor_config; s->s_release = mt9d113_sensor_release; return 0; probe_failed: CCRT("%s: rc = %d, failed!\n", __func__, rc); #if !defined(CONFIG_SENSOR_ADAPTER) mt9d113_i2c_del_driver(); #else #endif return rc; } #if defined(MT9D113_PROBE_WORKQUEUE) static void mt9d113_workqueue(struct work_struct *work) { int32_t rc; #ifdef CONFIG_ZTE_PLATFORM #ifdef CONFIG_ZTE_FTM_FLAG_SUPPORT if(zte_get_ftm_flag()) { return; } #endif #endif if (!pdev_wq) { CCRT("%s: pdev_wq is NULL!\n", __func__); return; } #if !defined(CONFIG_SENSOR_ADAPTER) rc = msm_camera_drv_start(pdev_wq, mt9d113_sensor_probe); #else rc = msm_camera_dev_start(pdev_wq, mt9d113_sensor_i2c_probe_on, mt9d113_sensor_i2c_probe_off, mt9d113_sensor_dev_probe); if (rc < 0) { CCRT("%s: msm_camera_dev_start failed!\n", __func__); goto probe_failed; } rc = msm_camera_drv_start(pdev_wq, mt9d113_sensor_probe); if (rc < 0) { goto probe_failed; } return; probe_failed: CCRT("%s: rc = %d, failed!\n", __func__, rc); msm_camera_power_backend(MSM_CAMERA_PWRDWN_MODE); return; #endif } static int32_t mt9d113_probe_workqueue(void) { int32_t rc; mt9d113_wq = create_singlethread_workqueue("mt9d113_wq"); if (!mt9d113_wq) { CCRT("%s: mt9d113_wq is NULL!\n", __func__); return -EFAULT; } rc = queue_work(mt9d113_wq, &mt9d113_cb_work); return 0; } static int __mt9d113_probe(struct platform_device *pdev) { int32_t rc; pdev_wq = pdev; rc = mt9d113_probe_workqueue(); return rc; } #else static int __mt9d113_probe(struct platform_device *pdev) { #ifdef CONFIG_ZTE_PLATFORM #ifdef CONFIG_ZTE_FTM_FLAG_SUPPORT if(zte_get_ftm_flag()) { return 0; } #endif #endif return msm_camera_drv_start(pdev, 
mt9d113_sensor_probe); } #endif static struct platform_driver msm_camera_driver = { .probe = __mt9d113_probe, .driver = { .name = "msm_camera_mt9d113", .owner = THIS_MODULE, }, }; static int __init mt9d113_init(void) { return platform_driver_register(&msm_camera_driver); } module_init(mt9d113_init);
ZTE-BLADE/ZTE-BLADE-2.6.32
drivers/media/video/msm/mt9d113_qtech_sunny_socket.c
C
gpl-2.0
56,303
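mt9d113_i2c_write() in the sensor driver above packs a 16-bit register address and a 16-bit value big-endian into a four-byte buffer before handing it to i2c_transfer(), and mt9d113_i2c_write_table() walks arrays of such register/value pairs. The sketch below reproduces just that packing and the table-driven loop in plain C; fake_i2c_tx() and the two table entries are illustrative assumptions, not values or functions taken from the driver.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reg_conf {
	uint16_t waddr;       /* 16-bit register address */
	uint16_t wdata;       /* 16-bit value to write */
	unsigned mdelay_time; /* optional settle delay, in ms */
};

/* Stand-in for the real I2C transfer: just print what would go on the bus. */
static int fake_i2c_tx(const uint8_t *buf, int len)
{
	printf("i2c tx:");
	for (int i = 0; i < len; i++)
		printf(" %02X", (unsigned)buf[i]);
	printf("\n");
	return 0;
}

/* Pack address and data big-endian, as the driver's WORD_LEN case does. */
static int word_write(uint16_t waddr, uint16_t wdata)
{
	uint8_t buf[4];

	buf[0] = (waddr & 0xFF00) >> 8;
	buf[1] = (waddr & 0x00FF);
	buf[2] = (wdata & 0xFF00) >> 8;
	buf[3] = (wdata & 0x00FF);
	return fake_i2c_tx(buf, 4);
}

int main(void)
{
	/* Illustrative entries only; real init tables come from the vendor. */
	static const struct reg_conf tbl[] = {
		{ 0x098C, 0xA103, 0 },
		{ 0x0990, 0x0006, 300 },
	};

	for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		if (word_write(tbl[i].waddr, tbl[i].wdata) < 0)
			return 1;
		/* a real driver would mdelay(tbl[i].mdelay_time) here */
	}
	return 0;
}

Keeping the register sequences in data tables rather than inline writes is what lets the driver swap whole configurations (white balance, contrast, ISO) by pointing the same loop at a different table.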
/* * This file contains the RTC driver table for Motorola MCF5206eLITE * ColdFire evaluation board. * * Copyright (C) 2000 OKTET Ltd., St.-Petersburg, Russia * Author: Victor V. Vengerov <vvv@oktet.ru> * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at * * http://www.rtems.com/license/LICENSE. */ #include <bsp.h> #include <libchip/rtc.h> #include <ds1307.h> /* Forward function declaration */ bool mcf5206elite_ds1307_probe(int minor); extern rtc_fns ds1307_fns; /* The following table configures the RTC drivers used in this BSP */ rtc_tbl RTC_Table[] = { { "/dev/rtc", /* sDeviceName */ RTC_CUSTOM, /* deviceType */ &ds1307_fns, /* pDeviceFns */ mcf5206elite_ds1307_probe, /* deviceProbe */ NULL, /* pDeviceParams */ 0x00, /* ulCtrlPort1, for DS1307-I2C bus number */ DS1307_I2C_ADDRESS, /* ulDataPort, for DS1307-I2C device addr */ NULL, /* getRegister - not applicable to DS1307 */ NULL /* setRegister - not applicable to DS1307 */ } }; /* Some information used by the RTC driver */ #define NUM_RTCS (sizeof(RTC_Table)/sizeof(rtc_tbl)) size_t RTC_Count = NUM_RTCS; rtems_device_minor_number RTC_Minor; /* mcf5206elite_ds1307_probe -- * RTC presence probe function. Return TRUE, if device is present. * Device presence checked by probe access to RTC device over I2C bus. * * PARAMETERS: * minor - minor RTC device number * * RETURNS: * TRUE, if RTC device is present */ bool mcf5206elite_ds1307_probe(int minor) { int try = 0; i2c_message_status status; rtc_tbl *rtc; i2c_bus_number bus; i2c_address addr; if (minor >= NUM_RTCS) return false; rtc = RTC_Table + minor; bus = rtc->ulCtrlPort1; addr = rtc->ulDataPort; do { status = i2c_wrbyte(bus, addr, 0); if (status == I2C_NO_DEVICE) return false; try++; } while ((try < 15) && (status != I2C_SUCCESSFUL)); if (status == I2C_SUCCESSFUL) return true; else return false; }
yangxi/omap4m3
c/src/lib/libbsp/m68k/mcf5206elite/tod/todcfg.c
C
gpl-2.0
2,267
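mcf5206elite_ds1307_probe() above decides whether the DS1307 is present by retrying a single i2c_wrbyte() up to 15 times and bailing out immediately on I2C_NO_DEVICE. The following is a compilable sketch of that bounded-retry pattern under the assumption of a stubbed bus call (stub_wrbyte()) standing in for the RTEMS I2C API.

#include <stdbool.h>
#include <stdio.h>

/* Status codes modeled loosely on the i2c_message_status values used above. */
enum probe_status { BUS_SUCCESS, BUS_BUSY, BUS_NO_DEVICE };

/* Stub for i2c_wrbyte(): report the bus as busy twice, then succeed. */
static enum probe_status stub_wrbyte(void)
{
	static int calls;
	return (++calls < 3) ? BUS_BUSY : BUS_SUCCESS;
}

/*
 * Bounded retry: give up at once if no device answers, otherwise retry
 * transient errors up to max_tries attempts.
 */
static bool probe_device(int max_tries)
{
	enum probe_status status;
	int tries = 0;

	do {
		status = stub_wrbyte();
		if (status == BUS_NO_DEVICE)
			return false;
		tries++;
	} while (tries < max_tries && status != BUS_SUCCESS);

	return status == BUS_SUCCESS;
}

int main(void)
{
	printf("device %s\n", probe_device(15) ? "present" : "absent");
	return 0;
}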
/********************************************************************** * $Id$ lpc18xx_qei.c 2011-06-02 *//** * @file lpc18xx_pwr.c * @brief Contains all functions support for QEI firmware library on LPC18xx * @version 1.0 * @date 02. June. 2011 * @author NXP MCU SW Application Team * * Copyright(C) 2011, NXP Semiconductor * All rights reserved. * *********************************************************************** * Software that is described herein is for illustrative purposes only * which provides customers with programming information regarding the * products. This software is supplied "AS IS" without any warranties. * NXP Semiconductors assumes no responsibility or liability for the * use of the software, conveys no license or title under any patent, * copyright, or mask work right to the product. NXP Semiconductors * reserves the right to make changes in the software without * notification. NXP Semiconductors also make no representation or * warranty that such application will be suitable for the specified * use without further testing or modification. **********************************************************************/ /* Peripheral group ----------------------------------------------------------- */ /** @addtogroup QEI * @{ */ /* Includes ------------------------------------------------------------------- */ #include "lpc18xx_qei.h" #include "lpc18xx_cgu.h" /* If this source file built with example, the LPC18xx FW library configuration * file in each example directory ("lpc18xx_libcfg.h") must be included, * otherwise the default FW library configuration file must be included instead */ #ifdef __BUILD_WITH_EXAMPLE__ #include "lpc18xx_libcfg.h" #else #include "lpc18xx_libcfg_default.h" #endif /* __BUILD_WITH_EXAMPLE__ */ #ifdef _QEI /* Private Types -------------------------------------------------------------- */ /** @defgroup QEI_Private_Types QEI Private Types * @{ */ /** * @brief QEI configuration union type definition */ typedef union { QEI_CFG_Type bmQEIConfig; uint32_t ulQEIConfig; } QEI_CFGOPT_Type; /** * @} */ LPC_QEI_Type* QEI_GetPointer(uint8_t qeiId); /* Public Functions ----------------------------------------------------------- */ /** @addtogroup QEI_Public_Functions * @{ */ /*********************************************************************//** * @brief Get the point to typedef of QEI component * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @return None **********************************************************************/ LPC_QEI_Type* QEI_GetPointer(uint8_t qeiId) { LPC_QEI_Type* pQei = NULL; if(qeiId == 0) { pQei = LPC_QEI; } return pQei; } /*********************************************************************//** * @brief Resets value for each type of QEI value, such as velocity, * counter, position, etc.. * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] ulResetType QEI Reset Type, should be one of the following: * - QEI_RESET_POS :Reset Position Counter * - QEI_RESET_POSOnIDX :Reset Position Counter on Index signal * - QEI_RESET_VEL :Reset Velocity * - QEI_RESET_IDX :Reset Index Counter * @return None **********************************************************************/ void QEI_Reset(uint8_t qeiId, uint32_t ulResetType) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); pQei->CON = ulResetType; } /*********************************************************************//** * @brief Initializes the QEI peripheral according to the specified * parameters in the QEI_ConfigStruct. 
* @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] QEI_ConfigStruct Pointer to a QEI_CFG_Type structure * that contains the configuration information for the * specified QEI peripheral * @return None **********************************************************************/ void QEI_Init(uint8_t qeiId, QEI_CFG_Type *QEI_ConfigStruct) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); /* Set up clock and power for QEI module */ // Already enabled by BASE_M3_CLK // Reset all remaining value in QEI peripheral pQei->MAXPOS = 0x00; pQei->CMPOS0 = 0x00; pQei->CMPOS1 = 0x00; pQei->CMPOS2 = 0x00; pQei->INXCMP0 = 0x00; pQei->VELCOMP = 0x00; pQei->LOAD = 0x00; pQei->CON = QEI_CON_RESP | QEI_CON_RESV | QEI_CON_RESI; pQei->FILTERPHA = 0x00; pQei->FILTERPHB = 0x00; pQei->FILTERINX = 0x00; // Disable all Interrupt pQei->IEC = QEI_IECLR_BITMASK; // Clear all Interrupt pending pQei->CLR = QEI_INTCLR_BITMASK; // Set QEI configuration value corresponding to its setting up value pQei->CONF = ((QEI_CFGOPT_Type *)QEI_ConfigStruct)->ulQEIConfig; } /*********************************************************************//** * @brief De-Initalize QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @return None **********************************************************************/ void QEI_DeInit(uint8_t qeiId) { /* Turn off clock and power for QEI module */ } /*****************************************************************************//** * @brief Fills each QIE_InitStruct member with its default value: * - DirectionInvert = QEI_DIRINV_NONE * - SignalMode = QEI_SIGNALMODE_QUAD * - CaptureMode = QEI_CAPMODE_4X * - InvertIndex = QEI_INVINX_NONE * @param[in] QIE_InitStruct Pointer to a QEI_CFG_Type structure which will be * initialized. * @return None *******************************************************************************/ void QEI_GetCfgDefault(QEI_CFG_Type *QIE_InitStruct) { QIE_InitStruct->CaptureMode = QEI_CAPMODE_4X; QIE_InitStruct->DirectionInvert = QEI_DIRINV_NONE; QIE_InitStruct->InvertIndex = QEI_INVINX_NONE; QIE_InitStruct->SignalMode = QEI_SIGNALMODE_QUAD; } /*********************************************************************//** * @brief Check whether if specified flag status is set or not * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] ulFlagType Status Flag Type, should be one of the following: * - QEI_STATUS_DIR: Direction Status * @return New Status of this status flag (SET or RESET) **********************************************************************/ FlagStatus QEI_GetStatus(uint8_t qeiId, uint32_t ulFlagType) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); return ((pQei->STAT & ulFlagType) ? 
SET : RESET); } /*********************************************************************//** * @brief Get current position value in QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @return Current position value of QEI peripheral **********************************************************************/ uint32_t QEI_GetPosition(uint8_t qeiId) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); return (pQei->POS); } /*********************************************************************//** * @brief Set max position value for QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] ulMaxPos Max position value to set * @return None **********************************************************************/ void QEI_SetMaxPosition(uint8_t qeiId, uint32_t ulMaxPos) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); pQei->MAXPOS = ulMaxPos; } /*********************************************************************//** * @brief Set position compare value for QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] bPosCompCh Compare Position channel, should be: * - QEI_COMPPOS_CH_0 :QEI compare position channel 0 * - QEI_COMPPOS_CH_1 :QEI compare position channel 1 * - QEI_COMPPOS_CH_2 :QEI compare position channel 2 * @param[in] ulPosComp Compare Position value to set * @return None **********************************************************************/ void QEI_SetPositionComp(uint8_t qeiId, uint8_t bPosCompCh, uint32_t ulPosComp) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); uint32_t *tmp; /* CMPOS0..CMPOS2 are consecutive 32-bit registers, so advance by whole registers; scaling the channel by 4 in pointer arithmetic would step 16 bytes per channel and miss CMPOS1/CMPOS2 */ tmp = (uint32_t *) (&(pQei->CMPOS0)) + bPosCompCh; *tmp = ulPosComp; } /*********************************************************************//** * @brief Get current index counter of QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @return Current value of QEI index counter **********************************************************************/ uint32_t QEI_GetIndex(uint8_t qeiId) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); return (pQei->INXCNT); } /*********************************************************************//** * @brief Set value for index compare in QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] ulIndexComp Compare Index Value to set * @return None **********************************************************************/ void QEI_SetIndexComp(uint8_t qeiId, uint32_t ulIndexComp) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); pQei->INXCMP0 = ulIndexComp; } /*********************************************************************//** * @brief Set timer reload value for QEI peripheral. When the velocity timer * overflows, the value set in the Timer Reload register is loaded * into the velocity timer for the next period. The calculated velocity in RPM * is therefore affected by this value.
* @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] QEIReloadStruct QEI reload structure * @return None **********************************************************************/ void QEI_SetTimerReload(uint8_t qeiId, QEI_RELOADCFG_Type *QEIReloadStruct) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); uint64_t pclk; if (QEIReloadStruct->ReloadOption == QEI_TIMERRELOAD_TICKVAL) { pQei->LOAD = QEIReloadStruct->ReloadValue - 1; } else { #if 1 pclk = CGU_GetPCLKFrequency(CGU_PERIPHERAL_M3CORE); pclk = (pclk /(1000000/QEIReloadStruct->ReloadValue)) - 1; pQei->LOAD = (uint32_t)pclk; #else ld = M3Frequency; if (ld/1000000 > 0) { ld /= 1000000; ld *= QEIReloadStruct->ReloadValue; ld -= 1; } else { ld *= QEIReloadStruct->ReloadValue; ld /= 1000000; ld -= 1; } pQei->LOAD = ld; #endif } } /*********************************************************************//** * @brief Get current timer counter in QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @return Current timer counter in QEI peripheral **********************************************************************/ uint32_t QEI_GetTimer(uint8_t qeiId) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); return (pQei->TIME); } /*********************************************************************//** * @brief Get current velocity pulse counter in current time period * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @return Current velocity pulse counter value **********************************************************************/ uint32_t QEI_GetVelocity(uint8_t qeiId) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); return (pQei->VEL); } /*********************************************************************//** * @brief Get the most recently measured velocity of the QEI. When * the Velocity timer in QEI is over-flow, the current velocity * value will be loaded into Velocity Capture register. 
* @param[in] qeiId The Id of the expected QEI component, should be: 0 * @return The most recently measured velocity value **********************************************************************/ uint32_t QEI_GetVelocityCap(uint8_t qeiId) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); return (pQei->CAP); } /*********************************************************************//** * @brief Set Velocity Compare value for QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] ulVelComp Compare Velocity value to set * @return None **********************************************************************/ void QEI_SetVelocityComp(uint8_t qeiId, uint32_t ulVelComp) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); pQei->VELCOMP = ulVelComp; } /*********************************************************************//** * @brief Set value of sampling count for the digital filter in * QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] ulSamplingPulse Value of sampling count to set * @return None **********************************************************************/ void QEI_SetDigiFilter(uint8_t qeiId, st_Qei_FilterCfg FilterVal) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); pQei->FILTERPHA = FilterVal.PHA_FilterVal; pQei->FILTERPHB = FilterVal.PHB_FilterVal; pQei->FILTERINX = FilterVal.INX_FilterVal; } /*********************************************************************//** * @brief Check whether if specified interrupt flag status in QEI * peripheral is set or not * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] ulIntType Interrupt Flag Status type, should be: * - QEI_INTFLAG_INX_Int : index pulse was detected interrupt * - QEI_INTFLAG_TIM_Int : Velocity timer over flow interrupt * - QEI_INTFLAG_VELC_Int : Capture velocity is less than compare interrupt * - QEI_INTFLAG_DIR_Int : Change of direction interrupt * - QEI_INTFLAG_ERR_Int : An encoder phase error interrupt * - QEI_INTFLAG_ENCLK_Int : An encoder clock pulse was detected interrupt * - QEI_INTFLAG_POS0_Int : position 0 compare value is equal to the current position interrupt * - QEI_INTFLAG_POS1_Int : position 1 compare value is equal to the current position interrupt * - QEI_INTFLAG_POS2_Int : position 2 compare value is equal to the current position interrupt * - QEI_INTFLAG_REV_Int : Index compare value is equal to the current index count interrupt * - QEI_INTFLAG_POS0REV_Int : Combined position 0 and revolution count interrupt * - QEI_INTFLAG_POS1REV_Int : Combined position 1 and revolution count interrupt * - QEI_INTFLAG_POS2REV_Int : Combined position 2 and revolution count interrupt * @return New State of specified interrupt flag status (SET or RESET) **********************************************************************/ FlagStatus QEI_GetIntStatus(uint8_t qeiId, uint32_t ulIntType) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); return((pQei->INTSTAT & ulIntType) ? 
SET : RESET); } /*********************************************************************//** * @brief Enable/Disable specified interrupt in QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] ulIntType Interrupt Flag Status type, should be: * - QEI_INTFLAG_INX_Int : index pulse was detected interrupt * - QEI_INTFLAG_TIM_Int : Velocity timer over flow interrupt * - QEI_INTFLAG_VELC_Int : Capture velocity is less than compare interrupt * - QEI_INTFLAG_DIR_Int : Change of direction interrupt * - QEI_INTFLAG_ERR_Int : An encoder phase error interrupt * - QEI_INTFLAG_ENCLK_Int : An encoder clock pulse was detected interrupt * - QEI_INTFLAG_POS0_Int : position 0 compare value is equal to the current position interrupt * - QEI_INTFLAG_POS1_Int : position 1 compare value is equal to the current position interrupt * - QEI_INTFLAG_POS2_Int : position 2 compare value is equal to the current position interrupt * - QEI_INTFLAG_REV_Int : Index compare value is equal to the current index count interrupt * - QEI_INTFLAG_POS0REV_Int : Combined position 0 and revolution count interrupt * - QEI_INTFLAG_POS1REV_Int : Combined position 1 and revolution count interrupt * - QEI_INTFLAG_POS2REV_Int : Combined position 2 and revolution count interrupt * @param[in] NewState New function state, should be: * - DISABLE * - ENABLE * @return None **********************************************************************/ void QEI_IntCmd(uint8_t qeiId, uint32_t ulIntType, FunctionalState NewState) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); if (NewState == ENABLE) { pQei->IES = ulIntType; } else { pQei->IEC = ulIntType; } } /*********************************************************************//** * @brief Sets (forces) specified interrupt in QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] ulIntType Interrupt Flag Status type, should be: * - QEI_INTFLAG_INX_Int : index pulse was detected interrupt * - QEI_INTFLAG_TIM_Int : Velocity timer over flow interrupt * - QEI_INTFLAG_VELC_Int : Capture velocity is less than compare interrupt * - QEI_INTFLAG_DIR_Int : Change of direction interrupt * - QEI_INTFLAG_ERR_Int : An encoder phase error interrupt * - QEI_INTFLAG_ENCLK_Int : An encoder clock pulse was detected interrupt * - QEI_INTFLAG_POS0_Int : position 0 compare value is equal to the current position interrupt * - QEI_INTFLAG_POS1_Int : position 1 compare value is equal to the current position interrupt * - QEI_INTFLAG_POS2_Int : position 2 compare value is equal to the current position interrupt * - QEI_INTFLAG_REV_Int : Index compare value is equal to the current index count interrupt * - QEI_INTFLAG_POS0REV_Int : Combined position 0 and revolution count interrupt * - QEI_INTFLAG_POS1REV_Int : Combined position 1 and revolution count interrupt * - QEI_INTFLAG_POS2REV_Int : Combined position 2 and revolution count interrupt * @return None **********************************************************************/ void QEI_IntSet(uint8_t qeiId, uint32_t ulIntType) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); pQei->SET = ulIntType; } /*********************************************************************//** * @brief Clear (force) specified interrupt (pending) in QEI peripheral * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] ulIntType Interrupt Flag Status type, should be: * - QEI_INTFLAG_INX_Int : index pulse was detected interrupt * - QEI_INTFLAG_TIM_Int : Velocity timer over flow interrupt * - 
QEI_INTFLAG_VELC_Int : Capture velocity is less than compare interrupt * - QEI_INTFLAG_DIR_Int : Change of direction interrupt * - QEI_INTFLAG_ERR_Int : An encoder phase error interrupt * - QEI_INTFLAG_ENCLK_Int : An encoder clock pulse was detected interrupt * - QEI_INTFLAG_POS0_Int : position 0 compare value is equal to the current position interrupt * - QEI_INTFLAG_POS1_Int : position 1 compare value is equal to the current position interrupt * - QEI_INTFLAG_POS2_Int : position 2 compare value is equal to the current position interrupt * - QEI_INTFLAG_REV_Int : Index compare value is equal to the current index count interrupt * - QEI_INTFLAG_POS0REV_Int : Combined position 0 and revolution count interrupt * - QEI_INTFLAG_POS1REV_Int : Combined position 1 and revolution count interrupt * - QEI_INTFLAG_POS2REV_Int : Combined position 2 and revolution count interrupt * @return None **********************************************************************/ void QEI_IntClear(uint8_t qeiId, uint32_t ulIntType) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); pQei->CLR = ulIntType; } /*********************************************************************//** * @brief Calculates the actual velocity in RPM passed via velocity * capture value and Pulse Per Round (of the encoder) value * parameter input. * @param[in] qeiId The Id of the expected QEI component, should be: 0 * @param[in] ulVelCapValue Velocity capture input value that can be * got from QEI_GetVelocityCap() function * @param[in] ulPPR Pulse per round of encoder * @return The actual value of velocity in RPM (Round per minute) **********************************************************************/ uint32_t QEI_CalculateRPM(uint8_t qeiId, uint32_t ulVelCapValue, uint32_t ulPPR) { LPC_QEI_Type* pQei = QEI_GetPointer(qeiId); uint64_t rpm, clock, Load, edges; // Get current Clock rate for timer input clock = CGU_GetPCLKFrequency(CGU_PERIPHERAL_M3CORE); // Get Timer load value (velocity capture period) Load = (uint64_t)(pQei->LOAD + 1); // Get Edge edges = (uint64_t)((pQei->CONF & QEI_CONF_CAPMODE) ? 4 : 2); // Calculate RPM rpm = ((clock * ulVelCapValue * 60) / (Load * ulPPR * edges)); return (uint32_t)(rpm); } /** * @} */ #endif /* _QEI */ /** * @} */ /* --------------------------------- End Of File ------------------------------ */
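/* --------------------------------------------------------------------------
 * Editor's illustrative sketch, appended after the library source above; it
 * is not part of the NXP driver. It shows one plausible bring-up sequence
 * using only functions and constants that appear in this file (and its
 * lpc18xx_qei.h header). ENCODER_PPR, the reload tick count, and the busy
 * wait are hypothetical placeholders chosen for the example only.
 * -------------------------------------------------------------------------- */
#ifdef _QEI_EXAMPLE_SKETCH
#define ENCODER_PPR  2048U  /* pulses per revolution of the encoder (assumed) */

static uint32_t example_qei_read_rpm(void)
{
	QEI_CFG_Type cfg;
	QEI_RELOADCFG_Type reload;

	/* Default quadrature configuration: 4X capture, no signal inversion */
	QEI_GetCfgDefault(&cfg);
	QEI_Init(0, &cfg);

	/* Velocity capture period given directly in timer ticks (placeholder
	 * value; pick it from the M3 core clock and the desired window) */
	reload.ReloadOption = QEI_TIMERRELOAD_TICKVAL;
	reload.ReloadValue  = 1800000;
	QEI_SetTimerReload(0, &reload);

	/* Start from a clean position/velocity/index state */
	QEI_Reset(0, QEI_CON_RESP | QEI_CON_RESV | QEI_CON_RESI);

	/* Wait for one velocity capture period, then convert the capture to RPM */
	while (QEI_GetIntStatus(0, QEI_INTFLAG_TIM_Int) == RESET)
		;	/* busy wait on the velocity timer overflow flag */
	QEI_IntClear(0, QEI_INTFLAG_TIM_Int);

	return QEI_CalculateRPM(0, QEI_GetVelocityCap(0), ENCODER_PPR);
}
#endif /* _QEI_EXAMPLE_SKETCH */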
Lindem-Data-Acquisition-AS/TM4C129-discontinued
libraries/FreeRTOSv8.0.1/FreeRTOS-Plus/Demo/FreeRTOS_Plus_UDP_and_CLI_LPC1830_GCC/ThirdParty/CMSISv2p10_LPC18xx_DriverLib/src/lpc18xx_qei.c
C
gpl-2.0
20,918
/* * Driver O/S-independent utility routines * * $Copyright Open Broadcom Corporation$ * $Id: bcmutils.c,v 1.277.2.18 2011-01-26 02:32:08 $ */ #include <typedefs.h> #include <bcmdefs.h> #include <stdarg.h> #ifdef BCMDRIVER #include <osl.h> #include <bcmutils.h> #include <siutils.h> #if defined(BCMNVRAM) #include <bcmnvram.h> #endif #else /* !BCMDRIVER */ #include <stdio.h> #include <string.h> #include <bcmutils.h> #if defined(BCMEXTSUP) #include <bcm_osl.h> #endif #endif /* !BCMDRIVER */ #include <bcmendian.h> #include <bcmdevs.h> #include <proto/ethernet.h> #include <proto/vlan.h> #include <proto/bcmip.h> #include <proto/802.1d.h> #include <proto/802.11.h> void *_bcmutils_dummy_fn = NULL; #ifdef BCMDRIVER /* copy a pkt buffer chain into a buffer */ uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf) { uint n, ret = 0; if (len < 0) len = 4096; /* "infinite" */ /* skip 'offset' bytes */ for (; p && offset; p = PKTNEXT(osh, p)) { if (offset < (uint)PKTLEN(osh, p)) break; offset -= PKTLEN(osh, p); } if (!p) return 0; /* copy the data */ for (; p && len; p = PKTNEXT(osh, p)) { n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); bcopy(PKTDATA(osh, p) + offset, buf, n); buf += n; len -= n; ret += n; offset = 0; } return ret; } /* copy a buffer into a pkt buffer chain */ uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf) { uint n, ret = 0; /* skip 'offset' bytes */ for (; p && offset; p = PKTNEXT(osh, p)) { if (offset < (uint)PKTLEN(osh, p)) break; offset -= PKTLEN(osh, p); } if (!p) return 0; /* copy the data */ for (; p && len; p = PKTNEXT(osh, p)) { n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); bcopy(buf, PKTDATA(osh, p) + offset, n); buf += n; len -= n; ret += n; offset = 0; } return ret; } /* return total length of buffer chain */ uint BCMFASTPATH pkttotlen(osl_t *osh, void *p) { uint total; total = 0; for (; p; p = PKTNEXT(osh, p)) total += PKTLEN(osh, p); return (total); } /* return the last buffer of chained pkt */ void * pktlast(osl_t *osh, void *p) { for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p)) ; return (p); } /* count segments of a chained packet */ uint BCMFASTPATH pktsegcnt(osl_t *osh, void *p) { uint cnt; for (cnt = 0; p; p = PKTNEXT(osh, p)) cnt++; return cnt; } /* * osl multiple-precedence packet queue * hi_prec is always >= the number of the highest non-empty precedence */ void * BCMFASTPATH pktq_penq(struct pktq *pq, int prec, void *p) { struct pktq_prec *q; ASSERT(prec >= 0 && prec < pq->num_prec); ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ ASSERT(!pktq_full(pq)); ASSERT(!pktq_pfull(pq, prec)); q = &pq->q[prec]; if (q->head) PKTSETLINK(q->tail, p); else q->head = p; q->tail = p; q->len++; pq->len++; if (pq->hi_prec < prec) pq->hi_prec = (uint8)prec; return p; } void * BCMFASTPATH pktq_penq_head(struct pktq *pq, int prec, void *p) { struct pktq_prec *q; ASSERT(prec >= 0 && prec < pq->num_prec); ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ ASSERT(!pktq_full(pq)); ASSERT(!pktq_pfull(pq, prec)); q = &pq->q[prec]; if (q->head == NULL) q->tail = p; PKTSETLINK(p, q->head); q->head = p; q->len++; pq->len++; if (pq->hi_prec < prec) pq->hi_prec = (uint8)prec; return p; } void * BCMFASTPATH pktq_pdeq(struct pktq *pq, int prec) { struct pktq_prec *q; void *p; ASSERT(prec >= 0 && prec < pq->num_prec); q = &pq->q[prec]; if ((p = q->head) == NULL) return NULL; if ((q->head = PKTLINK(p)) == NULL) q->tail = NULL; q->len--; pq->len--; PKTSETLINK(p, NULL); return p; } void * BCMFASTPATH pktq_pdeq_tail(struct pktq *pq, int 
prec) { struct pktq_prec *q; void *p, *prev; ASSERT(prec >= 0 && prec < pq->num_prec); q = &pq->q[prec]; if ((p = q->head) == NULL) return NULL; for (prev = NULL; p != q->tail; p = PKTLINK(p)) prev = p; if (prev) PKTSETLINK(prev, NULL); else q->head = NULL; q->tail = prev; q->len--; pq->len--; return p; } void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg) { struct pktq_prec *q; void *p, *prev = NULL; q = &pq->q[prec]; p = q->head; while (p) { if (fn == NULL || (*fn)(p, arg)) { bool head = (p == q->head); if (head) q->head = PKTLINK(p); else PKTSETLINK(prev, PKTLINK(p)); PKTSETLINK(p, NULL); PKTFREE(osh, p, dir); q->len--; pq->len--; p = (head ? q->head : PKTLINK(prev)); } else { prev = p; p = PKTLINK(p); } } if (q->head == NULL) { ASSERT(q->len == 0); q->tail = NULL; } } bool BCMFASTPATH pktq_pdel(struct pktq *pq, void *pktbuf, int prec) { struct pktq_prec *q; void *p; ASSERT(prec >= 0 && prec < pq->num_prec); if (!pktbuf) return FALSE; q = &pq->q[prec]; if (q->head == pktbuf) { if ((q->head = PKTLINK(pktbuf)) == NULL) q->tail = NULL; } else { for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p)) ; if (p == NULL) return FALSE; PKTSETLINK(p, PKTLINK(pktbuf)); if (q->tail == pktbuf) q->tail = p; } q->len--; pq->len--; PKTSETLINK(pktbuf, NULL); return TRUE; } void pktq_init(struct pktq *pq, int num_prec, int max_len) { int prec; ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC); /* pq is variable size; only zero out what's requested */ bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); pq->num_prec = (uint16)num_prec; pq->max = (uint16)max_len; for (prec = 0; prec < num_prec; prec++) pq->q[prec].max = pq->max; } void * BCMFASTPATH pktq_deq(struct pktq *pq, int *prec_out) { struct pktq_prec *q; void *p; int prec; if (pq->len == 0) return NULL; while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; q = &pq->q[prec]; if ((p = q->head) == NULL) return NULL; if ((q->head = PKTLINK(p)) == NULL) q->tail = NULL; q->len--; pq->len--; if (prec_out) *prec_out = prec; PKTSETLINK(p, NULL); return p; } void * BCMFASTPATH pktq_deq_tail(struct pktq *pq, int *prec_out) { struct pktq_prec *q; void *p, *prev; int prec; if (pq->len == 0) return NULL; for (prec = 0; prec < pq->hi_prec; prec++) if (pq->q[prec].head) break; q = &pq->q[prec]; if ((p = q->head) == NULL) return NULL; for (prev = NULL; p != q->tail; p = PKTLINK(p)) prev = p; if (prev) PKTSETLINK(prev, NULL); else q->head = NULL; q->tail = prev; q->len--; pq->len--; if (prec_out) *prec_out = prec; PKTSETLINK(p, NULL); return p; } void * pktq_peek(struct pktq *pq, int *prec_out) { int prec; if (pq->len == 0) return NULL; while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; if (prec_out) *prec_out = prec; return (pq->q[prec].head); } void * pktq_peek_tail(struct pktq *pq, int *prec_out) { int prec; if (pq->len == 0) return NULL; for (prec = 0; prec < pq->hi_prec; prec++) if (pq->q[prec].head) break; if (prec_out) *prec_out = prec; return (pq->q[prec].tail); } void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg) { int prec; for (prec = 0; prec < pq->num_prec; prec++) pktq_pflush(osh, pq, prec, dir, fn, arg); if (fn == NULL) ASSERT(pq->len == 0); } /* Return sum of lengths of a specific set of precedences */ int pktq_mlen(struct pktq *pq, uint prec_bmp) { int prec, len; len = 0; for (prec = 0; prec <= pq->hi_prec; prec++) if (prec_bmp & (1 << prec)) len += pq->q[prec].len; return len; } /* Priority dequeue from a 
specific set of precedences */ void * BCMFASTPATH pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) { struct pktq_prec *q; void *p; int prec; if (pq->len == 0) return NULL; while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) if (prec-- == 0) return NULL; q = &pq->q[prec]; if ((p = q->head) == NULL) return NULL; if ((q->head = PKTLINK(p)) == NULL) q->tail = NULL; q->len--; if (prec_out) *prec_out = prec; pq->len--; PKTSETLINK(p, NULL); return p; } #endif /* BCMDRIVER */ const unsigned char bcm_ctype[] = { _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */ _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C, _BCM_C, /* 8-15 */ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */ _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */ _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */ _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */ _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */ _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U, /* 64-71 */ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */ _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */ _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L, /* 96-103 */ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */ _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */ _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */ }; ulong BCMROMFN(bcm_strtoul)(char *cp, char **endp, uint base) { ulong result, last_result = 0, value; bool minus; minus = FALSE; while (bcm_isspace(*cp)) cp++; if (cp[0] == '+') cp++; else if (cp[0] == '-') { minus = TRUE; cp++; } if (base == 0) { if (cp[0] == '0') { if ((cp[1] == 'x') || (cp[1] == 'X')) { base = 16; cp = &cp[2]; } else { base = 8; cp = &cp[1]; } } else base = 10; } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) { cp = &cp[2]; } result = 0; while (bcm_isxdigit(*cp) && (value = bcm_isdigit(*cp) ? 
*cp-'0' : bcm_toupper(*cp)-'A'+10) < base) { result = result*base + value; /* Detected overflow */ if (result < last_result && !minus) return (ulong)-1; last_result = result; cp++; } if (minus) result = (ulong)(-(long)result); if (endp) *endp = (char *)cp; return (result); } int BCMROMFN(bcm_atoi)(char *s) { return (int)bcm_strtoul(s, NULL, 10); } /* return pointer to location of substring 'needle' in 'haystack' */ char* BCMROMFN(bcmstrstr)(char *haystack, char *needle) { int len, nlen; int i; if ((haystack == NULL) || (needle == NULL)) return (haystack); nlen = strlen(needle); len = strlen(haystack) - nlen + 1; for (i = 0; i < len; i++) if (memcmp(needle, &haystack[i], nlen) == 0) return (&haystack[i]); return (NULL); } char* BCMROMFN(bcmstrcat)(char *dest, const char *src) { char *p; p = dest + strlen(dest); while ((*p++ = *src++) != '\0') ; return (dest); } char* BCMROMFN(bcmstrncat)(char *dest, const char *src, uint size) { char *endp; char *p; p = dest + strlen(dest); endp = p + size; while (p != endp && (*p++ = *src++) != '\0') ; return (dest); } /**************************************************************************** * Function: bcmstrtok * * Purpose: * Tokenizes a string. This function is conceptually similiar to ANSI C strtok(), * but allows strToken() to be used by different strings or callers at the same * time. Each call modifies '*string' by substituting a NULL character for the * first delimiter that is encountered, and updates 'string' to point to the char * after the delimiter. Leading delimiters are skipped. * * Parameters: * string (mod) Ptr to string ptr, updated by token. * delimiters (in) Set of delimiter characters. * tokdelim (out) Character that delimits the returned token. (May * be set to NULL if token delimiter is not required). * * Returns: Pointer to the next token found. NULL when no more tokens are found. ***************************************************************************** */ char * bcmstrtok(char **string, const char *delimiters, char *tokdelim) { unsigned char *str; unsigned long map[8]; int count; char *nextoken; if (tokdelim != NULL) { /* Prime the token delimiter */ *tokdelim = '\0'; } /* Clear control map */ for (count = 0; count < 8; count++) { map[count] = 0; } /* Set bits in delimiter table */ do { map[*delimiters >> 5] |= (1 << (*delimiters & 31)); } while (*delimiters++); str = (unsigned char*)*string; /* Find beginning of token (skip over leading delimiters). Note that * there is no token iff this loop sets str to point to the terminal * null (*str == '\0') */ while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) { str++; } nextoken = (char*)str; /* Find the end of the token. If it is not the end of the string, * put a null there. */ for (; *str; str++) { if (map[*str >> 5] & (1 << (*str & 31))) { if (tokdelim != NULL) { *tokdelim = *str; } *str++ = '\0'; break; } } *string = (char*)str; /* Determine if a token has been found. */ if (nextoken == (char *) str) { return NULL; } else { return nextoken; } } #define xToLower(C) \ ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C) /**************************************************************************** * Function: bcmstricmp * * Purpose: Compare to strings case insensitively. * * Parameters: s1 (in) First string to compare. * s2 (in) Second string to compare. * * Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if * t1 > t2, when ignoring case sensitivity. 
***************************************************************************** */ int bcmstricmp(const char *s1, const char *s2) { char dc, sc; while (*s2 && *s1) { dc = xToLower(*s1); sc = xToLower(*s2); if (dc < sc) return -1; if (dc > sc) return 1; s1++; s2++; } if (*s1 && !*s2) return 1; if (!*s1 && *s2) return -1; return 0; } /**************************************************************************** * Function: bcmstrnicmp * * Purpose: Compare to strings case insensitively, upto a max of 'cnt' * characters. * * Parameters: s1 (in) First string to compare. * s2 (in) Second string to compare. * cnt (in) Max characters to compare. * * Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if * t1 > t2, when ignoring case sensitivity. ***************************************************************************** */ int bcmstrnicmp(const char* s1, const char* s2, int cnt) { char dc, sc; while (*s2 && *s1 && cnt) { dc = xToLower(*s1); sc = xToLower(*s2); if (dc < sc) return -1; if (dc > sc) return 1; s1++; s2++; cnt--; } if (!cnt) return 0; if (*s1 && !*s2) return 1; if (!*s1 && *s2) return -1; return 0; } /* parse a xx:xx:xx:xx:xx:xx format ethernet address */ int BCMROMFN(bcm_ether_atoe)(char *p, struct ether_addr *ea) { int i = 0; for (;;) { ea->octet[i++] = (char) bcm_strtoul(p, &p, 16); if (!*p++ || i == 6) break; } return (i == 6); } #if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER) /* registry routine buffer preparation utility functions: * parameter order is like strncpy, but returns count * of bytes copied. Minimum bytes copied is null char(1)/wchar(2) */ ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen) { ulong copyct = 1; ushort i; if (abuflen == 0) return 0; /* wbuflen is in bytes */ wbuflen /= sizeof(ushort); for (i = 0; i < wbuflen; ++i) { if (--abuflen == 0) break; *abuf++ = (char) *wbuf++; ++copyct; } *abuf = '\0'; return copyct; } #endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */ char * bcm_ether_ntoa(const struct ether_addr *ea, char *buf) { static const char template[] = "%02x:%02x:%02x:%02x:%02x:%02x"; snprintf(buf, 18, template, ea->octet[0]&0xff, ea->octet[1]&0xff, ea->octet[2]&0xff, ea->octet[3]&0xff, ea->octet[4]&0xff, ea->octet[5]&0xff); return (buf); } char * bcm_ip_ntoa(struct ipv4_addr *ia, char *buf) { snprintf(buf, 16, "%d.%d.%d.%d", ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]); return (buf); } #ifdef BCMDRIVER void bcm_mdelay(uint ms) { uint i; for (i = 0; i < ms; i++) { OSL_DELAY(1000); } } #if defined(DHD_DEBUG) /* pretty hex print a pkt buffer chain */ void prpkt(const char *msg, osl_t *osh, void *p0) { void *p; if (msg && (msg[0] != '\0')) printf("%s:\n", msg); for (p = p0; p; p = PKTNEXT(osh, p)) prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); } #endif /* Takes an Ethernet frame and sets out-of-bound PKTPRIO. * Also updates the inplace vlan tag if requested. * For debugging, it returns an indication of what it did. 
*/ uint BCMFASTPATH pktsetprio(void *pkt, bool update_vtag) { struct ether_header *eh; struct ethervlan_header *evh; uint8 *pktdata; int priority = 0; int rc = 0; pktdata = (uint8 *) PKTDATA(NULL, pkt); ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16))); eh = (struct ether_header *) pktdata; if (ntoh16(eh->ether_type) == ETHER_TYPE_8021Q) { uint16 vlan_tag; int vlan_prio, dscp_prio = 0; evh = (struct ethervlan_header *)eh; vlan_tag = ntoh16(evh->vlan_tag); vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK; if (ntoh16(evh->ether_type) == ETHER_TYPE_IP) { uint8 *ip_body = pktdata + sizeof(struct ethervlan_header); uint8 tos_tc = IP_TOS46(ip_body); dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); } /* DSCP priority gets precedence over 802.1P (vlan tag) */ if (dscp_prio != 0) { priority = dscp_prio; rc |= PKTPRIO_VDSCP; } else { priority = vlan_prio; rc |= PKTPRIO_VLAN; } /* * If the DSCP priority is not the same as the VLAN priority, * then overwrite the priority field in the vlan tag, with the * DSCP priority value. This is required for Linux APs because * the VLAN driver on Linux, overwrites the skb->priority field * with the priority value in the vlan tag */ if (update_vtag && (priority != vlan_prio)) { vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT; evh->vlan_tag = hton16(vlan_tag); rc |= PKTPRIO_UPD; } } else if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { uint8 *ip_body = pktdata + sizeof(struct ether_header); uint8 tos_tc = IP_TOS46(ip_body); priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); rc |= PKTPRIO_DSCP; } ASSERT(priority >= 0 && priority <= MAXPRIO); PKTSETPRIO(pkt, priority); return (rc | priority); } static char bcm_undeferrstr[32]; static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE; /* Convert the error codes into related error strings */ const char * bcmerrorstr(int bcmerror) { /* check if someone added a bcmerror code but forgot to add errorstring */ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1)); if (bcmerror > 0 || bcmerror < BCME_LAST) { snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror); return bcm_undeferrstr; } ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN); return bcmerrorstrtable[-bcmerror]; } /* iovar table lookup */ const bcm_iovar_t* bcm_iovar_lookup(const bcm_iovar_t *table, const char *name) { const bcm_iovar_t *vi; const char *lookup_name; /* skip any ':' delimited option prefixes */ lookup_name = strrchr(name, ':'); if (lookup_name != NULL) lookup_name++; else lookup_name = name; ASSERT(table != NULL); for (vi = table; vi->name; vi++) { if (!strcmp(vi->name, lookup_name)) return vi; } /* ran to end of table */ return NULL; /* var name not found */ } int bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set) { int bcmerror = 0; /* length check on io buf */ switch (vi->type) { case IOVT_BOOL: case IOVT_INT8: case IOVT_INT16: case IOVT_INT32: case IOVT_UINT8: case IOVT_UINT16: case IOVT_UINT32: /* all integers are int32 sized args at the ioctl interface */ if (len < (int)sizeof(int)) { bcmerror = BCME_BUFTOOSHORT; } break; case IOVT_BUFFER: /* buffer must meet minimum length requirement */ if (len < vi->minlen) { bcmerror = BCME_BUFTOOSHORT; } break; case IOVT_VOID: if (!set) { /* Cannot return nil... 
*/ bcmerror = BCME_UNSUPPORTED; } else if (len) { /* Set is an action w/o parameters */ bcmerror = BCME_BUFTOOLONG; } break; default: /* unknown type for length check in iovar info */ ASSERT(0); bcmerror = BCME_UNSUPPORTED; } return bcmerror; } #endif /* BCMDRIVER */ /******************************************************************************* * crc8 * * Computes a crc8 over the input data using the polynomial: * * x^8 + x^7 +x^6 + x^4 + x^2 + 1 * * The caller provides the initial value (either CRC8_INIT_VALUE * or the previous returned value) to allow for processing of * discontiguous blocks of data. When generating the CRC the * caller is responsible for complementing the final return value * and inserting it into the byte stream. When checking, a final * return value of CRC8_GOOD_VALUE indicates a valid CRC. * * Reference: Dallas Semiconductor Application Note 27 * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt * * **************************************************************************** */ static const uint8 crc8_table[256] = { 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F }; #define CRC_INNER_LOOP(n, c, x) \ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff] uint8 BCMROMFN(hndcrc8)( uint8 *pdata, /* pointer to array of data to process */ uint nbytes, /* number of input data bytes to process */ uint8 crc /* either CRC8_INIT_VALUE or previous return value */ ) { /* hard code the crc loop instead of using CRC_INNER_LOOP macro * to avoid the undefined and unnecessary (uint8 >> 8) operation. 
*/ while (nbytes-- > 0) crc = crc8_table[(crc ^ *pdata++) & 0xff]; return crc; } /******************************************************************************* * crc16 * * Computes a crc16 over the input data using the polynomial: * * x^16 + x^12 +x^5 + 1 * * The caller provides the initial value (either CRC16_INIT_VALUE * or the previous returned value) to allow for processing of * discontiguous blocks of data. When generating the CRC the * caller is responsible for complementing the final return value * and inserting it into the byte stream. When checking, a final * return value of CRC16_GOOD_VALUE indicates a valid CRC. * * Reference: Dallas Semiconductor Application Note 27 * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt * * **************************************************************************** */ static const uint16 crc16_table[256] = { 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF, 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7, 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E, 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876, 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD, 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5, 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C, 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974, 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB, 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3, 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A, 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72, 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9, 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1, 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738, 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70, 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7, 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF, 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036, 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E, 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5, 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD, 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134, 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C, 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3, 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB, 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232, 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A, 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1, 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9, 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330, 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78 }; uint16 BCMROMFN(hndcrc16)( uint8 *pdata, /* pointer to array of data to process */ uint nbytes, /* number of input data bytes to process */ uint16 crc /* either CRC16_INIT_VALUE or previous return value */ ) { while (nbytes-- > 0) CRC_INNER_LOOP(16, crc, *pdata++); return crc; } static const uint32 crc32_table[256] = { 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 
0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; /* * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if * accumulating over multiple pieces. 
*/ uint32 BCMROMFN(hndcrc32)(uint8 *pdata, uint nbytes, uint32 crc) { uint8 *pend; #ifdef __mips__ uint8 tmp[4]; ulong *tptr = (ulong *)tmp; /* in case the beginning of the buffer isn't aligned */ pend = (uint8 *)((uint)(pdata + 3) & 0xfffffffc); nbytes -= (pend - pdata); while (pdata < pend) CRC_INNER_LOOP(32, crc, *pdata++); /* handle bulk of data as 32-bit words */ pend = pdata + (nbytes & 0xfffffffc); while (pdata < pend) { *tptr = *(ulong *)pdata; pdata += sizeof(ulong *); CRC_INNER_LOOP(32, crc, tmp[0]); CRC_INNER_LOOP(32, crc, tmp[1]); CRC_INNER_LOOP(32, crc, tmp[2]); CRC_INNER_LOOP(32, crc, tmp[3]); } /* 1-3 bytes at end of buffer */ pend = pdata + (nbytes & 0x03); while (pdata < pend) CRC_INNER_LOOP(32, crc, *pdata++); #else pend = pdata + nbytes; while (pdata < pend) CRC_INNER_LOOP(32, crc, *pdata++); #endif /* __mips__ */ return crc; } #ifdef notdef #define CLEN 1499 /* CRC Length */ #define CBUFSIZ (CLEN+4) #define CNBUFS 5 /* # of bufs */ void testcrc32(void) { uint j, k, l; uint8 *buf; uint len[CNBUFS]; uint32 crcr; uint32 crc32tv[CNBUFS] = {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110}; ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL); /* step through all possible alignments */ for (l = 0; l <= 4; l++) { for (j = 0; j < CNBUFS; j++) { len[j] = CLEN; for (k = 0; k < len[j]; k++) *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff; } for (j = 0; j < CNBUFS; j++) { crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE); ASSERT(crcr == crc32tv[j]); } } MFREE(buf, CBUFSIZ*CNBUFS); return; } #endif /* notdef */ /* * Advance from the current 1-byte tag/1-byte length/variable-length value * triple, to the next, returning a pointer to the next. * If the current or next TLV is invalid (does not fit in given buffer length), * NULL is returned. * *buflen is not modified if the TLV elt parameter is invalid, or is decremented * by the TLV parameter's length if it is valid. */ bcm_tlv_t * BCMROMFN(bcm_next_tlv)(bcm_tlv_t *elt, int *buflen) { int len; /* validate current elt */ if (!bcm_valid_tlv(elt, *buflen)) return NULL; /* advance to next elt */ len = elt->len; elt = (bcm_tlv_t*)(elt->data + len); *buflen -= (2 + len); /* validate next elt */ if (!bcm_valid_tlv(elt, *buflen)) return NULL; return elt; } /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer to the substring whose first element * matches tag */ bcm_tlv_t * BCMROMFN(bcm_parse_tlvs)(void *buf, int buflen, uint key) { bcm_tlv_t *elt; int totlen; elt = (bcm_tlv_t*)buf; totlen = buflen; /* find tagged parameter */ while (totlen >= 2) { int len = elt->len; /* validate remaining totlen */ if ((elt->id == key) && (totlen >= (len + 2))) return (elt); elt = (bcm_tlv_t*)((uint8*)elt + (len + 2)); totlen -= (len + 2); } return NULL; } /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer to the substring whose first element * matches tag. Stop parsing when we see an element whose ID is greater * than the target key. 
*/ bcm_tlv_t * BCMROMFN(bcm_parse_ordered_tlvs)(void *buf, int buflen, uint key) { bcm_tlv_t *elt; int totlen; elt = (bcm_tlv_t*)buf; totlen = buflen; /* find tagged parameter */ while (totlen >= 2) { uint id = elt->id; int len = elt->len; /* Punt if we start seeing IDs > than target key */ if (id > key) return (NULL); /* validate remaining totlen */ if ((id == key) && (totlen >= (len + 2))) return (elt); elt = (bcm_tlv_t*)((uint8*)elt + (len + 2)); totlen -= (len + 2); } return NULL; } #if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \ defined(DHD_DEBUG) int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) { int i; char* p = buf; char hexstr[16]; int slen = 0, nlen = 0; uint32 bit; const char* name; if (len < 2 || !buf) return 0; buf[0] = '\0'; for (i = 0; flags != 0; i++) { bit = bd[i].bit; name = bd[i].name; if (bit == 0 && flags != 0) { /* print any unnamed bits */ snprintf(hexstr, 16, "0x%X", flags); name = hexstr; flags = 0; /* exit loop */ } else if ((flags & bit) == 0) continue; flags &= ~bit; nlen = strlen(name); slen += nlen; /* count btwn flag space */ if (flags != 0) slen += 1; /* need NULL char as well */ if (len <= slen) break; /* copy NULL char but don't count it */ strncpy(p, name, nlen + 1); p += nlen; /* copy btwn flag space and NULL char */ if (flags != 0) p += snprintf(p, 2, " "); len -= slen; } /* indicate the str was too short */ if (flags != 0) { if (len < 2) p -= 2 - len; /* overwrite last char */ p += snprintf(p, 2, ">"); } return (int)(p - buf); } #endif #if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \ defined(DHD_DEBUG) || defined(WLMEDIA_PEAKRATE) /* print bytes formatted as hex to a string. return the resulting string length */ int bcm_format_hex(char *str, const void *bytes, int len) { int i; char *p = str; const uint8 *src = (const uint8*)bytes; for (i = 0; i < len; i++) { p += snprintf(p, 3, "%02X", *src); src++; } return (int)(p - str); } #endif /* pretty hex print a contiguous buffer */ void prhex(const char *msg, uchar *buf, uint nbytes) { char line[128], *p; int len = sizeof(line); int nchar; uint i; if (msg && (msg[0] != '\0')) printf("%s:\n", msg); p = line; for (i = 0; i < nbytes; i++) { if (i % 16 == 0) { nchar = snprintf(p, len, " %04d: ", i); /* line prefix */ p += nchar; len -= nchar; } if (len > 0) { nchar = snprintf(p, len, "%02x ", buf[i]); p += nchar; len -= nchar; } if (i % 16 == 15) { printf("%s\n", line); /* flush line */ p = line; len = sizeof(line); } } /* flush last partial line */ if (p != line) printf("%s\n", line); } static const char *crypto_algo_names[] = { "NONE", "WEP1", "TKIP", "WEP128", "AES_CCM", "AES_OCB_MSDU", "AES_OCB_MPDU", "NALG" "UNDEF", "UNDEF", "UNDEF", #ifdef BCMWAPI_WPI "WAPI", #endif /* BCMWAPI_WPI */ "UNDEF" }; const char * bcm_crypto_algo_name(uint algo) { return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR"; } char * bcm_chipname(uint chipid, char *buf, uint len) { const char *fmt; fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x"; snprintf(buf, len, fmt, chipid); return buf; } /* Produce a human-readable string for boardrev */ char * bcm_brev_str(uint32 brev, char *buf) { if (brev < 0x100) snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf); else snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 
'P' : 'A', brev & 0xfff); return (buf); } #define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */ /* dump large strings to console */ void printbig(char *buf) { uint len, max_len; char c; len = strlen(buf); max_len = BUFSIZE_TODUMP_ATONCE; while (len > max_len) { c = buf[max_len]; buf[max_len] = '\0'; printf("%s", buf); buf[max_len] = c; buf += max_len; len -= max_len; } /* print the remaining string */ printf("%s\n", buf); return; } /* routine to dump fields in a fileddesc structure */ uint bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array, char *buf, uint32 bufsize) { uint filled_len; int len; struct fielddesc *cur_ptr; filled_len = 0; cur_ptr = fielddesc_array; while (bufsize > 1) { if (cur_ptr->nameandfmt == NULL) break; len = snprintf(buf, bufsize, cur_ptr->nameandfmt, read_rtn(arg0, arg1, cur_ptr->offset)); /* check for snprintf overflow or error */ if (len < 0 || (uint32)len >= bufsize) len = bufsize - 1; buf += len; bufsize -= len; filled_len += len; cur_ptr++; } return filled_len; } uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen) { uint len; len = strlen(name) + 1; if ((len + datalen) > buflen) return 0; strncpy(buf, name, buflen); /* append data onto the end of the name string */ memcpy(&buf[len], data, datalen); len += datalen; return len; } /* Quarter dBm units to mW * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 * Table is offset so the last entry is largest mW value that fits in * a uint16. */ #define QDBM_OFFSET 153 /* Offset for first entry */ #define QDBM_TABLE_LEN 40 /* Table size */ /* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 */ #define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ /* Largest mW value that will round down to the last table entry, * QDBM_OFFSET + QDBM_TABLE_LEN-1. * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. */ #define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { /* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ /* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, /* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, /* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, /* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, /* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 }; uint16 BCMROMFN(bcm_qdbm_to_mw)(uint8 qdbm) { uint factor = 1; int idx = qdbm - QDBM_OFFSET; if (idx >= QDBM_TABLE_LEN) { /* clamp to max uint16 mW value */ return 0xFFFF; } /* scale the qdBm index up to the range of the table 0-40 * where an offset of 40 qdBm equals a factor of 10 mW. */ while (idx < 0) { idx += 40; factor *= 10; } /* return the mW value scaled down to the correct factor of 10, * adding in factor/2 to get proper rounding. 
*/ return ((nqdBm_to_mW_map[idx] + factor/2) / factor); } uint8 BCMROMFN(bcm_mw_to_qdbm)(uint16 mw) { uint8 qdbm; int offset; uint mw_uint = mw; uint boundary; /* handle boundary case */ if (mw_uint <= 1) return 0; offset = QDBM_OFFSET; /* move mw into the range of the table */ while (mw_uint < QDBM_TABLE_LOW_BOUND) { mw_uint *= 10; offset -= 40; } for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) { boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] - nqdBm_to_mW_map[qdbm])/2; if (mw_uint < boundary) break; } qdbm += (uint8)offset; return (qdbm); } uint BCMROMFN(bcm_bitcount)(uint8 *bitmap, uint length) { uint bitcount = 0, i; uint8 tmp; for (i = 0; i < length; i++) { tmp = bitmap[i]; while (tmp) { bitcount++; tmp &= (tmp - 1); } } return bitcount; } #ifdef BCMDRIVER /* Initialization of bcmstrbuf structure */ void bcm_binit(struct bcmstrbuf *b, char *buf, uint size) { b->origsize = b->size = size; b->origbuf = b->buf = buf; } /* Buffer sprintf wrapper to guard against buffer overflow */ int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) { va_list ap; int r; va_start(ap, fmt); r = vsnprintf(b->buf, b->size, fmt, ap); /* Non Ansi C99 compliant returns -1, * Ansi compliant return r >= b->size, * bcmstdlib returns 0, handle all */ if ((r == -1) || (r >= (int)b->size) || (r == 0)) { b->size = 0; } else { b->size -= r; b->buf += r; } va_end(ap); return r; } void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount) { int i; for (i = 0; i < num_bytes; i++) { num[i] += amount; if (num[i] >= amount) break; amount = 1; } } int bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes) { int i; for (i = nbytes - 1; i >= 0; i--) { if (arg1[i] != arg2[i]) return (arg1[i] - arg2[i]); } return 0; } void bcm_print_bytes(char *name, const uchar *data, int len) { int i; int per_line = 0; printf("%s: %d \n", name ? name : "", len); for (i = 0; i < len; i++) { printf("%02x ", *data++); per_line++; if (per_line == 16) { per_line = 0; printf("\n"); } } printf("\n"); } #if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) #define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1) int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len) { uint i, c; char *p = buf; char *endp = buf + SSID_FMT_BUF_LEN; if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN; for (i = 0; i < ssid_len; i++) { c = (uint)ssid[i]; if (c == '\\') { *p++ = '\\'; *p++ = '\\'; } else if (bcm_isprint((uchar)c)) { *p++ = (char)c; } else { p += snprintf(p, (endp - p), "\\x%02X", c); } } *p = '\0'; ASSERT(p < endp); return (int)(p - buf); } #endif #endif /* BCMDRIVER */ /* * ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL. * also accepts nvram files which are already in the format of <var1>=<value>\0\<var2>=<value2>\0 * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs. * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs. 
*/ unsigned int process_nvram_vars(char *varbuf, unsigned int len) { char *dp; bool findNewline; int column; unsigned int buf_len, n; unsigned int pad = 0; dp = varbuf; findNewline = FALSE; column = 0; for (n = 0; n < len; n++) { if (varbuf[n] == '\r') continue; if (findNewline && varbuf[n] != '\n') continue; findNewline = FALSE; if (varbuf[n] == '#') { findNewline = TRUE; continue; } if (varbuf[n] == '\n') { if (column == 0) continue; *dp++ = 0; column = 0; continue; } *dp++ = varbuf[n]; column++; } buf_len = (unsigned int)(dp - varbuf); if (buf_len % 4) { pad = 4 - buf_len % 4; if (pad && (buf_len + pad <= len)) { buf_len += pad; } } while (dp < varbuf + n) *dp++ = 0; return buf_len; }
hroark13/n861_two_n860
drivers/net/wireless/bcmdhd/bcmutils.c
C
gpl-2.0
45582
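For reference, a stand-alone sketch of the buffer layout bcm_mkiovar() builds: the NUL-terminated name immediately followed by the raw value bytes, returning 0 when the pair would not fit. The helper name, the iovar name "mpc" and the buffer size are illustrative assumptions, not from the source.

#include <stdio.h>
#include <string.h>

/* mirrors the layout built by bcm_mkiovar() above: "<name>\0<value bytes>" */
static unsigned int mk_iovar(const char *name, const void *data,
			     unsigned int datalen, char *buf, unsigned int buflen)
{
	unsigned int len = strlen(name) + 1;	/* name plus its NUL */

	if (len + datalen > buflen)
		return 0;			/* would overflow the caller's buffer */
	memcpy(buf, name, len);			/* copy name and terminator */
	memcpy(buf + len, data, datalen);	/* value bytes follow directly */
	return len + datalen;			/* total bytes used */
}

int main(void)
{
	char buf[32];
	int on = 1;				/* hypothetical iovar value */
	unsigned int used = mk_iovar("mpc", &on, sizeof(on), buf, sizeof(buf));

	printf("packed %u bytes (4 for \"mpc\\0\" + 4 value bytes)\n", used);
	return 0;
}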
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/iopoll.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include "mdss_fb.h" #include "mdss_mdp.h" #include "mdss_timeout.h" /* wait for at least 2 vsyncs for lowest refresh rate (24hz) */ #define VSYNC_TIMEOUT_US 100000 #define MDP_INTR_MASK_INTF_VSYNC(intf_num) \ (1 << (2 * (intf_num - MDSS_MDP_INTF0) + MDSS_MDP_IRQ_INTF_VSYNC)) /* intf timing settings */ struct intf_timing_params { u32 width; u32 height; u32 xres; u32 yres; u32 h_back_porch; u32 h_front_porch; u32 v_back_porch; u32 v_front_porch; u32 hsync_pulse_width; u32 vsync_pulse_width; u32 border_clr; u32 underflow_clr; u32 hsync_skew; }; struct mdss_mdp_video_ctx { u32 intf_num; char __iomem *base; u32 intf_type; u8 ref_cnt; u8 timegen_en; bool polling_en; u32 poll_cnt; struct completion vsync_comp; int wait_pending; atomic_t vsync_ref; spinlock_t vsync_lock; struct list_head vsync_handlers; }; static inline void mdp_video_write(struct mdss_mdp_video_ctx *ctx, u32 reg, u32 val) { writel_relaxed(val, ctx->base + reg); } static inline u32 mdp_video_read(struct mdss_mdp_video_ctx *ctx, u32 reg) { return readl_relaxed(ctx->base + reg); } static inline u32 mdss_mdp_video_line_count(struct mdss_mdp_ctl *ctl) { struct mdss_mdp_video_ctx *ctx = ctl->priv_data; u32 line_cnt = 0; mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); line_cnt = mdp_video_read(ctx, MDSS_MDP_REG_INTF_LINE_COUNT); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); return line_cnt; } int mdss_mdp_video_addr_setup(struct mdss_data_type *mdata, u32 *offsets, u32 count) { struct mdss_mdp_video_ctx *head; u32 i; head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_video_ctx) * count, GFP_KERNEL); if (!head) return -ENOMEM; for (i = 0; i < count; i++) { head[i].base = mdata->mdp_base + offsets[i]; pr_debug("adding Video Intf #%d offset=0x%x virt=%p\n", i, offsets[i], head[i].base); head[i].ref_cnt = 0; head[i].intf_num = i + MDSS_MDP_INTF0; INIT_LIST_HEAD(&head[i].vsync_handlers); } mdata->video_intf = head; mdata->nintf = count; return 0; } static int mdss_mdp_video_timegen_setup(struct mdss_mdp_video_ctx *ctx, struct intf_timing_params *p) { u32 hsync_period, vsync_period; u32 hsync_start_x, hsync_end_x, display_v_start, display_v_end; u32 active_h_start, active_h_end, active_v_start, active_v_end; u32 den_polarity, hsync_polarity, vsync_polarity; u32 display_hctl, active_hctl, hsync_ctl, polarity_ctl; hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width + p->h_front_porch; vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height + p->v_front_porch; display_v_start = ((p->vsync_pulse_width + p->v_back_porch) * hsync_period) + p->hsync_skew; display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) + p->hsync_skew - 1; if (ctx->intf_type == MDSS_INTF_EDP) { display_v_start += p->hsync_pulse_width + p->h_back_porch; display_v_end -= p->h_front_porch; } hsync_start_x = p->h_back_porch + p->hsync_pulse_width; hsync_end_x = hsync_period - p->h_front_porch - 1; if 
(p->width != p->xres) { active_h_start = hsync_start_x; active_h_end = active_h_start + p->xres - 1; } else { active_h_start = 0; active_h_end = 0; } if (p->height != p->yres) { active_v_start = display_v_start; active_v_end = active_v_start + (p->yres * hsync_period) - 1; } else { active_v_start = 0; active_v_end = 0; } if (active_h_end) { active_hctl = (active_h_end << 16) | active_h_start; active_hctl |= BIT(31); /* ACTIVE_H_ENABLE */ } else { active_hctl = 0; } if (active_v_end) active_v_start |= BIT(31); /* ACTIVE_V_ENABLE */ hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width; display_hctl = (hsync_end_x << 16) | hsync_start_x; den_polarity = 0; if (MDSS_INTF_HDMI == ctx->intf_type) { hsync_polarity = p->yres >= 720 ? 0 : 1; vsync_polarity = p->yres >= 720 ? 0 : 1; } else { hsync_polarity = 0; vsync_polarity = 0; } polarity_ctl = (den_polarity << 2) | /* DEN Polarity */ (vsync_polarity << 1) | /* VSYNC Polarity */ (hsync_polarity << 0); /* HSYNC Polarity */ mdp_video_write(ctx, MDSS_MDP_REG_INTF_HSYNC_CTL, hsync_ctl); mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period); mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F0, p->vsync_pulse_width * hsync_period); mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_HCTL, display_hctl); mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_START_F0, display_v_start); mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_END_F0, display_v_end); mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_HCTL, active_hctl); mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_V_START_F0, active_v_start); mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_V_END_F0, active_v_end); mdp_video_write(ctx, MDSS_MDP_REG_INTF_BORDER_COLOR, p->border_clr); mdp_video_write(ctx, MDSS_MDP_REG_INTF_UNDERFLOW_COLOR, p->underflow_clr); mdp_video_write(ctx, MDSS_MDP_REG_INTF_HSYNC_SKEW, p->hsync_skew); mdp_video_write(ctx, MDSS_MDP_REG_INTF_POLARITY_CTL, polarity_ctl); mdp_video_write(ctx, MDSS_MDP_REG_INTF_FRAME_LINE_COUNT_EN, 0x3); return 0; } static inline void video_vsync_irq_enable(struct mdss_mdp_ctl *ctl, bool clear) { struct mdss_mdp_video_ctx *ctx = ctl->priv_data; if (atomic_inc_return(&ctx->vsync_ref) == 1) mdss_mdp_irq_enable(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num); else if (clear) mdss_mdp_irq_clear(ctl->mdata, MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num); } static inline void video_vsync_irq_disable(struct mdss_mdp_ctl *ctl) { struct mdss_mdp_video_ctx *ctx = ctl->priv_data; if (atomic_dec_return(&ctx->vsync_ref) == 0) mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num); } static int mdss_mdp_video_add_vsync_handler(struct mdss_mdp_ctl *ctl, struct mdss_mdp_vsync_handler *handle) { struct mdss_mdp_video_ctx *ctx; unsigned long flags; int ret = 0; bool irq_en = false; if (!handle || !(handle->vsync_handler)) { ret = -EINVAL; goto exit; } ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data; if (!ctx) { pr_err("invalid ctx for ctl=%d\n", ctl->num); ret = -ENODEV; goto exit; } spin_lock_irqsave(&ctx->vsync_lock, flags); if (!handle->enabled) { handle->enabled = true; list_add(&handle->list, &ctx->vsync_handlers); irq_en = true; } spin_unlock_irqrestore(&ctx->vsync_lock, flags); if (irq_en) video_vsync_irq_enable(ctl, false); exit: return ret; } static int mdss_mdp_video_remove_vsync_handler(struct mdss_mdp_ctl *ctl, struct mdss_mdp_vsync_handler *handle) { struct mdss_mdp_video_ctx *ctx; unsigned long flags; bool irq_dis = false; ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data; if (!ctx) { pr_err("invalid ctx for ctl=%d\n", ctl->num); 
return -ENODEV; } spin_lock_irqsave(&ctx->vsync_lock, flags); if (handle->enabled) { handle->enabled = false; list_del_init(&handle->list); irq_dis = true; } spin_unlock_irqrestore(&ctx->vsync_lock, flags); if (irq_dis) video_vsync_irq_disable(ctl); return 0; } static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl) { struct mdss_mdp_video_ctx *ctx; struct mdss_mdp_vsync_handler *tmp, *handle; int rc; pr_debug("stop ctl=%d\n", ctl->num); ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data; if (!ctx) { pr_err("invalid ctx for ctl=%d\n", ctl->num); return -ENODEV; } if (ctx->timegen_en) { rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK, NULL); if (rc == -EBUSY) { pr_debug("intf #%d busy don't turn off\n", ctl->intf_num); return rc; } WARN(rc, "intf %d blank error (%d)\n", ctl->intf_num, rc); mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); ctx->timegen_en = false; rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_OFF, NULL); WARN(rc, "intf %d timegen off error (%d)\n", ctl->intf_num, rc); mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_UNDER_RUN, ctl->intf_num); } list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list) mdss_mdp_video_remove_vsync_handler(ctl, handle); mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num, NULL, NULL); mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_UNDER_RUN, ctl->intf_num, NULL, NULL); ctx->ref_cnt--; ctl->priv_data = NULL; return 0; } static void mdss_mdp_video_vsync_intr_done(void *arg) { struct mdss_mdp_ctl *ctl = arg; struct mdss_mdp_video_ctx *ctx = ctl->priv_data; struct mdss_mdp_vsync_handler *tmp; ktime_t vsync_time; if (!ctx) { pr_err("invalid ctx\n"); return; } vsync_time = ktime_get(); ctl->vsync_cnt++; pr_debug("intr ctl=%d vsync cnt=%u vsync_time=%d\n", ctl->num, ctl->vsync_cnt, (int)ktime_to_ms(vsync_time)); ctx->polling_en = false; complete_all(&ctx->vsync_comp); spin_lock(&ctx->vsync_lock); list_for_each_entry(tmp, &ctx->vsync_handlers, list) { tmp->vsync_handler(ctl, vsync_time); } spin_unlock(&ctx->vsync_lock); } static int mdss_mdp_video_pollwait(struct mdss_mdp_ctl *ctl) { struct mdss_mdp_video_ctx *ctx = ctl->priv_data; u32 mask, status; int rc; mask = MDP_INTR_MASK_INTF_VSYNC(ctl->intf_num); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); rc = readl_poll_timeout(ctl->mdata->mdp_base + MDSS_MDP_REG_INTR_STATUS, status, (status & mask) || try_wait_for_completion(&ctx->vsync_comp), 1000, VSYNC_TIMEOUT_US); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); if (rc == 0) { pr_debug("vsync poll successful! rc=%d status=0x%x\n", rc, status); ctx->poll_cnt++; if (status) { struct mdss_mdp_vsync_handler *tmp; unsigned long flags; ktime_t vsync_time = ktime_get(); spin_lock_irqsave(&ctx->vsync_lock, flags); list_for_each_entry(tmp, &ctx->vsync_handlers, list) tmp->vsync_handler(ctl, vsync_time); spin_unlock_irqrestore(&ctx->vsync_lock, flags); } } else { pr_warn("vsync poll timed out! rc=%d status=0x%x mask=0x%x\n", rc, status, mask); } return rc; } static int mdss_mdp_video_wait4comp(struct mdss_mdp_ctl *ctl, void *arg) { struct mdss_mdp_video_ctx *ctx; int rc; static int timeout_occurred; u32 prev_vsync_cnt; ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data; if (!ctx) { pr_err("invalid ctx\n"); return -ENODEV; } WARN(!ctx->wait_pending, "waiting without commit! 
ctl=%d", ctl->num); if (ctx->polling_en) { rc = mdss_mdp_video_pollwait(ctl); } else { prev_vsync_cnt = ctl->vsync_cnt; rc = wait_for_completion_timeout(&ctx->vsync_comp, usecs_to_jiffies(VSYNC_TIMEOUT_US)); if (rc == 0) { pr_err("%s: TIMEOUT (vsync_cnt: prev: %u cur: %u)\n", __func__, prev_vsync_cnt, ctl->vsync_cnt); timeout_occurred = 1; mdss_timeout_dump(ctl->mfd, __func__); pr_warn("vsync wait timeout %d, fallback to poll mode\n", ctl->num); ctx->polling_en++; rc = mdss_mdp_video_pollwait(ctl); } else { if (timeout_occurred) pr_info("%s: recovered from previous timeout\n", __func__); timeout_occurred = 0; rc = 0; } } if (ctx->wait_pending) { ctx->wait_pending = 0; video_vsync_irq_disable(ctl); } return rc; } static void mdss_mdp_video_underrun_intr_done(void *arg) { struct mdss_mdp_ctl *ctl = arg; if (unlikely(!ctl)) return; ctl->underrun_cnt++; pr_debug("display underrun detected for ctl=%d count=%d\n", ctl->num, ctl->underrun_cnt); } static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg) { struct mdss_mdp_video_ctx *ctx; int rc; pr_debug("kickoff ctl=%d\n", ctl->num); ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data; if (!ctx) { pr_err("invalid ctx\n"); return -ENODEV; } if (!ctx->wait_pending) { ctx->wait_pending++; video_vsync_irq_enable(ctl, true); INIT_COMPLETION(ctx->vsync_comp); } else { WARN(1, "commit without wait! ctl=%d", ctl->num); } if (!ctx->timegen_en) { rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL); if (rc) { pr_warn("intf #%d unblank error (%d)\n", ctl->intf_num, rc); video_vsync_irq_disable(ctl); ctx->wait_pending = 0; return rc; } pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); mdss_mdp_irq_enable(MDSS_MDP_IRQ_INTF_UNDER_RUN, ctl->intf_num); mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1); wmb(); rc = wait_for_completion_timeout(&ctx->vsync_comp, usecs_to_jiffies(VSYNC_TIMEOUT_US)); WARN(rc == 0, "timeout (%d) enabling timegen on ctl=%d\n", rc, ctl->num); ctx->timegen_en = true; rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_ON, NULL); WARN(rc, "intf %d panel on error (%d)\n", ctl->intf_num, rc); } return 0; } void mdss_mdp_video_lock_panel(struct mdss_mdp_ctl *ctl) { mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_LOCK_PANEL_MUTEX, NULL); } void mdss_mdp_video_unlock_panel(struct mdss_mdp_ctl *ctl) { mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNLOCK_PANEL_MUTEX, NULL); } int mdss_mdp_video_copy_splash_screen(struct mdss_panel_data *pdata) { void *virt = NULL; unsigned long bl_fb_addr = 0; unsigned long *bl_fb_addr_va; unsigned long pipe_addr, pipe_src_size; u32 height, width, rgb_size, bpp; size_t size; static struct ion_handle *ihdl; struct ion_client *iclient = mdss_get_ionclient(); static ion_phys_addr_t phys; pipe_addr = MDSS_MDP_REG_SSPP_OFFSET(3) + MDSS_MDP_REG_SSPP_SRC0_ADDR; pipe_src_size = MDSS_MDP_REG_SSPP_OFFSET(3) + MDSS_MDP_REG_SSPP_SRC_SIZE; bpp = 3; rgb_size = MDSS_MDP_REG_READ(pipe_src_size); bl_fb_addr = MDSS_MDP_REG_READ(pipe_addr); height = (rgb_size >> 16) & 0xffff; width = rgb_size & 0xffff; size = PAGE_ALIGN(height * width * bpp); pr_debug("%s:%d splash_height=%d splash_width=%d Buffer size=%d\n", __func__, __LINE__, height, width, size); ihdl = ion_alloc(iclient, size, SZ_1M, ION_HEAP(ION_QSECOM_HEAP_ID), 0); if (IS_ERR_OR_NULL(ihdl)) { pr_err("unable to alloc fbmem from ion (%p)\n", ihdl); return -ENOMEM; } pdata->panel_info.splash_ihdl = ihdl; virt = ion_map_kernel(iclient, ihdl); ion_phys(iclient, ihdl, &phys, &size); pr_debug("%s %d Allocating 
%u bytes at 0x%lx (%pa phys)\n", __func__, __LINE__, size, (unsigned long int)virt, &phys); bl_fb_addr_va = (unsigned long *)ioremap(bl_fb_addr, size); memcpy(virt, bl_fb_addr_va, size); iounmap(bl_fb_addr_va); MDSS_MDP_REG_WRITE(pipe_addr, phys); MDSS_MDP_REG_WRITE(MDSS_MDP_REG_CTL_FLUSH + MDSS_MDP_REG_CTL_OFFSET(0), 0x48); return 0; } int mdss_mdp_video_reconfigure_splash_done(struct mdss_mdp_ctl *ctl) { struct ion_client *iclient = mdss_get_ionclient(); struct mdss_panel_data *pdata; int ret = 0, off; int mdss_mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION); int mdss_v2_intf_off = 0; off = 0; pdata = ctl->panel_data; pdata->panel_info.cont_splash_enabled = 0; ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_BEGIN, NULL); if (ret) { pr_err("%s: Failed to handle 'CONT_SPLASH_BEGIN' event\n", __func__); return ret; } mdss_mdp_ctl_write(ctl, 0, MDSS_MDP_LM_BORDER_COLOR); off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num); if (mdss_mdp_rev >= MDSS_MDP_HW_REV_102) mdss_v2_intf_off = 0xEC00; MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN - mdss_v2_intf_off, 0); /* wait for 1 VSYNC for the pipe to be unstaged */ msleep(20); ion_free(iclient, pdata->panel_info.splash_ihdl); ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_FINISH, NULL); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); return ret; } void mdss_mdp_video_dump_ctx(struct mdss_mdp_ctl *ctl) { struct mdss_mdp_video_ctx *ctx = ctl->priv_data; MDSS_TIMEOUT_LOG("timegen_en=%u\n", ctx->timegen_en); MDSS_TIMEOUT_LOG("polling_en=%u\n", ctx->polling_en); MDSS_TIMEOUT_LOG("poll_cnt=%u\n", ctx->poll_cnt); MDSS_TIMEOUT_LOG("wait_pending=%d\n", ctx->wait_pending); } int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl) { struct mdss_data_type *mdata; struct mdss_panel_info *pinfo; struct mdss_mdp_video_ctx *ctx; struct mdss_mdp_mixer *mixer; struct intf_timing_params itp = {0}; u32 dst_bpp; int i; mdata = ctl->mdata; pinfo = &ctl->panel_data->panel_info; mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT); if (!mixer) { pr_err("mixer not setup correctly\n"); return -ENODEV; } i = ctl->intf_num - MDSS_MDP_INTF0; if (i < mdata->nintf) { ctx = ((struct mdss_mdp_video_ctx *) mdata->video_intf) + i; if (ctx->ref_cnt) { pr_err("Intf %d already in use\n", ctl->intf_num); return -EBUSY; } pr_debug("video Intf #%d base=%p", ctx->intf_num, ctx->base); ctx->ref_cnt++; } else { pr_err("Invalid intf number: %d\n", ctl->intf_num); return -EINVAL; } pr_debug("start ctl=%u\n", ctl->num); ctl->priv_data = ctx; ctx->intf_type = ctl->intf_type; init_completion(&ctx->vsync_comp); spin_lock_init(&ctx->vsync_lock); atomic_set(&ctx->vsync_ref, 0); mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num, mdss_mdp_video_vsync_intr_done, ctl); mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_UNDER_RUN, ctl->intf_num, mdss_mdp_video_underrun_intr_done, ctl); dst_bpp = pinfo->fbc.enabled ? 
(pinfo->fbc.target_bpp) : (pinfo->bpp); itp.width = mult_frac((pinfo->xres + pinfo->lcdc.xres_pad), dst_bpp, pinfo->bpp); itp.height = pinfo->yres + pinfo->lcdc.yres_pad; itp.border_clr = pinfo->lcdc.border_clr; itp.underflow_clr = pinfo->lcdc.underflow_clr; itp.hsync_skew = pinfo->lcdc.hsync_skew; itp.xres = mult_frac(pinfo->xres, dst_bpp, pinfo->bpp); itp.yres = pinfo->yres; itp.h_back_porch = mult_frac(pinfo->lcdc.h_back_porch, dst_bpp, pinfo->bpp); itp.h_front_porch = mult_frac(pinfo->lcdc.h_front_porch, dst_bpp, pinfo->bpp); itp.v_back_porch = mult_frac(pinfo->lcdc.v_back_porch, dst_bpp, pinfo->bpp); itp.v_front_porch = mult_frac(pinfo->lcdc.v_front_porch, dst_bpp, pinfo->bpp); itp.hsync_pulse_width = mult_frac(pinfo->lcdc.h_pulse_width, dst_bpp, pinfo->bpp); itp.vsync_pulse_width = pinfo->lcdc.v_pulse_width; if (mdss_mdp_video_timegen_setup(ctx, &itp)) { pr_err("unable to get timing parameters\n"); return -EINVAL; } mdp_video_write(ctx, MDSS_MDP_REG_INTF_PANEL_FORMAT, ctl->dst_format); ctl->stop_fnc = mdss_mdp_video_stop; ctl->display_fnc = mdss_mdp_video_display; ctl->wait_fnc = mdss_mdp_video_wait4comp; ctl->read_line_cnt_fnc = mdss_mdp_video_line_count; ctl->add_vsync_handler = mdss_mdp_video_add_vsync_handler; ctl->remove_vsync_handler = mdss_mdp_video_remove_vsync_handler; ctl->ctx_dump_fnc = mdss_mdp_video_dump_ctx; return 0; }
Spartonos/android_kernel_motorola_falcon_umts
drivers/video/msm/mdss/mdss_mdp_intf_video.c
C
gpl-2.0
19252
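For reference, a stand-alone sketch of the timing arithmetic mdss_mdp_video_timegen_setup() performs before programming the INTF registers; the 1080p porch and pulse widths below are illustrative assumptions, not values from the source:

#include <stdio.h>

int main(void)
{
	unsigned int xres = 1920, yres = 1080;
	unsigned int hpw = 44, hbp = 148, hfp = 88;	/* hsync pulse, back, front porch */
	unsigned int vpw = 5, vbp = 36, vfp = 4;	/* vsync pulse, back, front porch */
	unsigned int hsync_skew = 0;

	unsigned int hsync_period = hpw + hbp + xres + hfp;	/* 2200 pixel clocks */
	unsigned int vsync_period = vpw + vbp + yres + vfp;	/* 1125 lines */
	unsigned int display_v_start = (vpw + vbp) * hsync_period + hsync_skew;
	unsigned int display_v_end = (vsync_period - vfp) * hsync_period + hsync_skew - 1;

	printf("hsync_period=%u vsync_period=%u\n", hsync_period, vsync_period);
	printf("display_v_start=%u display_v_end=%u (pixel-clock units)\n",
	       display_v_start, display_v_end);
	/* the driver writes this product to MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0 */
	printf("VSYNC_PERIOD_F0=%u\n", vsync_period * hsync_period);
	return 0;
}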
/* * linux/sound/oss/dmasound/dmasound_awacs.c * * PowerMac `AWACS' and `Burgundy' DMA Sound Driver * with some limited support for DACA & Tumbler * * See linux/sound/oss/dmasound/dmasound_core.c for copyright and * history prior to 2001/01/26. * * 26/01/2001 ed 0.1 Iain Sandoe * - added version info. * - moved dbdma command buffer allocation to PMacXXXSqSetup() * - fixed up beep dbdma cmd buffers * * 08/02/2001 [0.2] * - make SNDCTL_DSP_GETFMTS return the correct info for the h/w * - move soft format translations to a separate file * - [0.3] make SNDCTL_DSP_GETCAPS return correct info. * - [0.4] more informative machine name strings. * - [0.5] * - record changes. * - made the default_hard/soft entries. * 04/04/2001 [0.6] * - minor correction to bit assignments in awacs_defs.h * - incorporate mixer changes from 2.2.x back-port. * - take out passthru as a rec input (it isn't). * - make Input Gain slider work the 'right way up'. * - try to make the mixer sliders more logical - so now the * input selectors are just two-state (>50% == ON) and the * Input Gain slider handles the rest of the gain issues. * - try to pick slider representations that most closely match * the actual use - e.g. IGain for input gain... * - first stab at over/under-run detection. * - minor cosmetic changes to IRQ identification. * - fix bug where rates > max would be reported as supported. * - first stab at over/under-run detection. * - make use of i2c for mixer settings conditional on perch * rather than cuda (some machines without perch have cuda). * - fix bug where TX stops when dbdma status comes up "DEAD" * so far only reported on PowerComputing clones ... but. * - put in AWACS/Screamer register write timeouts. * - part way to partitioning the init() stuff * - first pass at 'tumbler' stuff (not support - just an attempt * to allow the driver to load on new G4s). * 01/02/2002 [0.7] - BenH * - all sort of minor bits went in since the latest update, I * bumped the version number for that reason * * 07/26/2002 [0.8] - BenH * - More minor bits since last changelog (I should be more careful * with those) * - Support for snapper & better tumbler integration by Toby Sargeant * - Headphone detect for scremer by Julien Blache * - More tumbler fixed by Andreas Schwab * 11/29/2003 [0.8.1] - Renzo Davoli (King Enzo) * - Support for Snapper line in * - snapper input resampling (for rates < 44100) * - software line gain control */ /* GENERAL FIXME/TODO: check that the assumptions about what is written to mac-io is valid for DACA & Tumbler. This driver is in bad need of a rewrite. The dbdma code has to be split, some proper device-tree parsing code has to be written, etc... 
*/ #include <linux/types.h> #include <linux/module.h> #include <linux/config.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/soundcard.h> #include <linux/adb.h> #include <linux/nvram.h> #include <linux/tty.h> #include <linux/vt_kern.h> #include <linux/spinlock.h> #include <linux/kmod.h> #include <linux/interrupt.h> #include <linux/input.h> #include <asm/semaphore.h> #ifdef CONFIG_ADB_CUDA #include <linux/cuda.h> #endif #ifdef CONFIG_ADB_PMU #include <linux/pmu.h> #endif #include <linux/i2c-dev.h> #include <asm/uaccess.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/dbdma.h> #include <asm/pmac_feature.h> #include <asm/irq.h> #include <asm/nvram.h> #include "awacs_defs.h" #include "dmasound.h" #include "tas3001c.h" #include "tas3004.h" #include "tas_common.h" #define DMASOUND_AWACS_REVISION 0 #define DMASOUND_AWACS_EDITION 7 #define AWACS_SNAPPER 110 /* fake revision # for snapper */ #define AWACS_BURGUNDY 100 /* fake revision # for burgundy */ #define AWACS_TUMBLER 90 /* fake revision # for tumbler */ #define AWACS_DACA 80 /* fake revision # for daca (ibook) */ #define AWACS_AWACS 2 /* holding revision for AWACS */ #define AWACS_SCREAMER 3 /* holding revision for Screamer */ /* * Interrupt numbers and addresses, & info obtained from the device tree. */ static int awacs_irq, awacs_tx_irq, awacs_rx_irq; static volatile struct awacs_regs __iomem *awacs; static volatile u32 __iomem *i2s; static volatile struct dbdma_regs __iomem *awacs_txdma, *awacs_rxdma; static int awacs_rate_index; static int awacs_subframe; static struct device_node* awacs_node; static struct device_node* i2s_node; static char awacs_name[64]; static int awacs_revision; static int awacs_sleeping; static DECLARE_MUTEX(dmasound_sem); static int sound_device_id; /* exists after iMac revA */ static int hw_can_byteswap = 1 ; /* most pmac sound h/w can */ /* model info */ /* To be replaced with better interaction with pmac_feature.c */ static int is_pbook_3X00; static int is_pbook_g3; /* expansion info */ static int has_perch; static int has_ziva; /* for earlier powerbooks which need fiddling with mac-io to enable * cd etc. */ static unsigned char __iomem *latch_base; static unsigned char __iomem *macio_base; /* * Space for the DBDMA command blocks. */ static void *awacs_tx_cmd_space; static volatile struct dbdma_cmd *awacs_tx_cmds; static int number_of_tx_cmd_buffers; static void *awacs_rx_cmd_space; static volatile struct dbdma_cmd *awacs_rx_cmds; static int number_of_rx_cmd_buffers; /* * Cached values of AWACS registers (we can't read them). * Except on the burgundy (and screamer). XXX */ int awacs_reg[8]; int awacs_reg1_save; /* tracking values for the mixer contents */ static int spk_vol; static int line_vol; static int passthru_vol; static int ip_gain; /* mic preamp settings */ static int rec_lev = 0x4545 ; /* default CD gain 69 % */ static int mic_lev; static int cd_lev = 0x6363 ; /* 99 % */ static int line_lev; static int hdp_connected; /* * Stuff for outputting a beep. The values range from -327 to +327 * so we can multiply by an amplitude in the range 0..100 to get a * signed short value to put in the output buffer. 
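 * (For illustration: the peak sample is 327, and 327 * 100 = 32700, which
 * stays inside the signed 16-bit range of +/-32767.)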
*/ static short beep_wform[256] = { 0, 40, 79, 117, 153, 187, 218, 245, 269, 288, 304, 316, 323, 327, 327, 324, 318, 310, 299, 288, 275, 262, 249, 236, 224, 213, 204, 196, 190, 186, 183, 182, 182, 183, 186, 189, 192, 196, 200, 203, 206, 208, 209, 209, 209, 207, 204, 201, 197, 193, 188, 183, 179, 174, 170, 166, 163, 161, 160, 159, 159, 160, 161, 162, 164, 166, 168, 169, 171, 171, 171, 170, 169, 167, 163, 159, 155, 150, 144, 139, 133, 128, 122, 117, 113, 110, 107, 105, 103, 103, 103, 103, 104, 104, 105, 105, 105, 103, 101, 97, 92, 86, 78, 68, 58, 45, 32, 18, 3, -11, -26, -41, -55, -68, -79, -88, -95, -100, -102, -102, -99, -93, -85, -75, -62, -48, -33, -16, 0, 16, 33, 48, 62, 75, 85, 93, 99, 102, 102, 100, 95, 88, 79, 68, 55, 41, 26, 11, -3, -18, -32, -45, -58, -68, -78, -86, -92, -97, -101, -103, -105, -105, -105, -104, -104, -103, -103, -103, -103, -105, -107, -110, -113, -117, -122, -128, -133, -139, -144, -150, -155, -159, -163, -167, -169, -170, -171, -171, -171, -169, -168, -166, -164, -162, -161, -160, -159, -159, -160, -161, -163, -166, -170, -174, -179, -183, -188, -193, -197, -201, -204, -207, -209, -209, -209, -208, -206, -203, -200, -196, -192, -189, -186, -183, -182, -182, -183, -186, -190, -196, -204, -213, -224, -236, -249, -262, -275, -288, -299, -310, -318, -324, -327, -327, -323, -316, -304, -288, -269, -245, -218, -187, -153, -117, -79, -40, }; /* beep support */ #define BEEP_SRATE 22050 /* 22050 Hz sample rate */ #define BEEP_BUFLEN 512 #define BEEP_VOLUME 15 /* 0 - 100 */ static int beep_vol = BEEP_VOLUME; static int beep_playing; static int awacs_beep_state; static short *beep_buf; static void *beep_dbdma_cmd_space; static volatile struct dbdma_cmd *beep_dbdma_cmd; /* Burgundy functions */ static void awacs_burgundy_wcw(unsigned addr,unsigned newval); static unsigned awacs_burgundy_rcw(unsigned addr); static void awacs_burgundy_write_volume(unsigned address, int volume); static int awacs_burgundy_read_volume(unsigned address); static void awacs_burgundy_write_mvolume(unsigned address, int volume); static int awacs_burgundy_read_mvolume(unsigned address); /* we will allocate a single 'emergency' dbdma cmd block to use if the tx status comes up "DEAD". This happens on some PowerComputing Pmac clones, either owing to a bug in dbdma or some interaction between IDE and sound. However, this measure would deal with DEAD status if if appeared elsewhere. for the sake of memory efficiency we'll allocate this cmd as part of the beep cmd stuff. */ static volatile struct dbdma_cmd *emergency_dbdma_cmd; #ifdef CONFIG_PM /* * Stuff for restoring after a sleep. */ static int awacs_sleep_notify(struct pmu_sleep_notifier *self, int when); struct pmu_sleep_notifier awacs_sleep_notifier = { awacs_sleep_notify, SLEEP_LEVEL_SOUND, }; #endif /* CONFIG_PM */ /* for (soft) sample rate translations */ int expand_bal; /* Balance factor for expanding (not volume!) */ int expand_read_bal; /* Balance factor for expanding reads (not volume!) 
*/ /*** Low level stuff *********************************************************/ static void *PMacAlloc(unsigned int size, int flags); static void PMacFree(void *ptr, unsigned int size); static int PMacIrqInit(void); #ifdef MODULE static void PMacIrqCleanup(void); #endif static void PMacSilence(void); static void PMacInit(void); static int PMacSetFormat(int format); static int PMacSetVolume(int volume); static void PMacPlay(void); static void PMacRecord(void); static irqreturn_t pmac_awacs_tx_intr(int irq, void *devid, struct pt_regs *regs); static irqreturn_t pmac_awacs_rx_intr(int irq, void *devid, struct pt_regs *regs); static irqreturn_t pmac_awacs_intr(int irq, void *devid, struct pt_regs *regs); static void awacs_write(int val); static int awacs_get_volume(int reg, int lshift); static int awacs_volume_setter(int volume, int n, int mute, int lshift); /*** Mid level stuff **********************************************************/ static int PMacMixerIoctl(u_int cmd, u_long arg); static int PMacWriteSqSetup(void); static int PMacReadSqSetup(void); static void PMacAbortRead(void); extern TRANS transAwacsNormal ; extern TRANS transAwacsExpand ; extern TRANS transAwacsNormalRead ; extern TRANS transAwacsExpandRead ; extern int daca_init(void); extern void daca_cleanup(void); extern int daca_set_volume(uint left_vol, uint right_vol); extern void daca_get_volume(uint * left_vol, uint *right_vol); extern int daca_enter_sleep(void); extern int daca_leave_sleep(void); #define TRY_LOCK() \ if ((rc = down_interruptible(&dmasound_sem)) != 0) \ return rc; #define LOCK() down(&dmasound_sem); #define UNLOCK() up(&dmasound_sem); /* We use different versions that the ones provided in dmasound.h * * FIXME: Use different names ;) */ #undef IOCTL_IN #undef IOCTL_OUT #define IOCTL_IN(arg, ret) \ rc = get_user(ret, (int __user *)(arg)); \ if (rc) break; #define IOCTL_OUT(arg, ret) \ ioctl_return2((int __user *)(arg), ret) static inline int ioctl_return2(int __user *addr, int value) { return value < 0 ? value : put_user(value, addr); } /*** AE - TUMBLER / SNAPPER START ************************************************/ int gpio_audio_reset, gpio_audio_reset_pol; int gpio_amp_mute, gpio_amp_mute_pol; int gpio_headphone_mute, gpio_headphone_mute_pol; int gpio_headphone_detect, gpio_headphone_detect_pol; int gpio_headphone_irq; int setup_audio_gpio(const char *name, const char* compatible, int *gpio_addr, int* gpio_pol) { struct device_node *np; u32* pp; np = find_devices("gpio"); if (!np) return -ENODEV; np = np->child; while(np != 0) { if (name) { char *property = get_property(np,"audio-gpio",NULL); if (property != 0 && strcmp(property,name) == 0) break; } else if (compatible && device_is_compatible(np, compatible)) break; np = np->sibling; } if (!np) return -ENODEV; pp = (u32 *)get_property(np, "AAPL,address", NULL); if (!pp) return -ENODEV; *gpio_addr = (*pp) & 0x0000ffff; pp = (u32 *)get_property(np, "audio-gpio-active-state", NULL); if (pp) *gpio_pol = *pp; else *gpio_pol = 1; if (np->n_intrs > 0) return np->intrs[0].line; return 0; } static inline void write_audio_gpio(int gpio_addr, int data) { if (!gpio_addr) return; pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, gpio_addr, data ? 
0x05 : 0x04); } static inline int read_audio_gpio(int gpio_addr) { if (!gpio_addr) return 0; return ((pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, gpio_addr, 0) & 0x02) !=0); } /* * Headphone interrupt via GPIO (Tumbler, Snapper, DACA) */ static irqreturn_t headphone_intr(int irq, void *devid, struct pt_regs *regs) { unsigned long flags; spin_lock_irqsave(&dmasound.lock, flags); if (read_audio_gpio(gpio_headphone_detect) == gpio_headphone_detect_pol) { printk(KERN_INFO "Audio jack plugged, muting speakers.\n"); write_audio_gpio(gpio_headphone_mute, !gpio_headphone_mute_pol); write_audio_gpio(gpio_amp_mute, gpio_amp_mute_pol); tas_output_device_change(sound_device_id,TAS_OUTPUT_HEADPHONES,0); } else { printk(KERN_INFO "Audio jack unplugged, enabling speakers.\n"); write_audio_gpio(gpio_amp_mute, !gpio_amp_mute_pol); write_audio_gpio(gpio_headphone_mute, gpio_headphone_mute_pol); tas_output_device_change(sound_device_id,TAS_OUTPUT_INTERNAL_SPKR,0); } spin_unlock_irqrestore(&dmasound.lock, flags); return IRQ_HANDLED; } /* Initialize tumbler */ static int tas_dmasound_init(void) { setup_audio_gpio( "audio-hw-reset", NULL, &gpio_audio_reset, &gpio_audio_reset_pol); setup_audio_gpio( "amp-mute", NULL, &gpio_amp_mute, &gpio_amp_mute_pol); setup_audio_gpio("headphone-mute", NULL, &gpio_headphone_mute, &gpio_headphone_mute_pol); gpio_headphone_irq = setup_audio_gpio( "headphone-detect", NULL, &gpio_headphone_detect, &gpio_headphone_detect_pol); /* Fix some broken OF entries in desktop machines */ if (!gpio_headphone_irq) gpio_headphone_irq = setup_audio_gpio( NULL, "keywest-gpio15", &gpio_headphone_detect, &gpio_headphone_detect_pol); write_audio_gpio(gpio_audio_reset, gpio_audio_reset_pol); msleep(100); write_audio_gpio(gpio_audio_reset, !gpio_audio_reset_pol); msleep(100); if (gpio_headphone_irq) { if (request_irq(gpio_headphone_irq,headphone_intr,0,"Headphone detect",NULL) < 0) { printk(KERN_ERR "tumbler: Can't request headphone interrupt\n"); gpio_headphone_irq = 0; } else { u8 val; /* Activate headphone status interrupts */ val = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, gpio_headphone_detect, 0); pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, gpio_headphone_detect, val | 0x80); /* Trigger it */ headphone_intr(0,NULL,NULL); } } if (!gpio_headphone_irq) { /* Some machine enter this case ? 
*/ printk(KERN_WARNING "tumbler: Headphone detect IRQ not found, enabling all outputs !\n"); write_audio_gpio(gpio_amp_mute, !gpio_amp_mute_pol); write_audio_gpio(gpio_headphone_mute, !gpio_headphone_mute_pol); } return 0; } static int tas_dmasound_cleanup(void) { if (gpio_headphone_irq) free_irq(gpio_headphone_irq, NULL); return 0; } /* We don't support 48k yet */ static int tas_freqs[1] = { 44100 } ; static int tas_freqs_ok[1] = { 1 } ; /* don't know what to do really - just have to leave it where * OF left things */ static int tas_set_frame_rate(void) { if (i2s) { out_le32(i2s + (I2S_REG_SERIAL_FORMAT >> 2), 0x41190000); out_le32(i2s + (I2S_REG_DATAWORD_SIZES >> 2), 0x02000200); } dmasound.hard.speed = 44100 ; awacs_rate_index = 0 ; return 44100 ; } static int tas_mixer_ioctl(u_int cmd, u_long arg) { int __user *argp = (int __user *)arg; int data; int rc; rc=tas_device_ioctl(cmd, arg); if (rc != -EINVAL) { return rc; } if ((cmd & ~0xff) == MIXER_WRITE(0) && tas_supported_mixers() & (1<<(cmd & 0xff))) { rc = get_user(data, argp); if (rc<0) return rc; tas_set_mixer_level(cmd & 0xff, data); tas_get_mixer_level(cmd & 0xff, &data); return ioctl_return2(argp, data); } if ((cmd & ~0xff) == MIXER_READ(0) && tas_supported_mixers() & (1<<(cmd & 0xff))) { tas_get_mixer_level(cmd & 0xff, &data); return ioctl_return2(argp, data); } switch(cmd) { case SOUND_MIXER_READ_DEVMASK: data = tas_supported_mixers() | SOUND_MASK_SPEAKER; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_STEREODEVS: data = tas_stereo_mixers(); rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_CAPS: rc = IOCTL_OUT(arg, 0); break; case SOUND_MIXER_READ_RECMASK: // XXX FIXME: find a way to check what is really available */ data = SOUND_MASK_LINE | SOUND_MASK_MIC; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_RECSRC: if (awacs_reg[0] & MASK_MUX_AUDIN) data |= SOUND_MASK_LINE; if (awacs_reg[0] & MASK_MUX_MIC) data |= SOUND_MASK_MIC; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_WRITE_RECSRC: IOCTL_IN(arg, data); data =0; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_WRITE_SPEAKER: /* really bell volume */ IOCTL_IN(arg, data); beep_vol = data & 0xff; /* fall through */ case SOUND_MIXER_READ_SPEAKER: rc = IOCTL_OUT(arg, (beep_vol<<8) | beep_vol); break; case SOUND_MIXER_OUTMASK: case SOUND_MIXER_OUTSRC: default: rc = -EINVAL; } return rc; } static void __init tas_init_frame_rates(unsigned int *prop, unsigned int l) { int i ; if (prop) { for (i=0; i<1; i++) tas_freqs_ok[i] = 0; for (l /= sizeof(int); l > 0; --l) { unsigned int r = *prop++; /* Apple 'Fixed' format */ if (r >= 0x10000) r >>= 16; for (i = 0; i < 1; ++i) { if (r == tas_freqs[i]) { tas_freqs_ok[i] = 1; break; } } } } /* else we assume that all the rates are available */ } /*** AE - TUMBLER / SNAPPER END ************************************************/ /*** Low level stuff *********************************************************/ /* * PCI PowerMac, with AWACS, Screamer, Burgundy, DACA or Tumbler and DBDMA. 
*/ static void *PMacAlloc(unsigned int size, int flags) { return kmalloc(size, flags); } static void PMacFree(void *ptr, unsigned int size) { kfree(ptr); } static int __init PMacIrqInit(void) { if (awacs) if (request_irq(awacs_irq, pmac_awacs_intr, 0, "Built-in Sound misc", NULL)) return 0; if (request_irq(awacs_tx_irq, pmac_awacs_tx_intr, 0, "Built-in Sound out", NULL) || request_irq(awacs_rx_irq, pmac_awacs_rx_intr, 0, "Built-in Sound in", NULL)) return 0; return 1; } #ifdef MODULE static void PMacIrqCleanup(void) { /* turn off input & output dma */ DBDMA_DO_STOP(awacs_txdma); DBDMA_DO_STOP(awacs_rxdma); if (awacs) /* disable interrupts from awacs interface */ out_le32(&awacs->control, in_le32(&awacs->control) & 0xfff); /* Switch off the sound clock */ pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, awacs_node, 0, 0); /* Make sure proper bits are set on pismo & tipb */ if ((machine_is_compatible("PowerBook3,1") || machine_is_compatible("PowerBook3,2")) && awacs) { awacs_reg[1] |= MASK_PAROUT0 | MASK_PAROUT1; awacs_write(MASK_ADDR1 | awacs_reg[1]); msleep(200); } if (awacs) free_irq(awacs_irq, NULL); free_irq(awacs_tx_irq, NULL); free_irq(awacs_rx_irq, NULL); if (awacs) iounmap(awacs); if (i2s) iounmap(i2s); iounmap(awacs_txdma); iounmap(awacs_rxdma); release_OF_resource(awacs_node, 0); release_OF_resource(awacs_node, 1); release_OF_resource(awacs_node, 2); kfree(awacs_tx_cmd_space); kfree(awacs_rx_cmd_space); kfree(beep_dbdma_cmd_space); kfree(beep_buf); #ifdef CONFIG_PM pmu_unregister_sleep_notifier(&awacs_sleep_notifier); #endif } #endif /* MODULE */ static void PMacSilence(void) { /* turn off output dma */ DBDMA_DO_STOP(awacs_txdma); } /* don't know what to do really - just have to leave it where * OF left things */ static int daca_set_frame_rate(void) { if (i2s) { out_le32(i2s + (I2S_REG_SERIAL_FORMAT >> 2), 0x41190000); out_le32(i2s + (I2S_REG_DATAWORD_SIZES >> 2), 0x02000200); } dmasound.hard.speed = 44100 ; awacs_rate_index = 0 ; return 44100 ; } static int awacs_freqs[8] = { 44100, 29400, 22050, 17640, 14700, 11025, 8820, 7350 }; static int awacs_freqs_ok[8] = { 1, 1, 1, 1, 1, 1, 1, 1 }; static int awacs_set_frame_rate(int desired, int catch_r) { int tolerance, i = 8 ; /* * If we have a sample rate which is within catchRadius percent * of the requested value, we don't have to expand the samples. * Otherwise choose the next higher rate. * N.B.: burgundy awacs only works at 44100 Hz. 
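 * Worked example (illustrative): a request for 8000 Hz with catchRadius 3
 * fails the 7350 Hz test (7350 + 220 < 8000), so the loop settles on the
 * next higher rate, 8820 Hz, and the stream is expanded up to it.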
*/ do { tolerance = catch_r * awacs_freqs[--i] / 100; if (awacs_freqs_ok[i] && dmasound.soft.speed <= awacs_freqs[i] + tolerance) break; } while (i > 0); dmasound.hard.speed = awacs_freqs[i]; awacs_rate_index = i; out_le32(&awacs->control, MASK_IEPC | (i << 8) | 0x11 ); awacs_reg[1] = (awacs_reg[1] & ~MASK_SAMPLERATE) | (i << 3); awacs_write(awacs_reg[1] | MASK_ADDR1); return dmasound.hard.speed; } static int burgundy_set_frame_rate(void) { awacs_rate_index = 0 ; awacs_reg[1] = (awacs_reg[1] & ~MASK_SAMPLERATE) ; /* XXX disable error interrupt on burgundy for now */ out_le32(&awacs->control, MASK_IEPC | 0 | 0x11 | MASK_IEE); return 44100 ; } static int set_frame_rate(int desired, int catch_r) { switch (awacs_revision) { case AWACS_BURGUNDY: dmasound.hard.speed = burgundy_set_frame_rate(); break ; case AWACS_TUMBLER: case AWACS_SNAPPER: dmasound.hard.speed = tas_set_frame_rate(); break ; case AWACS_DACA: dmasound.hard.speed = daca_set_frame_rate(); break ; default: dmasound.hard.speed = awacs_set_frame_rate(desired, catch_r); break ; } return dmasound.hard.speed ; } static void awacs_recalibrate(void) { /* Sorry for the horrible delays... I hope to get that improved * by making the whole PM process asynchronous in a future version */ msleep(750); awacs_reg[1] |= MASK_CMUTE | MASK_AMUTE; awacs_write(awacs_reg[1] | MASK_RECALIBRATE | MASK_ADDR1); msleep(1000); awacs_write(awacs_reg[1] | MASK_ADDR1); } static void PMacInit(void) { int tolerance; switch (dmasound.soft.format) { case AFMT_S16_LE: case AFMT_U16_LE: if (hw_can_byteswap) dmasound.hard.format = AFMT_S16_LE; else dmasound.hard.format = AFMT_S16_BE; break; default: dmasound.hard.format = AFMT_S16_BE; break; } dmasound.hard.stereo = 1; dmasound.hard.size = 16; /* set dmasound.hard.speed - on the basis of what we want (soft) * and the tolerance we'll allow. 
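 * e.g. (illustrative) a soft speed of 22000 with catchRadius 3: set_frame_rate()
 * picks 22050, and since 22000 >= 22050 - 661 the plain (non-expanding)
 * translation tables are selected below.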
*/ set_frame_rate(dmasound.soft.speed, catchRadius) ; tolerance = (catchRadius * dmasound.hard.speed) / 100; if (dmasound.soft.speed >= dmasound.hard.speed - tolerance) { dmasound.trans_write = &transAwacsNormal; dmasound.trans_read = &transAwacsNormalRead; } else { dmasound.trans_write = &transAwacsExpand; dmasound.trans_read = &transAwacsExpandRead; } if (awacs) { if (hw_can_byteswap && (dmasound.hard.format == AFMT_S16_LE)) out_le32(&awacs->byteswap, BS_VAL); else out_le32(&awacs->byteswap, 0); } expand_bal = -dmasound.soft.speed; expand_read_bal = -dmasound.soft.speed; } static int PMacSetFormat(int format) { int size; int req_format = format; switch (format) { case AFMT_QUERY: return dmasound.soft.format; case AFMT_MU_LAW: case AFMT_A_LAW: case AFMT_U8: case AFMT_S8: size = 8; break; case AFMT_S16_LE: if(!hw_can_byteswap) format = AFMT_S16_BE; case AFMT_S16_BE: size = 16; break; case AFMT_U16_LE: if(!hw_can_byteswap) format = AFMT_U16_BE; case AFMT_U16_BE: size = 16; break; default: /* :-) */ printk(KERN_ERR "dmasound: unknown format 0x%x, using AFMT_U8\n", format); size = 8; format = AFMT_U8; } if (req_format == format) { dmasound.soft.format = format; dmasound.soft.size = size; if (dmasound.minDev == SND_DEV_DSP) { dmasound.dsp.format = format; dmasound.dsp.size = size; } } return format; } #define AWACS_VOLUME_TO_MASK(x) (15 - ((((x) - 1) * 15) / 99)) #define AWACS_MASK_TO_VOLUME(y) (100 - ((y) * 99 / 15)) static int awacs_get_volume(int reg, int lshift) { int volume; volume = AWACS_MASK_TO_VOLUME((reg >> lshift) & 0xf); volume |= AWACS_MASK_TO_VOLUME(reg & 0xf) << 8; return volume; } static int awacs_volume_setter(int volume, int n, int mute, int lshift) { int r1, rn; if (mute && volume == 0) { r1 = awacs_reg[1] | mute; } else { r1 = awacs_reg[1] & ~mute; rn = awacs_reg[n] & ~(0xf | (0xf << lshift)); rn |= ((AWACS_VOLUME_TO_MASK(volume & 0xff) & 0xf) << lshift); rn |= AWACS_VOLUME_TO_MASK((volume >> 8) & 0xff) & 0xf; awacs_reg[n] = rn; awacs_write((n << 12) | rn); volume = awacs_get_volume(rn, lshift); } if (r1 != awacs_reg[1]) { awacs_reg[1] = r1; awacs_write(r1 | MASK_ADDR1); } return volume; } static int PMacSetVolume(int volume) { printk(KERN_WARNING "Bogus call to PMacSetVolume !\n"); return 0; } static void awacs_setup_for_beep(int speed) { out_le32(&awacs->control, (in_le32(&awacs->control) & ~0x1f00) | ((speed > 0 ? speed : awacs_rate_index) << 8)); if (hw_can_byteswap && (dmasound.hard.format == AFMT_S16_LE) && speed == -1) out_le32(&awacs->byteswap, BS_VAL); else out_le32(&awacs->byteswap, 0); } /* CHECK: how much of this *really* needs IRQs masked? */ static void __PMacPlay(void) { volatile struct dbdma_cmd *cp; int next_frg, count; count = 300 ; /* > two cycles at the lowest sample rate */ /* what we want to send next */ next_frg = (write_sq.front + write_sq.active) % write_sq.max_count; if (awacs_beep_state) { /* sound takes precedence over beeps */ /* stop the dma channel */ out_le32(&awacs_txdma->control, (RUN|PAUSE|FLUSH|WAKE) << 16); while ( (in_le32(&awacs_txdma->status) & RUN) && count--) udelay(1); if (awacs) awacs_setup_for_beep(-1); out_le32(&awacs_txdma->cmdptr, virt_to_bus(&(awacs_tx_cmds[next_frg]))); beep_playing = 0; awacs_beep_state = 0; } /* this won't allow more than two frags to be in the output queue at once. (or one, if the max frags is 2 - because count can't exceed 2 in that case) */ while (write_sq.active < 2 && write_sq.active < write_sq.count) { count = (write_sq.count == write_sq.active + 1) ? 
write_sq.rear_size:write_sq.block_size ; if (count < write_sq.block_size) { if (!write_sq.syncing) /* last block not yet filled,*/ break; /* and we're not syncing or POST-ed */ else { /* pretend the block is full to force a new block to be started on the next write */ write_sq.rear_size = write_sq.block_size ; write_sq.syncing &= ~2 ; /* clear POST */ } } cp = &awacs_tx_cmds[next_frg]; st_le16(&cp->req_count, count); st_le16(&cp->xfer_status, 0); st_le16(&cp->command, OUTPUT_MORE + INTR_ALWAYS); /* put a STOP at the end of the queue - but only if we have space for it. This means that, if we under-run and we only have two fragments, we might re-play sound from an existing queued frag. I guess the solution to that is not to set two frags if you are likely to under-run... */ if (write_sq.count < write_sq.max_count) { if (++next_frg >= write_sq.max_count) next_frg = 0 ; /* wrap */ /* if we get here then we've underrun so we will stop*/ st_le16(&awacs_tx_cmds[next_frg].command, DBDMA_STOP); } /* set the dbdma controller going, if it is not already */ if (write_sq.active == 0) out_le32(&awacs_txdma->cmdptr, virt_to_bus(cp)); (void)in_le32(&awacs_txdma->status); out_le32(&awacs_txdma->control, ((RUN|WAKE) << 16) + (RUN|WAKE)); ++write_sq.active; } } static void PMacPlay(void) { LOCK(); if (!awacs_sleeping) { unsigned long flags; spin_lock_irqsave(&dmasound.lock, flags); __PMacPlay(); spin_unlock_irqrestore(&dmasound.lock, flags); } UNLOCK(); } static void PMacRecord(void) { unsigned long flags; if (read_sq.active) return; spin_lock_irqsave(&dmasound.lock, flags); /* This is all we have to do......Just start it up. */ out_le32(&awacs_rxdma->control, ((RUN|WAKE) << 16) + (RUN|WAKE)); read_sq.active = 1; spin_unlock_irqrestore(&dmasound.lock, flags); } /* if the TX status comes up "DEAD" - reported on some Power Computing machines we need to re-start the dbdma - but from a different physical start address and with a different transfer length. It would get very messy to do this with the normal dbdma_cmd blocks - we would have to re-write the buffer start addresses each time. So, we will keep a single dbdma_cmd block which can be fiddled with. When DEAD status is first reported the content of the faulted dbdma block is copied into the emergency buffer and we note that the buffer is in use. we then bump the start physical address by the amount that was successfully output before it died. On any subsequent DEAD result we just do the bump-ups (we know that we are already using the emergency dbdma_cmd). CHECK: this just tries to "do it". It is possible that we should abandon xfers when the number of residual bytes gets below a certain value - I can see that this might cause a loop-forever if too small a transfer causes DEAD status. However this is a TODO for now - we'll see what gets reported. When we get a successful transfer result with the emergency buffer we just pretend that it completed using the original dmdma_cmd and carry on. The 'next_cmd' field will already point back to the original loop of blocks. 
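 * Concretely (illustrative numbers): if the dead command had req_count 4096
 * and res_count 1024, the patched copy gets phy_addr += 3072, req_count =
 * 1024 and res_count = 0, i.e. it retries only the bytes that never went out.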
*/ static irqreturn_t pmac_awacs_tx_intr(int irq, void *devid, struct pt_regs *regs) { int i = write_sq.front; int stat; int i_nowrap = write_sq.front; volatile struct dbdma_cmd *cp; /* != 0 when we are dealing with a DEAD xfer */ static int emergency_in_use; spin_lock(&dmasound.lock); while (write_sq.active > 0) { /* we expect to have done something*/ if (emergency_in_use) /* we are dealing with DEAD xfer */ cp = emergency_dbdma_cmd ; else cp = &awacs_tx_cmds[i]; stat = ld_le16(&cp->xfer_status); if (stat & DEAD) { unsigned short req, res ; unsigned int phy ; #ifdef DEBUG_DMASOUND printk("dmasound_pmac: tx-irq: xfer died - patching it up...\n") ; #endif /* to clear DEAD status we must first clear RUN set it to quiescent to be on the safe side */ (void)in_le32(&awacs_txdma->status); out_le32(&awacs_txdma->control, (RUN|PAUSE|FLUSH|WAKE) << 16); write_sq.died++ ; if (!emergency_in_use) { /* new problem */ memcpy((void *)emergency_dbdma_cmd, (void *)cp, sizeof(struct dbdma_cmd)); emergency_in_use = 1; cp = emergency_dbdma_cmd; } /* now bump the values to reflect the amount we haven't yet shifted */ req = ld_le16(&cp->req_count); res = ld_le16(&cp->res_count); phy = ld_le32(&cp->phy_addr); phy += (req - res); st_le16(&cp->req_count, res); st_le16(&cp->res_count, 0); st_le16(&cp->xfer_status, 0); st_le32(&cp->phy_addr, phy); st_le32(&cp->cmd_dep, virt_to_bus(&awacs_tx_cmds[(i+1)%write_sq.max_count])); st_le16(&cp->command, OUTPUT_MORE | BR_ALWAYS | INTR_ALWAYS); /* point at our patched up command block */ out_le32(&awacs_txdma->cmdptr, virt_to_bus(cp)); /* we must re-start the controller */ (void)in_le32(&awacs_txdma->status); /* should complete clearing the DEAD status */ out_le32(&awacs_txdma->control, ((RUN|WAKE) << 16) + (RUN|WAKE)); break; /* this block is still going */ } if ((stat & ACTIVE) == 0) break; /* this frame is still going */ if (emergency_in_use) emergency_in_use = 0 ; /* done that */ --write_sq.count; --write_sq.active; i_nowrap++; if (++i >= write_sq.max_count) i = 0; } /* if we stopped and we were not sync-ing - then we under-ran */ if( write_sq.syncing == 0 ){ stat = in_le32(&awacs_txdma->status) ; /* we hit the dbdma_stop */ if( (stat & ACTIVE) == 0 ) write_sq.xruns++ ; } /* if we used some data up then wake the writer to supply some more*/ if (i_nowrap != write_sq.front) WAKE_UP(write_sq.action_queue); write_sq.front = i; /* but make sure we funnel what we've already got */\ if (!awacs_sleeping) __PMacPlay(); /* make the wake-on-empty conditional on syncing */ if (!write_sq.active && (write_sq.syncing & 1)) WAKE_UP(write_sq.sync_queue); /* any time we're empty */ spin_unlock(&dmasound.lock); return IRQ_HANDLED; } static irqreturn_t pmac_awacs_rx_intr(int irq, void *devid, struct pt_regs *regs) { int stat ; /* For some reason on my PowerBook G3, I get one interrupt * when the interrupt vector is installed (like something is * pending). This happens before the dbdma is initialized by * us, so I just check the command pointer and if it is zero, * just blow it off. */ if (in_le32(&awacs_rxdma->cmdptr) == 0) return IRQ_HANDLED; /* We also want to blow 'em off when shutting down. */ if (read_sq.active == 0) return IRQ_HANDLED; spin_lock(&dmasound.lock); /* Check multiple buffers in case we were held off from * interrupt processing for a long time. Geeze, I really hope * this doesn't happen. */ while ((stat=awacs_rx_cmds[read_sq.rear].xfer_status)) { /* if we got a "DEAD" status then just log it for now. and try to restart dma. 
TODO: figure out how best to fix it up */ if (stat & DEAD){ #ifdef DEBUG_DMASOUND printk("dmasound_pmac: rx-irq: DIED - attempting resurection\n"); #endif /* to clear DEAD status we must first clear RUN set it to quiescent to be on the safe side */ (void)in_le32(&awacs_txdma->status); out_le32(&awacs_txdma->control, (RUN|PAUSE|FLUSH|WAKE) << 16); awacs_rx_cmds[read_sq.rear].xfer_status = 0; awacs_rx_cmds[read_sq.rear].res_count = 0; read_sq.died++ ; (void)in_le32(&awacs_txdma->status); /* re-start the same block */ out_le32(&awacs_rxdma->cmdptr, virt_to_bus(&awacs_rx_cmds[read_sq.rear])); /* we must re-start the controller */ (void)in_le32(&awacs_rxdma->status); /* should complete clearing the DEAD status */ out_le32(&awacs_rxdma->control, ((RUN|WAKE) << 16) + (RUN|WAKE)); spin_unlock(&dmasound.lock); return IRQ_HANDLED; /* try this block again */ } /* Clear status and move on to next buffer. */ awacs_rx_cmds[read_sq.rear].xfer_status = 0; read_sq.rear++; /* Wrap the buffer ring. */ if (read_sq.rear >= read_sq.max_active) read_sq.rear = 0; /* If we have caught up to the front buffer, bump it. * This will cause weird (but not fatal) results if the * read loop is currently using this buffer. The user is * behind in this case anyway, so weird things are going * to happen. */ if (read_sq.rear == read_sq.front) { read_sq.front++; read_sq.xruns++ ; /* we overan */ if (read_sq.front >= read_sq.max_active) read_sq.front = 0; } } WAKE_UP(read_sq.action_queue); spin_unlock(&dmasound.lock); return IRQ_HANDLED; } static irqreturn_t pmac_awacs_intr(int irq, void *devid, struct pt_regs *regs) { int ctrl; int status; int r1; spin_lock(&dmasound.lock); ctrl = in_le32(&awacs->control); status = in_le32(&awacs->codec_stat); if (ctrl & MASK_PORTCHG) { /* tested on Screamer, should work on others too */ if (awacs_revision == AWACS_SCREAMER) { if (((status & MASK_HDPCONN) >> 3) && (hdp_connected == 0)) { hdp_connected = 1; r1 = awacs_reg[1] | MASK_SPKMUTE; awacs_reg[1] = r1; awacs_write(r1 | MASK_ADDR_MUTE); } else if (((status & MASK_HDPCONN) >> 3 == 0) && (hdp_connected == 1)) { hdp_connected = 0; r1 = awacs_reg[1] & ~MASK_SPKMUTE; awacs_reg[1] = r1; awacs_write(r1 | MASK_ADDR_MUTE); } } } if (ctrl & MASK_CNTLERR) { int err = (in_le32(&awacs->codec_stat) & MASK_ERRCODE) >> 16; /* CHECK: we just swallow burgundy errors at the moment..*/ if (err != 0 && awacs_revision != AWACS_BURGUNDY) printk(KERN_ERR "dmasound_pmac: error %x\n", err); } /* Writing 1s to the CNTLERR and PORTCHG bits clears them... */ out_le32(&awacs->control, ctrl); spin_unlock(&dmasound.lock); return IRQ_HANDLED; } static void awacs_write(int val) { int count = 300 ; if (awacs_revision >= AWACS_DACA || !awacs) return ; while ((in_le32(&awacs->codec_ctrl) & MASK_NEWECMD) && count--) udelay(1) ; /* timeout is > 2 samples at lowest rate */ out_le32(&awacs->codec_ctrl, val | (awacs_subframe << 22)); (void)in_le32(&awacs->byteswap); } /* this is called when the beep timer expires... it will be called even if the beep has been overidden by other sound output. 
*/ static void awacs_nosound(unsigned long xx) { unsigned long flags; int count = 600 ; /* > four samples at lowest rate */ spin_lock_irqsave(&dmasound.lock, flags); if (beep_playing) { st_le16(&beep_dbdma_cmd->command, DBDMA_STOP); out_le32(&awacs_txdma->control, (RUN|PAUSE|FLUSH|WAKE) << 16); while ((in_le32(&awacs_txdma->status) & RUN) && count--) udelay(1); if (awacs) awacs_setup_for_beep(-1); beep_playing = 0; } spin_unlock_irqrestore(&dmasound.lock, flags); } /* * We generate the beep with a single dbdma command that loops a buffer * forever - without generating interrupts. * * So, to stop it you have to stop dma output as per awacs_nosound. */ static int awacs_beep_event(struct input_dev *dev, unsigned int type, unsigned int code, int hz) { unsigned long flags; int beep_speed = 0; int srate; int period, ncycles, nsamples; int i, j, f; short *p; static int beep_hz_cache; static int beep_nsamples_cache; static int beep_volume_cache; if (type != EV_SND) return -1; switch (code) { case SND_BELL: if (hz) hz = 1000; break; case SND_TONE: break; default: return -1; } if (beep_buf == NULL) return -1; /* quick-hack fix for DACA, Burgundy & Tumbler */ if (awacs_revision >= AWACS_DACA){ srate = 44100 ; } else { for (i = 0; i < 8 && awacs_freqs[i] >= BEEP_SRATE; ++i) if (awacs_freqs_ok[i]) beep_speed = i; srate = awacs_freqs[beep_speed]; } if (hz <= srate / BEEP_BUFLEN || hz > srate / 2) { /* cancel beep currently playing */ awacs_nosound(0); return 0; } spin_lock_irqsave(&dmasound.lock, flags); if (beep_playing || write_sq.active || beep_buf == NULL) { spin_unlock_irqrestore(&dmasound.lock, flags); return -1; /* too hard, sorry :-( */ } beep_playing = 1; st_le16(&beep_dbdma_cmd->command, OUTPUT_MORE + BR_ALWAYS); spin_unlock_irqrestore(&dmasound.lock, flags); if (hz == beep_hz_cache && beep_vol == beep_volume_cache) { nsamples = beep_nsamples_cache; } else { period = srate * 256 / hz; /* fixed point */ ncycles = BEEP_BUFLEN * 256 / period; nsamples = (period * ncycles) >> 8; f = ncycles * 65536 / nsamples; j = 0; p = beep_buf; for (i = 0; i < nsamples; ++i, p += 2) { p[0] = p[1] = beep_wform[j >> 8] * beep_vol; j = (j + f) & 0xffff; } beep_hz_cache = hz; beep_volume_cache = beep_vol; beep_nsamples_cache = nsamples; } st_le16(&beep_dbdma_cmd->req_count, nsamples*4); st_le16(&beep_dbdma_cmd->xfer_status, 0); st_le32(&beep_dbdma_cmd->cmd_dep, virt_to_bus(beep_dbdma_cmd)); st_le32(&beep_dbdma_cmd->phy_addr, virt_to_bus(beep_buf)); awacs_beep_state = 1; spin_lock_irqsave(&dmasound.lock, flags); if (beep_playing) { /* i.e. 
haven't been terminated already */ int count = 300 ; out_le32(&awacs_txdma->control, (RUN|WAKE|FLUSH|PAUSE) << 16); while ((in_le32(&awacs_txdma->status) & RUN) && count--) udelay(1); /* timeout > 2 samples at lowest rate*/ if (awacs) awacs_setup_for_beep(beep_speed); out_le32(&awacs_txdma->cmdptr, virt_to_bus(beep_dbdma_cmd)); (void)in_le32(&awacs_txdma->status); out_le32(&awacs_txdma->control, RUN | (RUN << 16)); } spin_unlock_irqrestore(&dmasound.lock, flags); return 0; } /* used in init and for wake-up */ static void load_awacs(void) { awacs_write(awacs_reg[0] + MASK_ADDR0); awacs_write(awacs_reg[1] + MASK_ADDR1); awacs_write(awacs_reg[2] + MASK_ADDR2); awacs_write(awacs_reg[4] + MASK_ADDR4); if (awacs_revision == AWACS_SCREAMER) { awacs_write(awacs_reg[5] + MASK_ADDR5); msleep(100); awacs_write(awacs_reg[6] + MASK_ADDR6); msleep(2); awacs_write(awacs_reg[1] + MASK_ADDR1); awacs_write(awacs_reg[7] + MASK_ADDR7); } if (awacs) { if (hw_can_byteswap && (dmasound.hard.format == AFMT_S16_LE)) out_le32(&awacs->byteswap, BS_VAL); else out_le32(&awacs->byteswap, 0); } } #ifdef CONFIG_PM /* * Save state when going to sleep, restore it afterwards. */ /* FIXME: sort out disabling/re-enabling of read stuff as well */ static int awacs_sleep_notify(struct pmu_sleep_notifier *self, int when) { unsigned long flags; switch (when) { case PBOOK_SLEEP_NOW: LOCK(); awacs_sleeping = 1; /* Tell the rest of the driver we are now going to sleep */ mb(); if (awacs_revision == AWACS_SCREAMER || awacs_revision == AWACS_AWACS) { awacs_reg1_save = awacs_reg[1]; awacs_reg[1] |= MASK_AMUTE | MASK_CMUTE; awacs_write(MASK_ADDR1 | awacs_reg[1]); } PMacSilence(); /* stop rx - if going - a bit of a daft user... but */ out_le32(&awacs_rxdma->control, (RUN|WAKE|FLUSH << 16)); /* deny interrupts */ if (awacs) disable_irq(awacs_irq); disable_irq(awacs_tx_irq); disable_irq(awacs_rx_irq); /* Chip specific sleep code */ switch (awacs_revision) { case AWACS_TUMBLER: case AWACS_SNAPPER: write_audio_gpio(gpio_headphone_mute, gpio_headphone_mute_pol); write_audio_gpio(gpio_amp_mute, gpio_amp_mute_pol); tas_enter_sleep(); write_audio_gpio(gpio_audio_reset, gpio_audio_reset_pol); break ; case AWACS_DACA: daca_enter_sleep(); break ; case AWACS_BURGUNDY: break ; case AWACS_SCREAMER: case AWACS_AWACS: default: out_le32(&awacs->control, 0x11) ; break ; } /* Disable sound clock */ pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, awacs_node, 0, 0); /* According to Darwin, we do that after turning off the sound * chip clock. 
All this will have to be cleaned up once we properly * parse the OF sound-objects */ if ((machine_is_compatible("PowerBook3,1") || machine_is_compatible("PowerBook3,2")) && awacs) { awacs_reg[1] |= MASK_PAROUT0 | MASK_PAROUT1; awacs_write(MASK_ADDR1 | awacs_reg[1]); msleep(200); } break; case PBOOK_WAKE: /* Enable sound clock */ pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, awacs_node, 0, 1); if ((machine_is_compatible("PowerBook3,1") || machine_is_compatible("PowerBook3,2")) && awacs) { msleep(100); awacs_reg[1] &= ~(MASK_PAROUT0 | MASK_PAROUT1); awacs_write(MASK_ADDR1 | awacs_reg[1]); msleep(300); } else msleep(1000); /* restore settings */ switch (awacs_revision) { case AWACS_TUMBLER: case AWACS_SNAPPER: write_audio_gpio(gpio_headphone_mute, gpio_headphone_mute_pol); write_audio_gpio(gpio_amp_mute, gpio_amp_mute_pol); write_audio_gpio(gpio_audio_reset, gpio_audio_reset_pol); msleep(100); write_audio_gpio(gpio_audio_reset, !gpio_audio_reset_pol); msleep(150); tas_leave_sleep(); /* Stub for now */ headphone_intr(0,NULL,NULL); break; case AWACS_DACA: msleep(10); /* Check this !!! */ daca_leave_sleep(); break ; /* dont know how yet */ case AWACS_BURGUNDY: break ; case AWACS_SCREAMER: case AWACS_AWACS: default: load_awacs() ; break ; } /* Recalibrate chip */ if (awacs_revision == AWACS_SCREAMER && awacs) awacs_recalibrate(); /* Make sure dma is stopped */ PMacSilence(); if (awacs) enable_irq(awacs_irq); enable_irq(awacs_tx_irq); enable_irq(awacs_rx_irq); if (awacs) { /* OK, allow ints back again */ out_le32(&awacs->control, MASK_IEPC | (awacs_rate_index << 8) | 0x11 | (awacs_revision < AWACS_DACA ? MASK_IEE: 0)); } if (macio_base && is_pbook_g3) { /* FIXME: should restore the setup we had...*/ out_8(macio_base + 0x37, 3); } else if (is_pbook_3X00) { in_8(latch_base + 0x190); } /* Remove mute */ if (awacs_revision == AWACS_SCREAMER || awacs_revision == AWACS_AWACS) { awacs_reg[1] = awacs_reg1_save; awacs_write(MASK_ADDR1 | awacs_reg[1]); } awacs_sleeping = 0; /* Resume pending sounds. */ /* we don't try to restart input... 
*/ spin_lock_irqsave(&dmasound.lock, flags); __PMacPlay(); spin_unlock_irqrestore(&dmasound.lock, flags); UNLOCK(); } return PBOOK_SLEEP_OK; } #endif /* CONFIG_PM */ /* All the burgundy functions: */ /* Waits for busy flag to clear */ static inline void awacs_burgundy_busy_wait(void) { int count = 50; /* > 2 samples at 44k1 */ while ((in_le32(&awacs->codec_ctrl) & MASK_NEWECMD) && count--) udelay(1) ; } static inline void awacs_burgundy_extend_wait(void) { int count = 50 ; /* > 2 samples at 44k1 */ while ((!(in_le32(&awacs->codec_stat) & MASK_EXTEND)) && count--) udelay(1) ; count = 50; while ((in_le32(&awacs->codec_stat) & MASK_EXTEND) && count--) udelay(1); } static void awacs_burgundy_wcw(unsigned addr, unsigned val) { out_le32(&awacs->codec_ctrl, addr + 0x200c00 + (val & 0xff)); awacs_burgundy_busy_wait(); out_le32(&awacs->codec_ctrl, addr + 0x200d00 +((val>>8) & 0xff)); awacs_burgundy_busy_wait(); out_le32(&awacs->codec_ctrl, addr + 0x200e00 +((val>>16) & 0xff)); awacs_burgundy_busy_wait(); out_le32(&awacs->codec_ctrl, addr + 0x200f00 +((val>>24) & 0xff)); awacs_burgundy_busy_wait(); } static unsigned awacs_burgundy_rcw(unsigned addr) { unsigned val = 0; unsigned long flags; /* should have timeouts here */ spin_lock_irqsave(&dmasound.lock, flags); out_le32(&awacs->codec_ctrl, addr + 0x100000); awacs_burgundy_busy_wait(); awacs_burgundy_extend_wait(); val += (in_le32(&awacs->codec_stat) >> 4) & 0xff; out_le32(&awacs->codec_ctrl, addr + 0x100100); awacs_burgundy_busy_wait(); awacs_burgundy_extend_wait(); val += ((in_le32(&awacs->codec_stat)>>4) & 0xff) <<8; out_le32(&awacs->codec_ctrl, addr + 0x100200); awacs_burgundy_busy_wait(); awacs_burgundy_extend_wait(); val += ((in_le32(&awacs->codec_stat)>>4) & 0xff) <<16; out_le32(&awacs->codec_ctrl, addr + 0x100300); awacs_burgundy_busy_wait(); awacs_burgundy_extend_wait(); val += ((in_le32(&awacs->codec_stat)>>4) & 0xff) <<24; spin_unlock_irqrestore(&dmasound.lock, flags); return val; } static void awacs_burgundy_wcb(unsigned addr, unsigned val) { out_le32(&awacs->codec_ctrl, addr + 0x300000 + (val & 0xff)); awacs_burgundy_busy_wait(); } static unsigned awacs_burgundy_rcb(unsigned addr) { unsigned val = 0; unsigned long flags; /* should have timeouts here */ spin_lock_irqsave(&dmasound.lock, flags); out_le32(&awacs->codec_ctrl, addr + 0x100000); awacs_burgundy_busy_wait(); awacs_burgundy_extend_wait(); val += (in_le32(&awacs->codec_stat) >> 4) & 0xff; spin_unlock_irqrestore(&dmasound.lock, flags); return val; } static int awacs_burgundy_check(void) { /* Checks to see the chip is alive and kicking */ int error = in_le32(&awacs->codec_ctrl) & MASK_ERRCODE; return error == 0xf0000; } static int awacs_burgundy_init(void) { if (awacs_burgundy_check()) { printk(KERN_WARNING "dmasound_pmac: burgundy not working :-(\n"); return 1; } awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_OUTPUTENABLES, DEF_BURGUNDY_OUTPUTENABLES); awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, DEF_BURGUNDY_MORE_OUTPUTENABLES); awacs_burgundy_wcw(MASK_ADDR_BURGUNDY_OUTPUTSELECTS, DEF_BURGUNDY_OUTPUTSELECTS); awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_INPSEL21, DEF_BURGUNDY_INPSEL21); awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_INPSEL3, DEF_BURGUNDY_INPSEL3); awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_GAINCD, DEF_BURGUNDY_GAINCD); awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_GAINLINE, DEF_BURGUNDY_GAINLINE); awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_GAINMIC, DEF_BURGUNDY_GAINMIC); awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_GAINMODEM, DEF_BURGUNDY_GAINMODEM); 
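/* input gains are programmed above; the writes below load the default speaker, line-out and headphone attenuation plus the per-source volumes */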
awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_ATTENSPEAKER, DEF_BURGUNDY_ATTENSPEAKER); awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_ATTENLINEOUT, DEF_BURGUNDY_ATTENLINEOUT); awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_ATTENHP, DEF_BURGUNDY_ATTENHP); awacs_burgundy_wcw(MASK_ADDR_BURGUNDY_MASTER_VOLUME, DEF_BURGUNDY_MASTER_VOLUME); awacs_burgundy_wcw(MASK_ADDR_BURGUNDY_VOLCD, DEF_BURGUNDY_VOLCD); awacs_burgundy_wcw(MASK_ADDR_BURGUNDY_VOLLINE, DEF_BURGUNDY_VOLLINE); awacs_burgundy_wcw(MASK_ADDR_BURGUNDY_VOLMIC, DEF_BURGUNDY_VOLMIC); return 0; } static void awacs_burgundy_write_volume(unsigned address, int volume) { int hardvolume,lvolume,rvolume; lvolume = (volume & 0xff) ? (volume & 0xff) + 155 : 0; rvolume = ((volume >>8)&0xff) ? ((volume >> 8)&0xff ) + 155 : 0; hardvolume = lvolume + (rvolume << 16); awacs_burgundy_wcw(address, hardvolume); } static int awacs_burgundy_read_volume(unsigned address) { int softvolume,wvolume; wvolume = awacs_burgundy_rcw(address); softvolume = (wvolume & 0xff) - 155; softvolume += (((wvolume >> 16) & 0xff) - 155)<<8; return softvolume > 0 ? softvolume : 0; } static int awacs_burgundy_read_mvolume(unsigned address) { int lvolume,rvolume,wvolume; wvolume = awacs_burgundy_rcw(address); wvolume &= 0xffff; rvolume = (wvolume & 0xff) - 155; lvolume = ((wvolume & 0xff00)>>8) - 155; return lvolume + (rvolume << 8); } static void awacs_burgundy_write_mvolume(unsigned address, int volume) { int lvolume,rvolume,hardvolume; lvolume = (volume &0xff) ? (volume & 0xff) + 155 :0; rvolume = ((volume >>8) & 0xff) ? (volume >> 8) + 155 :0; hardvolume = lvolume + (rvolume << 8); hardvolume += (hardvolume << 16); awacs_burgundy_wcw(address, hardvolume); } /* End burgundy functions */ /* Set up output volumes on machines with the 'perch/whisper' extension card. * this has an SGS i2c chip (7433) which is accessed using the cuda. * * TODO: split this out and make use of the other parts of the SGS chip to * do Bass, Treble etc. 
*/ static void awacs_enable_amp(int spkr_vol) { #ifdef CONFIG_ADB_CUDA struct adb_request req; if (sys_ctrler != SYS_CTRLER_CUDA) return; /* turn on headphones */ cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_GET_SET_IIC, 0x8a, 4, 0); while (!req.complete) cuda_poll(); cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_GET_SET_IIC, 0x8a, 6, 0); while (!req.complete) cuda_poll(); /* turn on speaker */ cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_GET_SET_IIC, 0x8a, 3, (100 - (spkr_vol & 0xff)) * 32 / 100); while (!req.complete) cuda_poll(); cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_GET_SET_IIC, 0x8a, 5, (100 - ((spkr_vol >> 8) & 0xff)) * 32 / 100); while (!req.complete) cuda_poll(); cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_GET_SET_IIC, 0x8a, 1, 0x29); while (!req.complete) cuda_poll(); #endif /* CONFIG_ADB_CUDA */ } /*** Mid level stuff *********************************************************/ /* * /dev/mixer abstraction */ static void do_line_lev(int data) { line_lev = data ; awacs_reg[0] &= ~MASK_MUX_AUDIN; if ((data & 0xff) >= 50) awacs_reg[0] |= MASK_MUX_AUDIN; awacs_write(MASK_ADDR0 | awacs_reg[0]); } static void do_ip_gain(int data) { ip_gain = data ; data &= 0xff; awacs_reg[0] &= ~MASK_GAINLINE; if (awacs_revision == AWACS_SCREAMER) { awacs_reg[6] &= ~MASK_MIC_BOOST ; if (data >= 33) { awacs_reg[0] |= MASK_GAINLINE; if( data >= 66) awacs_reg[6] |= MASK_MIC_BOOST ; } awacs_write(MASK_ADDR6 | awacs_reg[6]) ; } else { if (data >= 50) awacs_reg[0] |= MASK_GAINLINE; } awacs_write(MASK_ADDR0 | awacs_reg[0]); } static void do_mic_lev(int data) { mic_lev = data ; data &= 0xff; awacs_reg[0] &= ~MASK_MUX_MIC; if (data >= 50) awacs_reg[0] |= MASK_MUX_MIC; awacs_write(MASK_ADDR0 | awacs_reg[0]); } static void do_cd_lev(int data) { cd_lev = data ; awacs_reg[0] &= ~MASK_MUX_CD; if ((data & 0xff) >= 50) awacs_reg[0] |= MASK_MUX_CD; awacs_write(MASK_ADDR0 | awacs_reg[0]); } static void do_rec_lev(int data) { int left, right ; rec_lev = data ; /* need to fudge this to use the volume setter routine */ left = 100 - (data & 0xff) ; if( left < 0 ) left = 0 ; right = 100 - ((data >> 8) & 0xff) ; if( right < 0 ) right = 0 ; left |= (right << 8 ); left = awacs_volume_setter(left, 0, 0, 4); } static void do_passthru_vol(int data) { passthru_vol = data ; awacs_reg[1] &= ~MASK_LOOPTHRU; if (awacs_revision == AWACS_SCREAMER) { if( data ) { /* switch it on for non-zero */ awacs_reg[1] |= MASK_LOOPTHRU; awacs_write(MASK_ADDR1 | awacs_reg[1]); } data = awacs_volume_setter(data, 5, 0, 6) ; } else { if ((data & 0xff) >= 50) awacs_reg[1] |= MASK_LOOPTHRU; awacs_write(MASK_ADDR1 | awacs_reg[1]); data = (awacs_reg[1] & MASK_LOOPTHRU)? 100: 0; } } static int awacs_mixer_ioctl(u_int cmd, u_long arg) { int data; int rc; switch (cmd) { case SOUND_MIXER_READ_CAPS: /* say we will allow multiple inputs? prob. 
wrong so I'm switching it to single */ return IOCTL_OUT(arg, 1); case SOUND_MIXER_READ_DEVMASK: data = SOUND_MASK_VOLUME | SOUND_MASK_SPEAKER | SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD | SOUND_MASK_IGAIN | SOUND_MASK_RECLEV | SOUND_MASK_ALTPCM | SOUND_MASK_MONITOR; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_RECMASK: data = SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_RECSRC: data = 0; if (awacs_reg[0] & MASK_MUX_AUDIN) data |= SOUND_MASK_LINE; if (awacs_reg[0] & MASK_MUX_MIC) data |= SOUND_MASK_MIC; if (awacs_reg[0] & MASK_MUX_CD) data |= SOUND_MASK_CD; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_WRITE_RECSRC: IOCTL_IN(arg, data); data &= (SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD); awacs_reg[0] &= ~(MASK_MUX_CD | MASK_MUX_MIC | MASK_MUX_AUDIN); if (data & SOUND_MASK_LINE) awacs_reg[0] |= MASK_MUX_AUDIN; if (data & SOUND_MASK_MIC) awacs_reg[0] |= MASK_MUX_MIC; if (data & SOUND_MASK_CD) awacs_reg[0] |= MASK_MUX_CD; awacs_write(awacs_reg[0] | MASK_ADDR0); rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_STEREODEVS: data = SOUND_MASK_VOLUME | SOUND_MASK_SPEAKER| SOUND_MASK_RECLEV ; if (awacs_revision == AWACS_SCREAMER) data |= SOUND_MASK_MONITOR ; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_WRITE_VOLUME: IOCTL_IN(arg, data); line_vol = data ; awacs_volume_setter(data, 2, 0, 6); /* fall through */ case SOUND_MIXER_READ_VOLUME: rc = IOCTL_OUT(arg, line_vol); break; case SOUND_MIXER_WRITE_SPEAKER: IOCTL_IN(arg, data); spk_vol = data ; if (has_perch) awacs_enable_amp(data); else (void)awacs_volume_setter(data, 4, MASK_CMUTE, 6); /* fall though */ case SOUND_MIXER_READ_SPEAKER: rc = IOCTL_OUT(arg, spk_vol); break; case SOUND_MIXER_WRITE_ALTPCM: /* really bell volume */ IOCTL_IN(arg, data); beep_vol = data & 0xff; /* fall through */ case SOUND_MIXER_READ_ALTPCM: rc = IOCTL_OUT(arg, beep_vol); break; case SOUND_MIXER_WRITE_LINE: IOCTL_IN(arg, data); do_line_lev(data) ; /* fall through */ case SOUND_MIXER_READ_LINE: rc = IOCTL_OUT(arg, line_lev); break; case SOUND_MIXER_WRITE_IGAIN: IOCTL_IN(arg, data); do_ip_gain(data) ; /* fall through */ case SOUND_MIXER_READ_IGAIN: rc = IOCTL_OUT(arg, ip_gain); break; case SOUND_MIXER_WRITE_MIC: IOCTL_IN(arg, data); do_mic_lev(data); /* fall through */ case SOUND_MIXER_READ_MIC: rc = IOCTL_OUT(arg, mic_lev); break; case SOUND_MIXER_WRITE_CD: IOCTL_IN(arg, data); do_cd_lev(data); /* fall through */ case SOUND_MIXER_READ_CD: rc = IOCTL_OUT(arg, cd_lev); break; case SOUND_MIXER_WRITE_RECLEV: IOCTL_IN(arg, data); do_rec_lev(data) ; /* fall through */ case SOUND_MIXER_READ_RECLEV: rc = IOCTL_OUT(arg, rec_lev); break; case MIXER_WRITE(SOUND_MIXER_MONITOR): IOCTL_IN(arg, data); do_passthru_vol(data) ; /* fall through */ case MIXER_READ(SOUND_MIXER_MONITOR): rc = IOCTL_OUT(arg, passthru_vol); break; default: rc = -EINVAL; } return rc; } static void awacs_mixer_init(void) { awacs_volume_setter(line_vol, 2, 0, 6); if (has_perch) awacs_enable_amp(spk_vol); else (void)awacs_volume_setter(spk_vol, 4, MASK_CMUTE, 6); do_line_lev(line_lev) ; do_ip_gain(ip_gain) ; do_mic_lev(mic_lev) ; do_cd_lev(cd_lev) ; do_rec_lev(rec_lev) ; do_passthru_vol(passthru_vol) ; } static int burgundy_mixer_ioctl(u_int cmd, u_long arg) { int data; int rc; /* We are, we are, we are... 
Burgundy or better */ switch(cmd) { case SOUND_MIXER_READ_DEVMASK: data = SOUND_MASK_VOLUME | SOUND_MASK_CD | SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_SPEAKER | SOUND_MASK_ALTPCM; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_RECMASK: data = SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_RECSRC: data = 0; if (awacs_reg[0] & MASK_MUX_AUDIN) data |= SOUND_MASK_LINE; if (awacs_reg[0] & MASK_MUX_MIC) data |= SOUND_MASK_MIC; if (awacs_reg[0] & MASK_MUX_CD) data |= SOUND_MASK_CD; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_WRITE_RECSRC: IOCTL_IN(arg, data); data &= (SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD); awacs_reg[0] &= ~(MASK_MUX_CD | MASK_MUX_MIC | MASK_MUX_AUDIN); if (data & SOUND_MASK_LINE) awacs_reg[0] |= MASK_MUX_AUDIN; if (data & SOUND_MASK_MIC) awacs_reg[0] |= MASK_MUX_MIC; if (data & SOUND_MASK_CD) awacs_reg[0] |= MASK_MUX_CD; awacs_write(awacs_reg[0] | MASK_ADDR0); rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_STEREODEVS: data = SOUND_MASK_VOLUME | SOUND_MASK_SPEAKER | SOUND_MASK_RECLEV | SOUND_MASK_CD | SOUND_MASK_LINE; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_CAPS: rc = IOCTL_OUT(arg, 0); break; case SOUND_MIXER_WRITE_VOLUME: IOCTL_IN(arg, data); awacs_burgundy_write_mvolume(MASK_ADDR_BURGUNDY_MASTER_VOLUME, data); /* Fall through */ case SOUND_MIXER_READ_VOLUME: rc = IOCTL_OUT(arg, awacs_burgundy_read_mvolume(MASK_ADDR_BURGUNDY_MASTER_VOLUME)); break; case SOUND_MIXER_WRITE_SPEAKER: IOCTL_IN(arg, data); if (!(data & 0xff)) { /* Mute the left speaker */ awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, awacs_burgundy_rcb(MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES) & ~0x2); } else { /* Unmute the left speaker */ awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, awacs_burgundy_rcb(MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES) | 0x2); } if (!(data & 0xff00)) { /* Mute the right speaker */ awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, awacs_burgundy_rcb(MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES) & ~0x4); } else { /* Unmute the right speaker */ awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, awacs_burgundy_rcb(MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES) | 0x4); } data = (((data&0xff)*16)/100 > 0xf ? 0xf : (((data&0xff)*16)/100)) + ((((data>>8)*16)/100 > 0xf ? 
0xf : ((((data>>8)*16)/100)))<<4); awacs_burgundy_wcb(MASK_ADDR_BURGUNDY_ATTENSPEAKER, ~data); /* Fall through */ case SOUND_MIXER_READ_SPEAKER: data = awacs_burgundy_rcb(MASK_ADDR_BURGUNDY_ATTENSPEAKER); data = (((data & 0xf)*100)/16) + ((((data>>4)*100)/16)<<8); rc = IOCTL_OUT(arg, (~data) & 0x0000ffff); break; case SOUND_MIXER_WRITE_ALTPCM: /* really bell volume */ IOCTL_IN(arg, data); beep_vol = data & 0xff; /* fall through */ case SOUND_MIXER_READ_ALTPCM: rc = IOCTL_OUT(arg, beep_vol); break; case SOUND_MIXER_WRITE_LINE: IOCTL_IN(arg, data); awacs_burgundy_write_volume(MASK_ADDR_BURGUNDY_VOLLINE, data); /* fall through */ case SOUND_MIXER_READ_LINE: data = awacs_burgundy_read_volume(MASK_ADDR_BURGUNDY_VOLLINE); rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_WRITE_MIC: IOCTL_IN(arg, data); /* Mic is mono device */ data = (data << 8) + (data << 24); awacs_burgundy_write_volume(MASK_ADDR_BURGUNDY_VOLMIC, data); /* fall through */ case SOUND_MIXER_READ_MIC: data = awacs_burgundy_read_volume(MASK_ADDR_BURGUNDY_VOLMIC); data <<= 24; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_WRITE_CD: IOCTL_IN(arg, data); awacs_burgundy_write_volume(MASK_ADDR_BURGUNDY_VOLCD, data); /* fall through */ case SOUND_MIXER_READ_CD: data = awacs_burgundy_read_volume(MASK_ADDR_BURGUNDY_VOLCD); rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_WRITE_RECLEV: IOCTL_IN(arg, data); data = awacs_volume_setter(data, 0, 0, 4); rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_RECLEV: data = awacs_get_volume(awacs_reg[0], 4); rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_OUTMASK: case SOUND_MIXER_OUTSRC: default: rc = -EINVAL; } return rc; } static int daca_mixer_ioctl(u_int cmd, u_long arg) { int data; int rc; /* And the DACA's no genius either! */ switch(cmd) { case SOUND_MIXER_READ_DEVMASK: data = SOUND_MASK_VOLUME; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_RECMASK: data = 0; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_RECSRC: data = 0; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_WRITE_RECSRC: IOCTL_IN(arg, data); data =0; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_STEREODEVS: data = SOUND_MASK_VOLUME; rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_READ_CAPS: rc = IOCTL_OUT(arg, 0); break; case SOUND_MIXER_WRITE_VOLUME: IOCTL_IN(arg, data); daca_set_volume(data, data); /* Fall through */ case SOUND_MIXER_READ_VOLUME: daca_get_volume(& data, &data); rc = IOCTL_OUT(arg, data); break; case SOUND_MIXER_OUTMASK: case SOUND_MIXER_OUTSRC: default: rc = -EINVAL; } return rc; } static int PMacMixerIoctl(u_int cmd, u_long arg) { int rc; /* Different IOCTLS for burgundy and, eventually, DACA & Tumbler */ TRY_LOCK(); switch (awacs_revision){ case AWACS_BURGUNDY: rc = burgundy_mixer_ioctl(cmd, arg); break ; case AWACS_DACA: rc = daca_mixer_ioctl(cmd, arg); break; case AWACS_TUMBLER: case AWACS_SNAPPER: rc = tas_mixer_ioctl(cmd, arg); break ; default: /* ;-)) */ rc = awacs_mixer_ioctl(cmd, arg); } UNLOCK(); return rc; } static void PMacMixerInit(void) { switch (awacs_revision) { case AWACS_TUMBLER: printk("AE-Init tumbler mixer\n"); break ; case AWACS_SNAPPER: printk("AE-Init snapper mixer\n"); break ; case AWACS_DACA: case AWACS_BURGUNDY: break ; /* don't know yet */ case AWACS_AWACS: case AWACS_SCREAMER: default: awacs_mixer_init() ; break ; } } /* Write/Read sq setup functions: Check to see if we have enough (or any) dbdma cmd buffers for the user's fragment settings. If not, allocate some. 
If this fails we will point at the beep buffer - as an emergency provision - to stop dma tromping on some random bit of memory (if someone lets it go anyway). The command buffers are then set up to point to the fragment buffers (allocated elsewhere). We need n+1 commands the last of which holds a NOP + loop to start. */ static int PMacWriteSqSetup(void) { int i, count = 600 ; volatile struct dbdma_cmd *cp; LOCK(); /* stop the controller from doing any output - if it isn't already. it _should_ be before this is called anyway */ out_le32(&awacs_txdma->control, (RUN|PAUSE|FLUSH|WAKE) << 16); while ((in_le32(&awacs_txdma->status) & RUN) && count--) udelay(1); #ifdef DEBUG_DMASOUND if (count <= 0) printk("dmasound_pmac: write sq setup: timeout waiting for dma to stop\n"); #endif if ((write_sq.max_count + 1) > number_of_tx_cmd_buffers) { kfree(awacs_tx_cmd_space); number_of_tx_cmd_buffers = 0; /* we need nbufs + 1 (for the loop) and we should request + 1 again because the DBDMA_ALIGN might pull the start up by up to sizeof(struct dbdma_cmd) - 4. */ awacs_tx_cmd_space = kmalloc ((write_sq.max_count + 1 + 1) * sizeof(struct dbdma_cmd), GFP_KERNEL); if (awacs_tx_cmd_space == NULL) { /* don't leave it dangling - nasty but better than a random address */ out_le32(&awacs_txdma->cmdptr, virt_to_bus(beep_dbdma_cmd)); printk(KERN_ERR "dmasound_pmac: can't allocate dbdma cmd buffers" ", driver disabled\n"); UNLOCK(); return -ENOMEM; } awacs_tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(awacs_tx_cmd_space); number_of_tx_cmd_buffers = write_sq.max_count + 1; } cp = awacs_tx_cmds; memset((void *)cp, 0, (write_sq.max_count+1) * sizeof(struct dbdma_cmd)); for (i = 0; i < write_sq.max_count; ++i, ++cp) { st_le32(&cp->phy_addr, virt_to_bus(write_sq.buffers[i])); } st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS); st_le32(&cp->cmd_dep, virt_to_bus(awacs_tx_cmds)); /* point the controller at the command stack - ready to go */ out_le32(&awacs_txdma->cmdptr, virt_to_bus(awacs_tx_cmds)); UNLOCK(); return 0; } static int PMacReadSqSetup(void) { int i, count = 600; volatile struct dbdma_cmd *cp; LOCK(); /* stop the controller from doing any input - if it isn't already. it _should_ be before this is called anyway */ out_le32(&awacs_rxdma->control, (RUN|PAUSE|FLUSH|WAKE) << 16); while ((in_le32(&awacs_rxdma->status) & RUN) && count--) udelay(1); #ifdef DEBUG_DMASOUND if (count <= 0) printk("dmasound_pmac: read sq setup: timeout waiting for dma to stop\n"); #endif if ((read_sq.max_count+1) > number_of_rx_cmd_buffers ) { kfree(awacs_rx_cmd_space); number_of_rx_cmd_buffers = 0; /* we need nbufs + 1 (for the loop) and we should request + 1 again because the DBDMA_ALIGN might pull the start up by up to sizeof(struct dbdma_cmd) - 4 (assuming kmalloc aligns 32 bits). 
*/ awacs_rx_cmd_space = kmalloc ((read_sq.max_count + 1 + 1) * sizeof(struct dbdma_cmd), GFP_KERNEL); if (awacs_rx_cmd_space == NULL) { /* don't leave it dangling - nasty but better than a random address */ out_le32(&awacs_rxdma->cmdptr, virt_to_bus(beep_dbdma_cmd)); printk(KERN_ERR "dmasound_pmac: can't allocate dbdma cmd buffers" ", driver disabled\n"); UNLOCK(); return -ENOMEM; } awacs_rx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(awacs_rx_cmd_space); number_of_rx_cmd_buffers = read_sq.max_count + 1 ; } cp = awacs_rx_cmds; memset((void *)cp, 0, (read_sq.max_count+1) * sizeof(struct dbdma_cmd)); /* Set dma buffers up in a loop */ for (i = 0; i < read_sq.max_count; i++,cp++) { st_le32(&cp->phy_addr, virt_to_bus(read_sq.buffers[i])); st_le16(&cp->command, INPUT_MORE + INTR_ALWAYS); st_le16(&cp->req_count, read_sq.block_size); st_le16(&cp->xfer_status, 0); } /* The next two lines make the thing loop around. */ st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS); st_le32(&cp->cmd_dep, virt_to_bus(awacs_rx_cmds)); /* point the controller at the command stack - ready to go */ out_le32(&awacs_rxdma->cmdptr, virt_to_bus(awacs_rx_cmds)); UNLOCK(); return 0; } /* TODO: this needs work to guarantee that when it returns DMA has stopped but in a more elegant way than is done here.... */ static void PMacAbortRead(void) { int i; volatile struct dbdma_cmd *cp; LOCK(); /* give it a chance to update the output and provide the IRQ that is expected. */ out_le32(&awacs_rxdma->control, ((FLUSH) << 16) + FLUSH ); cp = awacs_rx_cmds; for (i = 0; i < read_sq.max_count; i++,cp++) st_le16(&cp->command, DBDMA_STOP); /* * We should probably wait for the thing to stop before we * release the memory. */ msleep(100) ; /* give it a (small) chance to act */ /* apply the sledgehammer approach - just stop it now */ out_le32(&awacs_rxdma->control, (RUN|PAUSE|FLUSH|WAKE) << 16); UNLOCK(); } extern char *get_afmt_string(int); static int PMacStateInfo(char *b, size_t sp) { int i, len = 0; len = sprintf(b,"HW rates: "); switch (awacs_revision){ case AWACS_DACA: case AWACS_BURGUNDY: len += sprintf(b,"44100 ") ; break ; case AWACS_TUMBLER: case AWACS_SNAPPER: for (i=0; i<1; i++){ if (tas_freqs_ok[i]) len += sprintf(b+len,"%d ", tas_freqs[i]) ; } break ; case AWACS_AWACS: case AWACS_SCREAMER: default: for (i=0; i<8; i++){ if (awacs_freqs_ok[i]) len += sprintf(b+len,"%d ", awacs_freqs[i]) ; } break ; } len += sprintf(b+len,"s/sec\n") ; if (len < sp) { len += sprintf(b+len,"HW AFMTS: "); i = AFMT_U16_BE ; while (i) { if (i & dmasound.mach.hardware_afmts) len += sprintf(b+len,"%s ", get_afmt_string(i & dmasound.mach.hardware_afmts)); i >>= 1 ; } len += sprintf(b+len,"\n") ; } return len ; } /*** Machine definitions *****************************************************/ static SETTINGS def_hard = { .format = AFMT_S16_BE, .stereo = 1, .size = 16, .speed = 44100 } ; static SETTINGS def_soft = { .format = AFMT_S16_BE, .stereo = 1, .size = 16, .speed = 44100 } ; static MACHINE machPMac = { .name = awacs_name, .name2 = "PowerMac Built-in Sound", .owner = THIS_MODULE, .dma_alloc = PMacAlloc, .dma_free = PMacFree, .irqinit = PMacIrqInit, #ifdef MODULE .irqcleanup = PMacIrqCleanup, #endif /* MODULE */ .init = PMacInit, .silence = PMacSilence, .setFormat = PMacSetFormat, .setVolume = PMacSetVolume, .play = PMacPlay, .record = NULL, /* default to no record */ .mixer_init = PMacMixerInit, .mixer_ioctl = PMacMixerIoctl, .write_sq_setup = PMacWriteSqSetup, .read_sq_setup = PMacReadSqSetup, .state_info = PMacStateInfo, .abort_read = PMacAbortRead, 
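/* DSP rate limits advertised by this machine description: 7350 Hz minimum, 44100 Hz maximum */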
.min_dsp_speed = 7350, .max_dsp_speed = 44100, .version = ((DMASOUND_AWACS_REVISION<<8) + DMASOUND_AWACS_EDITION) }; /*** Config & Setup **********************************************************/ /* Check for pmac models that we care about in terms of special actions. */ void __init set_model(void) { /* portables/lap-tops */ if (machine_is_compatible("AAPL,3400/2400") || machine_is_compatible("AAPL,3500")) { is_pbook_3X00 = 1 ; } if (machine_is_compatible("PowerBook1,1") || /* lombard */ machine_is_compatible("AAPL,PowerBook1998")){ /* wallstreet */ is_pbook_g3 = 1 ; return ; } } /* Get the OF node that tells us about the registers, interrupts etc. to use for sound IO. On most machines the sound IO OF node is the 'davbus' node. On newer pmacs with DACA (& Tumbler) the node to use is i2s-a. On much older machines i.e. before 9500 there is no davbus node and we have to use the 'awacs' property. In the latter case we signal this by setting the codec value - so that the code that looks for chip properties knows how to go about it. */ static struct device_node* __init get_snd_io_node(void) { struct device_node *np = NULL; /* set up awacs_node for early OF which doesn't have a full set of * properties on davbus */ awacs_node = find_devices("awacs"); if (awacs_node) awacs_revision = AWACS_AWACS; /* powermac models after 9500 (other than those which use DACA or * Tumbler) have a node called "davbus". */ np = find_devices("davbus"); /* * if we didn't find a davbus device, try 'i2s-a' since * this seems to be what iBooks (& Tumbler) have. */ if (np == NULL) np = i2s_node = find_devices("i2s-a"); /* if we didn't find this - perhaps we are on an early model * which _only_ has an 'awacs' node */ if (np == NULL && awacs_node) np = awacs_node ; /* if we failed all these return null - this will cause the * driver to give up... */ return np ; } /* Get the OF node that contains the info about the sound chip, inputs s-rates etc. This node does not exist (or contains much reduced info) on earlier machines we have to deduce the info other ways for these. */ static struct device_node* __init get_snd_info_node(struct device_node *io) { struct device_node *info; info = find_devices("sound"); while (info && info->parent != io) info = info->next; return info; } /* Find out what type of codec we have. */ static int __init get_codec_type(struct device_node *info) { /* already set if pre-davbus model and info will be NULL */ int codec = awacs_revision ; if (info) { /* must do awacs first to allow screamer to overide it */ if (device_is_compatible(info, "awacs")) codec = AWACS_AWACS ; if (device_is_compatible(info, "screamer")) codec = AWACS_SCREAMER; if (device_is_compatible(info, "burgundy")) codec = AWACS_BURGUNDY ; if (device_is_compatible(info, "daca")) codec = AWACS_DACA; if (device_is_compatible(info, "tumbler")) codec = AWACS_TUMBLER; if (device_is_compatible(info, "snapper")) codec = AWACS_SNAPPER; } return codec ; } /* find out what type, if any, of expansion card we have */ static void __init get_expansion_type(void) { if (find_devices("perch") != NULL) has_perch = 1; if (find_devices("pb-ziva-pc") != NULL) has_ziva = 1; /* need to work out how we deal with iMac SRS module */ } /* set up frame rates. * I suspect that these routines don't quite go about it the right way: * - where there is more than one rate - I think that the first property * value is the number of rates. * TODO: check some more device trees and modify accordingly * Set dmasound.mach.max_dsp_rate on the basis of these routines. 
*/ static void __init awacs_init_frame_rates(unsigned int *prop, unsigned int l) { int i ; if (prop) { for (i=0; i<8; i++) awacs_freqs_ok[i] = 0 ; for (l /= sizeof(int); l > 0; --l) { unsigned int r = *prop++; /* Apple 'Fixed' format */ if (r >= 0x10000) r >>= 16; for (i = 0; i < 8; ++i) { if (r == awacs_freqs[i]) { awacs_freqs_ok[i] = 1; break; } } } } /* else we assume that all the rates are available */ } static void __init burgundy_init_frame_rates(unsigned int *prop, unsigned int l) { int temp[9] ; int i = 0 ; if (prop) { for (l /= sizeof(int); l > 0; --l) { unsigned int r = *prop++; /* Apple 'Fixed' format */ if (r >= 0x10000) r >>= 16; temp[i] = r ; i++ ; if(i>=9) i=8; } } #ifdef DEBUG_DMASOUND if (i > 1){ int j; printk("dmasound_pmac: burgundy with multiple frame rates\n"); for(j=0; j<i; j++) printk("%d ", temp[j]) ; printk("\n") ; } #endif } static void __init daca_init_frame_rates(unsigned int *prop, unsigned int l) { int temp[9] ; int i = 0 ; if (prop) { for (l /= sizeof(int); l > 0; --l) { unsigned int r = *prop++; /* Apple 'Fixed' format */ if (r >= 0x10000) r >>= 16; temp[i] = r ; i++ ; if(i>=9) i=8; } } #ifdef DEBUG_DMASOUND if (i > 1){ int j; printk("dmasound_pmac: DACA with multiple frame rates\n"); for(j=0; j<i; j++) printk("%d ", temp[j]) ; printk("\n") ; } #endif } static void __init init_frame_rates(unsigned int *prop, unsigned int l) { switch (awacs_revision) { case AWACS_TUMBLER: case AWACS_SNAPPER: tas_init_frame_rates(prop, l); break ; case AWACS_DACA: daca_init_frame_rates(prop, l); break ; case AWACS_BURGUNDY: burgundy_init_frame_rates(prop, l); break ; default: awacs_init_frame_rates(prop, l); break ; } } /* find things/machines that can't do mac-io byteswap */ static void __init set_hw_byteswap(struct device_node *io) { struct device_node *mio ; unsigned int kl = 0 ; /* if seems that Keylargo can't byte-swap */ for (mio = io->parent; mio ; mio = mio->parent) { if (strcmp(mio->name, "mac-io") == 0) { if (device_is_compatible(mio, "Keylargo")) kl = 1; break; } } hw_can_byteswap = !kl; } /* Allocate the resources necessary for beep generation. This cannot be (quite) done statically (yet) because we cannot do virt_to_bus() on static vars when the code is loaded as a module. for the sake of saving the possibility that two allocations will incur the overhead of two pull-ups in DBDMA_ALIGN() we allocate the 'emergency' dmdma command here as well... even tho' it is not part of the beep process. */ int32_t __init setup_beep(void) { /* Initialize beep stuff */ /* want one cmd buffer for beeps, and a second one for emergencies - i.e. dbdma error conditions. ask for three to allow for pull up in DBDMA_ALIGN(). */ beep_dbdma_cmd_space = kmalloc((2 + 1) * sizeof(struct dbdma_cmd), GFP_KERNEL); if(beep_dbdma_cmd_space == NULL) { printk(KERN_ERR "dmasound_pmac: no beep dbdma cmd space\n") ; return -ENOMEM ; } beep_dbdma_cmd = (volatile struct dbdma_cmd *) DBDMA_ALIGN(beep_dbdma_cmd_space); /* set up emergency dbdma cmd */ emergency_dbdma_cmd = beep_dbdma_cmd+1 ; beep_buf = (short *) kmalloc(BEEP_BUFLEN * 4, GFP_KERNEL); if (beep_buf == NULL) { printk(KERN_ERR "dmasound_pmac: no memory for beep buffer\n"); kfree(beep_dbdma_cmd_space) ; return -ENOMEM ; } return 0 ; } static struct input_dev awacs_beep_dev = { .evbit = { BIT(EV_SND) }, .sndbit = { BIT(SND_BELL) | BIT(SND_TONE) }, .event = awacs_beep_event, .name = "dmasound beeper", .phys = "macio/input0", /* what the heck is this?? 
*/ .id = { .bustype = BUS_HOST, }, }; int __init dmasound_awacs_init(void) { struct device_node *io = NULL, *info = NULL; int vol, res; if (_machine != _MACH_Pmac) return -ENODEV; awacs_subframe = 0; awacs_revision = 0; hw_can_byteswap = 1 ; /* most can */ /* look for models we need to handle specially */ set_model() ; /* find the OF node that tells us about the dbdma stuff */ io = get_snd_io_node(); if (io == NULL) { #ifdef DEBUG_DMASOUND printk("dmasound_pmac: couldn't find sound io OF node\n"); #endif return -ENODEV ; } /* find the OF node that tells us about the sound sub-system * this doesn't exist on pre-davbus machines (earlier than 9500) */ if (awacs_revision != AWACS_AWACS) { /* set for pre-davbus */ info = get_snd_info_node(io) ; if (info == NULL){ #ifdef DEBUG_DMASOUND printk("dmasound_pmac: couldn't find 'sound' OF node\n"); #endif return -ENODEV ; } } awacs_revision = get_codec_type(info) ; if (awacs_revision == 0) { #ifdef DEBUG_DMASOUND printk("dmasound_pmac: couldn't find a Codec we can handle\n"); #endif return -ENODEV ; /* we don't know this type of h/w */ } /* set up perch, ziva, SRS or whatever else we have as sound * expansion. */ get_expansion_type(); /* we've now got enough information to make up the audio topology. * we will map the sound part of mac-io now so that we can probe for * other info if necessary (early AWACS we want to read chip ids) */ if (io->n_addrs < 3 || io->n_intrs < 3) { /* OK - maybe we need to use the 'awacs' node (on earlier * machines). */ if (awacs_node) { io = awacs_node ; if (io->n_addrs < 3 || io->n_intrs < 3) { printk("dmasound_pmac: can't use %s" " (%d addrs, %d intrs)\n", io->full_name, io->n_addrs, io->n_intrs); return -ENODEV; } } else { printk("dmasound_pmac: can't use %s (%d addrs, %d intrs)\n", io->full_name, io->n_addrs, io->n_intrs); } } if (!request_OF_resource(io, 0, NULL)) { printk(KERN_ERR "dmasound: can't request IO resource !\n"); return -ENODEV; } if (!request_OF_resource(io, 1, " (tx dma)")) { release_OF_resource(io, 0); printk(KERN_ERR "dmasound: can't request TX DMA resource !\n"); return -ENODEV; } if (!request_OF_resource(io, 2, " (rx dma)")) { release_OF_resource(io, 0); release_OF_resource(io, 1); printk(KERN_ERR "dmasound: can't request RX DMA resource !\n"); return -ENODEV; } /* all OF versions I've seen use this value */ if (i2s_node) i2s = ioremap(io->addrs[0].address, 0x1000); else awacs = ioremap(io->addrs[0].address, 0x1000); awacs_txdma = ioremap(io->addrs[1].address, 0x100); awacs_rxdma = ioremap(io->addrs[2].address, 0x100); /* first of all make sure that the chip is powered up....*/ pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, io, 0, 1); if (awacs_revision == AWACS_SCREAMER && awacs) awacs_recalibrate(); awacs_irq = io->intrs[0].line; awacs_tx_irq = io->intrs[1].line; awacs_rx_irq = io->intrs[2].line; /* Hack for legacy crap that will be killed someday */ awacs_node = io; /* if we have an awacs or screamer - probe the chip to make * sure we have the right revision. */ if (awacs_revision <= AWACS_SCREAMER){ uint32_t temp, rev, mfg ; /* find out the awacs revision from the chip */ temp = in_le32(&awacs->codec_stat); rev = (temp >> 12) & 0xf; mfg = (temp >> 8) & 0xf; #ifdef DEBUG_DMASOUND printk("dmasound_pmac: Awacs/Screamer Codec Mfct: %d Rev %d\n", mfg, rev); #endif if (rev >= AWACS_SCREAMER) awacs_revision = AWACS_SCREAMER ; else awacs_revision = rev ; } dmasound.mach = machPMac; /* find out other bits & pieces from OF, these may be present only on some models ... so be careful. 
*/ /* in the absence of a frame rates property we will use the defaults */ if (info) { unsigned int *prop, l; sound_device_id = 0; /* device ID appears post g3 b&w */ prop = (unsigned int *)get_property(info, "device-id", NULL); if (prop != 0) sound_device_id = *prop; /* look for a property saying what sample rates are available */ prop = (unsigned int *)get_property(info, "sample-rates", &l); if (prop == 0) prop = (unsigned int *) get_property (info, "output-frame-rates", &l); /* if it's there use it to set up frame rates */ init_frame_rates(prop, l) ; } if (awacs) out_le32(&awacs->control, 0x11); /* set everything quiesent */ set_hw_byteswap(io) ; /* figure out if the h/w can do it */ #ifdef CONFIG_NVRAM /* get default volume from nvram */ vol = ((pmac_xpram_read( 8 ) & 7 ) << 1 ); #else vol = 0; #endif /* set up tracking values */ spk_vol = vol * 100 ; spk_vol /= 7 ; /* get set value to a percentage */ spk_vol |= (spk_vol << 8) ; /* equal left & right */ line_vol = passthru_vol = spk_vol ; /* fill regs that are shared between AWACS & Burgundy */ awacs_reg[2] = vol + (vol << 6); awacs_reg[4] = vol + (vol << 6); awacs_reg[5] = vol + (vol << 6); /* screamer has loopthru vol control */ awacs_reg[6] = 0; /* maybe should be vol << 3 for PCMCIA speaker */ awacs_reg[7] = 0; awacs_reg[0] = MASK_MUX_CD; awacs_reg[1] = MASK_LOOPTHRU; /* FIXME: Only machines with external SRS module need MASK_PAROUT */ if (has_perch || sound_device_id == 0x5 || /*sound_device_id == 0x8 ||*/ sound_device_id == 0xb) awacs_reg[1] |= MASK_PAROUT0 | MASK_PAROUT1; switch (awacs_revision) { case AWACS_TUMBLER: tas_register_driver(&tas3001c_hooks); tas_init(I2C_DRIVERID_TAS3001C, I2C_DRIVERNAME_TAS3001C); tas_dmasound_init(); tas_post_init(); break ; case AWACS_SNAPPER: tas_register_driver(&tas3004_hooks); tas_init(I2C_DRIVERID_TAS3004,I2C_DRIVERNAME_TAS3004); tas_dmasound_init(); tas_post_init(); break; case AWACS_DACA: daca_init(); break; case AWACS_BURGUNDY: awacs_burgundy_init(); break ; case AWACS_SCREAMER: case AWACS_AWACS: default: load_awacs(); break ; } /* enable/set-up external modules - when we know how */ if (has_perch) awacs_enable_amp(100 * 0x101); /* Reset dbdma channels */ out_le32(&awacs_txdma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16); while (in_le32(&awacs_txdma->status) & RUN) udelay(1); out_le32(&awacs_rxdma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16); while (in_le32(&awacs_rxdma->status) & RUN) udelay(1); /* Initialize beep stuff */ if ((res=setup_beep())) return res ; #ifdef CONFIG_PM pmu_register_sleep_notifier(&awacs_sleep_notifier); #endif /* CONFIG_PM */ /* Powerbooks have odd ways of enabling inputs such as an expansion-bay CD or sound from an internal modem or a PC-card modem. */ if (is_pbook_3X00) { /* * Enable CD and PC-card sound inputs. * This is done by reading from address * f301a000, + 0x10 to enable the expansion-bay * CD sound input, + 0x80 to enable the PC-card * sound input. The 0x100 enables the SCSI bus * terminator power. */ latch_base = ioremap (0xf301a000, 0x1000); in_8(latch_base + 0x190); } else if (is_pbook_g3) { struct device_node* mio; macio_base = NULL; for (mio = io->parent; mio; mio = mio->parent) { if (strcmp(mio->name, "mac-io") == 0 && mio->n_addrs > 0) { macio_base = ioremap(mio->addrs[0].address, 0x40); break; } } /* * Enable CD sound input. * The relevant bits for writing to this byte are 0x8f. * I haven't found out what the 0x80 bit does. * For the 0xf bits, writing 3 or 7 enables the CD * input, any other value disables it. 
Values * 1, 3, 5, 7 enable the microphone. Values 0, 2, * 4, 6, 8 - f enable the input from the modem. * -- paulus. */ if (macio_base) out_8(macio_base + 0x37, 3); } if (hw_can_byteswap) dmasound.mach.hardware_afmts = (AFMT_S16_BE | AFMT_S16_LE) ; else dmasound.mach.hardware_afmts = AFMT_S16_BE ; /* shut out chips that do output only. * may need to extend this to machines which have no inputs - even tho' * they use screamer - IIRC one of the powerbooks is like this. */ if (awacs_revision != AWACS_DACA) { dmasound.mach.capabilities = DSP_CAP_DUPLEX ; dmasound.mach.record = PMacRecord ; } dmasound.mach.default_hard = def_hard ; dmasound.mach.default_soft = def_soft ; switch (awacs_revision) { case AWACS_BURGUNDY: sprintf(awacs_name, "PowerMac Burgundy ") ; break ; case AWACS_DACA: sprintf(awacs_name, "PowerMac DACA ") ; break ; case AWACS_TUMBLER: sprintf(awacs_name, "PowerMac Tumbler ") ; break ; case AWACS_SNAPPER: sprintf(awacs_name, "PowerMac Snapper ") ; break ; case AWACS_SCREAMER: sprintf(awacs_name, "PowerMac Screamer ") ; break ; case AWACS_AWACS: default: sprintf(awacs_name, "PowerMac AWACS rev %d ", awacs_revision) ; break ; } /* * XXX: we should handle errors here, but that would mean * rewriting the whole init code. later.. */ input_register_device(&awacs_beep_dev); return dmasound_init(); } static void __exit dmasound_awacs_cleanup(void) { input_unregister_device(&awacs_beep_dev); switch (awacs_revision) { case AWACS_TUMBLER: case AWACS_SNAPPER: tas_dmasound_cleanup(); tas_cleanup(); break ; case AWACS_DACA: daca_cleanup(); break; } dmasound_deinit(); } MODULE_DESCRIPTION("PowerMac built-in audio driver."); MODULE_LICENSE("GPL"); module_init(dmasound_awacs_init); module_exit(dmasound_awacs_cleanup);
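/*
 * Editor's illustration (not part of the driver above): a self-contained
 * sketch of the fixed-point arithmetic awacs_beep_event() uses to fill
 * beep_buf with a whole number of tone cycles, so that the single looping
 * dbdma command can replay the buffer without an audible click.  The helper
 * name fill_tone(), its parameter list and the 256-entry waveform table are
 * assumptions made for this sketch only.
 */
static int fill_tone(short *buf, int buflen, int srate, int hz,
		     const short wform[256], int vol)
{
	int period   = srate * 256 / hz;	/* samples per cycle, 8.8 fixed point  */
	int ncycles  = buflen * 256 / period;	/* whole cycles that fit in the buffer */
	int nsamples = (period * ncycles) >> 8;	/* samples actually written            */
	int f = ncycles * 65536 / nsamples;	/* per-sample phase step, 0.16 fixed point */
	int i, j = 0;

	for (i = 0; i < nsamples; ++i, buf += 2) {
		buf[0] = buf[1] = wform[j >> 8] * vol;	/* same sample on both channels */
		j = (j + f) & 0xffff;			/* wrap the phase accumulator   */
	}
	return nsamples;	/* caller programs the dbdma req_count as nsamples * 4 */
}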
repo_name: waterice/Test-Git
path: sound/oss/dmasound/dmasound_awacs.c
language: C
license: gpl-2.0
size: 86,835
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/i2c.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <mach/camera.h> #include <mach/msm_bus_board.h> #include <mach/gpiomux.h> #include <mach/socinfo.h> #include "../devices.h" #include "../board-8064.h" #ifdef CONFIG_MSM_CAMERA static struct gpiomux_setting cam_settings[] = { { .func = GPIOMUX_FUNC_GPIO, /*suspend*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }, { .func = GPIOMUX_FUNC_1, /*active 1*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_GPIO, /*active 2*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { #ifdef CONFIG_ZTEMT_CAMERA_COMMON //config GPIO_2's fuction 4:CAM_MCLK2 as CAM_MCLK for front camera now .func = GPIOMUX_FUNC_4, /*active 3*/ #else .func = GPIOMUX_FUNC_2, /*active 3*/ #endif .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_5, /*active 4*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_6, /*active 5*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_2, /*active 6*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_3, /*active 7*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_GPIO, /*i2c suspend*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_KEEPER, }, { .func = GPIOMUX_FUNC_9, /*active 9*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_A, /*active 10*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_6, /*active 11*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_4, /*active 12*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, }; static struct msm_gpiomux_config apq8064_cam_common_configs[] = { { .gpio = 1, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 2, .settings = { #ifdef CONFIG_ZTEMT_CAMERA_COMMON [GPIOMUX_ACTIVE] = &cam_settings[3], #else [GPIOMUX_ACTIVE] = &cam_settings[12], #endif [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 3, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, #ifdef CONFIG_ZTEMT_CAMERA_COMMON //GPIO4 is configed as "BOOT_CONFIG" now, the original config of //GPIO4 is CAM_MCLK1 for front camera, so remove this config here #else { .gpio = 4, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, #endif { .gpio = 5, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[1], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 34, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 107, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 10, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[9], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, { .gpio = 11, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[10], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, #ifdef 
CONFIG_ZTEMT_CAMERA_COMMON //GPIO12-13 are not for camera related use #else { .gpio = 12, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[11], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, { .gpio = 13, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[11], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, #endif }; #ifdef CONFIG_ZTEMT_CAMERA_COMMON #define VFE_CAMIF_TIMER1_GPIO 12 #else #define VFE_CAMIF_TIMER1_GPIO 3 #endif #define VFE_CAMIF_TIMER2_GPIO 1 static struct msm_camera_sensor_flash_src msm_flash_src = { .flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT, ._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO, ._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO, #ifdef CONFIG_ZTEMT_CAMERA_FLASH_LM3642 ._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_LM3642, #else ._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_SC628A, #endif }; static struct msm_gpiomux_config apq8064_cam_2d_configs[] = { }; static struct msm_bus_vectors cam_init_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_preview_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 27648000, .ib = 2656000000UL, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_video_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 600000000, .ib = 2656000000UL, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 206807040, .ib = 488816640, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_snapshot_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 600000000, .ib = 2656000000UL, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, }; static struct msm_bus_vectors cam_zsl_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 600000000, .ib = 2656000000UL, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, }; static struct msm_bus_vectors cam_video_ls_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 348192000, .ib = 617103360, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 206807040, .ib = 488816640, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, }; static struct msm_bus_vectors cam_dual_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 600000000, .ib = 2656000000UL, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 206807040, .ib = 488816640, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 43200000, .ib = 69120000, }, { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 43200000, .ib = 69120000, }, }; static struct msm_bus_vectors cam_low_power_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = 
MSM_BUS_SLAVE_EBI_CH0, .ab = 1451520, .ib = 3870720, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; static struct msm_bus_paths cam_bus_client_config[] = { { ARRAY_SIZE(cam_init_vectors), cam_init_vectors, }, { ARRAY_SIZE(cam_preview_vectors), cam_preview_vectors, }, { ARRAY_SIZE(cam_video_vectors), cam_video_vectors, }, { ARRAY_SIZE(cam_snapshot_vectors), cam_snapshot_vectors, }, { ARRAY_SIZE(cam_zsl_vectors), cam_zsl_vectors, }, { ARRAY_SIZE(cam_video_ls_vectors), cam_video_ls_vectors, }, { ARRAY_SIZE(cam_dual_vectors), cam_dual_vectors, }, { ARRAY_SIZE(cam_low_power_vectors), cam_low_power_vectors, }, }; static struct msm_bus_scale_pdata cam_bus_client_pdata = { cam_bus_client_config, ARRAY_SIZE(cam_bus_client_config), .name = "msm_camera", }; static struct msm_camera_device_platform_data msm_camera_csi_device_data[] = { { .csid_core = 0, .is_vpe = 1, .cam_bus_scale_table = &cam_bus_client_pdata, }, { .csid_core = 1, .is_vpe = 1, .cam_bus_scale_table = &cam_bus_client_pdata, }, }; static struct camera_vreg_t apq_8064_cam_vreg[] = { #ifdef CONFIG_IMX135 {"cam_vdig", REG_LDO, 1050000, 1050000, 105000}, #else {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, #endif {"cam_vio", REG_VS, 0, 0, 0}, {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vaf", REG_LDO, 2800000, 2850000, 300000}, }; #ifdef CONFIG_OV5648 static struct camera_vreg_t apq_8064_cam_vreg_ov5648[] = { {"cam_vio", REG_VS, 0, 0, 0,10}, {"cam_vana", REG_LDO, 2800000, 2850000, 85600,10}, }; #endif #define CAML_RSTN PM8921_GPIO_PM_TO_SYS(28) #define CAMR_RSTN 34 #ifdef CONFIG_OV5648 #define CAMR_PWD 36 //The PWD of front camera OV5648 for Z5MINI #endif #ifdef CONFIG_ZTEMT_IMX091_VCM_ENABLE #define BACK_CAM_PWDN_VCM 37 #endif static struct gpio apq8064_common_cam_gpio[] = { }; static struct gpio apq8064_back_cam_gpio[] = { {5, GPIOF_DIR_IN, "CAMIF_MCLK"}, {CAML_RSTN, GPIOF_DIR_OUT, "CAM_RESET"}, #ifdef CONFIG_ZTEMT_IMX091_VCM_ENABLE {BACK_CAM_PWDN_VCM,GPIOF_DIR_OUT,"BACK_CAM_PWDN_VCM"}, #endif }; static struct msm_gpio_set_tbl apq8064_back_cam_gpio_set_tbl[] = { {CAML_RSTN, GPIOF_OUT_INIT_LOW, 10000}, {CAML_RSTN, GPIOF_OUT_INIT_HIGH, 10000}, #ifdef CONFIG_ZTEMT_IMX091_VCM_ENABLE {BACK_CAM_PWDN_VCM, GPIOF_OUT_INIT_LOW, 10000}, {BACK_CAM_PWDN_VCM, GPIOF_OUT_INIT_HIGH, 10000}, #endif }; static struct msm_camera_gpio_conf apq8064_back_cam_gpio_conf = { .cam_gpiomux_conf_tbl = apq8064_cam_2d_configs, .cam_gpiomux_conf_tbl_size = ARRAY_SIZE(apq8064_cam_2d_configs), .cam_gpio_common_tbl = apq8064_common_cam_gpio, .cam_gpio_common_tbl_size = ARRAY_SIZE(apq8064_common_cam_gpio), .cam_gpio_req_tbl = apq8064_back_cam_gpio, .cam_gpio_req_tbl_size = ARRAY_SIZE(apq8064_back_cam_gpio), .cam_gpio_set_tbl = apq8064_back_cam_gpio_set_tbl, .cam_gpio_set_tbl_size = ARRAY_SIZE(apq8064_back_cam_gpio_set_tbl), }; static struct gpio apq8064_front_cam_gpio[] = { #ifdef CONFIG_ZTEMT_CAMERA_COMMON {2, GPIOF_DIR_IN, "CAMIF_MCLK"}, #else {4, GPIOF_DIR_IN, "CAMIF_MCLK"}, {12, GPIOF_DIR_IN, "CAMIF_I2C_DATA"}, {13, GPIOF_DIR_IN, "CAMIF_I2C_CLK"}, #endif {CAMR_RSTN, GPIOF_DIR_OUT, "CAM_RESET"}, #ifdef CONFIG_OV5648 {CAMR_PWD,GPIOF_DIR_OUT,"CAM_PWD"}, #endif }; static struct msm_gpio_set_tbl apq8064_front_cam_gpio_set_tbl[] = { #ifdef CONFIG_OV5648 {CAMR_PWD, GPIOF_OUT_INIT_LOW, 10000}, {CAMR_PWD, GPIOF_OUT_INIT_HIGH, 10000}, #endif {CAMR_RSTN, GPIOF_OUT_INIT_LOW, 10000}, {CAMR_RSTN, GPIOF_OUT_INIT_HIGH, 10000}, }; static struct 
msm_camera_gpio_conf apq8064_front_cam_gpio_conf = { .cam_gpiomux_conf_tbl = apq8064_cam_2d_configs, .cam_gpiomux_conf_tbl_size = ARRAY_SIZE(apq8064_cam_2d_configs), .cam_gpio_common_tbl = apq8064_common_cam_gpio, .cam_gpio_common_tbl_size = ARRAY_SIZE(apq8064_common_cam_gpio), .cam_gpio_req_tbl = apq8064_front_cam_gpio, .cam_gpio_req_tbl_size = ARRAY_SIZE(apq8064_front_cam_gpio), .cam_gpio_set_tbl = apq8064_front_cam_gpio_set_tbl, .cam_gpio_set_tbl_size = ARRAY_SIZE(apq8064_front_cam_gpio_set_tbl), }; static struct msm_camera_i2c_conf apq8064_back_cam_i2c_conf = { .use_i2c_mux = 1, .mux_dev = &msm8960_device_i2c_mux_gsbi4, .i2c_mux_mode = MODE_L, }; static struct i2c_board_info msm_act_main_cam_i2c_info = { I2C_BOARD_INFO("msm_actuator", 0x11), }; static struct msm_actuator_info msm_act_main_cam_0_info = { .board_info = &msm_act_main_cam_i2c_info, .cam_name = MSM_ACTUATOR_MAIN_CAM_0, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, .vcm_pwd = 0, .vcm_enable = 0, }; static struct i2c_board_info msm_act_main_cam1_i2c_info = { I2C_BOARD_INFO("msm_actuator", 0x18), }; static struct msm_actuator_info msm_act_main_cam_1_info = { .board_info = &msm_act_main_cam1_i2c_info, .cam_name = MSM_ACTUATOR_MAIN_CAM_1, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, .vcm_pwd = 0, .vcm_enable = 0, }; static struct msm_camera_i2c_conf apq8064_front_cam_i2c_conf = { .use_i2c_mux = 1, .mux_dev = &msm8960_device_i2c_mux_gsbi4, .i2c_mux_mode = MODE_L, }; static struct msm_camera_sensor_flash_data flash_imx135 = { #ifdef CONFIG_ZTEMT_CAMERA_FLASH_LM3642 .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src, #else .flash_type = MSM_CAMERA_FLASH_NONE, #endif }; static struct msm_camera_csi_lane_params imx135_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_platform_info sensor_board_info_imx135 = { .mount_angle = 90, .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_back_cam_gpio_conf, .i2c_conf = &apq8064_back_cam_i2c_conf, .csi_lane_params = &imx135_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_imx135_data = { .sensor_name = "imx135", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_imx135, .sensor_platform_info = &sensor_board_info_imx135, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, .actuator_info = &msm_act_main_cam_0_info, }; static struct msm_camera_sensor_flash_data flash_imx074 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src }; static struct msm_camera_csi_lane_params imx074_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_platform_info sensor_board_info_imx074 = { .mount_angle = 90, .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_back_cam_gpio_conf, .i2c_conf = &apq8064_back_cam_i2c_conf, .csi_lane_params = &imx074_csi_lane_params, }; static struct i2c_board_info imx074_eeprom_i2c_info = { I2C_BOARD_INFO("imx074_eeprom", 0x34 << 1), }; static struct msm_eeprom_info imx074_eeprom_info = { .board_info = &imx074_eeprom_i2c_info, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, }; static struct msm_camera_sensor_info msm_camera_sensor_imx074_data = { .sensor_name = "imx074", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_imx074, .sensor_platform_info = &sensor_board_info_imx074, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, .actuator_info = &msm_act_main_cam_0_info, .eeprom_info = 
&imx074_eeprom_info, }; static struct msm_camera_csi_lane_params imx091_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_flash_data flash_imx091 = { #ifdef CONFIG_ZTEMT_CAMERA_FLASH_LM3642 .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src, #else .flash_type = MSM_CAMERA_FLASH_NONE, #endif }; static struct msm_camera_sensor_platform_info sensor_board_info_imx091 = { #ifdef CONFIG_ZTEMT_CAMERA_COMMON .mount_angle = 90, #else .mount_angle = 0, #endif .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_back_cam_gpio_conf, .i2c_conf = &apq8064_back_cam_i2c_conf, .csi_lane_params = &imx091_csi_lane_params, }; static struct i2c_board_info imx091_eeprom_i2c_info = { I2C_BOARD_INFO("imx091_eeprom", 0x21), }; static struct msm_eeprom_info imx091_eeprom_info = { .board_info = &imx091_eeprom_i2c_info, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, }; static struct msm_camera_sensor_info msm_camera_sensor_imx091_data = { .sensor_name = "imx091", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_imx091, .sensor_platform_info = &sensor_board_info_imx091, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, .actuator_info = &msm_act_main_cam_1_info, .eeprom_info = &imx091_eeprom_info, }; static struct msm_camera_sensor_flash_data flash_s5k3l1yx = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_csi_lane_params s5k3l1yx_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_platform_info sensor_board_info_s5k3l1yx = { .mount_angle = 90, .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_back_cam_gpio_conf, .i2c_conf = &apq8064_back_cam_i2c_conf, .csi_lane_params = &s5k3l1yx_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_s5k3l1yx_data = { .sensor_name = "s5k3l1yx", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_s5k3l1yx, .sensor_platform_info = &sensor_board_info_s5k3l1yx, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, }; static struct msm_camera_sensor_flash_data flash_mt9m114 = { .flash_type = MSM_CAMERA_FLASH_NONE }; static struct msm_camera_csi_lane_params mt9m114_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0x1, }; static struct msm_camera_sensor_platform_info sensor_board_info_mt9m114 = { .mount_angle = 90, .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_front_cam_gpio_conf, .i2c_conf = &apq8064_front_cam_i2c_conf, .csi_lane_params = &mt9m114_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_mt9m114_data = { .sensor_name = "mt9m114", .pdata = &msm_camera_csi_device_data[1], .flash_data = &flash_mt9m114, .sensor_platform_info = &sensor_board_info_mt9m114, .csi_if = 1, .camera_type = FRONT_CAMERA_2D, .sensor_type = YUV_SENSOR, }; static struct msm_camera_sensor_flash_data flash_ov2720 = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_csi_lane_params ov2720_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0x3, }; static struct msm_camera_sensor_platform_info sensor_board_info_ov2720 = { .mount_angle = 0, .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_front_cam_gpio_conf, .i2c_conf = &apq8064_front_cam_i2c_conf, .csi_lane_params = &ov2720_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_ov2720_data = { 
.sensor_name = "ov2720", .pdata = &msm_camera_csi_device_data[1], .flash_data = &flash_ov2720, .sensor_platform_info = &sensor_board_info_ov2720, .csi_if = 1, .camera_type = FRONT_CAMERA_2D, .sensor_type = BAYER_SENSOR, }; #ifdef CONFIG_IMX132 static struct msm_camera_sensor_flash_data flash_imx132 = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_csi_lane_params imx132_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0x3, }; static struct msm_camera_sensor_platform_info sensor_board_info_imx132 = { .mount_angle = 270, .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_front_cam_gpio_conf, .i2c_conf = &apq8064_front_cam_i2c_conf, .csi_lane_params = &imx132_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_imx132_data = { .sensor_name = "imx132", .pdata = &msm_camera_csi_device_data[1], .flash_data = &flash_imx132, .sensor_platform_info = &sensor_board_info_imx132, .csi_if = 1, .camera_type = FRONT_CAMERA_2D, .sensor_type = BAYER_SENSOR, }; #endif #ifdef CONFIG_OV5648 static struct msm_camera_sensor_flash_data flash_ov5648 = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_csi_lane_params ov5648_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0x3, }; static struct msm_camera_sensor_platform_info sensor_board_info_ov5648 = { .mount_angle = 270, .cam_vreg = apq_8064_cam_vreg_ov5648, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg_ov5648), .gpio_conf = &apq8064_front_cam_gpio_conf, .i2c_conf = &apq8064_front_cam_i2c_conf, .csi_lane_params = &ov5648_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_ov5648_data = { .sensor_name = "ov5648", .pdata = &msm_camera_csi_device_data[1], .flash_data = &flash_ov5648, .sensor_platform_info = &sensor_board_info_ov5648, .csi_if = 1, .camera_type = FRONT_CAMERA_2D, .sensor_type = BAYER_SENSOR, }; #endif static struct platform_device msm_camera_server = { .name = "msm_cam_server", .id = 0, }; void __init apq8064_init_cam(void) { /* for SGLTE2 platform, do not configure i2c/gpiomux gsbi4 is used for * some other purpose */ if (socinfo_get_platform_subtype() != PLATFORM_SUBTYPE_SGLTE2) { msm_gpiomux_install(apq8064_cam_common_configs, ARRAY_SIZE(apq8064_cam_common_configs)); } if (machine_is_apq8064_cdp()) { sensor_board_info_imx074.mount_angle = 0; sensor_board_info_mt9m114.mount_angle = 0; } else if (machine_is_apq8064_liquid()) sensor_board_info_imx074.mount_angle = 180; platform_device_register(&msm_camera_server); if (socinfo_get_platform_subtype() != PLATFORM_SUBTYPE_SGLTE2) platform_device_register(&msm8960_device_i2c_mux_gsbi4); platform_device_register(&msm8960_device_csiphy0); platform_device_register(&msm8960_device_csiphy1); platform_device_register(&msm8960_device_csid0); platform_device_register(&msm8960_device_csid1); platform_device_register(&msm8960_device_ispif); platform_device_register(&msm8960_device_vfe); platform_device_register(&msm8960_device_vpe); } #ifdef CONFIG_I2C static struct i2c_board_info apq8064_camera_i2c_boardinfo[] = { { I2C_BOARD_INFO("imx074", 0x1A), .platform_data = &msm_camera_sensor_imx074_data, }, { I2C_BOARD_INFO("imx135", 0x20), .platform_data = &msm_camera_sensor_imx135_data, }, { I2C_BOARD_INFO("mt9m114", 0x48), .platform_data = &msm_camera_sensor_mt9m114_data, }, { I2C_BOARD_INFO("ov2720", 0x6C), .platform_data = &msm_camera_sensor_ov2720_data, }, { I2C_BOARD_INFO("sc628a", 0x6E), }, #ifdef CONFIG_ZTEMT_CAMERA_FLASH_LM3642 { I2C_BOARD_INFO("lm3642", 0x63),//camera 
flash led driver ic }, #endif { I2C_BOARD_INFO("imx091", 0x34), .platform_data = &msm_camera_sensor_imx091_data, }, { I2C_BOARD_INFO("s5k3l1yx", 0x21), //conflict with imx135, change from 0x20 to 0x21, tanyijun .platform_data = &msm_camera_sensor_s5k3l1yx_data, }, #ifdef CONFIG_IMX132 { I2C_BOARD_INFO("imx132", 0x6D),//use i2c slave write addr .platform_data = &msm_camera_sensor_imx132_data, }, #endif #ifdef CONFIG_OV5648 { I2C_BOARD_INFO("ov5648", 0x6d),//use i2c slave write addr .platform_data = &msm_camera_sensor_ov5648_data, }, #endif }; struct msm_camera_board_info apq8064_camera_board_info = { .board_info = apq8064_camera_i2c_boardinfo, .num_i2c_board_info = ARRAY_SIZE(apq8064_camera_i2c_boardinfo), }; #endif #endif
ztemt/Z5mini_H112_kernel
arch/arm/mach-msm/board/zte-camera.c
C
gpl-2.0
23,184
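The board file above only builds the apq8064_camera_board_info table; handing the sensor descriptors to the I2C core happens elsewhere in the platform code. A minimal sketch of that hand-off, under the assumption that the same GSBI4 bus id used by the EEPROM entries (APQ_8064_GSBI4_QUP_I2C_BUS_ID) is the right bus, is shown below. It is illustrative only, not the file's actual registration path.

#include <linux/i2c.h>
#include <linux/init.h>

/* Illustrative only: pass the sensor descriptors defined above to the
 * I2C core so the "imx091", "mt9m114", ... drivers can bind at probe
 * time.  The bus id is an assumption borrowed from imx091_eeprom_info;
 * real board code does this during machine init, before the GSBI4
 * adapter is registered. */
static int __init example_register_camera_i2c(void)
{
	return i2c_register_board_info(APQ_8064_GSBI4_QUP_I2C_BUS_ID,
				       apq8064_camera_i2c_boardinfo,
				       ARRAY_SIZE(apq8064_camera_i2c_boardinfo));
}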
/* * linux/lib/vsprintf.c * * Copyright (C) 1991, 1992 Linus Torvalds * (C) Copyright 2000-2009 * Wolfgang Denk, DENX Software Engineering, wd@denx.de. */ /* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */ /* * Wirzenius wrote this portably, Torvalds fucked it up :-) * * from hush: simple_itoa() was lifted from boa-0.93.15 */ #include <common.h> #include <charset.h> #include <efi_loader.h> #include <div64.h> #include <hexdump.h> #include <stdarg.h> #include <uuid.h> #include <vsprintf.h> #include <linux/ctype.h> #include <linux/err.h> #include <linux/types.h> #include <linux/string.h> /* we use this so that we can do without the ctype library */ #define is_digit(c) ((c) >= '0' && (c) <= '9') static int skip_atoi(const char **s) { int i = 0; while (is_digit(**s)) i = i * 10 + *((*s)++) - '0'; return i; } /* Decimal conversion is by far the most typical, and is used * for /proc and /sys data. This directly impacts e.g. top performance * with many processes running. We optimize it for speed * using code from * http://www.cs.uiowa.edu/~jones/bcd/decimal.html * (with permission from the author, Douglas W. Jones). */ /* Formats correctly any integer in [0,99999]. * Outputs from one to five digits depending on input. * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ static char *put_dec_trunc(char *buf, unsigned q) { unsigned d3, d2, d1, d0; d1 = (q>>4) & 0xf; d2 = (q>>8) & 0xf; d3 = (q>>12); d0 = 6*(d3 + d2 + d1) + (q & 0xf); q = (d0 * 0xcd) >> 11; d0 = d0 - 10*q; *buf++ = d0 + '0'; /* least significant digit */ d1 = q + 9*d3 + 5*d2 + d1; if (d1 != 0) { q = (d1 * 0xcd) >> 11; d1 = d1 - 10*q; *buf++ = d1 + '0'; /* next digit */ d2 = q + 2*d2; if ((d2 != 0) || (d3 != 0)) { q = (d2 * 0xd) >> 7; d2 = d2 - 10*q; *buf++ = d2 + '0'; /* next digit */ d3 = q + 4*d3; if (d3 != 0) { q = (d3 * 0xcd) >> 11; d3 = d3 - 10*q; *buf++ = d3 + '0'; /* next digit */ if (q != 0) *buf++ = q + '0'; /* most sign. digit */ } } } return buf; } /* Same with if's removed. Always emits five digits */ static char *put_dec_full(char *buf, unsigned q) { /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ /* but anyway, gcc produces better code with full-sized ints */ unsigned d3, d2, d1, d0; d1 = (q>>4) & 0xf; d2 = (q>>8) & 0xf; d3 = (q>>12); /* * Possible ways to approx. 
divide by 10 * gcc -O2 replaces multiply with shifts and adds * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) * (x * 0x67) >> 10: 1100111 * (x * 0x34) >> 9: 110100 - same * (x * 0x1a) >> 8: 11010 - same * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) */ d0 = 6*(d3 + d2 + d1) + (q & 0xf); q = (d0 * 0xcd) >> 11; d0 = d0 - 10*q; *buf++ = d0 + '0'; d1 = q + 9*d3 + 5*d2 + d1; q = (d1 * 0xcd) >> 11; d1 = d1 - 10*q; *buf++ = d1 + '0'; d2 = q + 2*d2; q = (d2 * 0xd) >> 7; d2 = d2 - 10*q; *buf++ = d2 + '0'; d3 = q + 4*d3; q = (d3 * 0xcd) >> 11; /* - shorter code */ /* q = (d3 * 0x67) >> 10; - would also work */ d3 = d3 - 10*q; *buf++ = d3 + '0'; *buf++ = q + '0'; return buf; } /* No inlining helps gcc to use registers better */ static noinline char *put_dec(char *buf, uint64_t num) { while (1) { unsigned rem; if (num < 100000) return put_dec_trunc(buf, num); rem = do_div(num, 100000); buf = put_dec_full(buf, rem); } } #define ZEROPAD 1 /* pad with zero */ #define SIGN 2 /* unsigned/signed long */ #define PLUS 4 /* show plus */ #define SPACE 8 /* space if plus */ #define LEFT 16 /* left justified */ #define SMALL 32 /* Must be 32 == 0x20 */ #define SPECIAL 64 /* 0x */ /* * Macro to add a new character to our output string, but only if it will * fit. The macro moves to the next character position in the output string. */ #define ADDCH(str, ch) do { \ if ((str) < end) \ *(str) = (ch); \ ++str; \ } while (0) static char *number(char *buf, char *end, u64 num, int base, int size, int precision, int type) { /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ static const char digits[16] = "0123456789ABCDEF"; char tmp[66]; char sign; char locase; int need_pfx = ((type & SPECIAL) && base != 10); int i; /* locase = 0 or 0x20. ORing digits or letters with 'locase' * produces same digits or (maybe lowercased) letters */ locase = (type & SMALL); if (type & LEFT) type &= ~ZEROPAD; sign = 0; if (type & SIGN) { if ((s64) num < 0) { sign = '-'; num = -(s64) num; size--; } else if (type & PLUS) { sign = '+'; size--; } else if (type & SPACE) { sign = ' '; size--; } } if (need_pfx) { size--; if (base == 16) size--; } /* generate full string in tmp[], in reverse order */ i = 0; if (num == 0) tmp[i++] = '0'; /* Generic code, for any base: else do { tmp[i++] = (digits[do_div(num,base)] | locase); } while (num != 0); */ else if (base != 10) { /* 8 or 16 */ int mask = base - 1; int shift = 3; if (base == 16) shift = 4; do { tmp[i++] = (digits[((unsigned char)num) & mask] | locase); num >>= shift; } while (num); } else { /* base 10 */ i = put_dec(tmp, num) - tmp; } /* printing 100 using %2d gives "100", not "00" */ if (i > precision) precision = i; /* leading space padding */ size -= precision; if (!(type & (ZEROPAD + LEFT))) { while (--size >= 0) ADDCH(buf, ' '); } /* sign */ if (sign) ADDCH(buf, sign); /* "0x" / "0" prefix */ if (need_pfx) { ADDCH(buf, '0'); if (base == 16) ADDCH(buf, 'X' | locase); } /* zero or space padding */ if (!(type & LEFT)) { char c = (type & ZEROPAD) ? '0' : ' '; while (--size >= 0) ADDCH(buf, c); } /* hmm even more zero padding? 
*/ while (i <= --precision) ADDCH(buf, '0'); /* actual digits of result */ while (--i >= 0) ADDCH(buf, tmp[i]); /* trailing space padding */ while (--size >= 0) ADDCH(buf, ' '); return buf; } static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags) { int len, i; if (s == NULL) s = "<NULL>"; len = strnlen(s, precision); if (!(flags & LEFT)) while (len < field_width--) ADDCH(buf, ' '); for (i = 0; i < len; ++i) ADDCH(buf, *s++); while (len < field_width--) ADDCH(buf, ' '); return buf; } /* U-Boot uses UTF-16 strings in the EFI context only. */ #if CONFIG_IS_ENABLED(EFI_LOADER) && !defined(API_BUILD) static char *string16(char *buf, char *end, u16 *s, int field_width, int precision, int flags) { const u16 *str = s ? s : L"<NULL>"; ssize_t i, len = utf16_strnlen(str, precision); if (!(flags & LEFT)) for (; len < field_width; --field_width) ADDCH(buf, ' '); for (i = 0; i < len && buf + utf16_utf8_strnlen(str, 1) <= end; ++i) { s32 s = utf16_get(&str); if (s < 0) s = '?'; utf8_put(s, &buf); } for (; len < field_width; --field_width) ADDCH(buf, ' '); return buf; } #if CONFIG_IS_ENABLED(EFI_DEVICE_PATH_TO_TEXT) static char *device_path_string(char *buf, char *end, void *dp, int field_width, int precision, int flags) { u16 *str; /* If dp == NULL output the string '<NULL>' */ if (!dp) return string16(buf, end, dp, field_width, precision, flags); str = efi_dp_str((struct efi_device_path *)dp); if (!str) return ERR_PTR(-ENOMEM); buf = string16(buf, end, str, field_width, precision, flags); efi_free_pool(str); return buf; } #endif #endif static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width, int precision, int flags) { /* (6 * 2 hex digits), 5 colons and trailing zero */ char mac_addr[6 * 3]; char *p = mac_addr; int i; for (i = 0; i < 6; i++) { p = hex_byte_pack(p, addr[i]); if (!(flags & SPECIAL) && i != 5) *p++ = ':'; } *p = '\0'; return string(buf, end, mac_addr, field_width, precision, flags & ~SPECIAL); } static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width, int precision, int flags) { /* (8 * 4 hex digits), 7 colons and trailing zero */ char ip6_addr[8 * 5]; char *p = ip6_addr; int i; for (i = 0; i < 8; i++) { p = hex_byte_pack(p, addr[2 * i]); p = hex_byte_pack(p, addr[2 * i + 1]); if (!(flags & SPECIAL) && i != 7) *p++ = ':'; } *p = '\0'; return string(buf, end, ip6_addr, field_width, precision, flags & ~SPECIAL); } static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width, int precision, int flags) { /* (4 * 3 decimal digits), 3 dots and trailing zero */ char ip4_addr[4 * 4]; char temp[3]; /* hold each IP quad in reverse order */ char *p = ip4_addr; int i, digits; for (i = 0; i < 4; i++) { digits = put_dec_trunc(temp, addr[i]) - temp; /* reverse the digits in the quad */ while (digits--) *p++ = temp[digits]; if (i != 3) *p++ = '.'; } *p = '\0'; return string(buf, end, ip4_addr, field_width, precision, flags & ~SPECIAL); } #ifdef CONFIG_LIB_UUID /* * This works (roughly) the same way as Linux's. 
* * %pUb: 01020304-0506-0708-090a-0b0c0d0e0f10 * %pUB: 01020304-0506-0708-090A-0B0C0D0E0F10 * %pUl: 04030201-0605-0807-090a-0b0c0d0e0f10 * %pUL: 04030201-0605-0807-090A-0B0C0D0E0F10 */ static char *uuid_string(char *buf, char *end, u8 *addr, int field_width, int precision, int flags, const char *fmt) { char uuid[UUID_STR_LEN + 1]; int str_format; switch (*(++fmt)) { case 'L': str_format = UUID_STR_FORMAT_GUID | UUID_STR_UPPER_CASE; break; case 'l': str_format = UUID_STR_FORMAT_GUID; break; case 'B': str_format = UUID_STR_FORMAT_STD | UUID_STR_UPPER_CASE; break; default: str_format = UUID_STR_FORMAT_STD; break; } if (addr) uuid_bin_to_str(addr, uuid, str_format); else strcpy(uuid, "<NULL>"); return string(buf, end, uuid, field_width, precision, flags); } #endif /* * Show a '%p' thing. A kernel extension is that the '%p' is followed * by an extra set of alphanumeric characters that are extended format * specifiers. * * Right now we handle: * * - 'M' For a 6-byte MAC address, it prints the address in the * usual colon-separated hex notation * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way (dot-separated * decimal for v4 and colon separated network-order 16 bit hex for v6) * - 'i' [46] for 'raw' IPv4/IPv6 addresses, IPv6 omits the colons, IPv4 is * currently the same * * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 * function pointers are really function descriptors, which contain a * pointer to the real address. */ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) { u64 num = (uintptr_t)ptr; /* * Being a boot loader, we explicitly allow pointers to * (physical) address null. */ #if 0 if (!ptr) return string(buf, end, "(null)", field_width, precision, flags); #endif switch (*fmt) { /* Device paths only exist in the EFI context. */ #if CONFIG_IS_ENABLED(EFI_DEVICE_PATH_TO_TEXT) && !defined(API_BUILD) case 'D': return device_path_string(buf, end, ptr, field_width, precision, flags); #endif case 'a': flags |= SPECIAL | ZEROPAD; switch (fmt[1]) { case 'p': default: field_width = sizeof(phys_addr_t) * 2 + 2; num = *(phys_addr_t *)ptr; break; } break; case 'm': flags |= SPECIAL; /* Fallthrough */ case 'M': return mac_address_string(buf, end, ptr, field_width, precision, flags); case 'i': flags |= SPECIAL; /* Fallthrough */ case 'I': if (fmt[1] == '6') return ip6_addr_string(buf, end, ptr, field_width, precision, flags); if (fmt[1] == '4') return ip4_addr_string(buf, end, ptr, field_width, precision, flags); flags &= ~SPECIAL; break; #ifdef CONFIG_LIB_UUID case 'U': return uuid_string(buf, end, ptr, field_width, precision, flags, fmt); #endif default: break; } flags |= SMALL; if (field_width == -1) { field_width = 2*sizeof(void *); flags |= ZEROPAD; } return number(buf, end, num, 16, field_width, precision, flags); } static int vsnprintf_internal(char *buf, size_t size, const char *fmt, va_list args) { u64 num; int base; char *str; int flags; /* flags to number() */ int field_width; /* width of output field */ int precision; /* min. # of digits for integers; max number of chars for from string */ int qualifier; /* 'h', 'l', or 'L' for integer fields */ /* 'z' support added 23/7/1999 S.H. */ /* 'z' changed to 'Z' --davidm 1/25/99 */ /* 't' added for ptrdiff_t */ char *end = buf + size; /* Make sure end is always >= buf - do we want this in U-Boot? 
*/ if (end < buf) { end = ((void *)-1); size = end - buf; } str = buf; for (; *fmt ; ++fmt) { if (*fmt != '%') { ADDCH(str, *fmt); continue; } /* process flags */ flags = 0; repeat: ++fmt; /* this also skips first '%' */ switch (*fmt) { case '-': flags |= LEFT; goto repeat; case '+': flags |= PLUS; goto repeat; case ' ': flags |= SPACE; goto repeat; case '#': flags |= SPECIAL; goto repeat; case '0': flags |= ZEROPAD; goto repeat; } /* get field width */ field_width = -1; if (is_digit(*fmt)) field_width = skip_atoi(&fmt); else if (*fmt == '*') { ++fmt; /* it's the next argument */ field_width = va_arg(args, int); if (field_width < 0) { field_width = -field_width; flags |= LEFT; } } /* get the precision */ precision = -1; if (*fmt == '.') { ++fmt; if (is_digit(*fmt)) precision = skip_atoi(&fmt); else if (*fmt == '*') { ++fmt; /* it's the next argument */ precision = va_arg(args, int); } if (precision < 0) precision = 0; } /* get the conversion qualifier */ qualifier = -1; if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { qualifier = *fmt; ++fmt; if (qualifier == 'l' && *fmt == 'l') { qualifier = 'L'; ++fmt; } } /* default base */ base = 10; switch (*fmt) { case 'c': if (!(flags & LEFT)) { while (--field_width > 0) ADDCH(str, ' '); } ADDCH(str, (unsigned char) va_arg(args, int)); while (--field_width > 0) ADDCH(str, ' '); continue; case 's': /* U-Boot uses UTF-16 strings in the EFI context only. */ #if CONFIG_IS_ENABLED(EFI_LOADER) && !defined(API_BUILD) if (qualifier == 'l') { str = string16(str, end, va_arg(args, u16 *), field_width, precision, flags); } else #endif { str = string(str, end, va_arg(args, char *), field_width, precision, flags); } continue; case 'p': str = pointer(fmt + 1, str, end, va_arg(args, void *), field_width, precision, flags); if (IS_ERR(str)) return PTR_ERR(str); /* Skip all alphanumeric pointer suffixes */ while (isalnum(fmt[1])) fmt++; continue; case 'n': if (qualifier == 'l') { long *ip = va_arg(args, long *); *ip = (str - buf); } else { int *ip = va_arg(args, int *); *ip = (str - buf); } continue; case '%': ADDCH(str, '%'); continue; /* integer number formats - set up the flags and "break" */ case 'o': base = 8; break; case 'x': flags |= SMALL; case 'X': base = 16; break; case 'd': case 'i': flags |= SIGN; case 'u': break; default: ADDCH(str, '%'); if (*fmt) ADDCH(str, *fmt); else --fmt; continue; } if (qualifier == 'L') /* "quad" for 64 bit variables */ num = va_arg(args, unsigned long long); else if (qualifier == 'l') { num = va_arg(args, unsigned long); if (flags & SIGN) num = (signed long) num; } else if (qualifier == 'Z' || qualifier == 'z') { num = va_arg(args, size_t); } else if (qualifier == 't') { num = va_arg(args, ptrdiff_t); } else if (qualifier == 'h') { num = (unsigned short) va_arg(args, int); if (flags & SIGN) num = (signed short) num; } else { num = va_arg(args, unsigned int); if (flags & SIGN) num = (signed int) num; } str = number(str, end, num, base, field_width, precision, flags); } if (size > 0) { ADDCH(str, '\0'); if (str > end) end[-1] = '\0'; --str; } /* the trailing null byte doesn't count towards the total */ return str - buf; } int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) { return vsnprintf_internal(buf, size, fmt, args); } int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) { int i; i = vsnprintf(buf, size, fmt, args); if (likely(i < size)) return i; if (size != 0) return size - 1; return 0; } int snprintf(char *buf, size_t size, const char *fmt, 
...) { va_list args; int i; va_start(args, fmt); i = vsnprintf(buf, size, fmt, args); va_end(args); return i; } int scnprintf(char *buf, size_t size, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); i = vscnprintf(buf, size, fmt, args); va_end(args); return i; } /** * Format a string and place it in a buffer (va_list version) * * @param buf The buffer to place the result into * @param fmt The format string to use * @param args Arguments for the format string * * The function returns the number of characters written * into @buf. Use vsnprintf() or vscnprintf() in order to avoid * buffer overflows. * * If you're not already dealing with a va_list consider using sprintf(). */ int vsprintf(char *buf, const char *fmt, va_list args) { return vsnprintf_internal(buf, INT_MAX, fmt, args); } int sprintf(char *buf, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); i = vsprintf(buf, fmt, args); va_end(args); return i; } #if CONFIG_IS_ENABLED(PRINTF) int printf(const char *fmt, ...) { va_list args; uint i; char printbuffer[CONFIG_SYS_PBSIZE]; va_start(args, fmt); /* * For this to work, printbuffer must be larger than * anything we ever want to print. */ i = vscnprintf(printbuffer, sizeof(printbuffer), fmt, args); va_end(args); /* Handle error */ if (i <= 0) return i; /* Print the string */ puts(printbuffer); return i; } int vprintf(const char *fmt, va_list args) { uint i; char printbuffer[CONFIG_SYS_PBSIZE]; /* * For this to work, printbuffer must be larger than * anything we ever want to print. */ i = vscnprintf(printbuffer, sizeof(printbuffer), fmt, args); /* Handle error */ if (i <= 0) return i; /* Print the string */ puts(printbuffer); return i; } #endif char *simple_itoa(ulong i) { /* 21 digits plus null terminator, good for 64-bit or smaller ints */ static char local[22]; char *p = &local[21]; *p-- = '\0'; do { *p-- = '0' + i % 10; i /= 10; } while (i > 0); return p + 1; } /* We don't seem to have %'d in U-Boot */ void print_grouped_ull(unsigned long long int_val, int digits) { char str[21], *s; int grab = 3; digits = (digits + 2) / 3; sprintf(str, "%*llu", digits * 3, int_val); for (s = str; *s; s += grab) { if (s != str) putc(s[-1] != ' ' ? ',' : ' '); printf("%.*s", grab, s); grab = 3; } } bool str2off(const char *p, loff_t *num) { char *endptr; *num = simple_strtoull(p, &endptr, 16); return *p != '\0' && *endptr == '\0'; } bool str2long(const char *p, ulong *num) { char *endptr; *num = simple_strtoul(p, &endptr, 16); return *p != '\0' && *endptr == '\0'; } char *strmhz(char *buf, unsigned long hz) { long l, n; long m; n = DIV_ROUND_CLOSEST(hz, 1000) / 1000L; l = sprintf(buf, "%ld", n); hz -= n * 1000000L; m = DIV_ROUND_CLOSEST(hz, 1000L); if (m != 0) sprintf(buf + l, ".%03ld", m); return buf; }
Digilent/u-boot-digilent
lib/vsprintf.c
C
gpl-2.0
19,676
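put_dec_trunc() and put_dec_full() above lean on the cheap reciprocal (x * 0xcd) >> 11 standing in for x / 10. A hosted-C desk check of that identity is sketched below; the 0..1028 range is my own observation of where the approximation stays exact (the helpers only ever feed it much smaller values), not a figure quoted in the source.

#include <assert.h>
#include <stdio.h>

/* Exhaustively confirm that the multiply-and-shift used by the decimal
 * helpers really equals x / 10 on the inputs they produce.  The upper
 * bound of 1028 is an assumption verified by this loop. */
int main(void)
{
	unsigned int x;

	for (x = 0; x <= 1028; x++)
		assert(((x * 0xcd) >> 11) == x / 10);

	printf("(x * 0xcd) >> 11 == x / 10 for all x in [0, 1028]\n");
	return 0;
}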
/* * SPU file system -- SPU context management * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Arnd Bergmann <arndb@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/spu.h> #include <asm/spu_csa.h> #include "spufs.h" struct spu_context *alloc_spu_context(void) { struct spu_context *ctx; ctx = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx) goto out; /* Binding to physical processor deferred * until spu_activate(). */ spu_init_csa(&ctx->csa); if (!ctx->csa.lscsa) { goto out_free; } spin_lock_init(&ctx->mmio_lock); kref_init(&ctx->kref); init_rwsem(&ctx->state_sema); init_MUTEX(&ctx->run_sema); init_waitqueue_head(&ctx->ibox_wq); init_waitqueue_head(&ctx->wbox_wq); init_waitqueue_head(&ctx->stop_wq); init_waitqueue_head(&ctx->mfc_wq); ctx->state = SPU_STATE_SAVED; ctx->ops = &spu_backing_ops; ctx->owner = get_task_mm(current); goto out; out_free: kfree(ctx); ctx = NULL; out: return ctx; } void destroy_spu_context(struct kref *kref) { struct spu_context *ctx; ctx = container_of(kref, struct spu_context, kref); down_write(&ctx->state_sema); spu_deactivate(ctx); up_write(&ctx->state_sema); spu_fini_csa(&ctx->csa); kfree(ctx); } struct spu_context * get_spu_context(struct spu_context *ctx) { kref_get(&ctx->kref); return ctx; } int put_spu_context(struct spu_context *ctx) { return kref_put(&ctx->kref, &destroy_spu_context); } /* give up the mm reference when the context is about to be destroyed */ void spu_forget(struct spu_context *ctx) { struct mm_struct *mm; spu_acquire_saved(ctx); mm = ctx->owner; ctx->owner = NULL; mmput(mm); spu_release(ctx); } void spu_acquire(struct spu_context *ctx) { down_read(&ctx->state_sema); } void spu_release(struct spu_context *ctx) { up_read(&ctx->state_sema); } void spu_unmap_mappings(struct spu_context *ctx) { if (ctx->local_store) unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1); if (ctx->mfc) unmap_mapping_range(ctx->mfc, 0, 0x4000, 1); if (ctx->cntl) unmap_mapping_range(ctx->cntl, 0, 0x4000, 1); if (ctx->signal1) unmap_mapping_range(ctx->signal1, 0, 0x4000, 1); if (ctx->signal2) unmap_mapping_range(ctx->signal2, 0, 0x4000, 1); } int spu_acquire_runnable(struct spu_context *ctx) { int ret = 0; down_read(&ctx->state_sema); if (ctx->state == SPU_STATE_RUNNABLE) { ctx->spu->prio = current->prio; return 0; } up_read(&ctx->state_sema); down_write(&ctx->state_sema); /* ctx is about to be freed, can't acquire any more */ if (!ctx->owner) { ret = -EINVAL; goto out; } if (ctx->state == SPU_STATE_SAVED) { ret = spu_activate(ctx, 0); if (ret) goto out; ctx->state = SPU_STATE_RUNNABLE; } downgrade_write(&ctx->state_sema); /* On success, we return holding the lock */ return ret; out: /* Release here, to simplify calling code. 
*/ up_write(&ctx->state_sema); return ret; } void spu_acquire_saved(struct spu_context *ctx) { down_read(&ctx->state_sema); if (ctx->state == SPU_STATE_SAVED) return; up_read(&ctx->state_sema); down_write(&ctx->state_sema); if (ctx->state == SPU_STATE_RUNNABLE) { spu_deactivate(ctx); ctx->state = SPU_STATE_SAVED; } downgrade_write(&ctx->state_sema); }
zhoupeng/spice4xen
linux-2.6.18-xen.hg/arch/powerpc/platforms/cell/spufs/context.c
C
gpl-2.0
3,935
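get_spu_context()/put_spu_context() above are a textbook kref pattern: the last put runs destroy_spu_context(). A small userspace analogue of that lifetime rule is sketched below; the names and the single-threaded counter are invented for illustration and are not the spufs code (a real kref uses an atomic counter).

#include <stdio.h>
#include <stdlib.h>

/* Userspace analogue of kref_init()/kref_get()/kref_put(): the caller
 * that drops the final reference is the one that frees the object. */
struct ctx {
	int refcount;
};

static struct ctx *ctx_get(struct ctx *c)
{
	c->refcount++;
	return c;
}

static int ctx_put(struct ctx *c)
{
	if (--c->refcount == 0) {
		printf("last reference dropped, freeing context\n");
		free(c);
		return 1;	/* object destroyed, like kref_put() returning 1 */
	}
	return 0;
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	c->refcount = 1;	/* kref_init() starts the count at 1 */
	ctx_get(c);		/* a second user takes a reference */
	ctx_put(c);		/* still alive */
	ctx_put(c);		/* destroyed here */
	return 0;
}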
#if defined HAVE_FMA4_SUPPORT || defined HAVE_AVX_SUPPORT
# include <init-arch.h>
# include <math.h>
# include <math_private.h>

extern double __ieee754_atan2_sse2 (double, double);
extern double __ieee754_atan2_avx (double, double);
# ifdef HAVE_FMA4_SUPPORT
extern double __ieee754_atan2_fma4 (double, double);
# else
# undef HAS_FMA4
# define HAS_FMA4 0
# define __ieee754_atan2_fma4 ((void *) 0)
# endif

libm_ifunc (__ieee754_atan2,
	    HAS_FMA4 ? __ieee754_atan2_fma4
	    : (HAS_AVX ? __ieee754_atan2_avx : __ieee754_atan2_sse2));
strong_alias (__ieee754_atan2, __atan2_finite)

# define __ieee754_atan2 __ieee754_atan2_sse2
#endif

#include <sysdeps/ieee754/dbl-64/e_atan2.c>
tripleee/glibc-en-150
sysdeps/x86_64/fpu/multiarch/e_atan2.c
C
gpl-2.0
689
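libm_ifunc() above resolves __ieee754_atan2 once, at dynamic-link time, to the FMA4, AVX or SSE2 variant. The portable sketch below mimics that dispatch with an ordinary constructor and a function pointer; the implementations are placeholders, the AVX probe uses the x86 GCC builtins as an assumption, and this is not how glibc itself wires it up.

#include <math.h>
#include <stdio.h>

/* Placeholder implementations; the real file dispatches between
 * __ieee754_atan2_sse2 / _avx / _fma4. */
static double my_atan2_generic(double y, double x) { return atan2(y, x); }
static double my_atan2_fast(double y, double x)    { return atan2(y, x); }

static double (*my_atan2)(double, double) = my_atan2_generic;

/* Runs once before main(), roughly when an ifunc resolver would run. */
__attribute__((constructor))
static void pick_atan2(void)
{
	__builtin_cpu_init();			/* x86 GCC builtin, assumed available */
	if (__builtin_cpu_supports("avx"))	/* stands in for HAS_AVX */
		my_atan2 = my_atan2_fast;
}

int main(void)
{
	printf("atan2(1, 1) = %f\n", my_atan2(1.0, 1.0));
	return 0;
}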
/* * Bt8xx based DVB adapter driver * * Copyright (C) 2002,2003 Florian Schirmer <jolt@tuxbox.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/bitops.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/i2c.h> #include "dmxdev.h" #include "dvbdev.h" #include "dvb_demux.h" #include "dvb_frontend.h" #include "dvb-bt8xx.h" #include "bt878.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); #define dprintk( args... ) \ do { \ if (debug) printk(KERN_DEBUG args); \ } while (0) static void dvb_bt8xx_task(unsigned long data) { struct dvb_bt8xx_card *card = (struct dvb_bt8xx_card *)data; //printk("%d ", card->bt->finished_block); while (card->bt->last_block != card->bt->finished_block) { (card->bt->TS_Size ? dvb_dmx_swfilter_204 : dvb_dmx_swfilter) (&card->demux, &card->bt->buf_cpu[card->bt->last_block * card->bt->block_bytes], card->bt->block_bytes); card->bt->last_block = (card->bt->last_block + 1) % card->bt->block_count; } } static int dvb_bt8xx_start_feed(struct dvb_demux_feed *dvbdmxfeed) { struct dvb_demux *dvbdmx = dvbdmxfeed->demux; struct dvb_bt8xx_card *card = dvbdmx->priv; int rc; dprintk("dvb_bt8xx: start_feed\n"); if (!dvbdmx->dmx.frontend) return -EINVAL; down(&card->lock); card->nfeeds++; rc = card->nfeeds; if (card->nfeeds == 1) bt878_start(card->bt, card->gpio_mode, card->op_sync_orin, card->irq_err_ignore); up(&card->lock); return rc; } static int dvb_bt8xx_stop_feed(struct dvb_demux_feed *dvbdmxfeed) { struct dvb_demux *dvbdmx = dvbdmxfeed->demux; struct dvb_bt8xx_card *card = dvbdmx->priv; dprintk("dvb_bt8xx: stop_feed\n"); if (!dvbdmx->dmx.frontend) return -EINVAL; down(&card->lock); card->nfeeds--; if (card->nfeeds == 0) bt878_stop(card->bt); up(&card->lock); return 0; } static int is_pci_slot_eq(struct pci_dev* adev, struct pci_dev* bdev) { if ((adev->subsystem_vendor == bdev->subsystem_vendor) && (adev->subsystem_device == bdev->subsystem_device) && (adev->bus->number == bdev->bus->number) && (PCI_SLOT(adev->devfn) == PCI_SLOT(bdev->devfn))) return 1; return 0; } static struct bt878 __init *dvb_bt8xx_878_match(unsigned int bttv_nr, struct pci_dev* bttv_pci_dev) { unsigned int card_nr; /* Hmm, n squared. 
Hope n is small */ for (card_nr = 0; card_nr < bt878_num; card_nr++) { if (is_pci_slot_eq(bt878[card_nr].dev, bttv_pci_dev)) return &bt878[card_nr]; } return NULL; } static int thomson_dtt7579_demod_init(struct dvb_frontend* fe) { static u8 mt352_clock_config [] = { 0x89, 0x38, 0x38 }; static u8 mt352_reset [] = { 0x50, 0x80 }; static u8 mt352_adc_ctl_1_cfg [] = { 0x8E, 0x40 }; static u8 mt352_agc_cfg [] = { 0x67, 0x28, 0x20 }; static u8 mt352_gpp_ctl_cfg [] = { 0x8C, 0x33 }; static u8 mt352_capt_range_cfg[] = { 0x75, 0x32 }; mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config)); udelay(2000); mt352_write(fe, mt352_reset, sizeof(mt352_reset)); mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg)); mt352_write(fe, mt352_agc_cfg, sizeof(mt352_agc_cfg)); mt352_write(fe, mt352_gpp_ctl_cfg, sizeof(mt352_gpp_ctl_cfg)); mt352_write(fe, mt352_capt_range_cfg, sizeof(mt352_capt_range_cfg)); return 0; } static int thomson_dtt7579_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params, u8* pllbuf) { u32 div; unsigned char bs = 0; unsigned char cp = 0; #define IF_FREQUENCYx6 217 /* 6 * 36.16666666667MHz */ div = (((params->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6; if (params->frequency < 542000000) cp = 0xb4; else if (params->frequency < 771000000) cp = 0xbc; else cp = 0xf4; if (params->frequency == 0) bs = 0x03; else if (params->frequency < 443250000) bs = 0x02; else bs = 0x08; pllbuf[0] = 0xc0; // Note: non-linux standard PLL i2c address pllbuf[1] = div >> 8; pllbuf[2] = div & 0xff; pllbuf[3] = cp; pllbuf[4] = bs; return 0; } static struct mt352_config thomson_dtt7579_config = { .demod_address = 0x0f, .demod_init = thomson_dtt7579_demod_init, .pll_set = thomson_dtt7579_pll_set, }; static int cx24108_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) { u32 freq = params->frequency; int i, a, n, pump; u32 band, pll; u32 osci[]={950000,1019000,1075000,1178000,1296000,1432000, 1576000,1718000,1856000,2036000,2150000}; u32 bandsel[]={0,0x00020000,0x00040000,0x00100800,0x00101000, 0x00102000,0x00104000,0x00108000,0x00110000, 0x00120000,0x00140000}; #define XTAL 1011100 /* Hz, really 1.0111 MHz and a /10 prescaler */ printk("cx24108 debug: entering SetTunerFreq, freq=%d\n",freq); /* This is really the bit driving the tuner chip cx24108 */ if(freq<950000) freq=950000; /* kHz */ if(freq>2150000) freq=2150000; /* satellite IF is 950..2150MHz */ /* decide which VCO to use for the input frequency */ for(i=1;(i<sizeof(osci)/sizeof(osci[0]))&&(osci[i]<freq);i++); printk("cx24108 debug: select vco #%d (f=%d)\n",i,freq); band=bandsel[i]; /* the gain values must be set by SetSymbolrate */ /* compute the pll divider needed, from Conexant data sheet, resolved for (n*32+a), remember f(vco) is f(receive) *2 or *4, depending on the divider bit. It is set to /4 on the 2 lowest bands */ n=((i<=2?2:1)*freq*10L)/(XTAL/100); a=n%32; n/=32; if(a==0) n--; pump=(freq<(osci[i-1]+osci[i])/2); pll=0xf8000000| ((pump?1:2)<<(14+11))| ((n&0x1ff)<<(5+11))| ((a&0x1f)<<11); /* everything is shifted left 11 bits to left-align the bits in the 32bit word. Output to the tuner goes MSB-aligned, after all */ printk("cx24108 debug: pump=%d, n=%d, a=%d\n",pump,n,a); cx24110_pll_write(fe,band); /* set vga and vca to their widest-band settings, as a precaution. 
SetSymbolrate might not be called to set this up */ cx24110_pll_write(fe,0x500c0000); cx24110_pll_write(fe,0x83f1f800); cx24110_pll_write(fe,pll); /* writereg(client,0x56,0x7f);*/ return 0; } static int pinnsat_pll_init(struct dvb_frontend* fe) { return 0; } static struct cx24110_config pctvsat_config = { .demod_address = 0x55, .pll_init = pinnsat_pll_init, .pll_set = cx24108_pll_set, }; static int microtune_mt7202dtf_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) { struct dvb_bt8xx_card *card = (struct dvb_bt8xx_card *) fe->dvb->priv; u8 cfg, cpump, band_select; u8 data[4]; u32 div; struct i2c_msg msg = { .addr = 0x60, .flags = 0, .buf = data, .len = sizeof(data) }; div = (36000000 + params->frequency + 83333) / 166666; cfg = 0x88; if (params->frequency < 175000000) cpump = 2; else if (params->frequency < 390000000) cpump = 1; else if (params->frequency < 470000000) cpump = 2; else if (params->frequency < 750000000) cpump = 2; else cpump = 3; if (params->frequency < 175000000) band_select = 0x0e; else if (params->frequency < 470000000) band_select = 0x05; else band_select = 0x03; data[0] = (div >> 8) & 0x7f; data[1] = div & 0xff; data[2] = ((div >> 10) & 0x60) | cfg; data[3] = cpump | band_select; i2c_transfer(card->i2c_adapter, &msg, 1); return (div * 166666 - 36000000); } static int microtune_mt7202dtf_request_firmware(struct dvb_frontend* fe, const struct firmware **fw, char* name) { struct dvb_bt8xx_card* bt = (struct dvb_bt8xx_card*) fe->dvb->priv; return request_firmware(fw, name, &bt->bt->dev->dev); } static struct sp887x_config microtune_mt7202dtf_config = { .demod_address = 0x70, .pll_set = microtune_mt7202dtf_pll_set, .request_firmware = microtune_mt7202dtf_request_firmware, }; static int advbt771_samsung_tdtc9251dh0_demod_init(struct dvb_frontend* fe) { static u8 mt352_clock_config [] = { 0x89, 0x38, 0x2d }; static u8 mt352_reset [] = { 0x50, 0x80 }; static u8 mt352_adc_ctl_1_cfg [] = { 0x8E, 0x40 }; static u8 mt352_agc_cfg [] = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF, 0x00, 0xFF, 0x00, 0x40, 0x40 }; static u8 mt352_av771_extra[] = { 0xB5, 0x7A }; static u8 mt352_capt_range_cfg[] = { 0x75, 0x32 }; mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config)); udelay(2000); mt352_write(fe, mt352_reset, sizeof(mt352_reset)); mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg)); mt352_write(fe, mt352_agc_cfg,sizeof(mt352_agc_cfg)); udelay(2000); mt352_write(fe, mt352_av771_extra,sizeof(mt352_av771_extra)); mt352_write(fe, mt352_capt_range_cfg, sizeof(mt352_capt_range_cfg)); return 0; } static int advbt771_samsung_tdtc9251dh0_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params, u8* pllbuf) { u32 div; unsigned char bs = 0; unsigned char cp = 0; #define IF_FREQUENCYx6 217 /* 6 * 36.16666666667MHz */ div = (((params->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6; if (params->frequency < 150000000) cp = 0xB4; else if (params->frequency < 173000000) cp = 0xBC; else if (params->frequency < 250000000) cp = 0xB4; else if (params->frequency < 400000000) cp = 0xBC; else if (params->frequency < 420000000) cp = 0xF4; else if (params->frequency < 470000000) cp = 0xFC; else if (params->frequency < 600000000) cp = 0xBC; else if (params->frequency < 730000000) cp = 0xF4; else cp = 0xFC; if (params->frequency < 150000000) bs = 0x01; else if (params->frequency < 173000000) bs = 0x01; else if (params->frequency < 250000000) bs = 0x02; else if (params->frequency < 400000000) bs = 0x02; else if (params->frequency < 420000000) bs = 0x02; 
else if (params->frequency < 470000000) bs = 0x02; else if (params->frequency < 600000000) bs = 0x08; else if (params->frequency < 730000000) bs = 0x08; else bs = 0x08; pllbuf[0] = 0xc2; // Note: non-linux standard PLL i2c address pllbuf[1] = div >> 8; pllbuf[2] = div & 0xff; pllbuf[3] = cp; pllbuf[4] = bs; return 0; } static struct mt352_config advbt771_samsung_tdtc9251dh0_config = { .demod_address = 0x0f, .demod_init = advbt771_samsung_tdtc9251dh0_demod_init, .pll_set = advbt771_samsung_tdtc9251dh0_pll_set, }; static struct dst_config dst_config = { .demod_address = 0x55, }; static int or51211_request_firmware(struct dvb_frontend* fe, const struct firmware **fw, char* name) { struct dvb_bt8xx_card* bt = (struct dvb_bt8xx_card*) fe->dvb->priv; return request_firmware(fw, name, &bt->bt->dev->dev); } static void or51211_setmode(struct dvb_frontend * fe, int mode) { struct dvb_bt8xx_card *bt = fe->dvb->priv; bttv_write_gpio(bt->bttv_nr, 0x0002, mode); /* Reset */ msleep(20); } static void or51211_reset(struct dvb_frontend * fe) { struct dvb_bt8xx_card *bt = fe->dvb->priv; /* RESET DEVICE * reset is controled by GPIO-0 * when set to 0 causes reset and when to 1 for normal op * must remain reset for 128 clock cycles on a 50Mhz clock * also PRM1 PRM2 & PRM4 are controled by GPIO-1,GPIO-2 & GPIO-4 * We assume that the reset has be held low long enough or we * have been reset by a power on. When the driver is unloaded * reset set to 0 so if reloaded we have been reset. */ /* reset & PRM1,2&4 are outputs */ int ret = bttv_gpio_enable(bt->bttv_nr, 0x001F, 0x001F); if (ret != 0) { printk(KERN_WARNING "or51211: Init Error - Can't Reset DVR " "(%i)\n", ret); } bttv_write_gpio(bt->bttv_nr, 0x001F, 0x0000); /* Reset */ msleep(20); /* Now set for normal operation */ bttv_write_gpio(bt->bttv_nr, 0x0001F, 0x0001); /* wait for operation to begin */ msleep(500); } static void or51211_sleep(struct dvb_frontend * fe) { struct dvb_bt8xx_card *bt = fe->dvb->priv; bttv_write_gpio(bt->bttv_nr, 0x0001, 0x0000); } static struct or51211_config or51211_config = { .demod_address = 0x15, .request_firmware = or51211_request_firmware, .setmode = or51211_setmode, .reset = or51211_reset, .sleep = or51211_sleep, }; static int vp3021_alps_tded4_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) { struct dvb_bt8xx_card *card = (struct dvb_bt8xx_card *) fe->dvb->priv; u8 buf[4]; u32 div; struct i2c_msg msg = { .addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf) }; div = (params->frequency + 36166667) / 166667; buf[0] = (div >> 8) & 0x7F; buf[1] = div & 0xFF; buf[2] = 0x85; if ((params->frequency >= 47000000) && (params->frequency < 153000000)) buf[3] = 0x01; else if ((params->frequency >= 153000000) && (params->frequency < 430000000)) buf[3] = 0x02; else if ((params->frequency >= 430000000) && (params->frequency < 824000000)) buf[3] = 0x0C; else if ((params->frequency >= 824000000) && (params->frequency < 863000000)) buf[3] = 0x8C; else return -EINVAL; i2c_transfer(card->i2c_adapter, &msg, 1); return 0; } static struct nxt6000_config vp3021_alps_tded4_config = { .demod_address = 0x0a, .clock_inversion = 1, .pll_set = vp3021_alps_tded4_pll_set, }; static void frontend_init(struct dvb_bt8xx_card *card, u32 type) { int ret; struct dst_state* state = NULL; switch(type) { #ifdef BTTV_DVICO_DVBT_LITE case BTTV_DVICO_DVBT_LITE: card->fe = mt352_attach(&thomson_dtt7579_config, card->i2c_adapter); if (card->fe != NULL) { card->fe->ops->info.frequency_min = 174000000; card->fe->ops->info.frequency_max = 
862000000; break; } break; #endif #ifdef BTTV_TWINHAN_VP3021 case BTTV_TWINHAN_VP3021: #else case BTTV_NEBULA_DIGITV: #endif card->fe = nxt6000_attach(&vp3021_alps_tded4_config, card->i2c_adapter); if (card->fe != NULL) { break; } break; case BTTV_AVDVBT_761: card->fe = sp887x_attach(&microtune_mt7202dtf_config, card->i2c_adapter); if (card->fe != NULL) { break; } break; case BTTV_AVDVBT_771: card->fe = mt352_attach(&advbt771_samsung_tdtc9251dh0_config, card->i2c_adapter); if (card->fe != NULL) { card->fe->ops->info.frequency_min = 174000000; card->fe->ops->info.frequency_max = 862000000; break; } break; case BTTV_TWINHAN_DST: /* DST is not a frontend driver !!! */ state = (struct dst_state *) kmalloc(sizeof (struct dst_state), GFP_KERNEL); /* Setup the Card */ state->config = &dst_config; state->i2c = card->i2c_adapter; state->bt = card->bt; /* DST is not a frontend, attaching the ASIC */ if ((dst_attach(state, &card->dvb_adapter)) == NULL) { printk("%s: Could not find a Twinhan DST.\n", __FUNCTION__); break; } card->fe = &state->frontend; /* Attach other DST peripherals if any */ /* Conditional Access device */ if (state->dst_hw_cap & DST_TYPE_HAS_CA) { ret = dst_ca_attach(state, &card->dvb_adapter); } if (card->fe != NULL) { break; } break; case BTTV_PINNACLESAT: card->fe = cx24110_attach(&pctvsat_config, card->i2c_adapter); if (card->fe != NULL) { break; } break; case BTTV_PC_HDTV: card->fe = or51211_attach(&or51211_config, card->i2c_adapter); if (card->fe != NULL) { break; } break; } if (card->fe == NULL) { printk("dvb-bt8xx: A frontend driver was not found for device %04x/%04x subsystem %04x/%04x\n", card->bt->dev->vendor, card->bt->dev->device, card->bt->dev->subsystem_vendor, card->bt->dev->subsystem_device); } else { if (dvb_register_frontend(&card->dvb_adapter, card->fe)) { printk("dvb-bt8xx: Frontend registration failed!\n"); if (card->fe->ops->release) card->fe->ops->release(card->fe); card->fe = NULL; } } } static int __init dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type) { int result; if ((result = dvb_register_adapter(&card->dvb_adapter, card->card_name, THIS_MODULE)) < 0) { printk("dvb_bt8xx: dvb_register_adapter failed (errno = %d)\n", result); return result; } card->dvb_adapter.priv = card; card->bt->adapter = card->i2c_adapter; memset(&card->demux, 0, sizeof(struct dvb_demux)); card->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING; card->demux.priv = card; card->demux.filternum = 256; card->demux.feednum = 256; card->demux.start_feed = dvb_bt8xx_start_feed; card->demux.stop_feed = dvb_bt8xx_stop_feed; card->demux.write_to_decoder = NULL; if ((result = dvb_dmx_init(&card->demux)) < 0) { printk("dvb_bt8xx: dvb_dmx_init failed (errno = %d)\n", result); dvb_unregister_adapter(&card->dvb_adapter); return result; } card->dmxdev.filternum = 256; card->dmxdev.demux = &card->demux.dmx; card->dmxdev.capabilities = 0; if ((result = dvb_dmxdev_init(&card->dmxdev, &card->dvb_adapter)) < 0) { printk("dvb_bt8xx: dvb_dmxdev_init failed (errno = %d)\n", result); dvb_dmx_release(&card->demux); dvb_unregister_adapter(&card->dvb_adapter); return result; } card->fe_hw.source = DMX_FRONTEND_0; if ((result = card->demux.dmx.add_frontend(&card->demux.dmx, &card->fe_hw)) < 0) { printk("dvb_bt8xx: dvb_dmx_init failed (errno = %d)\n", result); dvb_dmxdev_release(&card->dmxdev); dvb_dmx_release(&card->demux); dvb_unregister_adapter(&card->dvb_adapter); return result; } card->fe_mem.source = DMX_MEMORY_FE; if ((result = 
card->demux.dmx.add_frontend(&card->demux.dmx, &card->fe_mem)) < 0) { printk("dvb_bt8xx: dvb_dmx_init failed (errno = %d)\n", result); card->demux.dmx.remove_frontend(&card->demux.dmx, &card->fe_hw); dvb_dmxdev_release(&card->dmxdev); dvb_dmx_release(&card->demux); dvb_unregister_adapter(&card->dvb_adapter); return result; } if ((result = card->demux.dmx.connect_frontend(&card->demux.dmx, &card->fe_hw)) < 0) { printk("dvb_bt8xx: dvb_dmx_init failed (errno = %d)\n", result); card->demux.dmx.remove_frontend(&card->demux.dmx, &card->fe_mem); card->demux.dmx.remove_frontend(&card->demux.dmx, &card->fe_hw); dvb_dmxdev_release(&card->dmxdev); dvb_dmx_release(&card->demux); dvb_unregister_adapter(&card->dvb_adapter); return result; } dvb_net_init(&card->dvb_adapter, &card->dvbnet, &card->demux.dmx); tasklet_init(&card->bt->tasklet, dvb_bt8xx_task, (unsigned long) card); frontend_init(card, type); return 0; } static int dvb_bt8xx_probe(struct device *dev) { struct bttv_sub_device *sub = to_bttv_sub_dev(dev); struct dvb_bt8xx_card *card; struct pci_dev* bttv_pci_dev; int ret; if (!(card = kmalloc(sizeof(struct dvb_bt8xx_card), GFP_KERNEL))) return -ENOMEM; memset(card, 0, sizeof(*card)); init_MUTEX(&card->lock); card->bttv_nr = sub->core->nr; strncpy(card->card_name, sub->core->name, sizeof(sub->core->name)); card->i2c_adapter = &sub->core->i2c_adap; switch(sub->core->type) { case BTTV_PINNACLESAT: card->gpio_mode = 0x0400c060; /* should be: BT878_A_GAIN=0,BT878_A_PWRDN,BT878_DA_DPM,BT878_DA_SBR, BT878_DA_IOM=1,BT878_DA_APP to enable serial highspeed mode. */ card->op_sync_orin = 0; card->irq_err_ignore = 0; break; #ifdef BTTV_DVICO_DVBT_LITE case BTTV_DVICO_DVBT_LITE: #endif card->gpio_mode = 0x0400C060; card->op_sync_orin = 0; card->irq_err_ignore = 0; /* 26, 15, 14, 6, 5 * A_PWRDN DA_DPM DA_SBR DA_IOM_DA * DA_APP(parallel) */ break; #ifdef BTTV_TWINHAN_VP3021 case BTTV_TWINHAN_VP3021: #else case BTTV_NEBULA_DIGITV: #endif case BTTV_AVDVBT_761: card->gpio_mode = (1 << 26) | (1 << 14) | (1 << 5); card->op_sync_orin = 0; card->irq_err_ignore = 0; /* A_PWRDN DA_SBR DA_APP (high speed serial) */ break; case BTTV_AVDVBT_771: //case 0x07711461: card->gpio_mode = 0x0400402B; card->op_sync_orin = BT878_RISC_SYNC_MASK; card->irq_err_ignore = 0; /* A_PWRDN DA_SBR DA_APP[0] PKTP=10 RISC_ENABLE FIFO_ENABLE*/ break; case BTTV_TWINHAN_DST: card->gpio_mode = 0x2204f2c; card->op_sync_orin = BT878_RISC_SYNC_MASK; card->irq_err_ignore = BT878_APABORT | BT878_ARIPERR | BT878_APPERR | BT878_AFBUS; /* 25,21,14,11,10,9,8,3,2 then * 0x33 = 5,4,1,0 * A_SEL=SML, DA_MLB, DA_SBR, * DA_SDR=f, fifo trigger = 32 DWORDS * IOM = 0 == audio A/D * DPM = 0 == digital audio mode * == async data parallel port * then 0x33 (13 is set by start_capture) * DA_APP = async data parallel port, * ACAP_EN = 1, * RISC+FIFO ENABLE */ break; case BTTV_PC_HDTV: card->gpio_mode = 0x0100EC7B; card->op_sync_orin = 0; card->irq_err_ignore = 0; break; default: printk(KERN_WARNING "dvb_bt8xx: Unknown bttv card type: %d.\n", sub->core->type); kfree(card); return -ENODEV; } dprintk("dvb_bt8xx: identified card%d as %s\n", card->bttv_nr, card->card_name); if (!(bttv_pci_dev = bttv_get_pcidev(card->bttv_nr))) { printk("dvb_bt8xx: no pci device for card %d\n", card->bttv_nr); kfree(card); return -EFAULT; } if (!(card->bt = dvb_bt8xx_878_match(card->bttv_nr, bttv_pci_dev))) { printk("dvb_bt8xx: unable to determine DMA core of card %d,\n", card->bttv_nr); printk("dvb_bt8xx: if you have the ALSA bt87x audio driver " "installed, try removing it.\n"); 
kfree(card); return -EFAULT; } init_MUTEX(&card->bt->gpio_lock); card->bt->bttv_nr = sub->core->nr; if ( (ret = dvb_bt8xx_load_card(card, sub->core->type)) ) { kfree(card); return ret; } dev_set_drvdata(dev, card); return 0; } static int dvb_bt8xx_remove(struct device *dev) { struct dvb_bt8xx_card *card = dev_get_drvdata(dev); dprintk("dvb_bt8xx: unloading card%d\n", card->bttv_nr); bt878_stop(card->bt); tasklet_kill(&card->bt->tasklet); dvb_net_release(&card->dvbnet); card->demux.dmx.remove_frontend(&card->demux.dmx, &card->fe_mem); card->demux.dmx.remove_frontend(&card->demux.dmx, &card->fe_hw); dvb_dmxdev_release(&card->dmxdev); dvb_dmx_release(&card->demux); if (card->fe) dvb_unregister_frontend(card->fe); dvb_unregister_adapter(&card->dvb_adapter); kfree(card); return 0; } static struct bttv_sub_driver driver = { .drv = { .name = "dvb-bt8xx", .probe = dvb_bt8xx_probe, .remove = dvb_bt8xx_remove, /* FIXME: * .shutdown = dvb_bt8xx_shutdown, * .suspend = dvb_bt8xx_suspend, * .resume = dvb_bt8xx_resume, */ }, }; static int __init dvb_bt8xx_init(void) { return bttv_sub_register(&driver, "dvb"); } static void __exit dvb_bt8xx_exit(void) { bttv_sub_unregister(&driver); } module_init(dvb_bt8xx_init); module_exit(dvb_bt8xx_exit); MODULE_DESCRIPTION("Bt8xx based DVB adapter driver"); MODULE_AUTHOR("Florian Schirmer <jolt@tuxbox.org>"); MODULE_LICENSE("GPL");
kzlin129/tt-gpl
go9/linux-s3c24xx/drivers/media/dvb/bt8xx/dvb-bt8xx.c
C
gpl-2.0
23,010
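thomson_dtt7579_pll_set() above converts the RF frequency into a tuner divider counted in 166.667 kHz steps, plus the 36.167 MHz IF expressed in the same steps (the IF_FREQUENCYx6 == 217 term). The standalone desk check below reruns that arithmetic for one sample frequency; 506 MHz is an arbitrary choice of mine, and the band-select logic drops the driver's frequency == 0 special case.

#include <stdio.h>

#define IF_FREQUENCYx6 217	/* 6 * 36.16666666667 MHz, as in the driver */

/* Desk check only: reproduce the divider, charge-pump and band-select
 * bytes the driver would write for a nonzero sample frequency. */
int main(void)
{
	unsigned int frequency = 506000000;	/* Hz, arbitrary sample */
	unsigned int div, cp, bs;

	div = (((frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6;

	/* same nonzero-frequency thresholds as thomson_dtt7579_pll_set() */
	cp = frequency < 542000000 ? 0xb4 :
	     frequency < 771000000 ? 0xbc : 0xf4;
	bs = frequency < 443250000 ? 0x02 : 0x08;

	printf("div=%u pllbuf={0xc0,0x%02x,0x%02x,0x%02x,0x%02x}\n",
	       div, div >> 8, div & 0xff, cp, bs);
	return 0;
}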
/* * Samsung EXYNOS FIMC-LITE (camera host interface) driver * * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd. * Author: Sylwester Nawrocki <s.nawrocki@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__ #include <linux/bug.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mem2mem.h> #include <media/videobuf2-core.h> #include <media/videobuf2-dma-contig.h> #include <media/exynos-fimc.h> #include "common.h" #include "fimc-core.h" #include "fimc-lite.h" #include "fimc-lite-reg.h" static int debug; module_param(debug, int, 0644); static const struct fimc_fmt fimc_lite_formats[] = { { .name = "YUV 4:2:2 packed, YCbYCr", .fourcc = V4L2_PIX_FMT_YUYV, .colorspace = V4L2_COLORSPACE_JPEG, .depth = { 16 }, .color = FIMC_FMT_YCBYCR422, .memplanes = 1, .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, .flags = FMT_FLAGS_YUV, }, { .name = "YUV 4:2:2 packed, CbYCrY", .fourcc = V4L2_PIX_FMT_UYVY, .colorspace = V4L2_COLORSPACE_JPEG, .depth = { 16 }, .color = FIMC_FMT_CBYCRY422, .memplanes = 1, .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8, .flags = FMT_FLAGS_YUV, }, { .name = "YUV 4:2:2 packed, CrYCbY", .fourcc = V4L2_PIX_FMT_VYUY, .colorspace = V4L2_COLORSPACE_JPEG, .depth = { 16 }, .color = FIMC_FMT_CRYCBY422, .memplanes = 1, .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8, .flags = FMT_FLAGS_YUV, }, { .name = "YUV 4:2:2 packed, YCrYCb", .fourcc = V4L2_PIX_FMT_YVYU, .colorspace = V4L2_COLORSPACE_JPEG, .depth = { 16 }, .color = FIMC_FMT_YCRYCB422, .memplanes = 1, .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8, .flags = FMT_FLAGS_YUV, }, { .name = "RAW8 (GRBG)", .fourcc = V4L2_PIX_FMT_SGRBG8, .colorspace = V4L2_COLORSPACE_SRGB, .depth = { 8 }, .color = FIMC_FMT_RAW8, .memplanes = 1, .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8, .flags = FMT_FLAGS_RAW_BAYER, }, { .name = "RAW10 (GRBG)", .fourcc = V4L2_PIX_FMT_SGRBG10, .colorspace = V4L2_COLORSPACE_SRGB, .depth = { 16 }, .color = FIMC_FMT_RAW10, .memplanes = 1, .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10, .flags = FMT_FLAGS_RAW_BAYER, }, { .name = "RAW12 (GRBG)", .fourcc = V4L2_PIX_FMT_SGRBG12, .colorspace = V4L2_COLORSPACE_SRGB, .depth = { 16 }, .color = FIMC_FMT_RAW12, .memplanes = 1, .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12, .flags = FMT_FLAGS_RAW_BAYER, }, }; /** * fimc_lite_find_format - lookup fimc color format by fourcc or media bus code * @pixelformat: fourcc to match, ignored if null * @mbus_code: media bus code to match, ignored if null * @mask: the color format flags to match * @index: index to the fimc_lite_formats array, ignored if negative */ static const struct fimc_fmt *fimc_lite_find_format(const u32 *pixelformat, const u32 *mbus_code, unsigned int mask, int index) { const struct fimc_fmt *fmt, *def_fmt = NULL; unsigned int i; int id = 0; if (index >= (int)ARRAY_SIZE(fimc_lite_formats)) return NULL; for (i = 0; i < ARRAY_SIZE(fimc_lite_formats); ++i) { fmt = &fimc_lite_formats[i]; if (mask && !(fmt->flags & mask)) continue; if (pixelformat && fmt->fourcc == *pixelformat) return fmt; if (mbus_code && 
fmt->mbus_code == *mbus_code) return fmt; if (index == id) def_fmt = fmt; id++; } return def_fmt; } static int fimc_lite_hw_init(struct fimc_lite *fimc, bool isp_output) { struct fimc_source_info *si; unsigned long flags; if (fimc->sensor == NULL) return -ENXIO; if (fimc->inp_frame.fmt == NULL || fimc->out_frame.fmt == NULL) return -EINVAL; /* Get sensor configuration data from the sensor subdev */ si = v4l2_get_subdev_hostdata(fimc->sensor); if (!si) return -EINVAL; spin_lock_irqsave(&fimc->slock, flags); flite_hw_set_camera_bus(fimc, si); flite_hw_set_source_format(fimc, &fimc->inp_frame); flite_hw_set_window_offset(fimc, &fimc->inp_frame); flite_hw_set_dma_buf_mask(fimc, 0); flite_hw_set_output_dma(fimc, &fimc->out_frame, !isp_output); flite_hw_set_interrupt_mask(fimc); flite_hw_set_test_pattern(fimc, fimc->test_pattern->val); if (debug > 0) flite_hw_dump_regs(fimc, __func__); spin_unlock_irqrestore(&fimc->slock, flags); return 0; } /* * Reinitialize the driver so it is ready to start the streaming again. * Set fimc->state to indicate stream off and the hardware shut down state. * If not suspending (@suspend is false), return any buffers to videobuf2. * Otherwise put any owned buffers onto the pending buffers queue, so they * can be re-spun when the device is being resumed. Also perform FIMC * software reset and disable streaming on the whole pipeline if required. */ static int fimc_lite_reinit(struct fimc_lite *fimc, bool suspend) { struct flite_buffer *buf; unsigned long flags; bool streaming; spin_lock_irqsave(&fimc->slock, flags); streaming = fimc->state & (1 << ST_SENSOR_STREAM); fimc->state &= ~(1 << ST_FLITE_RUN | 1 << ST_FLITE_OFF | 1 << ST_FLITE_STREAM | 1 << ST_SENSOR_STREAM); if (suspend) fimc->state |= (1 << ST_FLITE_SUSPENDED); else fimc->state &= ~(1 << ST_FLITE_PENDING | 1 << ST_FLITE_SUSPENDED); /* Release unused buffers */ while (!suspend && !list_empty(&fimc->pending_buf_q)) { buf = fimc_lite_pending_queue_pop(fimc); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); } /* If suspending put unused buffers onto pending queue */ while (!list_empty(&fimc->active_buf_q)) { buf = fimc_lite_active_queue_pop(fimc); if (suspend) fimc_lite_pending_queue_add(fimc, buf); else vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); } spin_unlock_irqrestore(&fimc->slock, flags); flite_hw_reset(fimc); if (!streaming) return 0; return fimc_pipeline_call(&fimc->ve, set_stream, 0); } static int fimc_lite_stop_capture(struct fimc_lite *fimc, bool suspend) { unsigned long flags; if (!fimc_lite_active(fimc)) return 0; spin_lock_irqsave(&fimc->slock, flags); set_bit(ST_FLITE_OFF, &fimc->state); flite_hw_capture_stop(fimc); spin_unlock_irqrestore(&fimc->slock, flags); wait_event_timeout(fimc->irq_queue, !test_bit(ST_FLITE_OFF, &fimc->state), (2*HZ/10)); /* 200 ms */ return fimc_lite_reinit(fimc, suspend); } /* Must be called with fimc.slock spinlock held. 
*/ static void fimc_lite_config_update(struct fimc_lite *fimc) { flite_hw_set_window_offset(fimc, &fimc->inp_frame); flite_hw_set_dma_window(fimc, &fimc->out_frame); flite_hw_set_test_pattern(fimc, fimc->test_pattern->val); clear_bit(ST_FLITE_CONFIG, &fimc->state); } static irqreturn_t flite_irq_handler(int irq, void *priv) { struct fimc_lite *fimc = priv; struct flite_buffer *vbuf; unsigned long flags; struct timeval *tv; struct timespec ts; u32 intsrc; spin_lock_irqsave(&fimc->slock, flags); intsrc = flite_hw_get_interrupt_source(fimc); flite_hw_clear_pending_irq(fimc); if (test_and_clear_bit(ST_FLITE_OFF, &fimc->state)) { wake_up(&fimc->irq_queue); goto done; } if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW) { clear_bit(ST_FLITE_RUN, &fimc->state); fimc->events.data_overflow++; } if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND) { flite_hw_clear_last_capture_end(fimc); clear_bit(ST_FLITE_STREAM, &fimc->state); wake_up(&fimc->irq_queue); } if (atomic_read(&fimc->out_path) != FIMC_IO_DMA) goto done; if ((intsrc & FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART) && test_bit(ST_FLITE_RUN, &fimc->state) && !list_empty(&fimc->pending_buf_q)) { vbuf = fimc_lite_pending_queue_pop(fimc); flite_hw_set_dma_buffer(fimc, vbuf); fimc_lite_active_queue_add(fimc, vbuf); } if ((intsrc & FLITE_REG_CISTATUS_IRQ_SRC_FRMEND) && test_bit(ST_FLITE_RUN, &fimc->state) && !list_empty(&fimc->active_buf_q)) { vbuf = fimc_lite_active_queue_pop(fimc); ktime_get_ts(&ts); tv = &vbuf->vb.v4l2_buf.timestamp; tv->tv_sec = ts.tv_sec; tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC; vbuf->vb.v4l2_buf.sequence = fimc->frame_count++; flite_hw_mask_dma_buffer(fimc, vbuf->index); vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE); } if (test_bit(ST_FLITE_CONFIG, &fimc->state)) fimc_lite_config_update(fimc); if (list_empty(&fimc->pending_buf_q)) { flite_hw_capture_stop(fimc); clear_bit(ST_FLITE_STREAM, &fimc->state); } done: set_bit(ST_FLITE_RUN, &fimc->state); spin_unlock_irqrestore(&fimc->slock, flags); return IRQ_HANDLED; } static int start_streaming(struct vb2_queue *q, unsigned int count) { struct fimc_lite *fimc = q->drv_priv; unsigned long flags; int ret; spin_lock_irqsave(&fimc->slock, flags); fimc->buf_index = 0; fimc->frame_count = 0; spin_unlock_irqrestore(&fimc->slock, flags); ret = fimc_lite_hw_init(fimc, false); if (ret) { fimc_lite_reinit(fimc, false); return ret; } set_bit(ST_FLITE_PENDING, &fimc->state); if (!list_empty(&fimc->active_buf_q) && !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) { flite_hw_capture_start(fimc); if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state)) fimc_pipeline_call(&fimc->ve, set_stream, 1); } if (debug > 0) flite_hw_dump_regs(fimc, __func__); return 0; } static void stop_streaming(struct vb2_queue *q) { struct fimc_lite *fimc = q->drv_priv; if (!fimc_lite_active(fimc)) return; fimc_lite_stop_capture(fimc, false); } static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *allocators[]) { const struct v4l2_pix_format_mplane *pixm = NULL; struct fimc_lite *fimc = vq->drv_priv; struct flite_frame *frame = &fimc->out_frame; const struct fimc_fmt *fmt = frame->fmt; unsigned long wh; int i; if (pfmt) { pixm = &pfmt->fmt.pix_mp; fmt = fimc_lite_find_format(&pixm->pixelformat, NULL, 0, -1); wh = pixm->width * pixm->height; } else { wh = frame->f_width * frame->f_height; } if (fmt == NULL) return -EINVAL; *num_planes = fmt->memplanes; for (i = 0; i < fmt->memplanes; i++) { unsigned int size = (wh * 
fmt->depth[i]) / 8; if (pixm) sizes[i] = max(size, pixm->plane_fmt[i].sizeimage); else sizes[i] = size; allocators[i] = fimc->alloc_ctx; } return 0; } static int buffer_prepare(struct vb2_buffer *vb) { struct vb2_queue *vq = vb->vb2_queue; struct fimc_lite *fimc = vq->drv_priv; int i; if (fimc->out_frame.fmt == NULL) return -EINVAL; for (i = 0; i < fimc->out_frame.fmt->memplanes; i++) { unsigned long size = fimc->payload[i]; if (vb2_plane_size(vb, i) < size) { v4l2_err(&fimc->ve.vdev, "User buffer too small (%ld < %ld)\n", vb2_plane_size(vb, i), size); return -EINVAL; } vb2_set_plane_payload(vb, i, size); } return 0; } static void buffer_queue(struct vb2_buffer *vb) { struct flite_buffer *buf = container_of(vb, struct flite_buffer, vb); struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue); unsigned long flags; spin_lock_irqsave(&fimc->slock, flags); buf->paddr = vb2_dma_contig_plane_dma_addr(vb, 0); buf->index = fimc->buf_index++; if (fimc->buf_index >= fimc->reqbufs_count) fimc->buf_index = 0; if (!test_bit(ST_FLITE_SUSPENDED, &fimc->state) && !test_bit(ST_FLITE_STREAM, &fimc->state) && list_empty(&fimc->active_buf_q)) { flite_hw_set_dma_buffer(fimc, buf); fimc_lite_active_queue_add(fimc, buf); } else { fimc_lite_pending_queue_add(fimc, buf); } if (vb2_is_streaming(&fimc->vb_queue) && !list_empty(&fimc->pending_buf_q) && !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) { flite_hw_capture_start(fimc); spin_unlock_irqrestore(&fimc->slock, flags); if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state)) fimc_pipeline_call(&fimc->ve, set_stream, 1); return; } spin_unlock_irqrestore(&fimc->slock, flags); } static const struct vb2_ops fimc_lite_qops = { .queue_setup = queue_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, .start_streaming = start_streaming, .stop_streaming = stop_streaming, }; static void fimc_lite_clear_event_counters(struct fimc_lite *fimc) { unsigned long flags; spin_lock_irqsave(&fimc->slock, flags); memset(&fimc->events, 0, sizeof(fimc->events)); spin_unlock_irqrestore(&fimc->slock, flags); } static int fimc_lite_open(struct file *file) { struct fimc_lite *fimc = video_drvdata(file); struct media_entity *me = &fimc->ve.vdev.entity; int ret; mutex_lock(&fimc->lock); if (atomic_read(&fimc->out_path) != FIMC_IO_DMA) { ret = -EBUSY; goto unlock; } set_bit(ST_FLITE_IN_USE, &fimc->state); ret = pm_runtime_get_sync(&fimc->pdev->dev); if (ret < 0) goto unlock; ret = v4l2_fh_open(file); if (ret < 0) goto err_pm; if (!v4l2_fh_is_singular_file(file) || atomic_read(&fimc->out_path) != FIMC_IO_DMA) goto unlock; mutex_lock(&me->parent->graph_mutex); ret = fimc_pipeline_call(&fimc->ve, open, me, true); /* Mark video pipeline ending at this video node as in use. 
*/ if (ret == 0) me->use_count++; mutex_unlock(&me->parent->graph_mutex); if (!ret) { fimc_lite_clear_event_counters(fimc); goto unlock; } v4l2_fh_release(file); err_pm: pm_runtime_put_sync(&fimc->pdev->dev); clear_bit(ST_FLITE_IN_USE, &fimc->state); unlock: mutex_unlock(&fimc->lock); return ret; } static int fimc_lite_release(struct file *file) { struct fimc_lite *fimc = video_drvdata(file); struct media_entity *entity = &fimc->ve.vdev.entity; mutex_lock(&fimc->lock); if (v4l2_fh_is_singular_file(file) && atomic_read(&fimc->out_path) == FIMC_IO_DMA) { if (fimc->streaming) { media_entity_pipeline_stop(entity); fimc->streaming = false; } fimc_lite_stop_capture(fimc, false); fimc_pipeline_call(&fimc->ve, close); clear_bit(ST_FLITE_IN_USE, &fimc->state); mutex_lock(&entity->parent->graph_mutex); entity->use_count--; mutex_unlock(&entity->parent->graph_mutex); } _vb2_fop_release(file, NULL); pm_runtime_put(&fimc->pdev->dev); clear_bit(ST_FLITE_SUSPENDED, &fimc->state); mutex_unlock(&fimc->lock); return 0; } static const struct v4l2_file_operations fimc_lite_fops = { .owner = THIS_MODULE, .open = fimc_lite_open, .release = fimc_lite_release, .poll = vb2_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, }; /* * Format and crop negotiation helpers */ static const struct fimc_fmt *fimc_lite_subdev_try_fmt(struct fimc_lite *fimc, struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *format) { struct flite_drvdata *dd = fimc->dd; struct v4l2_mbus_framefmt *mf = &format->format; const struct fimc_fmt *fmt = NULL; if (format->pad == FLITE_SD_PAD_SINK) { v4l_bound_align_image(&mf->width, 8, dd->max_width, ffs(dd->out_width_align) - 1, &mf->height, 0, dd->max_height, 0, 0); fmt = fimc_lite_find_format(NULL, &mf->code, 0, 0); if (WARN_ON(!fmt)) return NULL; mf->colorspace = fmt->colorspace; mf->code = fmt->mbus_code; } else { struct flite_frame *sink = &fimc->inp_frame; struct v4l2_mbus_framefmt *sink_fmt; struct v4l2_rect *rect; if (format->which == V4L2_SUBDEV_FORMAT_TRY) { sink_fmt = v4l2_subdev_get_try_format(fh, FLITE_SD_PAD_SINK); mf->code = sink_fmt->code; mf->colorspace = sink_fmt->colorspace; rect = v4l2_subdev_get_try_crop(fh, FLITE_SD_PAD_SINK); } else { mf->code = sink->fmt->mbus_code; mf->colorspace = sink->fmt->colorspace; rect = &sink->rect; } /* Allow changing format only on sink pad */ mf->width = rect->width; mf->height = rect->height; } mf->field = V4L2_FIELD_NONE; v4l2_dbg(1, debug, &fimc->subdev, "code: %#x (%d), %dx%d\n", mf->code, mf->colorspace, mf->width, mf->height); return fmt; } static void fimc_lite_try_crop(struct fimc_lite *fimc, struct v4l2_rect *r) { struct flite_frame *frame = &fimc->inp_frame; v4l_bound_align_image(&r->width, 0, frame->f_width, 0, &r->height, 0, frame->f_height, 0, 0); /* Adjust left/top if cropping rectangle got out of bounds */ r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width); r->left = round_down(r->left, fimc->dd->win_hor_offs_align); r->top = clamp_t(u32, r->top, 0, frame->f_height - r->height); v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d\n", r->left, r->top, r->width, r->height, frame->f_width, frame->f_height); } static void fimc_lite_try_compose(struct fimc_lite *fimc, struct v4l2_rect *r) { struct flite_frame *frame = &fimc->out_frame; struct v4l2_rect *crop_rect = &fimc->inp_frame.rect; /* Scaling is not supported so we enforce compose rectangle size same as size of the sink crop rectangle. 
*/ r->width = crop_rect->width; r->height = crop_rect->height; /* Adjust left/top if the composing rectangle got out of bounds */ r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width); r->left = round_down(r->left, fimc->dd->out_hor_offs_align); r->top = clamp_t(u32, r->top, 0, fimc->out_frame.f_height - r->height); v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d\n", r->left, r->top, r->width, r->height, frame->f_width, frame->f_height); } /* * Video node ioctl operations */ static int fimc_lite_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct fimc_lite *fimc = video_drvdata(file); strlcpy(cap->driver, FIMC_LITE_DRV_NAME, sizeof(cap->driver)); strlcpy(cap->card, FIMC_LITE_DRV_NAME, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", dev_name(&fimc->pdev->dev)); cap->device_caps = V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } static int fimc_lite_enum_fmt_mplane(struct file *file, void *priv, struct v4l2_fmtdesc *f) { const struct fimc_fmt *fmt; if (f->index >= ARRAY_SIZE(fimc_lite_formats)) return -EINVAL; fmt = &fimc_lite_formats[f->index]; strlcpy(f->description, fmt->name, sizeof(f->description)); f->pixelformat = fmt->fourcc; return 0; } static int fimc_lite_g_fmt_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct fimc_lite *fimc = video_drvdata(file); struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp; struct v4l2_plane_pix_format *plane_fmt = &pixm->plane_fmt[0]; struct flite_frame *frame = &fimc->out_frame; const struct fimc_fmt *fmt = frame->fmt; plane_fmt->bytesperline = (frame->f_width * fmt->depth[0]) / 8; plane_fmt->sizeimage = plane_fmt->bytesperline * frame->f_height; pixm->num_planes = fmt->memplanes; pixm->pixelformat = fmt->fourcc; pixm->width = frame->f_width; pixm->height = frame->f_height; pixm->field = V4L2_FIELD_NONE; pixm->colorspace = fmt->colorspace; return 0; } static int fimc_lite_try_fmt(struct fimc_lite *fimc, struct v4l2_pix_format_mplane *pixm, const struct fimc_fmt **ffmt) { u32 bpl = pixm->plane_fmt[0].bytesperline; struct flite_drvdata *dd = fimc->dd; const struct fimc_fmt *inp_fmt = fimc->inp_frame.fmt; const struct fimc_fmt *fmt; if (WARN_ON(inp_fmt == NULL)) return -EINVAL; /* * We allow some flexibility only for YUV formats. In case of raw * Bayer the FIMC-LITE's output format must match its camera * interface input format. 
*/ if (inp_fmt->flags & FMT_FLAGS_YUV) fmt = fimc_lite_find_format(&pixm->pixelformat, NULL, inp_fmt->flags, 0); else fmt = inp_fmt; if (WARN_ON(fmt == NULL)) return -EINVAL; if (ffmt) *ffmt = fmt; v4l_bound_align_image(&pixm->width, 8, dd->max_width, ffs(dd->out_width_align) - 1, &pixm->height, 0, dd->max_height, 0, 0); if (bpl == 0 || ((bpl * 8) / fmt->depth[0]) < pixm->width) pixm->plane_fmt[0].bytesperline = (pixm->width * fmt->depth[0]) / 8; if (pixm->plane_fmt[0].sizeimage == 0) pixm->plane_fmt[0].sizeimage = (pixm->width * pixm->height * fmt->depth[0]) / 8; pixm->num_planes = fmt->memplanes; pixm->pixelformat = fmt->fourcc; pixm->colorspace = fmt->colorspace; pixm->field = V4L2_FIELD_NONE; return 0; } static int fimc_lite_try_fmt_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct fimc_lite *fimc = video_drvdata(file); return fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, NULL); } static int fimc_lite_s_fmt_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp; struct fimc_lite *fimc = video_drvdata(file); struct flite_frame *frame = &fimc->out_frame; const struct fimc_fmt *fmt = NULL; int ret; if (vb2_is_busy(&fimc->vb_queue)) return -EBUSY; ret = fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, &fmt); if (ret < 0) return ret; frame->fmt = fmt; fimc->payload[0] = max((pixm->width * pixm->height * fmt->depth[0]) / 8, pixm->plane_fmt[0].sizeimage); frame->f_width = pixm->width; frame->f_height = pixm->height; return 0; } static int fimc_pipeline_validate(struct fimc_lite *fimc) { struct v4l2_subdev *sd = &fimc->subdev; struct v4l2_subdev_format sink_fmt, src_fmt; struct media_pad *pad; int ret; while (1) { /* Retrieve format at the sink pad */ pad = &sd->entity.pads[0]; if (!(pad->flags & MEDIA_PAD_FL_SINK)) break; /* Don't call FIMC subdev operation to avoid nested locking */ if (sd == &fimc->subdev) { struct flite_frame *ff = &fimc->out_frame; sink_fmt.format.width = ff->f_width; sink_fmt.format.height = ff->f_height; sink_fmt.format.code = fimc->inp_frame.fmt->mbus_code; } else { sink_fmt.pad = pad->index; sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; } /* Retrieve format at the source pad */ pad = media_entity_remote_pad(pad); if (pad == NULL || media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV) break; sd = media_entity_to_v4l2_subdev(pad->entity); src_fmt.pad = pad->index; src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; if (src_fmt.format.width != sink_fmt.format.width || src_fmt.format.height != sink_fmt.format.height || src_fmt.format.code != sink_fmt.format.code) return -EPIPE; } return 0; } static int fimc_lite_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_lite *fimc = video_drvdata(file); struct media_entity *entity = &fimc->ve.vdev.entity; int ret; if (fimc_lite_active(fimc)) return -EBUSY; ret = media_entity_pipeline_start(entity, &fimc->ve.pipe->mp); if (ret < 0) return ret; ret = fimc_pipeline_validate(fimc); if (ret < 0) goto err_p_stop; fimc->sensor = fimc_find_remote_sensor(&fimc->subdev.entity); ret = vb2_ioctl_streamon(file, priv, type); if (!ret) { fimc->streaming = true; return ret; } err_p_stop: media_entity_pipeline_stop(entity); return ret; } static int fimc_lite_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_lite 
*fimc = video_drvdata(file); int ret; ret = vb2_ioctl_streamoff(file, priv, type); if (ret < 0) return ret; media_entity_pipeline_stop(&fimc->ve.vdev.entity); fimc->streaming = false; return 0; } static int fimc_lite_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *reqbufs) { struct fimc_lite *fimc = video_drvdata(file); int ret; reqbufs->count = max_t(u32, FLITE_REQ_BUFS_MIN, reqbufs->count); ret = vb2_ioctl_reqbufs(file, priv, reqbufs); if (!ret) fimc->reqbufs_count = reqbufs->count; return ret; } /* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */ static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b) { if (a->left < b->left || a->top < b->top) return 0; if (a->left + a->width > b->left + b->width) return 0; if (a->top + a->height > b->top + b->height) return 0; return 1; } static int fimc_lite_g_selection(struct file *file, void *fh, struct v4l2_selection *sel) { struct fimc_lite *fimc = video_drvdata(file); struct flite_frame *f = &fimc->out_frame; if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; switch (sel->target) { case V4L2_SEL_TGT_COMPOSE_BOUNDS: case V4L2_SEL_TGT_COMPOSE_DEFAULT: sel->r.left = 0; sel->r.top = 0; sel->r.width = f->f_width; sel->r.height = f->f_height; return 0; case V4L2_SEL_TGT_COMPOSE: sel->r = f->rect; return 0; } return -EINVAL; } static int fimc_lite_s_selection(struct file *file, void *fh, struct v4l2_selection *sel) { struct fimc_lite *fimc = video_drvdata(file); struct flite_frame *f = &fimc->out_frame; struct v4l2_rect rect = sel->r; unsigned long flags; if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE || sel->target != V4L2_SEL_TGT_COMPOSE) return -EINVAL; fimc_lite_try_compose(fimc, &rect); if ((sel->flags & V4L2_SEL_FLAG_LE) && !enclosed_rectangle(&rect, &sel->r)) return -ERANGE; if ((sel->flags & V4L2_SEL_FLAG_GE) && !enclosed_rectangle(&sel->r, &rect)) return -ERANGE; sel->r = rect; spin_lock_irqsave(&fimc->slock, flags); f->rect = rect; set_bit(ST_FLITE_CONFIG, &fimc->state); spin_unlock_irqrestore(&fimc->slock, flags); return 0; } static const struct v4l2_ioctl_ops fimc_lite_ioctl_ops = { .vidioc_querycap = fimc_lite_querycap, .vidioc_enum_fmt_vid_cap_mplane = fimc_lite_enum_fmt_mplane, .vidioc_try_fmt_vid_cap_mplane = fimc_lite_try_fmt_mplane, .vidioc_s_fmt_vid_cap_mplane = fimc_lite_s_fmt_mplane, .vidioc_g_fmt_vid_cap_mplane = fimc_lite_g_fmt_mplane, .vidioc_g_selection = fimc_lite_g_selection, .vidioc_s_selection = fimc_lite_s_selection, .vidioc_reqbufs = fimc_lite_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_streamon = fimc_lite_streamon, .vidioc_streamoff = fimc_lite_streamoff, }; /* Capture subdev media entity operations */ static int fimc_lite_link_setup(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct fimc_lite *fimc = v4l2_get_subdevdata(sd); unsigned int remote_ent_type = media_entity_type(remote->entity); int ret = 0; if (WARN_ON(fimc == NULL)) return 0; v4l2_dbg(1, debug, sd, "%s: %s --> %s, flags: 0x%x. 
source_id: 0x%x\n", __func__, remote->entity->name, local->entity->name, flags, fimc->source_subdev_grp_id); switch (local->index) { case FLITE_SD_PAD_SINK: if (remote_ent_type != MEDIA_ENT_T_V4L2_SUBDEV) { ret = -EINVAL; break; } if (flags & MEDIA_LNK_FL_ENABLED) { if (fimc->source_subdev_grp_id == 0) fimc->source_subdev_grp_id = sd->grp_id; else ret = -EBUSY; } else { fimc->source_subdev_grp_id = 0; fimc->sensor = NULL; } break; case FLITE_SD_PAD_SOURCE_DMA: if (!(flags & MEDIA_LNK_FL_ENABLED)) atomic_set(&fimc->out_path, FIMC_IO_NONE); else if (remote_ent_type == MEDIA_ENT_T_DEVNODE) atomic_set(&fimc->out_path, FIMC_IO_DMA); else ret = -EINVAL; break; case FLITE_SD_PAD_SOURCE_ISP: if (!(flags & MEDIA_LNK_FL_ENABLED)) atomic_set(&fimc->out_path, FIMC_IO_NONE); else if (remote_ent_type == MEDIA_ENT_T_V4L2_SUBDEV) atomic_set(&fimc->out_path, FIMC_IO_ISP); else ret = -EINVAL; break; default: v4l2_err(sd, "Invalid pad index\n"); ret = -EINVAL; } mb(); return ret; } static const struct media_entity_operations fimc_lite_subdev_media_ops = { .link_setup = fimc_lite_link_setup, }; static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_mbus_code_enum *code) { const struct fimc_fmt *fmt; fmt = fimc_lite_find_format(NULL, NULL, 0, code->index); if (!fmt) return -EINVAL; code->code = fmt->mbus_code; return 0; } static struct v4l2_mbus_framefmt *__fimc_lite_subdev_get_try_fmt( struct v4l2_subdev_fh *fh, unsigned int pad) { if (pad != FLITE_SD_PAD_SINK) pad = FLITE_SD_PAD_SOURCE_DMA; return v4l2_subdev_get_try_format(fh, pad); } static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *fmt) { struct fimc_lite *fimc = v4l2_get_subdevdata(sd); struct v4l2_mbus_framefmt *mf = &fmt->format; struct flite_frame *f = &fimc->inp_frame; if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { mf = __fimc_lite_subdev_get_try_fmt(fh, fmt->pad); fmt->format = *mf; return 0; } mutex_lock(&fimc->lock); mf->colorspace = f->fmt->colorspace; mf->code = f->fmt->mbus_code; if (fmt->pad == FLITE_SD_PAD_SINK) { /* full camera input frame size */ mf->width = f->f_width; mf->height = f->f_height; } else { /* crop size */ mf->width = f->rect.width; mf->height = f->rect.height; } mutex_unlock(&fimc->lock); return 0; } static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *fmt) { struct fimc_lite *fimc = v4l2_get_subdevdata(sd); struct v4l2_mbus_framefmt *mf = &fmt->format; struct flite_frame *sink = &fimc->inp_frame; struct flite_frame *source = &fimc->out_frame; const struct fimc_fmt *ffmt; v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %dx%d\n", fmt->pad, mf->code, mf->width, mf->height); mutex_lock(&fimc->lock); if ((atomic_read(&fimc->out_path) == FIMC_IO_ISP && sd->entity.stream_count > 0) || (atomic_read(&fimc->out_path) == FIMC_IO_DMA && vb2_is_busy(&fimc->vb_queue))) { mutex_unlock(&fimc->lock); return -EBUSY; } ffmt = fimc_lite_subdev_try_fmt(fimc, fh, fmt); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { struct v4l2_mbus_framefmt *src_fmt; mf = __fimc_lite_subdev_get_try_fmt(fh, fmt->pad); *mf = fmt->format; if (fmt->pad == FLITE_SD_PAD_SINK) { unsigned int pad = FLITE_SD_PAD_SOURCE_DMA; src_fmt = __fimc_lite_subdev_get_try_fmt(fh, pad); *src_fmt = *mf; } mutex_unlock(&fimc->lock); return 0; } if (fmt->pad == FLITE_SD_PAD_SINK) { sink->f_width = mf->width; sink->f_height = mf->height; sink->fmt = ffmt; /* Set sink crop rectangle */ sink->rect.width = mf->width; 
sink->rect.height = mf->height; sink->rect.left = 0; sink->rect.top = 0; /* Reset source format and crop rectangle */ source->rect = sink->rect; source->f_width = mf->width; source->f_height = mf->height; } mutex_unlock(&fimc->lock); return 0; } static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_selection *sel) { struct fimc_lite *fimc = v4l2_get_subdevdata(sd); struct flite_frame *f = &fimc->inp_frame; if ((sel->target != V4L2_SEL_TGT_CROP && sel->target != V4L2_SEL_TGT_CROP_BOUNDS) || sel->pad != FLITE_SD_PAD_SINK) return -EINVAL; if (sel->which == V4L2_SUBDEV_FORMAT_TRY) { sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad); return 0; } mutex_lock(&fimc->lock); if (sel->target == V4L2_SEL_TGT_CROP) { sel->r = f->rect; } else { sel->r.left = 0; sel->r.top = 0; sel->r.width = f->f_width; sel->r.height = f->f_height; } mutex_unlock(&fimc->lock); v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n", __func__, f->rect.left, f->rect.top, f->rect.width, f->rect.height, f->f_width, f->f_height); return 0; } static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_selection *sel) { struct fimc_lite *fimc = v4l2_get_subdevdata(sd); struct flite_frame *f = &fimc->inp_frame; int ret = 0; if (sel->target != V4L2_SEL_TGT_CROP || sel->pad != FLITE_SD_PAD_SINK) return -EINVAL; mutex_lock(&fimc->lock); fimc_lite_try_crop(fimc, &sel->r); if (sel->which == V4L2_SUBDEV_FORMAT_TRY) { *v4l2_subdev_get_try_crop(fh, sel->pad) = sel->r; } else { unsigned long flags; spin_lock_irqsave(&fimc->slock, flags); f->rect = sel->r; /* Same crop rectangle on the source pad */ fimc->out_frame.rect = sel->r; set_bit(ST_FLITE_CONFIG, &fimc->state); spin_unlock_irqrestore(&fimc->slock, flags); } mutex_unlock(&fimc->lock); v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n", __func__, f->rect.left, f->rect.top, f->rect.width, f->rect.height, f->f_width, f->f_height); return ret; } static int fimc_lite_subdev_s_stream(struct v4l2_subdev *sd, int on) { struct fimc_lite *fimc = v4l2_get_subdevdata(sd); unsigned long flags; int ret; /* * Find sensor subdev linked to FIMC-LITE directly or through * MIPI-CSIS. This is required for configuration where FIMC-LITE * is used as a subdev only and feeds data internally to FIMC-IS. * The pipeline links are protected through entity.stream_count * so there is no need to take the media graph mutex here. 
*/ fimc->sensor = fimc_find_remote_sensor(&sd->entity); if (atomic_read(&fimc->out_path) != FIMC_IO_ISP) return -ENOIOCTLCMD; mutex_lock(&fimc->lock); if (on) { flite_hw_reset(fimc); ret = fimc_lite_hw_init(fimc, true); if (!ret) { spin_lock_irqsave(&fimc->slock, flags); flite_hw_capture_start(fimc); spin_unlock_irqrestore(&fimc->slock, flags); } } else { set_bit(ST_FLITE_OFF, &fimc->state); spin_lock_irqsave(&fimc->slock, flags); flite_hw_capture_stop(fimc); spin_unlock_irqrestore(&fimc->slock, flags); ret = wait_event_timeout(fimc->irq_queue, !test_bit(ST_FLITE_OFF, &fimc->state), msecs_to_jiffies(200)); if (ret == 0) v4l2_err(sd, "s_stream(0) timeout\n"); clear_bit(ST_FLITE_RUN, &fimc->state); } mutex_unlock(&fimc->lock); return ret; } static int fimc_lite_log_status(struct v4l2_subdev *sd) { struct fimc_lite *fimc = v4l2_get_subdevdata(sd); flite_hw_dump_regs(fimc, __func__); return 0; } static int fimc_lite_subdev_registered(struct v4l2_subdev *sd) { struct fimc_lite *fimc = v4l2_get_subdevdata(sd); struct vb2_queue *q = &fimc->vb_queue; struct video_device *vfd = &fimc->ve.vdev; int ret; memset(vfd, 0, sizeof(*vfd)); atomic_set(&fimc->out_path, FIMC_IO_DMA); snprintf(vfd->name, sizeof(vfd->name), "fimc-lite.%d.capture", fimc->index); vfd->fops = &fimc_lite_fops; vfd->ioctl_ops = &fimc_lite_ioctl_ops; vfd->v4l2_dev = sd->v4l2_dev; vfd->minor = -1; vfd->release = video_device_release_empty; vfd->queue = q; fimc->reqbufs_count = 0; INIT_LIST_HEAD(&fimc->pending_buf_q); INIT_LIST_HEAD(&fimc->active_buf_q); memset(q, 0, sizeof(*q)); q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; q->io_modes = VB2_MMAP | VB2_USERPTR; q->ops = &fimc_lite_qops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct flite_buffer); q->drv_priv = fimc; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->lock = &fimc->lock; ret = vb2_queue_init(q); if (ret < 0) return ret; fimc->vd_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_init(&vfd->entity, 1, &fimc->vd_pad, 0); if (ret < 0) return ret; video_set_drvdata(vfd, fimc); fimc->ve.pipe = v4l2_get_subdev_hostdata(sd); ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); if (ret < 0) { media_entity_cleanup(&vfd->entity); fimc->ve.pipe = NULL; return ret; } v4l2_info(sd->v4l2_dev, "Registered %s as /dev/%s\n", vfd->name, video_device_node_name(vfd)); return 0; } static void fimc_lite_subdev_unregistered(struct v4l2_subdev *sd) { struct fimc_lite *fimc = v4l2_get_subdevdata(sd); if (fimc == NULL) return; mutex_lock(&fimc->lock); if (video_is_registered(&fimc->ve.vdev)) { video_unregister_device(&fimc->ve.vdev); media_entity_cleanup(&fimc->ve.vdev.entity); fimc->ve.pipe = NULL; } mutex_unlock(&fimc->lock); } static const struct v4l2_subdev_internal_ops fimc_lite_subdev_internal_ops = { .registered = fimc_lite_subdev_registered, .unregistered = fimc_lite_subdev_unregistered, }; static const struct v4l2_subdev_pad_ops fimc_lite_subdev_pad_ops = { .enum_mbus_code = fimc_lite_subdev_enum_mbus_code, .get_selection = fimc_lite_subdev_get_selection, .set_selection = fimc_lite_subdev_set_selection, .get_fmt = fimc_lite_subdev_get_fmt, .set_fmt = fimc_lite_subdev_set_fmt, }; static const struct v4l2_subdev_video_ops fimc_lite_subdev_video_ops = { .s_stream = fimc_lite_subdev_s_stream, }; static const struct v4l2_subdev_core_ops fimc_lite_core_ops = { .log_status = fimc_lite_log_status, }; static struct v4l2_subdev_ops fimc_lite_subdev_ops = { .core = &fimc_lite_core_ops, .video = &fimc_lite_subdev_video_ops, .pad = &fimc_lite_subdev_pad_ops, }; 
static int fimc_lite_s_ctrl(struct v4l2_ctrl *ctrl) { struct fimc_lite *fimc = container_of(ctrl->handler, struct fimc_lite, ctrl_handler); set_bit(ST_FLITE_CONFIG, &fimc->state); return 0; } static const struct v4l2_ctrl_ops fimc_lite_ctrl_ops = { .s_ctrl = fimc_lite_s_ctrl, }; static const struct v4l2_ctrl_config fimc_lite_ctrl = { .ops = &fimc_lite_ctrl_ops, .id = V4L2_CTRL_CLASS_USER | 0x1001, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Test Pattern 640x480", .step = 1, }; static void fimc_lite_set_default_config(struct fimc_lite *fimc) { struct flite_frame *sink = &fimc->inp_frame; struct flite_frame *source = &fimc->out_frame; sink->fmt = &fimc_lite_formats[0]; sink->f_width = FLITE_DEFAULT_WIDTH; sink->f_height = FLITE_DEFAULT_HEIGHT; sink->rect.width = FLITE_DEFAULT_WIDTH; sink->rect.height = FLITE_DEFAULT_HEIGHT; sink->rect.left = 0; sink->rect.top = 0; *source = *sink; } static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc) { struct v4l2_ctrl_handler *handler = &fimc->ctrl_handler; struct v4l2_subdev *sd = &fimc->subdev; int ret; v4l2_subdev_init(sd, &fimc_lite_subdev_ops); sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(sd->name, sizeof(sd->name), "FIMC-LITE.%d", fimc->index); fimc->subdev_pads[FLITE_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK; fimc->subdev_pads[FLITE_SD_PAD_SOURCE_DMA].flags = MEDIA_PAD_FL_SOURCE; fimc->subdev_pads[FLITE_SD_PAD_SOURCE_ISP].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_init(&sd->entity, FLITE_SD_PADS_NUM, fimc->subdev_pads, 0); if (ret) return ret; v4l2_ctrl_handler_init(handler, 1); fimc->test_pattern = v4l2_ctrl_new_custom(handler, &fimc_lite_ctrl, NULL); if (handler->error) { media_entity_cleanup(&sd->entity); return handler->error; } sd->ctrl_handler = handler; sd->internal_ops = &fimc_lite_subdev_internal_ops; sd->entity.ops = &fimc_lite_subdev_media_ops; sd->owner = THIS_MODULE; v4l2_set_subdevdata(sd, fimc); return 0; } static void fimc_lite_unregister_capture_subdev(struct fimc_lite *fimc) { struct v4l2_subdev *sd = &fimc->subdev; v4l2_device_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(&fimc->ctrl_handler); v4l2_set_subdevdata(sd, NULL); } static void fimc_lite_clk_put(struct fimc_lite *fimc) { if (IS_ERR(fimc->clock)) return; clk_unprepare(fimc->clock); clk_put(fimc->clock); fimc->clock = ERR_PTR(-EINVAL); } static int fimc_lite_clk_get(struct fimc_lite *fimc) { int ret; fimc->clock = clk_get(&fimc->pdev->dev, FLITE_CLK_NAME); if (IS_ERR(fimc->clock)) return PTR_ERR(fimc->clock); ret = clk_prepare(fimc->clock); if (ret < 0) { clk_put(fimc->clock); fimc->clock = ERR_PTR(-EINVAL); } return ret; } static const struct of_device_id flite_of_match[]; static int fimc_lite_probe(struct platform_device *pdev) { struct flite_drvdata *drv_data = NULL; struct device *dev = &pdev->dev; const struct of_device_id *of_id; struct fimc_lite *fimc; struct resource *res; int ret; if (!dev->of_node) return -ENODEV; fimc = devm_kzalloc(dev, sizeof(*fimc), GFP_KERNEL); if (!fimc) return -ENOMEM; of_id = of_match_node(flite_of_match, dev->of_node); if (of_id) drv_data = (struct flite_drvdata *)of_id->data; fimc->index = of_alias_get_id(dev->of_node, "fimc-lite"); if (!drv_data || fimc->index >= drv_data->num_instances || fimc->index < 0) { dev_err(dev, "Wrong %s node alias\n", dev->of_node->full_name); return -EINVAL; } fimc->dd = drv_data; fimc->pdev = pdev; init_waitqueue_head(&fimc->irq_queue); spin_lock_init(&fimc->slock); mutex_init(&fimc->lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); fimc->regs 
= devm_ioremap_resource(dev, res); if (IS_ERR(fimc->regs)) return PTR_ERR(fimc->regs); res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) { dev_err(dev, "Failed to get IRQ resource\n"); return -ENXIO; } ret = fimc_lite_clk_get(fimc); if (ret) return ret; ret = devm_request_irq(dev, res->start, flite_irq_handler, 0, dev_name(dev), fimc); if (ret) { dev_err(dev, "Failed to install irq (%d)\n", ret); goto err_clk_put; } /* The video node will be created within the subdev's registered() op */ ret = fimc_lite_create_capture_subdev(fimc); if (ret) goto err_clk_put; platform_set_drvdata(pdev, fimc); pm_runtime_enable(dev); if (!pm_runtime_enabled(dev)) { ret = clk_enable(fimc->clock); if (ret < 0) goto err_sd; } fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev); if (IS_ERR(fimc->alloc_ctx)) { ret = PTR_ERR(fimc->alloc_ctx); goto err_clk_dis; } fimc_lite_set_default_config(fimc); dev_dbg(dev, "FIMC-LITE.%d registered successfully\n", fimc->index); return 0; err_clk_dis: if (!pm_runtime_enabled(dev)) clk_disable(fimc->clock); err_sd: fimc_lite_unregister_capture_subdev(fimc); err_clk_put: fimc_lite_clk_put(fimc); return ret; } #ifdef CONFIG_PM_RUNTIME static int fimc_lite_runtime_resume(struct device *dev) { struct fimc_lite *fimc = dev_get_drvdata(dev); clk_enable(fimc->clock); return 0; } static int fimc_lite_runtime_suspend(struct device *dev) { struct fimc_lite *fimc = dev_get_drvdata(dev); clk_disable(fimc->clock); return 0; } #endif #ifdef CONFIG_PM_SLEEP static int fimc_lite_resume(struct device *dev) { struct fimc_lite *fimc = dev_get_drvdata(dev); struct flite_buffer *buf; unsigned long flags; int i; spin_lock_irqsave(&fimc->slock, flags); if (!test_and_clear_bit(ST_LPM, &fimc->state) || !test_bit(ST_FLITE_IN_USE, &fimc->state)) { spin_unlock_irqrestore(&fimc->slock, flags); return 0; } flite_hw_reset(fimc); spin_unlock_irqrestore(&fimc->slock, flags); if (!test_and_clear_bit(ST_FLITE_SUSPENDED, &fimc->state)) return 0; INIT_LIST_HEAD(&fimc->active_buf_q); fimc_pipeline_call(&fimc->ve, open, &fimc->ve.vdev.entity, false); fimc_lite_hw_init(fimc, atomic_read(&fimc->out_path) == FIMC_IO_ISP); clear_bit(ST_FLITE_SUSPENDED, &fimc->state); for (i = 0; i < fimc->reqbufs_count; i++) { if (list_empty(&fimc->pending_buf_q)) break; buf = fimc_lite_pending_queue_pop(fimc); buffer_queue(&buf->vb); } return 0; } static int fimc_lite_suspend(struct device *dev) { struct fimc_lite *fimc = dev_get_drvdata(dev); bool suspend = test_bit(ST_FLITE_IN_USE, &fimc->state); int ret; if (test_and_set_bit(ST_LPM, &fimc->state)) return 0; ret = fimc_lite_stop_capture(fimc, suspend); if (ret < 0 || !fimc_lite_active(fimc)) return ret; return fimc_pipeline_call(&fimc->ve, close); } #endif /* CONFIG_PM_SLEEP */ static int fimc_lite_remove(struct platform_device *pdev) { struct fimc_lite *fimc = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; pm_runtime_disable(dev); pm_runtime_set_suspended(dev); fimc_lite_unregister_capture_subdev(fimc); vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx); fimc_lite_clk_put(fimc); dev_info(dev, "Driver unloaded\n"); return 0; } static const struct dev_pm_ops fimc_lite_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(fimc_lite_suspend, fimc_lite_resume) SET_RUNTIME_PM_OPS(fimc_lite_runtime_suspend, fimc_lite_runtime_resume, NULL) }; /* EXYNOS4212, EXYNOS4412 */ static struct flite_drvdata fimc_lite_drvdata_exynos4 = { .max_width = 8192, .max_height = 8192, .out_width_align = 8, .win_hor_offs_align = 2, .out_hor_offs_align = 8, .max_dma_bufs = 1, .num_instances = 2, }; /* 
EXYNOS5250 */ static struct flite_drvdata fimc_lite_drvdata_exynos5 = { .max_width = 8192, .max_height = 8192, .out_width_align = 8, .win_hor_offs_align = 2, .out_hor_offs_align = 8, .max_dma_bufs = 32, .num_instances = 3, }; static const struct of_device_id flite_of_match[] = { { .compatible = "samsung,exynos4212-fimc-lite", .data = &fimc_lite_drvdata_exynos4, }, { .compatible = "samsung,exynos5250-fimc-lite", .data = &fimc_lite_drvdata_exynos5, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, flite_of_match); static struct platform_driver fimc_lite_driver = { .probe = fimc_lite_probe, .remove = fimc_lite_remove, .driver = { .of_match_table = flite_of_match, .name = FIMC_LITE_DRV_NAME, .owner = THIS_MODULE, .pm = &fimc_lite_pm_ops, } }; module_platform_driver(fimc_lite_driver); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" FIMC_LITE_DRV_NAME);
Barracuda09/media_build-bst
linux/drivers/media/platform/exynos4-is/fimc-lite.c
C
gpl-2.0
45,548
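One detail worth pulling out of the flattened listing above: buffer_queue() assigns each vb2 buffer a hardware DMA slot index that wraps at the VIDIOC_REQBUFS buffer count, so FIMC-LITE cycles its output DMA through a fixed ring of address slots. Below is a standalone sketch of just that wrap logic; the count of 4 and all names are illustrative stand-ins, not taken from the driver.

#include <stdio.h>

#define REQBUFS_COUNT 4u                /* stand-in for fimc->reqbufs_count */

int main(void)
{
	unsigned int buf_index = 0;     /* stand-in for fimc->buf_index */
	int i;

	/* Mirrors the driver's assignment:
	 *   buf->index = fimc->buf_index++;
	 *   if (fimc->buf_index >= fimc->reqbufs_count)
	 *           fimc->buf_index = 0;
	 */
	for (i = 0; i < 10; i++) {
		printf("vb2 buffer %2d -> DMA slot %u\n", i, buf_index);
		if (++buf_index >= REQBUFS_COUNT)
			buf_index = 0; /* wrap back to slot 0 */
	}
	return 0;
}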
/* * PROJECT: ReactOS Kernel * LICENSE: GPL - See COPYING in the top level directory * FILE: ntoskrnl/config/cmcheck.c * PURPOSE: Configuration Manager - Hive and Key Validation * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org) */ /* INCLUDES ******************************************************************/ #include "ntoskrnl.h" #define NDEBUG #include "debug.h" /* GLOBALS *******************************************************************/ /* FUNCTIONS *****************************************************************/ ULONG NTAPI CmCheckRegistry(IN PCMHIVE RegistryHive, IN ULONG Flags) { /* FIXME: HACK! */ DPRINT1("CmCheckRegistry(0x%p, %lu) is UNIMPLEMENTED!\n", RegistryHive, Flags); return 0; }
sunnyden/reactos
ntoskrnl/config/cmcheck.c
C
gpl-2.0
792
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */ /* * Copyright (C) 2006 Imendio AB * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301 USA * * Authors: Richard Hult <richard@imendio.com> */ #include "config.h" #include <string.h> #include <gconf/gconf-client.h> #include <libempathy/empathy-utils.h> #include "empathy-conf.h" #define DEBUG_FLAG EMPATHY_DEBUG_OTHER #include <libempathy/empathy-debug.h> #define EMPATHY_CONF_ROOT "/apps/empathy" #define DESKTOP_INTERFACE_ROOT "/desktop/gnome/interface" #define GET_PRIV(obj) EMPATHY_GET_PRIV (obj, EmpathyConf) typedef struct { GConfClient *gconf_client; } EmpathyConfPriv; typedef struct { EmpathyConf *conf; EmpathyConfNotifyFunc func; gpointer user_data; } EmpathyConfNotifyData; static void conf_finalize (GObject *object); G_DEFINE_TYPE (EmpathyConf, empathy_conf, G_TYPE_OBJECT); static EmpathyConf *global_conf = NULL; static void empathy_conf_class_init (EmpathyConfClass *class) { GObjectClass *object_class; object_class = G_OBJECT_CLASS (class); object_class->finalize = conf_finalize; g_type_class_add_private (object_class, sizeof (EmpathyConfPriv)); } static void empathy_conf_init (EmpathyConf *conf) { EmpathyConfPriv *priv = G_TYPE_INSTANCE_GET_PRIVATE (conf, EMPATHY_TYPE_CONF, EmpathyConfPriv); conf->priv = priv; priv->gconf_client = gconf_client_get_default (); gconf_client_add_dir (priv->gconf_client, EMPATHY_CONF_ROOT, GCONF_CLIENT_PRELOAD_ONELEVEL, NULL); gconf_client_add_dir (priv->gconf_client, DESKTOP_INTERFACE_ROOT, GCONF_CLIENT_PRELOAD_NONE, NULL); } static void conf_finalize (GObject *object) { EmpathyConfPriv *priv; priv = GET_PRIV (object); gconf_client_remove_dir (priv->gconf_client, EMPATHY_CONF_ROOT, NULL); gconf_client_remove_dir (priv->gconf_client, DESKTOP_INTERFACE_ROOT, NULL); g_object_unref (priv->gconf_client); G_OBJECT_CLASS (empathy_conf_parent_class)->finalize (object); } EmpathyConf * empathy_conf_get (void) { if (!global_conf) { global_conf = g_object_new (EMPATHY_TYPE_CONF, NULL); } return global_conf; } void empathy_conf_shutdown (void) { if (global_conf) { g_object_unref (global_conf); global_conf = NULL; } } gboolean empathy_conf_set_int (EmpathyConf *conf, const gchar *key, gint value) { EmpathyConfPriv *priv; g_return_val_if_fail (EMPATHY_IS_CONF (conf), FALSE); DEBUG ("Setting int:'%s' to %d", key, value); priv = GET_PRIV (conf); return gconf_client_set_int (priv->gconf_client, key, value, NULL); } gboolean empathy_conf_get_int (EmpathyConf *conf, const gchar *key, gint *value) { EmpathyConfPriv *priv; GError *error = NULL; *value = 0; g_return_val_if_fail (EMPATHY_IS_CONF (conf), FALSE); g_return_val_if_fail (value != NULL, FALSE); priv = GET_PRIV (conf); *value = gconf_client_get_int (priv->gconf_client, key, &error); if (error) { g_error_free (error); return FALSE; } return TRUE; } gboolean empathy_conf_set_bool (EmpathyConf *conf, const gchar 
*key, gboolean value) { EmpathyConfPriv *priv; g_return_val_if_fail (EMPATHY_IS_CONF (conf), FALSE); DEBUG ("Setting bool:'%s' to %d ---> %s", key, value, value ? "true" : "false"); priv = GET_PRIV (conf); return gconf_client_set_bool (priv->gconf_client, key, value, NULL); } gboolean empathy_conf_get_bool (EmpathyConf *conf, const gchar *key, gboolean *value) { EmpathyConfPriv *priv; GError *error = NULL; *value = FALSE; g_return_val_if_fail (EMPATHY_IS_CONF (conf), FALSE); g_return_val_if_fail (value != NULL, FALSE); priv = GET_PRIV (conf); *value = gconf_client_get_bool (priv->gconf_client, key, &error); if (error) { g_error_free (error); return FALSE; } return TRUE; } gboolean empathy_conf_set_string (EmpathyConf *conf, const gchar *key, const gchar *value) { EmpathyConfPriv *priv; g_return_val_if_fail (EMPATHY_IS_CONF (conf), FALSE); DEBUG ("Setting string:'%s' to '%s'", key, value); priv = GET_PRIV (conf); return gconf_client_set_string (priv->gconf_client, key, value, NULL); } gboolean empathy_conf_get_string (EmpathyConf *conf, const gchar *key, gchar **value) { EmpathyConfPriv *priv; GError *error = NULL; *value = NULL; g_return_val_if_fail (EMPATHY_IS_CONF (conf), FALSE); priv = GET_PRIV (conf); *value = gconf_client_get_string (priv->gconf_client, key, &error); if (error) { g_error_free (error); return FALSE; } return TRUE; } gboolean empathy_conf_set_string_list (EmpathyConf *conf, const gchar *key, GSList *value) { EmpathyConfPriv *priv; g_return_val_if_fail (EMPATHY_IS_CONF (conf), FALSE); priv = GET_PRIV (conf); return gconf_client_set_list (priv->gconf_client, key, GCONF_VALUE_STRING, value, NULL); } gboolean empathy_conf_get_string_list (EmpathyConf *conf, const gchar *key, GSList **value) { EmpathyConfPriv *priv; GError *error = NULL; *value = NULL; g_return_val_if_fail (EMPATHY_IS_CONF (conf), FALSE); priv = GET_PRIV (conf); *value = gconf_client_get_list (priv->gconf_client, key, GCONF_VALUE_STRING, &error); if (error) { g_error_free (error); return FALSE; } return TRUE; } static void conf_notify_data_free (EmpathyConfNotifyData *data) { g_object_unref (data->conf); g_slice_free (EmpathyConfNotifyData, data); } static void conf_notify_func (GConfClient *client, guint id, GConfEntry *entry, gpointer user_data) { EmpathyConfNotifyData *data; data = user_data; data->func (data->conf, gconf_entry_get_key (entry), data->user_data); } guint empathy_conf_notify_add (EmpathyConf *conf, const gchar *key, EmpathyConfNotifyFunc func, gpointer user_data) { EmpathyConfPriv *priv; guint id; EmpathyConfNotifyData *data; g_return_val_if_fail (EMPATHY_IS_CONF (conf), 0); priv = GET_PRIV (conf); data = g_slice_new (EmpathyConfNotifyData); data->func = func; data->user_data = user_data; data->conf = g_object_ref (conf); id = gconf_client_notify_add (priv->gconf_client, key, conf_notify_func, data, (GFreeFunc) conf_notify_data_free, NULL); return id; } gboolean empathy_conf_notify_remove (EmpathyConf *conf, guint id) { EmpathyConfPriv *priv; g_return_val_if_fail (EMPATHY_IS_CONF (conf), FALSE); priv = GET_PRIV (conf); gconf_client_notify_remove (priv->gconf_client, id); return TRUE; }
jmansar/empathy
libempathy-gtk/empathy-conf.c
C
gpl-2.0
7,566
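A minimal caller sketch for the EmpathyConf wrapper above, assuming a GLib main loop and a running GConf daemon; the "/apps/empathy/ui/avatar_size" key is illustrative, not one the file defines. It exercises the singleton accessor, a setter/getter pair, and the notify plumbing built on gconf_client_notify_add().

#include "empathy-conf.h"

static void
size_changed_cb (EmpathyConf *conf, const gchar *key, gpointer user_data)
{
	gint size = 0;

	/* Re-read the value that just changed. */
	if (empathy_conf_get_int (conf, key, &size))
		g_print ("'%s' is now %d\n", key, size);
}

static void
example (void)
{
	EmpathyConf *conf = empathy_conf_get (); /* process-wide singleton */
	guint id;

	empathy_conf_set_int (conf, "/apps/empathy/ui/avatar_size", 48);
	id = empathy_conf_notify_add (conf, "/apps/empathy/ui/avatar_size",
				      size_changed_cb, NULL);
	/* ... GLib main loop runs; size_changed_cb fires on changes ... */
	empathy_conf_notify_remove (conf, id);
}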
/* GIMP - The GNU Image Manipulation Program * Copyright (C) 1995-2002 Spencer Kimball, Peter Mattis, and others * * gimp-gradients.c * Copyright (C) 2002 Michael Natterer <mitch@gimp.org> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "config.h" #include <gegl.h> #include "core-types.h" #include "gimp.h" #include "gimp-gradients.h" #include "gimpcontext.h" #include "gimpcontainer.h" #include "gimpdatafactory.h" #include "gimpgradient.h" #include "gimp-intl.h" #define FG_BG_RGB_KEY "gimp-gradient-fg-bg-rgb" #define FG_BG_HARDEDGE_KEY "gimp-gradient-fg-bg-rgb-hardedge" #define FG_BG_HSV_CCW_KEY "gimp-gradient-fg-bg-hsv-ccw" #define FG_BG_HSV_CW_KEY "gimp-gradient-fg-bg-hsv-cw" #define FG_TRANSPARENT_KEY "gimp-gradient-fg-transparent" /* local function prototypes */ static GimpGradient * gimp_gradients_add_gradient (Gimp *gimp, const gchar *name, const gchar *id); /* public functions */ void gimp_gradients_init (Gimp *gimp) { GimpGradient *gradient; g_return_if_fail (GIMP_IS_GIMP (gimp)); /* FG to BG (RGB) */ gradient = gimp_gradients_add_gradient (gimp, _("FG to BG (RGB)"), FG_BG_RGB_KEY); gradient->segments->left_color_type = GIMP_GRADIENT_COLOR_FOREGROUND; gradient->segments->right_color_type = GIMP_GRADIENT_COLOR_BACKGROUND; gimp_context_set_gradient (gimp->user_context, gradient); /* FG to BG (Hardedge) */ gradient = gimp_gradients_add_gradient (gimp, _("FG to BG (Hardedge)"), FG_BG_HARDEDGE_KEY); gradient->segments->left = 0.00; gradient->segments->middle = 0.25; gradient->segments->right = 0.50; gradient->segments->left_color_type = GIMP_GRADIENT_COLOR_FOREGROUND; gradient->segments->right_color_type = GIMP_GRADIENT_COLOR_FOREGROUND; gradient->segments->next = gimp_gradient_segment_new (); gradient->segments->next->prev = gradient->segments; gradient->segments->next->left = 0.50; gradient->segments->next->middle = 0.75; gradient->segments->next->right = 1.00; gradient->segments->next->left_color_type = GIMP_GRADIENT_COLOR_BACKGROUND; gradient->segments->next->right_color_type = GIMP_GRADIENT_COLOR_BACKGROUND; /* FG to BG (HSV counter-clockwise) */ gradient = gimp_gradients_add_gradient (gimp, _("FG to BG (HSV counter-clockwise)"), FG_BG_HSV_CCW_KEY); gradient->segments->left_color_type = GIMP_GRADIENT_COLOR_FOREGROUND; gradient->segments->right_color_type = GIMP_GRADIENT_COLOR_BACKGROUND; gradient->segments->color = GIMP_GRADIENT_SEGMENT_HSV_CCW; /* FG to BG (HSV clockwise hue) */ gradient = gimp_gradients_add_gradient (gimp, _("FG to BG (HSV clockwise hue)"), FG_BG_HSV_CW_KEY); gradient->segments->left_color_type = GIMP_GRADIENT_COLOR_FOREGROUND; gradient->segments->right_color_type = GIMP_GRADIENT_COLOR_BACKGROUND; gradient->segments->color = GIMP_GRADIENT_SEGMENT_HSV_CW; /* FG to Transparent */ gradient = gimp_gradients_add_gradient (gimp, _("FG to Transparent"), FG_TRANSPARENT_KEY); gradient->segments->left_color_type = GIMP_GRADIENT_COLOR_FOREGROUND; gradient->segments->right_color_type = 
GIMP_GRADIENT_COLOR_FOREGROUND_TRANSPARENT; } /* private functions */ static GimpGradient * gimp_gradients_add_gradient (Gimp *gimp, const gchar *name, const gchar *id) { GimpGradient *gradient; gradient = GIMP_GRADIENT (gimp_gradient_new (gimp_get_user_context (gimp), name)); gimp_data_make_internal (GIMP_DATA (gradient), id); gimp_container_add (gimp_data_factory_get_container (gimp->gradient_factory), GIMP_OBJECT (gradient)); g_object_unref (gradient); g_object_set_data (G_OBJECT (gimp), id, gradient); return gradient; }
mskala/noxcf-gimp
app/core/gimp-gradients.c
C
gpl-3.0
5,035
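The "FG to BG (Hardedge)" setup above is the only one that builds a second segment by hand, splitting the gradient at 0.5 with midpoints at 0.25 and 0.75. Here is a standalone sketch of that two-segment, doubly-linked layout, using a simplified stand-in for GimpGradientSegment (the real struct also carries colors and blend types).

#include <stdio.h>

struct segment {
	double left, middle, right;      /* positions in [0, 1] */
	struct segment *prev, *next;     /* doubly-linked, like GIMP's */
};

int main (void)
{
	struct segment s1 = { 0.00, 0.25, 0.50, NULL, NULL };
	struct segment s2 = { 0.50, 0.75, 1.00, NULL, NULL };
	struct segment *s;

	s1.next = &s2;                   /* mirrors segments->next = ... */
	s2.prev = &s1;                   /* mirrors next->prev = segments */

	for (s = &s1; s != NULL; s = s->next)
		printf ("[%.2f .. %.2f], midpoint %.2f\n",
		        s->left, s->right, s->middle);
	return 0;
}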
/* Calculate the size of physical memory. Copyright (C) 2000-2001, 2003, 2005-2006, 2009-2013 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* Written by Paul Eggert. */ #include <config.h> #include "physmem.h" #include <unistd.h> #if HAVE_SYS_PSTAT_H # include <sys/pstat.h> #endif #if HAVE_SYS_SYSMP_H # include <sys/sysmp.h> #endif #if HAVE_SYS_SYSINFO_H && HAVE_MACHINE_HAL_SYSINFO_H # include <sys/sysinfo.h> # include <machine/hal_sysinfo.h> #endif #if HAVE_SYS_TABLE_H # include <sys/table.h> #endif #include <sys/types.h> #if HAVE_SYS_PARAM_H # include <sys/param.h> #endif #if HAVE_SYS_SYSCTL_H # include <sys/sysctl.h> #endif #if HAVE_SYS_SYSTEMCFG_H # include <sys/systemcfg.h> #endif #ifdef _WIN32 # define WIN32_LEAN_AND_MEAN # include <windows.h> /* MEMORYSTATUSEX is missing from older windows headers, so define a local replacement. */ typedef struct { DWORD dwLength; DWORD dwMemoryLoad; DWORDLONG ullTotalPhys; DWORDLONG ullAvailPhys; DWORDLONG ullTotalPageFile; DWORDLONG ullAvailPageFile; DWORDLONG ullTotalVirtual; DWORDLONG ullAvailVirtual; DWORDLONG ullAvailExtendedVirtual; } lMEMORYSTATUSEX; typedef WINBOOL (WINAPI *PFN_MS_EX) (lMEMORYSTATUSEX*); #endif #define ARRAY_SIZE(a) (sizeof (a) / sizeof ((a)[0])) /* Return the total amount of physical memory. */ double physmem_total (void) { #if defined _SC_PHYS_PAGES && defined _SC_PAGESIZE { /* This works on linux-gnu, solaris2 and cygwin. */ double pages = sysconf (_SC_PHYS_PAGES); double pagesize = sysconf (_SC_PAGESIZE); if (0 <= pages && 0 <= pagesize) return pages * pagesize; } #endif #if HAVE_PSTAT_GETSTATIC { /* This works on hpux11. */ struct pst_static pss; if (0 <= pstat_getstatic (&pss, sizeof pss, 1, 0)) { double pages = pss.physical_memory; double pagesize = pss.page_size; if (0 <= pages && 0 <= pagesize) return pages * pagesize; } } #endif #if HAVE_SYSMP && defined MP_SAGET && defined MPSA_RMINFO && defined _SC_PAGESIZE { /* This works on irix6. */ struct rminfo realmem; if (sysmp (MP_SAGET, MPSA_RMINFO, &realmem, sizeof realmem) == 0) { double pagesize = sysconf (_SC_PAGESIZE); double pages = realmem.physmem; if (0 <= pages && 0 <= pagesize) return pages * pagesize; } } #endif #if HAVE_GETSYSINFO && defined GSI_PHYSMEM { /* This works on Tru64 UNIX V4/5. */ int physmem; if (getsysinfo (GSI_PHYSMEM, (caddr_t) &physmem, sizeof (physmem), NULL, NULL, NULL) == 1) { double kbytes = physmem; if (0 <= kbytes) return kbytes * 1024.0; } } #endif #if HAVE_SYSCTL && defined HW_PHYSMEM { /* This works on *bsd and darwin. */ unsigned int physmem; size_t len = sizeof physmem; static int mib[2] = { CTL_HW, HW_PHYSMEM }; if (sysctl (mib, ARRAY_SIZE (mib), &physmem, &len, NULL, 0) == 0 && len == sizeof (physmem)) return (double) physmem; } #endif #if HAVE__SYSTEM_CONFIGURATION /* This works on AIX. 
*/ return _system_configuration.physmem; #endif #if defined _WIN32 { /* this works on windows */ PFN_MS_EX pfnex; HMODULE h = GetModuleHandle ("kernel32.dll"); if (!h) return 0.0; /* Use GlobalMemoryStatusEx if available. */ if ((pfnex = (PFN_MS_EX) GetProcAddress (h, "GlobalMemoryStatusEx"))) { lMEMORYSTATUSEX lms_ex; lms_ex.dwLength = sizeof lms_ex; if (!pfnex (&lms_ex)) return 0.0; return (double) lms_ex.ullTotalPhys; } /* Fall back to GlobalMemoryStatus, which is always available but returns wrong results for physical memory > 4GB. */ else { MEMORYSTATUS ms; GlobalMemoryStatus (&ms); return (double) ms.dwTotalPhys; } } #endif /* Guess 64 MB. It's probably an older host, so guess small. */ return 64 * 1024 * 1024; } /* Return the amount of physical memory available. */ double physmem_available (void) { #if defined _SC_AVPHYS_PAGES && defined _SC_PAGESIZE { /* This works on linux-gnu, solaris2 and cygwin. */ double pages = sysconf (_SC_AVPHYS_PAGES); double pagesize = sysconf (_SC_PAGESIZE); if (0 <= pages && 0 <= pagesize) return pages * pagesize; } #endif #if HAVE_PSTAT_GETSTATIC && HAVE_PSTAT_GETDYNAMIC { /* This works on hpux11. */ struct pst_static pss; struct pst_dynamic psd; if (0 <= pstat_getstatic (&pss, sizeof pss, 1, 0) && 0 <= pstat_getdynamic (&psd, sizeof psd, 1, 0)) { double pages = psd.psd_free; double pagesize = pss.page_size; if (0 <= pages && 0 <= pagesize) return pages * pagesize; } } #endif #if HAVE_SYSMP && defined MP_SAGET && defined MPSA_RMINFO && defined _SC_PAGESIZE { /* This works on irix6. */ struct rminfo realmem; if (sysmp (MP_SAGET, MPSA_RMINFO, &realmem, sizeof realmem) == 0) { double pagesize = sysconf (_SC_PAGESIZE); double pages = realmem.availrmem; if (0 <= pages && 0 <= pagesize) return pages * pagesize; } } #endif #if HAVE_TABLE && defined TBL_VMSTATS { /* This works on Tru64 UNIX V4/5. */ struct tbl_vmstats vmstats; if (table (TBL_VMSTATS, 0, &vmstats, 1, sizeof (vmstats)) == 1) { double pages = vmstats.free_count; double pagesize = vmstats.pagesize; if (0 <= pages && 0 <= pagesize) return pages * pagesize; } } #endif #if HAVE_SYSCTL && defined HW_USERMEM { /* This works on *bsd and darwin. */ unsigned int usermem; size_t len = sizeof usermem; static int mib[2] = { CTL_HW, HW_USERMEM }; if (sysctl (mib, ARRAY_SIZE (mib), &usermem, &len, NULL, 0) == 0 && len == sizeof (usermem)) return (double) usermem; } #endif #if defined _WIN32 { /* this works on windows */ PFN_MS_EX pfnex; HMODULE h = GetModuleHandle ("kernel32.dll"); if (!h) return 0.0; /* Use GlobalMemoryStatusEx if available. */ if ((pfnex = (PFN_MS_EX) GetProcAddress (h, "GlobalMemoryStatusEx"))) { lMEMORYSTATUSEX lms_ex; lms_ex.dwLength = sizeof lms_ex; if (!pfnex (&lms_ex)) return 0.0; return (double) lms_ex.ullAvailPhys; } /* Fall back to GlobalMemoryStatus, which is always available but returns wrong results for physical memory > 4GB. */ else { MEMORYSTATUS ms; GlobalMemoryStatus (&ms); return (double) ms.dwAvailPhys; } } #endif /* Guess 25% of physical memory. */ return physmem_total () / 4; } #if DEBUG # include <stdio.h> # include <stdlib.h> int main (void) { printf ("%12.f %12.f\n", physmem_total (), physmem_available ()); exit (0); } #endif /* DEBUG */ /* Local Variables: compile-command: "gcc -DDEBUG -g -O -Wall -W physmem.c" End: */
geminy/aidear
oss/shell/coreutils/coreutils-8.21/lib/physmem.c
C
gpl-3.0
7,604
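Typical consumers size a working buffer from these probes rather than using the raw totals directly; the fraction and floor below are illustrative choices, not anything physmem.c prescribes. A hedged sketch:

#include <stdio.h>
#include "physmem.h"

/* Use an eighth of available memory, but never less than 16 MiB.
   Both constants are arbitrary for the example. */
static size_t
default_buffer_size (void)
{
  double avail = physmem_available () / 8;
  double floor_bytes = 16.0 * 1024 * 1024;
  double size = avail > floor_bytes ? avail : floor_bytes;
  return (size_t) size;
}

int
main (void)
{
  printf ("work buffer: %zu bytes\n", default_buffer_size ());
  return 0;
}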
/* Provide a working getlogin for systems which lack it. Copyright (C) 2010-2013 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* Written by Bruno Haible, 2010. */ #include <config.h> /* Specification. */ #include <unistd.h> #if (defined _WIN32 || defined __WIN32__) && ! defined __CYGWIN__ # define WIN32_LEAN_AND_MEAN # include <windows.h> #endif char * getlogin (void) { #if (defined _WIN32 || defined __WIN32__) && ! defined __CYGWIN__ static char login_name[1024]; DWORD sz = sizeof (login_name); if (GetUserName (login_name, &sz)) return login_name; #endif return NULL; }
geminy/aidear
oss/shell/coreutils/coreutils-8.21/lib/getlogin.c
C
gpl-3.0
1,234
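Because this replacement returns NULL everywhere except native Windows, callers should pair it with a fallback; checking the LOGNAME environment variable is a common convention, not something getlogin.c itself provides. A caller sketch:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main (void)
{
  const char *name = getlogin ();

  if (!name)
    name = getenv ("LOGNAME");   /* conventional fallback */
  printf ("login: %s\n", name ? name : "(unknown)");
  return 0;
}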
/*====================================================================* - Copyright (C) 2001 Leptonica. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials - provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *====================================================================*/ /*! * \file ptafunc1.c * <pre> * * Simple rearrangements * PTA *ptaSubsample() * l_int32 ptaJoin() * l_int32 ptaaJoin() * PTA *ptaReverse() * PTA *ptaTranspose() * PTA *ptaCyclicPerm() * PTA *ptaSelectRange() * * Geometric * BOX *ptaGetBoundingRegion() * l_int32 *ptaGetRange() * PTA *ptaGetInsideBox() * PTA *pixFindCornerPixels() * l_int32 ptaContainsPt() * l_int32 ptaTestIntersection() * PTA *ptaTransform() * l_int32 ptaPtInsidePolygon() * l_float32 l_angleBetweenVectors() * * Min/max and filtering * l_int32 ptaGetMinMax() * PTA *ptaSelectByValue() * PTA *ptaCropToMask() * * Least Squares Fit * l_int32 ptaGetLinearLSF() * l_int32 ptaGetQuadraticLSF() * l_int32 ptaGetCubicLSF() * l_int32 ptaGetQuarticLSF() * l_int32 ptaNoisyLinearLSF() * l_int32 ptaNoisyQuadraticLSF() * l_int32 applyLinearFit() * l_int32 applyQuadraticFit() * l_int32 applyCubicFit() * l_int32 applyQuarticFit() * * Interconversions with Pix * l_int32 pixPlotAlongPta() * PTA *ptaGetPixelsFromPix() * PIX *pixGenerateFromPta() * PTA *ptaGetBoundaryPixels() * PTAA *ptaaGetBoundaryPixels() * PTAA *ptaaIndexLabeledPixels() * PTA *ptaGetNeighborPixLocs() * * Interconversion with Numa * PTA *numaConvertToPta1() * PTA *numaConvertToPta2() * l_int32 ptaConvertToNuma() * * Display Pta and Ptaa * PIX *pixDisplayPta() * PIX *pixDisplayPtaaPattern() * PIX *pixDisplayPtaPattern() * PTA *ptaReplicatePattern() * PIX *pixDisplayPtaa() * </pre> */ #include <math.h> #include "allheaders.h" #ifndef M_PI #define M_PI 3.14159265358979323846 #endif /* M_PI */ /*---------------------------------------------------------------------* * Simple rearrangements * *---------------------------------------------------------------------*/ /*! 
* \brief ptaSubsample() * * \param[in] ptas * \param[in] subfactor subsample factor, >= 1 * \return ptad evenly sampled pt values from ptas, or NULL on error */ PTA * ptaSubsample(PTA *ptas, l_int32 subfactor) { l_int32 n, i; l_float32 x, y; PTA *ptad; PROCNAME("ptaSubsample"); if (!ptas) return (PTA *)ERROR_PTR("ptas not defined", procName, NULL); if (subfactor < 1) return (PTA *)ERROR_PTR("subfactor < 1", procName, NULL); ptad = ptaCreate(0); n = ptaGetCount(ptas); for (i = 0; i < n; i++) { if (i % subfactor != 0) continue; ptaGetPt(ptas, i, &x, &y); ptaAddPt(ptad, x, y); } return ptad; } /*! * \brief ptaJoin() * * \param[in] ptad dest pta; add to this one * \param[in] ptas source pta; add from this one * \param[in] istart starting index in ptas * \param[in] iend ending index in ptas; use -1 to cat all * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) istart < 0 is taken to mean 'read from the start' (istart = 0) * (2) iend < 0 means 'read to the end' * (3) if ptas == NULL, this is a no-op * </pre> */ l_int32 ptaJoin(PTA *ptad, PTA *ptas, l_int32 istart, l_int32 iend) { l_int32 n, i, x, y; PROCNAME("ptaJoin"); if (!ptad) return ERROR_INT("ptad not defined", procName, 1); if (!ptas) return 0; if (istart < 0) istart = 0; n = ptaGetCount(ptas); if (iend < 0 || iend >= n) iend = n - 1; if (istart > iend) return ERROR_INT("istart > iend; no pts", procName, 1); for (i = istart; i <= iend; i++) { ptaGetIPt(ptas, i, &x, &y); ptaAddPt(ptad, x, y); } return 0; } /*! * \brief ptaaJoin() * * \param[in] ptaad dest ptaa; add to this one * \param[in] ptaas source ptaa; add from this one * \param[in] istart starting index in ptaas * \param[in] iend ending index in ptaas; use -1 to cat all * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) istart < 0 is taken to mean 'read from the start' (istart = 0) * (2) iend < 0 means 'read to the end' * (3) if ptaas == NULL, this is a no-op * </pre> */ l_int32 ptaaJoin(PTAA *ptaad, PTAA *ptaas, l_int32 istart, l_int32 iend) { l_int32 n, i; PTA *pta; PROCNAME("ptaaJoin"); if (!ptaad) return ERROR_INT("ptaad not defined", procName, 1); if (!ptaas) return 0; if (istart < 0) istart = 0; n = ptaaGetCount(ptaas); if (iend < 0 || iend >= n) iend = n - 1; if (istart > iend) return ERROR_INT("istart > iend; no pts", procName, 1); for (i = istart; i <= iend; i++) { pta = ptaaGetPta(ptaas, i, L_CLONE); ptaaAddPta(ptaad, pta, L_INSERT); } return 0; } /*! * \brief ptaReverse() * * \param[in] ptas * \param[in] type 0 for float values; 1 for integer values * \return ptad reversed pta, or NULL on error */ PTA * ptaReverse(PTA *ptas, l_int32 type) { l_int32 n, i, ix, iy; l_float32 x, y; PTA *ptad; PROCNAME("ptaReverse"); if (!ptas) return (PTA *)ERROR_PTR("ptas not defined", procName, NULL); n = ptaGetCount(ptas); if ((ptad = ptaCreate(n)) == NULL) return (PTA *)ERROR_PTR("ptad not made", procName, NULL); for (i = n - 1; i >= 0; i--) { if (type == 0) { ptaGetPt(ptas, i, &x, &y); ptaAddPt(ptad, x, y); } else { /* type == 1 */ ptaGetIPt(ptas, i, &ix, &iy); ptaAddPt(ptad, ix, iy); } } return ptad; } /*! 
 * \brief   ptaTranspose()
 *
 * \param[in]   ptas
 * \return  ptad  with x and y values swapped, or NULL on error
 */
PTA *
ptaTranspose(PTA  *ptas)
{
l_int32    n, i;
l_float32  x, y;
PTA       *ptad;

    PROCNAME("ptaTranspose");

    if (!ptas)
        return (PTA *)ERROR_PTR("ptas not defined", procName, NULL);

    n = ptaGetCount(ptas);
    if ((ptad = ptaCreate(n)) == NULL)
        return (PTA *)ERROR_PTR("ptad not made", procName, NULL);
    for (i = 0; i < n; i++) {
        ptaGetPt(ptas, i, &x, &y);
        ptaAddPt(ptad, y, x);
    }

    return ptad;
}


/*!
 * \brief   ptaCyclicPerm()
 *
 * \param[in]   ptas
 * \param[in]   xs, ys   start point; must be in ptas
 * \return  ptad  cyclic permutation, starting and ending at (xs, ys),
 *                or NULL on error
 *
 * <pre>
 * Notes:
 *      (1) Check to ensure that (a) ptas is a closed path where
 *          the first and last points are identical, and (b) the
 *          resulting pta also starts and ends on the same point
 *          (which in this case is (xs, ys)).
 * </pre>
 */
PTA *
ptaCyclicPerm(PTA     *ptas,
              l_int32  xs,
              l_int32  ys)
{
l_int32  n, i, x, y, j, index, state;
l_int32  x1, y1, x2, y2;
PTA     *ptad;

    PROCNAME("ptaCyclicPerm");

    if (!ptas)
        return (PTA *)ERROR_PTR("ptas not defined", procName, NULL);

    n = ptaGetCount(ptas);

        /* Verify input data */
    ptaGetIPt(ptas, 0, &x1, &y1);
    ptaGetIPt(ptas, n - 1, &x2, &y2);
    if (x1 != x2 || y1 != y2)
        return (PTA *)ERROR_PTR("start and end pts not same", procName, NULL);
    state = L_NOT_FOUND;
    for (i = 0; i < n; i++) {
        ptaGetIPt(ptas, i, &x, &y);
        if (x == xs && y == ys) {
            state = L_FOUND;
            break;
        }
    }
    if (state == L_NOT_FOUND)
        return (PTA *)ERROR_PTR("start pt not in ptas", procName, NULL);

    if ((ptad = ptaCreate(n)) == NULL)
        return (PTA *)ERROR_PTR("ptad not made", procName, NULL);
    for (j = 0; j < n - 1; j++) {
        if (i + j < n - 1)
            index = i + j;
        else
            index = (i + j + 1) % n;
        ptaGetIPt(ptas, index, &x, &y);
        ptaAddPt(ptad, x, y);
    }
    ptaAddPt(ptad, xs, ys);

    return ptad;
}


/*!
 * \brief   ptaSelectRange()
 *
 * \param[in]   ptas
 * \param[in]   first   use 0 to select from the beginning
 * \param[in]   last    use 0 to select to the end
 * \return  ptad, or NULL on error
 */
PTA *
ptaSelectRange(PTA     *ptas,
               l_int32  first,
               l_int32  last)
{
l_int32    n, npt, i;
l_float32  x, y;
PTA       *ptad;

    PROCNAME("ptaSelectRange");

    if (!ptas)
        return (PTA *)ERROR_PTR("ptas not defined", procName, NULL);
    if ((n = ptaGetCount(ptas)) == 0) {
        L_WARNING("ptas is empty\n", procName);
        return ptaCopy(ptas);
    }
    first = L_MAX(0, first);
    if (last <= 0) last = n - 1;
    if (first >= n)
        return (PTA *)ERROR_PTR("invalid first", procName, NULL);
    if (first > last)
        return (PTA *)ERROR_PTR("first > last", procName, NULL);

    npt = last - first + 1;
    ptad = ptaCreate(npt);
    for (i = first; i <= last; i++) {
        ptaGetPt(ptas, i, &x, &y);
        ptaAddPt(ptad, x, y);
    }

    return ptad;
}


/*---------------------------------------------------------------------*
 *                              Geometric                               *
 *---------------------------------------------------------------------*/
/*!
 * \brief   ptaGetBoundingRegion()
 *
 * \param[in]   pta
 * \return  box, or NULL on error
 *
 * <pre>
 * Notes:
 *      (1) This is used when the pta represents a set of points in
 *          a two-dimensional image.  It returns the box of minimum
 *          size containing the pts in the pta.
 * </pre>
 */
BOX *
ptaGetBoundingRegion(PTA  *pta)
{
l_int32  n, i, x, y, minx, maxx, miny, maxy;

    PROCNAME("ptaGetBoundingRegion");

    if (!pta)
        return (BOX *)ERROR_PTR("pta not defined", procName, NULL);

    minx = 10000000;
    miny = 10000000;
    maxx = -10000000;
    maxy = -10000000;
    n = ptaGetCount(pta);
    for (i = 0; i < n; i++) {
        ptaGetIPt(pta, i, &x, &y);
        if (x < minx) minx = x;
        if (x > maxx) maxx = x;
        if (y < miny) miny = y;
        if (y > maxy) maxy = y;
    }

    return boxCreate(minx, miny, maxx - minx + 1, maxy - miny + 1);
}


/*!
 * \brief   ptaGetRange()
 *
 * \param[in]    pta
 * \param[out]   pminx   [optional] min value of x
 * \param[out]   pmaxx   [optional] max value of x
 * \param[out]   pminy   [optional] min value of y
 * \param[out]   pmaxy   [optional] max value of y
 * \return  0 if OK, 1 on error
 *
 * <pre>
 * Notes:
 *      (1) We can use pts to represent pairs of floating values that
 *          are not necessarily tied to a two-dimensional region.  For
 *          example, the pts can represent a general function y(x).
 * </pre>
 */
l_int32
ptaGetRange(PTA        *pta,
            l_float32  *pminx,
            l_float32  *pmaxx,
            l_float32  *pminy,
            l_float32  *pmaxy)
{
l_int32    n, i;
l_float32  x, y, minx, maxx, miny, maxy;

    PROCNAME("ptaGetRange");

    if (!pminx && !pmaxx && !pminy && !pmaxy)
        return ERROR_INT("no output requested", procName, 1);
    if (pminx) *pminx = 0;
    if (pmaxx) *pmaxx = 0;
    if (pminy) *pminy = 0;
    if (pmaxy) *pmaxy = 0;
    if (!pta)
        return ERROR_INT("pta not defined", procName, 1);
    if ((n = ptaGetCount(pta)) == 0)
        return ERROR_INT("no points in pta", procName, 1);

    ptaGetPt(pta, 0, &x, &y);
    minx = x;
    maxx = x;
    miny = y;
    maxy = y;
    for (i = 1; i < n; i++) {
        ptaGetPt(pta, i, &x, &y);
        if (x < minx) minx = x;
        if (x > maxx) maxx = x;
        if (y < miny) miny = y;
        if (y > maxy) maxy = y;
    }
    if (pminx) *pminx = minx;
    if (pmaxx) *pmaxx = maxx;
    if (pminy) *pminy = miny;
    if (pmaxy) *pmaxy = maxy;
    return 0;
}


/*!
 * \brief   ptaGetInsideBox()
 *
 * \param[in]   ptas   input pts
 * \param[in]   box
 * \return  ptad  of pts in ptas that are inside the box, or NULL on error
 */
PTA *
ptaGetInsideBox(PTA  *ptas,
                BOX  *box)
{
PTA       *ptad;
l_int32    n, i, contains;
l_float32  x, y;

    PROCNAME("ptaGetInsideBox");

    if (!ptas)
        return (PTA *)ERROR_PTR("ptas not defined", procName, NULL);
    if (!box)
        return (PTA *)ERROR_PTR("box not defined", procName, NULL);

    n = ptaGetCount(ptas);
    ptad = ptaCreate(0);
    for (i = 0; i < n; i++) {
        ptaGetPt(ptas, i, &x, &y);
        boxContainsPt(box, x, y, &contains);
        if (contains)
            ptaAddPt(ptad, x, y);
    }

    return ptad;
}


/*!
 * \brief   pixFindCornerPixels()
 *
 * \param[in]   pixs   1 bpp
 * \return  pta, or NULL on error
 *
 * <pre>
 * Notes:
 *      (1) Finds the 4 corner-most pixels, as defined by a search
 *          inward from each corner, using a 45 degree line.
 * </pre>
 */
PTA *
pixFindCornerPixels(PIX  *pixs)
{
l_int32    i, j, x, y, w, h, wpl, mindim, found;
l_uint32  *data, *line;
PTA       *pta;

    PROCNAME("pixFindCornerPixels");

    if (!pixs)
        return (PTA *)ERROR_PTR("pixs not defined", procName, NULL);
    if (pixGetDepth(pixs) != 1)
        return (PTA *)ERROR_PTR("pixs not 1 bpp", procName, NULL);

    w = pixGetWidth(pixs);
    h = pixGetHeight(pixs);
    mindim = L_MIN(w, h);
    data = pixGetData(pixs);
    wpl = pixGetWpl(pixs);

    if ((pta = ptaCreate(4)) == NULL)
        return (PTA *)ERROR_PTR("pta not made", procName, NULL);

    for (found = FALSE, i = 0; i < mindim; i++) {
        for (j = 0; j <= i; j++) {
            y = i - j;
            line = data + y * wpl;
            if (GET_DATA_BIT(line, j)) {
                ptaAddPt(pta, j, y);
                found = TRUE;
                break;
            }
        }
        if (found == TRUE)
            break;
    }

    for (found = FALSE, i = 0; i < mindim; i++) {
        for (j = 0; j <= i; j++) {
            y = i - j;
            line = data + y * wpl;
            x = w - 1 - j;
            if (GET_DATA_BIT(line, x)) {
                ptaAddPt(pta, x, y);
                found = TRUE;
                break;
            }
        }
        if (found == TRUE)
            break;
    }

    for (found = FALSE, i = 0; i < mindim; i++) {
        for (j = 0; j <= i; j++) {
            y = h - 1 - i + j;
            line = data + y * wpl;
            if (GET_DATA_BIT(line, j)) {
                ptaAddPt(pta, j, y);
                found = TRUE;
                break;
            }
        }
        if (found == TRUE)
            break;
    }

    for (found = FALSE, i = 0; i < mindim; i++) {
        for (j = 0; j <= i; j++) {
            y = h - 1 - i + j;
            line = data + y * wpl;
            x = w - 1 - j;
            if (GET_DATA_BIT(line, x)) {
                ptaAddPt(pta, x, y);
                found = TRUE;
                break;
            }
        }
        if (found == TRUE)
            break;
    }

    return pta;
}


/*!
 * \brief   ptaContainsPt()
 *
 * \param[in]   pta
 * \param[in]   x, y   point
 * \return  1 if contained, 0 otherwise or on error
 */
l_int32
ptaContainsPt(PTA     *pta,
              l_int32  x,
              l_int32  y)
{
l_int32  i, n, ix, iy;

    PROCNAME("ptaContainsPt");

    if (!pta)
        return ERROR_INT("pta not defined", procName, 0);

    n = ptaGetCount(pta);
    for (i = 0; i < n; i++) {
        ptaGetIPt(pta, i, &ix, &iy);
        if (x == ix && y == iy)
            return 1;
    }
    return 0;
}


/*!
 * \brief   ptaTestIntersection()
 *
 * \param[in]   pta1, pta2
 * \return  bval  which is 1 if they have any elements in common;
 *                0 otherwise or on error.
 */
l_int32
ptaTestIntersection(PTA  *pta1,
                    PTA  *pta2)
{
l_int32  i, j, n1, n2, x1, y1, x2, y2;

    PROCNAME("ptaTestIntersection");

    if (!pta1)
        return ERROR_INT("pta1 not defined", procName, 0);
    if (!pta2)
        return ERROR_INT("pta2 not defined", procName, 0);

    n1 = ptaGetCount(pta1);
    n2 = ptaGetCount(pta2);
    for (i = 0; i < n1; i++) {
        ptaGetIPt(pta1, i, &x1, &y1);
        for (j = 0; j < n2; j++) {
            ptaGetIPt(pta2, j, &x2, &y2);
            if (x1 == x2 && y1 == y2)
                return 1;
        }
    }
    return 0;
}


/*!
 * \brief   ptaTransform()
 *
 * \param[in]   ptas
 * \param[in]   shiftx, shifty
 * \param[in]   scalex, scaley
 * \return  pta, or NULL on error
 *
 * <pre>
 * Notes:
 *      (1) Shift first, then scale.
 * </pre>
 */
PTA *
ptaTransform(PTA       *ptas,
             l_int32    shiftx,
             l_int32    shifty,
             l_float32  scalex,
             l_float32  scaley)
{
l_int32  n, i, x, y;
PTA     *ptad;

    PROCNAME("ptaTransform");

    if (!ptas)
        return (PTA *)ERROR_PTR("ptas not defined", procName, NULL);
    n = ptaGetCount(ptas);
    ptad = ptaCreate(n);
    for (i = 0; i < n; i++) {
        ptaGetIPt(ptas, i, &x, &y);
        x = (l_int32)(scalex * (x + shiftx) + 0.5);
        y = (l_int32)(scaley * (y + shifty) + 0.5);
        ptaAddPt(ptad, x, y);
    }

    return ptad;
}


/*!
 * \brief   ptaPtInsidePolygon()
 *
 * \param[in]    pta       vertices of a polygon
 * \param[in]    x, y      point to be tested
 * \param[out]   pinside   1 if inside; 0 if outside or on boundary
 * \return  0 if OK, 1 on error
 *
 * The abs value of the sum of the angles subtended from a point by
 * the sides of a polygon, when taken in order traversing the polygon,
 * is 0 if the point is outside the polygon and 2*pi if inside.
* The sign will be positive if traversed cw and negative if ccw. */ l_int32 ptaPtInsidePolygon(PTA *pta, l_float32 x, l_float32 y, l_int32 *pinside) { l_int32 i, n; l_float32 sum, x1, y1, x2, y2, xp1, yp1, xp2, yp2; PROCNAME("ptaPtInsidePolygon"); if (!pinside) return ERROR_INT("&inside not defined", procName, 1); *pinside = 0; if (!pta) return ERROR_INT("pta not defined", procName, 1); /* Think of (x1,y1) as the end point of a vector that starts * from the origin (0,0), and ditto for (x2,y2). */ n = ptaGetCount(pta); sum = 0.0; for (i = 0; i < n; i++) { ptaGetPt(pta, i, &xp1, &yp1); ptaGetPt(pta, (i + 1) % n, &xp2, &yp2); x1 = xp1 - x; y1 = yp1 - y; x2 = xp2 - x; y2 = yp2 - y; sum += l_angleBetweenVectors(x1, y1, x2, y2); } if (L_ABS(sum) > M_PI) *pinside = 1; return 0; } /*! * \brief l_angleBetweenVectors() * * \param[in] x1, y1 end point of first vector * \param[in] x2, y2 end point of second vector * \return angle radians, or 0.0 on error * * <pre> * Notes: * (1) This gives the angle between two vectors, going between * vector1 (x1,y1) and vector2 (x2,y2). The angle is swept * out from 1 --> 2. If this is clockwise, the angle is * positive, but the result is folded into the interval [-pi, pi]. * </pre> */ l_float32 l_angleBetweenVectors(l_float32 x1, l_float32 y1, l_float32 x2, l_float32 y2) { l_float64 ang; ang = atan2(y2, x2) - atan2(y1, x1); if (ang > M_PI) ang -= 2.0 * M_PI; if (ang < -M_PI) ang += 2.0 * M_PI; return ang; } /*---------------------------------------------------------------------* * Min/max and filtering * *---------------------------------------------------------------------*/ /*! * \brief ptaGetMinMax() * * \param[in] pta * \param[out] pxmin [optional] min of x * \param[out] pymin [optional] min of y * \param[out] pxmax [optional] max of x * \param[out] pymax [optional] max of y * \return 0 if OK, 1 on error. If pta is empty, requested * values are returned as -1.0. */ l_int32 ptaGetMinMax(PTA *pta, l_float32 *pxmin, l_float32 *pymin, l_float32 *pxmax, l_float32 *pymax) { l_int32 i, n; l_float32 x, y, xmin, ymin, xmax, ymax; PROCNAME("ptaGetMinMax"); if (pxmin) *pxmin = -1.0; if (pymin) *pymin = -1.0; if (pxmax) *pxmax = -1.0; if (pymax) *pymax = -1.0; if (!pta) return ERROR_INT("pta not defined", procName, 1); if (!pxmin && !pxmax && !pymin && !pymax) return ERROR_INT("no output requested", procName, 1); if ((n = ptaGetCount(pta)) == 0) { L_WARNING("pta is empty\n", procName); return 0; } xmin = ymin = 1.0e20; xmax = ymax = -1.0e20; for (i = 0; i < n; i++) { ptaGetPt(pta, i, &x, &y); if (x < xmin) xmin = x; if (y < ymin) ymin = y; if (x > xmax) xmax = x; if (y > ymax) ymax = y; } if (pxmin) *pxmin = xmin; if (pymin) *pymin = ymin; if (pxmax) *pxmax = xmax; if (pymax) *pymax = ymax; return 0; } /*! 
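 * Illustrative sketch of the winding-number test above (not part of
 * the library): a point-in-polygon query on a unit square.  The
 * function name is hypothetical; disabled with #if 0 so it does not
 * affect the build.
 */
#if 0
static l_int32
example_inside_square(void)
{
l_int32  inside;
PTA     *pta;

    pta = ptaCreate(4);                 /* square with corners (0,0)..(1,1) */
    ptaAddPt(pta, 0, 0);
    ptaAddPt(pta, 1, 0);
    ptaAddPt(pta, 1, 1);
    ptaAddPt(pta, 0, 1);
    ptaPtInsidePolygon(pta, 0.5, 0.5, &inside);   /* inside == 1 */
    ptaDestroy(&pta);
    return inside;
}
#endif

/*!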
* \brief ptaSelectByValue() * * \param[in] ptas * \param[in] xth, yth threshold values * \param[in] type L_SELECT_XVAL, L_SELECT_YVAL, * L_SELECT_IF_EITHER, L_SELECT_IF_BOTH * \param[in] relation L_SELECT_IF_LT, L_SELECT_IF_GT, * L_SELECT_IF_LTE, L_SELECT_IF_GTE * \return ptad filtered set, or NULL on error */ PTA * ptaSelectByValue(PTA *ptas, l_float32 xth, l_float32 yth, l_int32 type, l_int32 relation) { l_int32 i, n; l_float32 x, y; PTA *ptad; PROCNAME("ptaSelectByValue"); if (!ptas) return (PTA *)ERROR_PTR("ptas not defined", procName, NULL); if (ptaGetCount(ptas) == 0) { L_WARNING("ptas is empty\n", procName); return ptaCopy(ptas); } if (type != L_SELECT_XVAL && type != L_SELECT_YVAL && type != L_SELECT_IF_EITHER && type != L_SELECT_IF_BOTH) return (PTA *)ERROR_PTR("invalid type", procName, NULL); if (relation != L_SELECT_IF_LT && relation != L_SELECT_IF_GT && relation != L_SELECT_IF_LTE && relation != L_SELECT_IF_GTE) return (PTA *)ERROR_PTR("invalid relation", procName, NULL); n = ptaGetCount(ptas); ptad = ptaCreate(n); for (i = 0; i < n; i++) { ptaGetPt(ptas, i, &x, &y); if (type == L_SELECT_XVAL) { if ((relation == L_SELECT_IF_LT && x < xth) || (relation == L_SELECT_IF_GT && x > xth) || (relation == L_SELECT_IF_LTE && x <= xth) || (relation == L_SELECT_IF_GTE && x >= xth)) ptaAddPt(ptad, x, y); } else if (type == L_SELECT_YVAL) { if ((relation == L_SELECT_IF_LT && y < yth) || (relation == L_SELECT_IF_GT && y > yth) || (relation == L_SELECT_IF_LTE && y <= yth) || (relation == L_SELECT_IF_GTE && y >= yth)) ptaAddPt(ptad, x, y); } else if (type == L_SELECT_IF_EITHER) { if (((relation == L_SELECT_IF_LT) && (x < xth || y < yth)) || ((relation == L_SELECT_IF_GT) && (x > xth || y > yth)) || ((relation == L_SELECT_IF_LTE) && (x <= xth || y <= yth)) || ((relation == L_SELECT_IF_GTE) && (x >= xth || y >= yth))) ptaAddPt(ptad, x, y); } else { /* L_SELECT_IF_BOTH */ if (((relation == L_SELECT_IF_LT) && (x < xth && y < yth)) || ((relation == L_SELECT_IF_GT) && (x > xth && y > yth)) || ((relation == L_SELECT_IF_LTE) && (x <= xth && y <= yth)) || ((relation == L_SELECT_IF_GTE) && (x >= xth && y >= yth))) ptaAddPt(ptad, x, y); } } return ptad; } /*! * \brief ptaCropToMask() * * \param[in] ptas input pta * \param[in] pixm 1 bpp mask * \return ptad with only pts under the mask fg, or NULL on error */ PTA * ptaCropToMask(PTA *ptas, PIX *pixm) { l_int32 i, n, x, y; l_uint32 val; PTA *ptad; PROCNAME("ptaCropToMask"); if (!ptas) return (PTA *)ERROR_PTR("ptas not defined", procName, NULL); if (!pixm || pixGetDepth(pixm) != 1) return (PTA *)ERROR_PTR("pixm undefined or not 1 bpp", procName, NULL); if (ptaGetCount(ptas) == 0) { L_INFO("ptas is empty\n", procName); return ptaCopy(ptas); } n = ptaGetCount(ptas); ptad = ptaCreate(n); for (i = 0; i < n; i++) { ptaGetIPt(ptas, i, &x, &y); pixGetPixel(pixm, x, y, &val); if (val == 1) ptaAddPt(ptad, x, y); } return ptad; } /*---------------------------------------------------------------------* * Least Squares Fit * *---------------------------------------------------------------------*/ /*! * \brief ptaGetLinearLSF() * * \param[in] pta * \param[out] pa [optional] slope a of least square fit: y = ax + b * \param[out] pb [optional] intercept b of least square fit * \param[out] pnafit [optional] numa of least square fit * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) Either or both &a and &b must be input. They determine the * type of line that is fit. 
* (2) If both &a and &b are defined, this returns a and b that minimize: * * sum (yi - axi -b)^2 * i * * The method is simple: differentiate this expression w/rt a and b, * and solve the resulting two equations for a and b in terms of * various sums over the input data (xi, yi). * (3) We also allow two special cases, where either a = 0 or b = 0: * (a) If &a is given and &b = null, find the linear LSF that * goes through the origin (b = 0). * (b) If &b is given and &a = null, find the linear LSF with * zero slope (a = 0). * (4) If &nafit is defined, this returns an array of fitted values, * corresponding to the two implicit Numa arrays (nax and nay) in pta. * Thus, just as you can plot the data in pta as nay vs. nax, * you can plot the linear least square fit as nafit vs. nax. * Get the nax array using ptaGetArrays(pta, &nax, NULL); * </pre> */ l_int32 ptaGetLinearLSF(PTA *pta, l_float32 *pa, l_float32 *pb, NUMA **pnafit) { l_int32 n, i; l_float32 a, b, factor, sx, sy, sxx, sxy, val; l_float32 *xa, *ya; PROCNAME("ptaGetLinearLSF"); if (pa) *pa = 0.0; if (pb) *pb = 0.0; if (pnafit) *pnafit = NULL; if (!pa && !pb && !pnafit) return ERROR_INT("no output requested", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); if ((n = ptaGetCount(pta)) < 2) return ERROR_INT("less than 2 pts found", procName, 1); xa = pta->x; /* not a copy */ ya = pta->y; /* not a copy */ sx = sy = sxx = sxy = 0.; if (pa && pb) { /* general line */ for (i = 0; i < n; i++) { sx += xa[i]; sy += ya[i]; sxx += xa[i] * xa[i]; sxy += xa[i] * ya[i]; } factor = n * sxx - sx * sx; if (factor == 0.0) return ERROR_INT("no solution found", procName, 1); factor = 1. / factor; a = factor * ((l_float32)n * sxy - sx * sy); b = factor * (sxx * sy - sx * sxy); } else if (pa) { /* b = 0; line through origin */ for (i = 0; i < n; i++) { sxx += xa[i] * xa[i]; sxy += xa[i] * ya[i]; } if (sxx == 0.0) return ERROR_INT("no solution found", procName, 1); a = sxy / sxx; b = 0.0; } else { /* a = 0; horizontal line */ for (i = 0; i < n; i++) sy += ya[i]; a = 0.0; b = sy / (l_float32)n; } if (pnafit) { *pnafit = numaCreate(n); for (i = 0; i < n; i++) { val = a * xa[i] + b; numaAddNumber(*pnafit, val); } } if (pa) *pa = a; if (pb) *pb = b; return 0; } /*! * \brief ptaGetQuadraticLSF() * * \param[in] pta * \param[out] pa [optional] coeff a of LSF: y = ax^2 + bx + c * \param[out] pb [optional] coeff b of LSF: y = ax^2 + bx + c * \param[out] pc [optional] coeff c of LSF: y = ax^2 + bx + c * \param[out] pnafit [optional] numa of least square fit * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) This does a quadratic least square fit to the set of points * in %pta. That is, it finds coefficients a, b and c that minimize: * * sum (yi - a*xi*xi -b*xi -c)^2 * i * * The method is simple: differentiate this expression w/rt * a, b and c, and solve the resulting three equations for these * coefficients in terms of various sums over the input data (xi, yi). * The three equations are in the form: * f[0][0]a + f[0][1]b + f[0][2]c = g[0] * f[1][0]a + f[1][1]b + f[1][2]c = g[1] * f[2][0]a + f[2][1]b + f[2][2]c = g[2] * (2) If &nafit is defined, this returns an array of fitted values, * corresponding to the two implicit Numa arrays (nax and nay) in pta. * Thus, just as you can plot the data in pta as nay vs. nax, * you can plot the linear least square fit as nafit vs. nax. 
 *          Get the nax array using ptaGetArrays(pta, &nax, NULL);
 * </pre>
 */
l_int32
ptaGetQuadraticLSF(PTA        *pta,
                   l_float32  *pa,
                   l_float32  *pb,
                   l_float32  *pc,
                   NUMA      **pnafit)
{
l_int32     n, i, ret;
l_float32   x, y, sx, sy, sx2, sx3, sx4, sxy, sx2y;
l_float32  *xa, *ya;
l_float32  *f[3];
l_float32   g[3];

    PROCNAME("ptaGetQuadraticLSF");

    if (pa) *pa = 0.0;
    if (pb) *pb = 0.0;
    if (pc) *pc = 0.0;
    if (pnafit) *pnafit = NULL;
    if (!pa && !pb && !pc && !pnafit)
        return ERROR_INT("no output requested", procName, 1);
    if (!pta)
        return ERROR_INT("pta not defined", procName, 1);
    if ((n = ptaGetCount(pta)) < 3)
        return ERROR_INT("less than 3 pts found", procName, 1);

    xa = pta->x;  /* not a copy */
    ya = pta->y;  /* not a copy */
    sx = sy = sx2 = sx3 = sx4 = sxy = sx2y = 0.;
    for (i = 0; i < n; i++) {
        x = xa[i];
        y = ya[i];
        sx += x;
        sy += y;
        sx2 += x * x;
        sx3 += x * x * x;
        sx4 += x * x * x * x;
        sxy += x * y;
        sx2y += x * x * y;
    }

    for (i = 0; i < 3; i++)
        f[i] = (l_float32 *)LEPT_CALLOC(3, sizeof(l_float32));
    f[0][0] = sx4;
    f[0][1] = sx3;
    f[0][2] = sx2;
    f[1][0] = sx3;
    f[1][1] = sx2;
    f[1][2] = sx;
    f[2][0] = sx2;
    f[2][1] = sx;
    f[2][2] = n;
    g[0] = sx2y;
    g[1] = sxy;
    g[2] = sy;

        /* Solve for the unknowns, also putting f-inverse into f */
    ret = gaussjordan(f, g, 3);
    for (i = 0; i < 3; i++)
        LEPT_FREE(f[i]);
    if (ret)
        return ERROR_INT("quadratic solution failed", procName, 1);

    if (pa) *pa = g[0];
    if (pb) *pb = g[1];
    if (pc) *pc = g[2];
    if (pnafit) {
        *pnafit = numaCreate(n);
        for (i = 0; i < n; i++) {
            x = xa[i];
            y = g[0] * x * x + g[1] * x + g[2];
            numaAddNumber(*pnafit, y);
        }
    }
    return 0;
}


/*!
 * \brief   ptaGetCubicLSF()
 *
 * \param[in]    pta
 * \param[out]   pa      [optional] coeff a of LSF: y = ax^3 + bx^2 + cx + d
 * \param[out]   pb      [optional] coeff b of LSF
 * \param[out]   pc      [optional] coeff c of LSF
 * \param[out]   pd      [optional] coeff d of LSF
 * \param[out]   pnafit  [optional] numa of least square fit
 * \return  0 if OK, 1 on error
 *
 * <pre>
 * Notes:
 *      (1) This does a cubic least square fit to the set of points
 *          in %pta.  That is, it finds coefficients a, b, c and d
 *          that minimize:
 *
 *              sum (yi - a*xi*xi*xi -b*xi*xi -c*xi - d)^2
 *               i
 *
 *          Differentiate this expression w/rt a, b, c and d, and solve
 *          the resulting four equations for these coefficients in
 *          terms of various sums over the input data (xi, yi).
 *          The four equations are in the form:
 *             f[0][0]a + f[0][1]b + f[0][2]c + f[0][3]d = g[0]
 *             f[1][0]a + f[1][1]b + f[1][2]c + f[1][3]d = g[1]
 *             f[2][0]a + f[2][1]b + f[2][2]c + f[2][3]d = g[2]
 *             f[3][0]a + f[3][1]b + f[3][2]c + f[3][3]d = g[3]
 *      (2) If &nafit is defined, this returns an array of fitted values,
 *          corresponding to the two implicit Numa arrays (nax and nay) in pta.
 *          Thus, just as you can plot the data in pta as nay vs. nax,
 *          you can plot the linear least square fit as nafit vs. nax.
 *          Get the nax array using ptaGetArrays(pta, &nax, NULL);
 * </pre>
 */
l_int32
ptaGetCubicLSF(PTA        *pta,
               l_float32  *pa,
               l_float32  *pb,
               l_float32  *pc,
               l_float32  *pd,
               NUMA      **pnafit)
{
l_int32     n, i, ret;
l_float32   x, y, sx, sy, sx2, sx3, sx4, sx5, sx6, sxy, sx2y, sx3y;
l_float32  *xa, *ya;
l_float32  *f[4];
l_float32   g[4];

    PROCNAME("ptaGetCubicLSF");

    if (pa) *pa = 0.0;
    if (pb) *pb = 0.0;
    if (pc) *pc = 0.0;
    if (pd) *pd = 0.0;
    if (pnafit) *pnafit = NULL;
    if (!pa && !pb && !pc && !pd && !pnafit)
        return ERROR_INT("no output requested", procName, 1);
    if (!pta)
        return ERROR_INT("pta not defined", procName, 1);
    if ((n = ptaGetCount(pta)) < 4)
        return ERROR_INT("less than 4 pts found", procName, 1);

    xa = pta->x;  /* not a copy */
    ya = pta->y;  /* not a copy */
    sx = sy = sx2 = sx3 = sx4 = sx5 = sx6 = sxy = sx2y = sx3y = 0.;
    for (i = 0; i < n; i++) {
        x = xa[i];
        y = ya[i];
        sx += x;
        sy += y;
        sx2 += x * x;
        sx3 += x * x * x;
        sx4 += x * x * x * x;
        sx5 += x * x * x * x * x;
        sx6 += x * x * x * x * x * x;
        sxy += x * y;
        sx2y += x * x * y;
        sx3y += x * x * x * y;
    }

    for (i = 0; i < 4; i++)
        f[i] = (l_float32 *)LEPT_CALLOC(4, sizeof(l_float32));
    f[0][0] = sx6;
    f[0][1] = sx5;
    f[0][2] = sx4;
    f[0][3] = sx3;
    f[1][0] = sx5;
    f[1][1] = sx4;
    f[1][2] = sx3;
    f[1][3] = sx2;
    f[2][0] = sx4;
    f[2][1] = sx3;
    f[2][2] = sx2;
    f[2][3] = sx;
    f[3][0] = sx3;
    f[3][1] = sx2;
    f[3][2] = sx;
    f[3][3] = n;
    g[0] = sx3y;
    g[1] = sx2y;
    g[2] = sxy;
    g[3] = sy;

        /* Solve for the unknowns, also putting f-inverse into f */
    ret = gaussjordan(f, g, 4);
    for (i = 0; i < 4; i++)
        LEPT_FREE(f[i]);
    if (ret)
        return ERROR_INT("cubic solution failed", procName, 1);

    if (pa) *pa = g[0];
    if (pb) *pb = g[1];
    if (pc) *pc = g[2];
    if (pd) *pd = g[3];
    if (pnafit) {
        *pnafit = numaCreate(n);
        for (i = 0; i < n; i++) {
            x = xa[i];
            y = g[0] * x * x * x + g[1] * x * x + g[2] * x + g[3];
            numaAddNumber(*pnafit, y);
        }
    }
    return 0;
}


/*!
 * \brief   ptaGetQuarticLSF()
 *
 * \param[in]    pta
 * \param[out]   pa      [optional] coeff a of LSF:
 *                         y = ax^4 + bx^3 + cx^2 + dx + e
 * \param[out]   pb      [optional] coeff b of LSF
 * \param[out]   pc      [optional] coeff c of LSF
 * \param[out]   pd      [optional] coeff d of LSF
 * \param[out]   pe      [optional] coeff e of LSF
 * \param[out]   pnafit  [optional] numa of least square fit
 * \return  0 if OK, 1 on error
 *
 * <pre>
 * Notes:
 *      (1) This does a quartic least square fit to the set of points
 *          in %pta.  That is, it finds coefficients a, b, c, d and e
 *          that minimize:
 *
 *              sum (yi - a*xi*xi*xi*xi -b*xi*xi*xi -c*xi*xi - d*xi - e)^2
 *               i
 *
 *          Differentiate this expression w/rt a, b, c, d and e, and solve
 *          the resulting five equations for these coefficients in
 *          terms of various sums over the input data (xi, yi).
 *          The five equations are in the form:
 *             f[0][0]a + f[0][1]b + f[0][2]c + f[0][3]d + f[0][4]e = g[0]
 *             f[1][0]a + f[1][1]b + f[1][2]c + f[1][3]d + f[1][4]e = g[1]
 *             f[2][0]a + f[2][1]b + f[2][2]c + f[2][3]d + f[2][4]e = g[2]
 *             f[3][0]a + f[3][1]b + f[3][2]c + f[3][3]d + f[3][4]e = g[3]
 *             f[4][0]a + f[4][1]b + f[4][2]c + f[4][3]d + f[4][4]e = g[4]
 *      (2) If &nafit is defined, this returns an array of fitted values,
 *          corresponding to the two implicit Numa arrays (nax and nay) in pta.
 *          Thus, just as you can plot the data in pta as nay vs. nax,
 *          you can plot the linear least square fit as nafit vs. nax.
* Get the nax array using ptaGetArrays(pta, &nax, NULL); * </pre> */ l_int32 ptaGetQuarticLSF(PTA *pta, l_float32 *pa, l_float32 *pb, l_float32 *pc, l_float32 *pd, l_float32 *pe, NUMA **pnafit) { l_int32 n, i, ret; l_float32 x, y, sx, sy, sx2, sx3, sx4, sx5, sx6, sx7, sx8; l_float32 sxy, sx2y, sx3y, sx4y; l_float32 *xa, *ya; l_float32 *f[5]; l_float32 g[5]; PROCNAME("ptaGetQuarticLSF"); if (pa) *pa = 0.0; if (pb) *pb = 0.0; if (pc) *pc = 0.0; if (pd) *pd = 0.0; if (pe) *pe = 0.0; if (pnafit) *pnafit = NULL; if (!pa && !pb && !pc && !pd && !pe && !pnafit) return ERROR_INT("no output requested", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); if ((n = ptaGetCount(pta)) < 5) return ERROR_INT("less than 5 pts found", procName, 1); xa = pta->x; /* not a copy */ ya = pta->y; /* not a copy */ sx = sy = sx2 = sx3 = sx4 = sx5 = sx6 = sx7 = sx8 = 0; sxy = sx2y = sx3y = sx4y = 0.; for (i = 0; i < n; i++) { x = xa[i]; y = ya[i]; sx += x; sy += y; sx2 += x * x; sx3 += x * x * x; sx4 += x * x * x * x; sx5 += x * x * x * x * x; sx6 += x * x * x * x * x * x; sx7 += x * x * x * x * x * x * x; sx8 += x * x * x * x * x * x * x * x; sxy += x * y; sx2y += x * x * y; sx3y += x * x * x * y; sx4y += x * x * x * x * y; } for (i = 0; i < 5; i++) f[i] = (l_float32 *)LEPT_CALLOC(5, sizeof(l_float32)); f[0][0] = sx8; f[0][1] = sx7; f[0][2] = sx6; f[0][3] = sx5; f[0][4] = sx4; f[1][0] = sx7; f[1][1] = sx6; f[1][2] = sx5; f[1][3] = sx4; f[1][4] = sx3; f[2][0] = sx6; f[2][1] = sx5; f[2][2] = sx4; f[2][3] = sx3; f[2][4] = sx2; f[3][0] = sx5; f[3][1] = sx4; f[3][2] = sx3; f[3][3] = sx2; f[3][4] = sx; f[4][0] = sx4; f[4][1] = sx3; f[4][2] = sx2; f[4][3] = sx; f[4][4] = n; g[0] = sx4y; g[1] = sx3y; g[2] = sx2y; g[3] = sxy; g[4] = sy; /* Solve for the unknowns, also putting f-inverse into f */ ret = gaussjordan(f, g, 5); for (i = 0; i < 5; i++) LEPT_FREE(f[i]); if (ret) return ERROR_INT("quartic solution failed", procName, 1); if (pa) *pa = g[0]; if (pb) *pb = g[1]; if (pc) *pc = g[2]; if (pd) *pd = g[3]; if (pe) *pe = g[4]; if (pnafit) { *pnafit = numaCreate(n); for (i = 0; i < n; i++) { x = xa[i]; y = g[0] * x * x * x * x + g[1] * x * x * x + g[2] * x * x + g[3] * x + g[4]; numaAddNumber(*pnafit, y); } } return 0; } /*! * \brief ptaNoisyLinearLSF() * * \param[in] pta * \param[in] factor reject outliers with error greater than this * number of medians; typically ~ 3 * \param[out] pptad [optional] with outliers removed * \param[out] pa [optional] slope a of least square fit: y = ax + b * \param[out] pb [optional] intercept b of least square fit * \param[out] pmederr [optional] median error * \param[out] pnafit [optional] numa of least square fit to ptad * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) This does a linear least square fit to the set of points * in %pta. It then evaluates the errors and removes points * whose error is >= factor * median_error. It then re-runs * the linear LSF on the resulting points. * (2) Either or both &a and &b must be input. They determine the * type of line that is fit. * (3) The median error can give an indication of how good the fit * is likely to be. 
 * </pre>
 */
l_int32
ptaNoisyLinearLSF(PTA        *pta,
                  l_float32   factor,
                  PTA       **pptad,
                  l_float32  *pa,
                  l_float32  *pb,
                  l_float32  *pmederr,
                  NUMA      **pnafit)
{
l_int32    n, i, ret;
l_float32  x, y, yf, val, mederr;
NUMA      *nafit, *naerror;
PTA       *ptad;

    PROCNAME("ptaNoisyLinearLSF");

    if (pptad) *pptad = NULL;
    if (pa) *pa = 0.0;
    if (pb) *pb = 0.0;
    if (pmederr) *pmederr = 0.0;
    if (pnafit) *pnafit = NULL;
    if (!pptad && !pa && !pb && !pnafit)
        return ERROR_INT("no output requested", procName, 1);
    if (!pta)
        return ERROR_INT("pta not defined", procName, 1);
    if (factor <= 0.0)
        return ERROR_INT("factor must be > 0.0", procName, 1);
    if ((n = ptaGetCount(pta)) < 3)
        return ERROR_INT("less than 3 pts found", procName, 1);

    if (ptaGetLinearLSF(pta, pa, pb, &nafit) != 0)
        return ERROR_INT("error in linear LSF", procName, 1);

        /* Get the median error */
    naerror = numaCreate(n);
    for (i = 0; i < n; i++) {
        ptaGetPt(pta, i, &x, &y);
        numaGetFValue(nafit, i, &yf);
        numaAddNumber(naerror, L_ABS(y - yf));
    }
    numaGetMedian(naerror, &mederr);
    if (pmederr) *pmederr = mederr;
    numaDestroy(&nafit);

        /* Remove outliers */
    ptad = ptaCreate(n);
    for (i = 0; i < n; i++) {
        ptaGetPt(pta, i, &x, &y);
        numaGetFValue(naerror, i, &val);
        if (val <= factor * mederr)  /* <= in case mederr = 0 */
            ptaAddPt(ptad, x, y);
    }
    numaDestroy(&naerror);

        /* Do LSF again */
    ret = ptaGetLinearLSF(ptad, pa, pb, pnafit);

    if (pptad)
        *pptad = ptad;
    else
        ptaDestroy(&ptad);

    return ret;
}


/*!
 * \brief   ptaNoisyQuadraticLSF()
 *
 * \param[in]    pta
 * \param[in]    factor   reject outliers with error greater than this
 *                        number of medians; typically ~ 3
 * \param[out]   pptad    [optional] with outliers removed
 * \param[out]   pa       [optional] coeff a of LSF: y = ax^2 + bx + c
 * \param[out]   pb       [optional] coeff b of LSF: y = ax^2 + bx + c
 * \param[out]   pc       [optional] coeff c of LSF: y = ax^2 + bx + c
 * \param[out]   pmederr  [optional] median error
 * \param[out]   pnafit   [optional] numa of least square fit to ptad
 * \return  0 if OK, 1 on error
 *
 * <pre>
 * Notes:
 *      (1) This does a quadratic least square fit to the set of points
 *          in %pta.  It then evaluates the errors and removes points
 *          whose error is >= factor * median_error.  It then re-runs
 *          a quadratic LSF on the resulting points.
 * </pre>
 */
l_int32
ptaNoisyQuadraticLSF(PTA        *pta,
                     l_float32   factor,
                     PTA       **pptad,
                     l_float32  *pa,
                     l_float32  *pb,
                     l_float32  *pc,
                     l_float32  *pmederr,
                     NUMA      **pnafit)
{
l_int32    n, i, ret;
l_float32  x, y, yf, val, mederr;
NUMA      *nafit, *naerror;
PTA       *ptad;

    PROCNAME("ptaNoisyQuadraticLSF");

    if (pptad) *pptad = NULL;
    if (pa) *pa = 0.0;
    if (pb) *pb = 0.0;
    if (pc) *pc = 0.0;
    if (pmederr) *pmederr = 0.0;
    if (pnafit) *pnafit = NULL;
    if (!pptad && !pa && !pb && !pc && !pnafit)
        return ERROR_INT("no output requested", procName, 1);
    if (factor <= 0.0)
        return ERROR_INT("factor must be > 0.0", procName, 1);
    if (!pta)
        return ERROR_INT("pta not defined", procName, 1);
    if ((n = ptaGetCount(pta)) < 3)
        return ERROR_INT("less than 3 pts found", procName, 1);

    if (ptaGetQuadraticLSF(pta, NULL, NULL, NULL, &nafit) != 0)
        return ERROR_INT("error in quadratic LSF", procName, 1);

        /* Get the median error */
    naerror = numaCreate(n);
    for (i = 0; i < n; i++) {
        ptaGetPt(pta, i, &x, &y);
        numaGetFValue(nafit, i, &yf);
        numaAddNumber(naerror, L_ABS(y - yf));
    }
    numaGetMedian(naerror, &mederr);
    if (pmederr) *pmederr = mederr;
    numaDestroy(&nafit);

        /* Remove outliers */
    ptad = ptaCreate(n);
    for (i = 0; i < n; i++) {
        ptaGetPt(pta, i, &x, &y);
        numaGetFValue(naerror, i, &val);
        if (val <= factor * mederr)  /* <= in case mederr = 0 */
            ptaAddPt(ptad, x, y);
    }
    numaDestroy(&naerror);
    if ((n = ptaGetCount(ptad)) < 3) {
        ptaDestroy(&ptad);
        return ERROR_INT("less than 3 pts found", procName, 1);
    }

        /* Do LSF again */
    ret = ptaGetQuadraticLSF(ptad, pa, pb, pc, pnafit);

    if (pptad)
        *pptad = ptad;
    else
        ptaDestroy(&ptad);

    return ret;
}


/*!
 * \brief   applyLinearFit()
 *
 * \param[in]    a, b   linear fit coefficients
 * \param[in]    x
 * \param[out]   py     y = a * x + b
 * \return  0 if OK, 1 on error
 */
l_int32
applyLinearFit(l_float32   a,
               l_float32   b,
               l_float32   x,
               l_float32  *py)
{
    PROCNAME("applyLinearFit");

    if (!py)
        return ERROR_INT("&y not defined", procName, 1);

    *py = a * x + b;
    return 0;
}


/*!
 * \brief   applyQuadraticFit()
 *
 * \param[in]    a, b, c   quadratic fit coefficients
 * \param[in]    x
 * \param[out]   py        y = a * x^2 + b * x + c
 * \return  0 if OK, 1 on error
 */
l_int32
applyQuadraticFit(l_float32   a,
                  l_float32   b,
                  l_float32   c,
                  l_float32   x,
                  l_float32  *py)
{
    PROCNAME("applyQuadraticFit");

    if (!py)
        return ERROR_INT("&y not defined", procName, 1);

    *py = a * x * x + b * x + c;
    return 0;
}


/*!
 * \brief   applyCubicFit()
 *
 * \param[in]    a, b, c, d   cubic fit coefficients
 * \param[in]    x
 * \param[out]   py           y = a * x^3 + b * x^2 + c * x + d
 * \return  0 if OK, 1 on error
 */
l_int32
applyCubicFit(l_float32   a,
              l_float32   b,
              l_float32   c,
              l_float32   d,
              l_float32   x,
              l_float32  *py)
{
    PROCNAME("applyCubicFit");

    if (!py)
        return ERROR_INT("&y not defined", procName, 1);

    *py = a * x * x * x + b * x * x + c * x + d;
    return 0;
}


/*!
 * \brief   applyQuarticFit()
 *
 * \param[in]    a, b, c, d, e   quartic fit coefficients
 * \param[in]    x
 * \param[out]   py              y = a * x^4 + b * x^3 + c * x^2 + d * x + e
 * \return  0 if OK, 1 on error
 */
l_int32
applyQuarticFit(l_float32   a,
                l_float32   b,
                l_float32   c,
                l_float32   d,
                l_float32   e,
                l_float32   x,
                l_float32  *py)
{
l_float32  x2;

    PROCNAME("applyQuarticFit");

    if (!py)
        return ERROR_INT("&y not defined", procName, 1);

    x2 = x * x;
    *py = a * x2 * x2 + b * x2 * x + c * x2 + d * x + e;
    return 0;
}
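
/* Illustrative sketch of the least-squares interface above (not part
 * of the library): fit a line to noisy data, rejecting outliers, and
 * evaluate the fit at a new abscissa.  The function name is
 * hypothetical; disabled with #if 0 so it does not affect the build. */
#if 0
static void
example_linear_fit(PTA *pta)
{
l_float32  a, b, mederr, y;

        /* reject points with error > 3 * median error, then refit */
    if (ptaNoisyLinearLSF(pta, 3.0, NULL, &a, &b, &mederr, NULL) == 0)
        applyLinearFit(a, b, 100.0, &y);   /* y = a * 100 + b */
}
#endif


/*---------------------------------------------------------------------*
 *                       Interconversions with Pix                      *
 *---------------------------------------------------------------------*/
/*!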
* \brief pixPlotAlongPta() * * \param[in] pixs any depth * \param[in] pta set of points on which to plot * \param[in] outformat GPLOT_PNG, GPLOT_PS, GPLOT_EPS, GPLOT_LATEX * \param[in] title [optional] for plot; can be null * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) This is a debugging function. * (2) Removes existing colormaps and clips the pta to the input %pixs. * (3) If the image is RGB, three separate plots are generated. * </pre> */ l_int32 pixPlotAlongPta(PIX *pixs, PTA *pta, l_int32 outformat, const char *title) { char buffer[128]; char *rtitle, *gtitle, *btitle; static l_int32 count = 0; /* require separate temp files for each call */ l_int32 i, x, y, d, w, h, npts, rval, gval, bval; l_uint32 val; NUMA *na, *nar, *nag, *nab; PIX *pixt; PROCNAME("pixPlotAlongPta"); lept_mkdir("lept/plot"); if (!pixs) return ERROR_INT("pixs not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); if (outformat != GPLOT_PNG && outformat != GPLOT_PS && outformat != GPLOT_EPS && outformat != GPLOT_LATEX) { L_WARNING("outformat invalid; using GPLOT_PNG\n", procName); outformat = GPLOT_PNG; } pixt = pixRemoveColormap(pixs, REMOVE_CMAP_BASED_ON_SRC); d = pixGetDepth(pixt); w = pixGetWidth(pixt); h = pixGetHeight(pixt); npts = ptaGetCount(pta); if (d == 32) { nar = numaCreate(npts); nag = numaCreate(npts); nab = numaCreate(npts); for (i = 0; i < npts; i++) { ptaGetIPt(pta, i, &x, &y); if (x < 0 || x >= w) continue; if (y < 0 || y >= h) continue; pixGetPixel(pixt, x, y, &val); rval = GET_DATA_BYTE(&val, COLOR_RED); gval = GET_DATA_BYTE(&val, COLOR_GREEN); bval = GET_DATA_BYTE(&val, COLOR_BLUE); numaAddNumber(nar, rval); numaAddNumber(nag, gval); numaAddNumber(nab, bval); } snprintf(buffer, sizeof(buffer), "/tmp/lept/plot/%03d", count++); rtitle = stringJoin("Red: ", title); gplotSimple1(nar, outformat, buffer, rtitle); snprintf(buffer, sizeof(buffer), "/tmp/lept/plot/%03d", count++); gtitle = stringJoin("Green: ", title); gplotSimple1(nag, outformat, buffer, gtitle); snprintf(buffer, sizeof(buffer), "/tmp/lept/plot/%03d", count++); btitle = stringJoin("Blue: ", title); gplotSimple1(nab, outformat, buffer, btitle); numaDestroy(&nar); numaDestroy(&nag); numaDestroy(&nab); LEPT_FREE(rtitle); LEPT_FREE(gtitle); LEPT_FREE(btitle); } else { na = numaCreate(npts); for (i = 0; i < npts; i++) { ptaGetIPt(pta, i, &x, &y); if (x < 0 || x >= w) continue; if (y < 0 || y >= h) continue; pixGetPixel(pixt, x, y, &val); numaAddNumber(na, (l_float32)val); } snprintf(buffer, sizeof(buffer), "/tmp/lept/plot/%03d", count++); gplotSimple1(na, outformat, buffer, title); numaDestroy(&na); } pixDestroy(&pixt); return 0; } /*! * \brief ptaGetPixelsFromPix() * * \param[in] pixs 1 bpp * \param[in] box [optional] can be null * \return pta, or NULL on error * * <pre> * Notes: * (1) Generates a pta of fg pixels in the pix, within the box. * If box == NULL, it uses the entire pix. 
* </pre> */ PTA * ptaGetPixelsFromPix(PIX *pixs, BOX *box) { l_int32 i, j, w, h, wpl, xstart, xend, ystart, yend, bw, bh; l_uint32 *data, *line; PTA *pta; PROCNAME("ptaGetPixelsFromPix"); if (!pixs || (pixGetDepth(pixs) != 1)) return (PTA *)ERROR_PTR("pixs undefined or not 1 bpp", procName, NULL); pixGetDimensions(pixs, &w, &h, NULL); data = pixGetData(pixs); wpl = pixGetWpl(pixs); xstart = ystart = 0; xend = w - 1; yend = h - 1; if (box) { boxGetGeometry(box, &xstart, &ystart, &bw, &bh); xend = xstart + bw - 1; yend = ystart + bh - 1; } if ((pta = ptaCreate(0)) == NULL) return (PTA *)ERROR_PTR("pta not made", procName, NULL); for (i = ystart; i <= yend; i++) { line = data + i * wpl; for (j = xstart; j <= xend; j++) { if (GET_DATA_BIT(line, j)) ptaAddPt(pta, j, i); } } return pta; } /*! * \brief pixGenerateFromPta() * * \param[in] pta * \param[in] w, h of pix * \return pix 1 bpp, or NULL on error * * <pre> * Notes: * (1) Points are rounded to nearest ints. * (2) Any points outside (w,h) are silently discarded. * (3) Output 1 bpp pix has values 1 for each point in the pta. * </pre> */ PIX * pixGenerateFromPta(PTA *pta, l_int32 w, l_int32 h) { l_int32 n, i, x, y; PIX *pix; PROCNAME("pixGenerateFromPta"); if (!pta) return (PIX *)ERROR_PTR("pta not defined", procName, NULL); if ((pix = pixCreate(w, h, 1)) == NULL) return (PIX *)ERROR_PTR("pix not made", procName, NULL); n = ptaGetCount(pta); for (i = 0; i < n; i++) { ptaGetIPt(pta, i, &x, &y); if (x < 0 || x >= w || y < 0 || y >= h) continue; pixSetPixel(pix, x, y, 1); } return pix; } /*! * \brief ptaGetBoundaryPixels() * * \param[in] pixs 1 bpp * \param[in] type L_BOUNDARY_FG, L_BOUNDARY_BG * \return pta, or NULL on error * * <pre> * Notes: * (1) This generates a pta of either fg or bg boundary pixels. * (2) See also pixGeneratePtaBoundary() for rendering of * fg boundary pixels. * </pre> */ PTA * ptaGetBoundaryPixels(PIX *pixs, l_int32 type) { PIX *pixt; PTA *pta; PROCNAME("ptaGetBoundaryPixels"); if (!pixs || (pixGetDepth(pixs) != 1)) return (PTA *)ERROR_PTR("pixs undefined or not 1 bpp", procName, NULL); if (type != L_BOUNDARY_FG && type != L_BOUNDARY_BG) return (PTA *)ERROR_PTR("invalid type", procName, NULL); if (type == L_BOUNDARY_FG) pixt = pixMorphSequence(pixs, "e3.3", 0); else pixt = pixMorphSequence(pixs, "d3.3", 0); pixXor(pixt, pixt, pixs); pta = ptaGetPixelsFromPix(pixt, NULL); pixDestroy(&pixt); return pta; } /*! * \brief ptaaGetBoundaryPixels() * * \param[in] pixs 1 bpp * \param[in] type L_BOUNDARY_FG, L_BOUNDARY_BG * \param[in] connectivity 4 or 8 * \param[out] pboxa [optional] bounding boxes of the c.c. * \param[out] ppixa [optional] pixa of the c.c. * \return ptaa, or NULL on error * * <pre> * Notes: * (1) This generates a ptaa of either fg or bg boundary pixels, * where each pta has the boundary pixels for a connected * component. * (2) We can't simply find all the boundary pixels and then select * those within the bounding box of each component, because * bounding boxes can overlap. It is necessary to extract and * dilate or erode each component separately. Note also that * special handling is required for bg pixels when the * component touches the pix boundary. 
 * </pre>
 */
PTAA *
ptaaGetBoundaryPixels(PIX      *pixs,
                      l_int32   type,
                      l_int32   connectivity,
                      BOXA    **pboxa,
                      PIXA    **ppixa)
{
l_int32  i, n, w, h, x, y, bw, bh, left, right, top, bot;
BOXA    *boxa;
PIX     *pixt1, *pixt2;
PIXA    *pixa;
PTA     *pta1, *pta2;
PTAA    *ptaa;

    PROCNAME("ptaaGetBoundaryPixels");

    if (pboxa) *pboxa = NULL;
    if (ppixa) *ppixa = NULL;
    if (!pixs || (pixGetDepth(pixs) != 1))
        return (PTAA *)ERROR_PTR("pixs undefined or not 1 bpp", procName, NULL);
    if (type != L_BOUNDARY_FG && type != L_BOUNDARY_BG)
        return (PTAA *)ERROR_PTR("invalid type", procName, NULL);
    if (connectivity != 4 && connectivity != 8)
        return (PTAA *)ERROR_PTR("connectivity not 4 or 8", procName, NULL);

    pixGetDimensions(pixs, &w, &h, NULL);
    boxa = pixConnComp(pixs, &pixa, connectivity);
    n = boxaGetCount(boxa);
    ptaa = ptaaCreate(0);
    for (i = 0; i < n; i++) {
        pixt1 = pixaGetPix(pixa, i, L_CLONE);
        boxaGetBoxGeometry(boxa, i, &x, &y, &bw, &bh);
        left = right = top = bot = 0;
        if (type == L_BOUNDARY_BG) {
            if (x > 0) left = 1;
            if (y > 0) top = 1;
            if (x + bw < w) right = 1;
            if (y + bh < h) bot = 1;
            pixt2 = pixAddBorderGeneral(pixt1, left, right, top, bot, 0);
        } else {
            pixt2 = pixClone(pixt1);
        }
        pta1 = ptaGetBoundaryPixels(pixt2, type);
        pta2 = ptaTransform(pta1, x - left, y - top, 1.0, 1.0);
        ptaaAddPta(ptaa, pta2, L_INSERT);
        ptaDestroy(&pta1);
        pixDestroy(&pixt1);
        pixDestroy(&pixt2);
    }

    if (pboxa)
        *pboxa = boxa;
    else
        boxaDestroy(&boxa);
    if (ppixa)
        *ppixa = pixa;
    else
        pixaDestroy(&pixa);
    return ptaa;
}


/*!
 * \brief   ptaaIndexLabeledPixels()
 *
 * \param[in]    pixs   32 bpp, of indices of c.c.
 * \param[out]   pncc   [optional] number of connected components
 * \return  ptaa, or NULL on error
 *
 * <pre>
 * Notes:
 *      (1) The pixel values in %pixs are the index of the connected component
 *          to which the pixel belongs; %pixs is typically generated from
 *          a 1 bpp pix by pixConnCompTransform().  Background pixels in
 *          the generating 1 bpp pix are represented in %pixs by 0.
 *          We do not check that the pixel values are correctly labelled.
 *      (2) Each pta in the returned ptaa gives the pixel locations
 *          corresponding to a connected component, with the label of each
 *          given by the index of the pta into the ptaa.
 *      (3) Initialize with the first pta in ptaa being empty and
 *          representing the background value (index 0) in the pix.
 * </pre>
 */
PTAA *
ptaaIndexLabeledPixels(PIX      *pixs,
                       l_int32  *pncc)
{
l_int32    wpl, index, i, j, w, h;
l_uint32   maxval;
l_uint32  *data, *line;
PTA       *pta;
PTAA      *ptaa;

    PROCNAME("ptaaIndexLabeledPixels");

    if (pncc) *pncc = 0;
    if (!pixs || (pixGetDepth(pixs) != 32))
        return (PTAA *)ERROR_PTR("pixs undef or not 32 bpp", procName, NULL);

        /* The number of c.c. is the maximum pixel value.  Use this to
         * initialize ptaa with sufficient pta arrays */
    pixGetMaxValueInRect(pixs, NULL, &maxval, NULL, NULL);
    if (pncc) *pncc = maxval;
    pta = ptaCreate(1);
    ptaa = ptaaCreate(maxval + 1);
    ptaaInitFull(ptaa, pta);
    ptaDestroy(&pta);

        /* Sweep over %pixs, saving the pixel coordinates of each pixel
         * with nonzero value in the appropriate pta, indexed by that value. */
    pixGetDimensions(pixs, &w, &h, NULL);
    data = pixGetData(pixs);
    wpl = pixGetWpl(pixs);
    for (i = 0; i < h; i++) {
        line = data + wpl * i;
        for (j = 0; j < w; j++) {
            index = line[j];
            if (index > 0)
                ptaaAddPt(ptaa, index, j, i);
        }
    }

    return ptaa;
}


/*!
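 * Illustrative sketch (not part of the library): collect the fg
 * boundary pixels of each 8-connected component, one pta per
 * component.  The function name is hypothetical; disabled with #if 0
 * so it does not affect the build.
 */
#if 0
static PTAA *
example_component_boundaries(PIX *pixs)   /* pixs: 1 bpp */
{
BOXA  *boxa;
PTAA  *ptaa;

    ptaa = ptaaGetBoundaryPixels(pixs, L_BOUNDARY_FG, 8, &boxa, NULL);
    boxaDestroy(&boxa);
    return ptaa;
}
#endif

/*!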
 * \brief   ptaGetNeighborPixLocs()
 *
 * \param[in]   pixs   any depth
 * \param[in]   x, y   pixel from which we search for nearest neighbors
 * \param[in]   conn   4 or 8 connectivity
 * \return  pta, or NULL on error
 *
 * <pre>
 * Notes:
 *      (1) Generates a pta of all valid neighbor pixel locations,
 *          or NULL on error.
 * </pre>
 */
PTA *
ptaGetNeighborPixLocs(PIX     *pixs,
                      l_int32  x,
                      l_int32  y,
                      l_int32  conn)
{
l_int32  w, h;
PTA     *pta;

    PROCNAME("ptaGetNeighborPixLocs");

    if (!pixs)
        return (PTA *)ERROR_PTR("pixs not defined", procName, NULL);
    pixGetDimensions(pixs, &w, &h, NULL);
    if (x < 0 || x >= w || y < 0 || y >= h)
        return (PTA *)ERROR_PTR("(x,y) not in pixs", procName, NULL);
    if (conn != 4 && conn != 8)
        return (PTA *)ERROR_PTR("conn not 4 or 8", procName, NULL);

    pta = ptaCreate(conn);
    if (x > 0)
        ptaAddPt(pta, x - 1, y);
    if (x < w - 1)
        ptaAddPt(pta, x + 1, y);
    if (y > 0)
        ptaAddPt(pta, x, y - 1);
    if (y < h - 1)
        ptaAddPt(pta, x, y + 1);
    if (conn == 8) {
        if (x > 0) {
            if (y > 0)
                ptaAddPt(pta, x - 1, y - 1);
            if (y < h - 1)
                ptaAddPt(pta, x - 1, y + 1);
        }
        if (x < w - 1) {
            if (y > 0)
                ptaAddPt(pta, x + 1, y - 1);
            if (y < h - 1)
                ptaAddPt(pta, x + 1, y + 1);
        }
    }

    return pta;
}


/*---------------------------------------------------------------------*
 *                      Interconversion with Numa                       *
 *---------------------------------------------------------------------*/
/*!
 * \brief   numaConvertToPta1()
 *
 * \param[in]   na   numa with implicit y(x)
 * \return  pta if OK; null on error
 */
PTA *
numaConvertToPta1(NUMA  *na)
{
l_int32    i, n;
l_float32  startx, delx, val;
PTA       *pta;

    PROCNAME("numaConvertToPta1");

    if (!na)
        return (PTA *)ERROR_PTR("na not defined", procName, NULL);

    n = numaGetCount(na);
    pta = ptaCreate(n);
    numaGetParameters(na, &startx, &delx);
    for (i = 0; i < n; i++) {
        numaGetFValue(na, i, &val);
        ptaAddPt(pta, startx + i * delx, val);
    }
    return pta;
}


/*!
 * \brief   numaConvertToPta2()
 *
 * \param[in]   nax
 * \param[in]   nay
 * \return  pta if OK; null on error
 */
PTA *
numaConvertToPta2(NUMA  *nax,
                  NUMA  *nay)
{
l_int32    i, n, nx, ny;
l_float32  valx, valy;
PTA       *pta;

    PROCNAME("numaConvertToPta2");

    if (!nax || !nay)
        return (PTA *)ERROR_PTR("nax and nay not both defined", procName, NULL);

    nx = numaGetCount(nax);
    ny = numaGetCount(nay);
    n = L_MIN(nx, ny);
    if (nx != ny)
        L_WARNING("nx = %d does not equal ny = %d\n", procName, nx, ny);
    pta = ptaCreate(n);
    for (i = 0; i < n; i++) {
        numaGetFValue(nax, i, &valx);
        numaGetFValue(nay, i, &valy);
        ptaAddPt(pta, valx, valy);
    }
    return pta;
}


/*!
 * \brief   ptaConvertToNuma()
 *
 * \param[in]    pta
 * \param[out]   pnax   addr of nax
 * \param[out]   pnay   addr of nay
 * \return  0 if OK, 1 on error
 */
l_int32
ptaConvertToNuma(PTA    *pta,
                 NUMA  **pnax,
                 NUMA  **pnay)
{
l_int32    i, n;
l_float32  valx, valy;

    PROCNAME("ptaConvertToNuma");

    if (pnax) *pnax = NULL;
    if (pnay) *pnay = NULL;
    if (!pnax || !pnay)
        return ERROR_INT("&nax and &nay not both defined", procName, 1);
    if (!pta)
        return ERROR_INT("pta not defined", procName, 1);

    n = ptaGetCount(pta);
    *pnax = numaCreate(n);
    *pnay = numaCreate(n);
    for (i = 0; i < n; i++) {
        ptaGetPt(pta, i, &valx, &valy);
        numaAddNumber(*pnax, valx);
        numaAddNumber(*pnay, valy);
    }
    return 0;
}


/*---------------------------------------------------------------------*
 *                        Display Pta and Ptaa                          *
 *---------------------------------------------------------------------*/
/*!
 * \brief   pixDisplayPta()
 *
 * \param[in]   pixd   can be same as pixs or NULL; 32 bpp if in-place
 * \param[in]   pixs   1, 2, 4, 8, 16 or 32 bpp
 * \param[in]   pta    of path to be plotted
 * \return  pixd  32 bpp RGB version of pixs, with path in green.
* * <pre> * Notes: * (1) To write on an existing pixs, pixs must be 32 bpp and * call with pixd == pixs: * pixDisplayPta(pixs, pixs, pta); * To write to a new pix, use pixd == NULL and call: * pixd = pixDisplayPta(NULL, pixs, pta); * (2) On error, returns pixd to avoid losing pixs if called as * pixs = pixDisplayPta(pixs, pixs, pta); * </pre> */ PIX * pixDisplayPta(PIX *pixd, PIX *pixs, PTA *pta) { l_int32 i, n, w, h, x, y; l_uint32 rpixel, gpixel, bpixel; PROCNAME("pixDisplayPta"); if (!pixs) return (PIX *)ERROR_PTR("pixs not defined", procName, pixd); if (!pta) return (PIX *)ERROR_PTR("pta not defined", procName, pixd); if (pixd && (pixd != pixs || pixGetDepth(pixd) != 32)) return (PIX *)ERROR_PTR("invalid pixd", procName, pixd); if (!pixd) pixd = pixConvertTo32(pixs); pixGetDimensions(pixd, &w, &h, NULL); composeRGBPixel(255, 0, 0, &rpixel); /* start point */ composeRGBPixel(0, 255, 0, &gpixel); composeRGBPixel(0, 0, 255, &bpixel); /* end point */ n = ptaGetCount(pta); for (i = 0; i < n; i++) { ptaGetIPt(pta, i, &x, &y); if (x < 0 || x >= w || y < 0 || y >= h) continue; if (i == 0) pixSetPixel(pixd, x, y, rpixel); else if (i < n - 1) pixSetPixel(pixd, x, y, gpixel); else pixSetPixel(pixd, x, y, bpixel); } return pixd; } /*! * \brief pixDisplayPtaaPattern() * * \param[in] pixd 32 bpp * \param[in] pixs 1, 2, 4, 8, 16 or 32 bpp; 32 bpp if in place * \param[in] ptaa giving locations at which the pattern is displayed * \param[in] pixp 1 bpp pattern to be placed such that its reference * point co-locates with each point in pta * \param[in] cx, cy reference point in pattern * \return pixd 32 bpp RGB version of pixs. * * <pre> * Notes: * (1) To write on an existing pixs, pixs must be 32 bpp and * call with pixd == pixs: * pixDisplayPtaPattern(pixs, pixs, pta, ...); * To write to a new pix, use pixd == NULL and call: * pixd = pixDisplayPtaPattern(NULL, pixs, pta, ...); * (2) Puts a random color on each pattern associated with a pta. * (3) On error, returns pixd to avoid losing pixs if called as * pixs = pixDisplayPtaPattern(pixs, pixs, pta, ...); * (4) A typical pattern to be used is a circle, generated with * generatePtaFilledCircle() * </pre> */ PIX * pixDisplayPtaaPattern(PIX *pixd, PIX *pixs, PTAA *ptaa, PIX *pixp, l_int32 cx, l_int32 cy) { l_int32 i, n; l_uint32 color; PIXCMAP *cmap; PTA *pta; PROCNAME("pixDisplayPtaaPattern"); if (!pixs) return (PIX *)ERROR_PTR("pixs not defined", procName, pixd); if (!ptaa) return (PIX *)ERROR_PTR("ptaa not defined", procName, pixd); if (pixd && (pixd != pixs || pixGetDepth(pixd) != 32)) return (PIX *)ERROR_PTR("invalid pixd", procName, pixd); if (!pixp) return (PIX *)ERROR_PTR("pixp not defined", procName, pixd); if (!pixd) pixd = pixConvertTo32(pixs); /* Use 256 random colors */ cmap = pixcmapCreateRandom(8, 0, 0); n = ptaaGetCount(ptaa); for (i = 0; i < n; i++) { pixcmapGetColor32(cmap, i % 256, &color); pta = ptaaGetPta(ptaa, i, L_CLONE); pixDisplayPtaPattern(pixd, pixd, pta, pixp, cx, cy, color); ptaDestroy(&pta); } pixcmapDestroy(&cmap); return pixd; } /*! * \brief pixDisplayPtaPattern() * * \param[in] pixd can be same as pixs or NULL; 32 bpp if in-place * \param[in] pixs 1, 2, 4, 8, 16 or 32 bpp * \param[in] pta giving locations at which the pattern is displayed * \param[in] pixp 1 bpp pattern to be placed such that its reference * point co-locates with each point in pta * \param[in] cx, cy reference point in pattern * \param[in] color in 0xrrggbb00 format * \return pixd 32 bpp RGB version of pixs. 
 *
 * <pre>
 * Notes:
 *      (1) To write on an existing pixs, pixs must be 32 bpp and
 *          call with pixd == pixs:
 *             pixDisplayPtaPattern(pixs, pixs, pta, ...);
 *          To write to a new pix, use pixd == NULL and call:
 *             pixd = pixDisplayPtaPattern(NULL, pixs, pta, ...);
 *      (2) On error, returns pixd to avoid losing pixs if called as
 *             pixs = pixDisplayPtaPattern(pixs, pixs, pta, ...);
 *      (3) A typical pattern to be used is a circle, generated with
 *             generatePtaFilledCircle()
 * </pre>
 */
PIX *
pixDisplayPtaPattern(PIX      *pixd,
                     PIX      *pixs,
                     PTA      *pta,
                     PIX      *pixp,
                     l_int32   cx,
                     l_int32   cy,
                     l_uint32  color)
{
l_int32  i, n, w, h, x, y;
PTA     *ptat;

    PROCNAME("pixDisplayPtaPattern");

    if (!pixs)
        return (PIX *)ERROR_PTR("pixs not defined", procName, pixd);
    if (!pta)
        return (PIX *)ERROR_PTR("pta not defined", procName, pixd);
    if (pixd && (pixd != pixs || pixGetDepth(pixd) != 32))
        return (PIX *)ERROR_PTR("invalid pixd", procName, pixd);
    if (!pixp)
        return (PIX *)ERROR_PTR("pixp not defined", procName, pixd);

    if (!pixd)
        pixd = pixConvertTo32(pixs);
    pixGetDimensions(pixs, &w, &h, NULL);
    ptat = ptaReplicatePattern(pta, pixp, NULL, cx, cy, w, h);

    n = ptaGetCount(ptat);
    for (i = 0; i < n; i++) {
        ptaGetIPt(ptat, i, &x, &y);
        if (x < 0 || x >= w || y < 0 || y >= h)
            continue;
        pixSetPixel(pixd, x, y, color);
    }

    ptaDestroy(&ptat);
    return pixd;
}


/*!
 * \brief   ptaReplicatePattern()
 *
 * \param[in]   ptas    "sparse" input pta
 * \param[in]   pixp    [optional] 1 bpp pattern, to be replicated
 *                      in output pta
 * \param[in]   ptap    [optional] set of pts, to be replicated in output pta
 * \param[in]   cx, cy  reference point in pattern
 * \param[in]   w, h    clipping sizes for output pta
 * \return  ptad  with all points of replicated pattern, or NULL on error
 *
 * <pre>
 * Notes:
 *      (1) You can use either the image %pixp or the set of pts %ptap.
 *      (2) The pattern is placed with its reference point at each point
 *          in ptas, and all the fg pixels are collected into ptad.
 *          For %pixp, this is equivalent to blitting pixp at each point
 *          in ptas, and then converting the resulting pix to a pta.
 * </pre>
 */
PTA *
ptaReplicatePattern(PTA     *ptas,
                    PIX     *pixp,
                    PTA     *ptap,
                    l_int32  cx,
                    l_int32  cy,
                    l_int32  w,
                    l_int32  h)
{
l_int32  i, j, n, np, x, y, xp, yp, xf, yf;
PTA     *ptat, *ptad;

    PROCNAME("ptaReplicatePattern");

    if (!ptas)
        return (PTA *)ERROR_PTR("ptas not defined", procName, NULL);
    if (!pixp && !ptap)
        return (PTA *)ERROR_PTR("no pattern is defined", procName, NULL);
    if (pixp && ptap)
        L_WARNING("pixp and ptap defined; using ptap\n", procName);

    n = ptaGetCount(ptas);
    ptad = ptaCreate(n);
    if (ptap)
        ptat = ptaClone(ptap);
    else
        ptat = ptaGetPixelsFromPix(pixp, NULL);
    np = ptaGetCount(ptat);
    for (i = 0; i < n; i++) {
        ptaGetIPt(ptas, i, &x, &y);
        for (j = 0; j < np; j++) {
            ptaGetIPt(ptat, j, &xp, &yp);
            xf = x - cx + xp;
            yf = y - cy + yp;
            if (xf >= 0 && xf < w && yf >= 0 && yf < h)
                ptaAddPt(ptad, xf, yf);
        }
    }
    ptaDestroy(&ptat);

    return ptad;
}


/*!
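 * Illustrative sketch (not part of the library): stamp a small filled
 * circle, in red, at each point of a pta.  generatePtaFilledCircle()
 * is the circle generator referenced in the notes above; the function
 * name here is hypothetical, and the block is disabled with #if 0 so
 * it does not affect the build.
 */
#if 0
static PIX *
example_stamp_circles(PIX *pixs, PTA *pta)
{
PIX  *pixp, *pixd;
PTA  *ptac;

    ptac = generatePtaFilledCircle(4);        /* pattern: radius 4 */
    pixp = pixGenerateFromPta(ptac, 9, 9);    /* 1 bpp pattern pix */
    pixd = pixDisplayPtaPattern(NULL, pixs, pta, pixp, 4, 4, 0xff000000);
    ptaDestroy(&ptac);
    pixDestroy(&pixp);
    return pixd;                              /* 32 bpp RGB result */
}
#endif

/*!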
* \brief pixDisplayPtaa() * * \param[in] pixs 1, 2, 4, 8, 16 or 32 bpp * \param[in] ptaa array of paths to be plotted * \return pixd 32 bpp RGB version of pixs, with paths plotted * in different colors, or NULL on error */ PIX * pixDisplayPtaa(PIX *pixs, PTAA *ptaa) { l_int32 i, j, w, h, npta, npt, x, y, rv, gv, bv; l_uint32 *pixela; NUMA *na1, *na2, *na3; PIX *pixd; PTA *pta; PROCNAME("pixDisplayPtaa"); if (!pixs) return (PIX *)ERROR_PTR("pixs not defined", procName, NULL); if (!ptaa) return (PIX *)ERROR_PTR("ptaa not defined", procName, NULL); npta = ptaaGetCount(ptaa); if (npta == 0) return (PIX *)ERROR_PTR("no pta", procName, NULL); if ((pixd = pixConvertTo32(pixs)) == NULL) return (PIX *)ERROR_PTR("pixd not made", procName, NULL); pixGetDimensions(pixd, &w, &h, NULL); /* Make a colormap for the paths */ if ((pixela = (l_uint32 *)LEPT_CALLOC(npta, sizeof(l_uint32))) == NULL) { pixDestroy(&pixd); return (PIX *)ERROR_PTR("calloc fail for pixela", procName, NULL); } na1 = numaPseudorandomSequence(256, 14657); na2 = numaPseudorandomSequence(256, 34631); na3 = numaPseudorandomSequence(256, 54617); for (i = 0; i < npta; i++) { numaGetIValue(na1, i % 256, &rv); numaGetIValue(na2, i % 256, &gv); numaGetIValue(na3, i % 256, &bv); composeRGBPixel(rv, gv, bv, &pixela[i]); } numaDestroy(&na1); numaDestroy(&na2); numaDestroy(&na3); for (i = 0; i < npta; i++) { pta = ptaaGetPta(ptaa, i, L_CLONE); npt = ptaGetCount(pta); for (j = 0; j < npt; j++) { ptaGetIPt(pta, j, &x, &y); if (x < 0 || x >= w || y < 0 || y >= h) continue; pixSetPixel(pixd, x, y, pixela[i]); } ptaDestroy(&pta); } LEPT_FREE(pixela); return pixd; }
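

/* Illustrative sketch (not part of the library): plot the pixel values
 * of an image along its fg boundary, combining two functions from the
 * Pix interconversion section above.  The function name is
 * hypothetical; disabled with #if 0 so it does not affect the build. */
#if 0
static void
example_plot_boundary_profile(PIX *pixs)   /* pixs: 1 bpp */
{
PTA  *pta;

    pta = ptaGetBoundaryPixels(pixs, L_BOUNDARY_FG);
    pixPlotAlongPta(pixs, pta, GPLOT_PNG, "fg boundary profile");
    ptaDestroy(&pta);
}
#endif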
DocCreator/DocCreator
thirdparty/leptonica/src/ptafunc1.c
C
lgpl-3.0
75,693
/** * @file * Point To Point Protocol Sequential API module * */ /* * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * */ #include "lwip/opt.h" #if LWIP_PPP_API /* don't build if not configured for use in lwipopts.h */ #include "lwip/pppapi.h" #include "lwip/priv/tcpip_priv.h" #include "netif/ppp/pppoe.h" #include "netif/ppp/pppol2tp.h" #include "netif/ppp/pppos.h" /** * Call ppp_set_default() inside the tcpip_thread context. */ static err_t pppapi_do_ppp_set_default(struct tcpip_api_call *m) { struct pppapi_msg_msg *msg = (struct pppapi_msg_msg *)m; ppp_set_default(msg->ppp); return ERR_OK; } /** * Call ppp_set_default() in a thread-safe way by running that function inside the * tcpip_thread context. */ void pppapi_set_default(ppp_pcb *pcb) { struct pppapi_msg msg; msg.msg.ppp = pcb; tcpip_api_call(pppapi_do_ppp_set_default, &msg.call); } /** * Call ppp_set_auth() inside the tcpip_thread context. */ static err_t pppapi_do_ppp_set_auth(struct tcpip_api_call *m) { struct pppapi_msg *msg = (struct pppapi_msg *)m; ppp_set_auth(msg->msg.ppp, msg->msg.msg.setauth.authtype, msg->msg.msg.setauth.user, msg->msg.msg.setauth.passwd); return ERR_OK; } /** * Call ppp_set_auth() in a thread-safe way by running that function inside the * tcpip_thread context. */ void pppapi_set_auth(ppp_pcb *pcb, u8_t authtype, const char *user, const char *passwd) { struct pppapi_msg msg; msg.msg.ppp = pcb; msg.msg.msg.setauth.authtype = authtype; msg.msg.msg.setauth.user = user; msg.msg.msg.setauth.passwd = passwd; tcpip_api_call(pppapi_do_ppp_set_auth, &msg.call); } #if PPP_NOTIFY_PHASE /** * Call ppp_set_notify_phase_callback() inside the tcpip_thread context. */ static err_t pppapi_do_ppp_set_notify_phase_callback(struct tcpip_api_call *m) { struct pppapi_msg_msg *msg = (struct pppapi_msg_msg *)m; ppp_set_notify_phase_callback(msg->ppp, msg->msg.setnotifyphasecb.notify_phase_cb); return ERR_OK; } /** * Call ppp_set_notify_phase_callback() in a thread-safe way by running that function inside the * tcpip_thread context. 
*/ void pppapi_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb) { struct pppapi_msg msg; msg.function = pppapi_do_ppp_set_notify_phase_callback; msg.msg.ppp = pcb; msg.msg.msg.setnotifyphasecb.notify_phase_cb = notify_phase_cb; tcpip_api_call(pppapi_do_ppp_set_notify_phase_callback, &msg.call); } #endif /* PPP_NOTIFY_PHASE */ #if PPPOS_SUPPORT /** * Call pppos_create() inside the tcpip_thread context. */ static err_t pppapi_do_pppos_create(struct tcpip_api_call *m) { struct pppapi_msg *msg = (struct pppapi_msg *)(m); msg->msg.ppp = pppos_create(msg->msg.msg.serialcreate.pppif, msg->msg.msg.serialcreate.output_cb, msg->msg.msg.serialcreate.link_status_cb, msg->msg.msg.serialcreate.ctx_cb); return ERR_OK; } /** * Call pppos_create() in a thread-safe way by running that function inside the * tcpip_thread context. */ ppp_pcb* pppapi_pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, ppp_link_status_cb_fn link_status_cb, void *ctx_cb) { struct pppapi_msg msg; msg.msg.msg.serialcreate.pppif = pppif; msg.msg.msg.serialcreate.output_cb = output_cb; msg.msg.msg.serialcreate.link_status_cb = link_status_cb; msg.msg.msg.serialcreate.ctx_cb = ctx_cb; tcpip_api_call(pppapi_do_pppos_create, &msg.call); return msg.msg.ppp; } #endif /* PPPOS_SUPPORT */ #if PPPOE_SUPPORT /** * Call pppoe_create() inside the tcpip_thread context. */ static err_t pppapi_do_pppoe_create(struct tcpip_api_call *m) { struct pppapi_msg_msg *msg = (struct pppapi_msg_msg *)m; msg->ppp = pppoe_create(msg->msg.ethernetcreate.pppif, msg->msg.ethernetcreate.ethif, msg->msg.ethernetcreate.service_name, msg->msg.ethernetcreate.concentrator_name, msg->msg.ethernetcreate.link_status_cb, msg->msg.ethernetcreate.ctx_cb); return ERR_OK; } /** * Call pppoe_create() in a thread-safe way by running that function inside the * tcpip_thread context. */ ppp_pcb* pppapi_pppoe_create(struct netif *pppif, struct netif *ethif, const char *service_name, const char *concentrator_name, ppp_link_status_cb_fn link_status_cb, void *ctx_cb) { struct pppapi_msg msg; msg.msg.msg.ethernetcreate.pppif = pppif; msg.msg.msg.ethernetcreate.ethif = ethif; msg.msg.msg.ethernetcreate.service_name = service_name; msg.msg.msg.ethernetcreate.concentrator_name = concentrator_name; msg.msg.msg.ethernetcreate.link_status_cb = link_status_cb; msg.msg.msg.ethernetcreate.ctx_cb = ctx_cb; tcpip_api_call(pppapi_do_pppoe_create, &msg.call); return msg.msg.ppp; } #endif /* PPPOE_SUPPORT */ #if PPPOL2TP_SUPPORT /** * Call pppol2tp_create() inside the tcpip_thread context. */ static err_t pppapi_do_pppol2tp_create(struct tcpip_api_call *m) { struct pppapi_msg_msg *msg = (struct pppapi_msg_msg *)m; msg->ppp = pppol2tp_create(msg->msg.l2tpcreate.pppif, msg->msg.l2tpcreate.netif, msg->msg.l2tpcreate.ipaddr, msg->msg.l2tpcreate.port, #if PPPOL2TP_AUTH_SUPPORT msg->msg.l2tpcreate.secret, msg->msg.l2tpcreate.secret_len, #else /* PPPOL2TP_AUTH_SUPPORT */ NULL, #endif /* PPPOL2TP_AUTH_SUPPORT */ msg->msg.l2tpcreate.link_status_cb, msg->msg.l2tpcreate.ctx_cb); return ERR_OK; } /** * Call pppol2tp_create() in a thread-safe way by running that function inside the * tcpip_thread context. 
*/ ppp_pcb* pppapi_pppol2tp_create(struct netif *pppif, struct netif *netif, ip_addr_t *ipaddr, u16_t port, const u8_t *secret, u8_t secret_len, ppp_link_status_cb_fn link_status_cb, void *ctx_cb) { struct pppapi_msg msg; msg.msg.msg.l2tpcreate.pppif = pppif; msg.msg.msg.l2tpcreate.netif = netif; msg.msg.msg.l2tpcreate.ipaddr = ipaddr; msg.msg.msg.l2tpcreate.port = port; #if PPPOL2TP_AUTH_SUPPORT msg.msg.msg.l2tpcreate.secret = secret; msg.msg.msg.l2tpcreate.secret_len = secret_len; #endif /* PPPOL2TP_AUTH_SUPPORT */ msg.msg.msg.l2tpcreate.link_status_cb = link_status_cb; msg.msg.msg.l2tpcreate.ctx_cb = ctx_cb; tcpip_api_call(pppapi_do_pppol2tp_create, &msg.call); return msg.msg.ppp; } #endif /* PPPOL2TP_SUPPORT */ /** * Call ppp_connect() inside the tcpip_thread context. */ static err_t pppapi_do_ppp_connect(struct tcpip_api_call *m) { struct pppapi_msg *msg = (struct pppapi_msg *)m; return ppp_connect(msg->msg.ppp, msg->msg.msg.connect.holdoff); } /** * Call ppp_connect() in a thread-safe way by running that function inside the * tcpip_thread context. */ err_t pppapi_connect(ppp_pcb *pcb, u16_t holdoff) { struct pppapi_msg msg; msg.msg.ppp = pcb; msg.msg.msg.connect.holdoff = holdoff; return tcpip_api_call(pppapi_do_ppp_connect, &msg.call); } #if PPP_SERVER /** * Call ppp_listen() inside the tcpip_thread context. */ static void pppapi_do_ppp_listen(struct pppapi_msg_msg *msg) { msg->err = ppp_listen(msg->ppp, msg->msg.listen.addrs); TCPIP_PPPAPI_ACK(msg); } /** * Call ppp_listen() in a thread-safe way by running that function inside the * tcpip_thread context. */ err_t pppapi_listen(ppp_pcb *pcb, struct ppp_addrs *addrs) { struct pppapi_msg msg; msg.function = pppapi_do_ppp_listen; msg.msg.ppp = pcb; msg.msg.msg.listen.addrs = addrs; TCPIP_PPPAPI(&msg); return msg.msg.err; } #endif /* PPP_SERVER */ /** * Call ppp_close() inside the tcpip_thread context. */ static err_t pppapi_do_ppp_close(struct tcpip_api_call *m) { struct pppapi_msg *msg = (struct pppapi_msg *)m; return ppp_close(msg->msg.ppp, msg->msg.msg.close.nocarrier); } /** * Call ppp_close() in a thread-safe way by running that function inside the * tcpip_thread context. */ err_t pppapi_close(ppp_pcb *pcb, u8_t nocarrier) { struct pppapi_msg msg; msg.msg.ppp = pcb; msg.msg.msg.close.nocarrier = nocarrier; return tcpip_api_call(pppapi_do_ppp_close, &msg.call); } /** * Call ppp_free() inside the tcpip_thread context. */ static err_t pppapi_do_ppp_free(struct tcpip_api_call *m) { struct pppapi_msg_msg *msg = (struct pppapi_msg_msg *)m; return ppp_free(msg->ppp); } /** * Call ppp_free() in a thread-safe way by running that function inside the * tcpip_thread context. */ err_t pppapi_free(ppp_pcb *pcb) { struct pppapi_msg msg; msg.msg.ppp = pcb; return tcpip_api_call(pppapi_do_ppp_free, &msg.call); } /** * Call ppp_ioctl() inside the tcpip_thread context. */ static err_t pppapi_do_ppp_ioctl(struct tcpip_api_call *m) { struct pppapi_msg *msg = (struct pppapi_msg *)m; return ppp_ioctl(msg->msg.ppp, msg->msg.msg.ioctl.cmd, msg->msg.msg.ioctl.arg); } /** * Call ppp_ioctl() in a thread-safe way by running that function inside the * tcpip_thread context. */ err_t pppapi_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg) { struct pppapi_msg msg; msg.msg.ppp = pcb; msg.msg.msg.ioctl.cmd = cmd; msg.msg.msg.ioctl.arg = arg; return tcpip_api_call(pppapi_do_ppp_ioctl, &msg.call); } #endif /* LWIP_PPP_API */
jaracil/esp-idf
components/lwip/api/pppapi.c
C
apache-2.0
10,470
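Every wrapper in pppapi.c follows the same pattern: pack the arguments into a pppapi_msg and run the real ppp_*() call inside the tcpip_thread via tcpip_api_call(), so application threads never touch PPP state directly. A usage sketch for the PPPoS path, assuming lwIP is built with PPPOS_SUPPORT; uart_write() and the netif are application-provided placeholders:

#include "lwip/pppapi.h"
#include "netif/ppp/pppos.h"

static struct netif ppp_netif;
static ppp_pcb *ppp;

/* Placeholder: push PPP-framed bytes out of the serial port. */
extern void uart_write(const u8_t *data, u32_t len);

static u32_t ppp_output_cb(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx)
{
    (void)pcb; (void)ctx;
    uart_write(data, len);
    return len;
}

static void ppp_status_cb(ppp_pcb *pcb, int err_code, void *ctx)
{
    (void)pcb; (void)ctx;
    if (err_code == PPPERR_NONE) {
        /* Link is up; the negotiated addresses are now on ppp_netif. */
    }
}

void ppp_start(void)
{
    ppp = pppapi_pppos_create(&ppp_netif, ppp_output_cb, ppp_status_cb, NULL);
    if (ppp == NULL)
        return;
    pppapi_set_default(ppp);
    pppapi_set_auth(ppp, PPPAUTHTYPE_PAP, "user", "pass");
    pppapi_connect(ppp, 0);
}

/* Feed bytes received from the serial port back into lwIP (thread-safe). */
void ppp_rx(u8_t *data, int len)
{
    pppos_input_tcpip(ppp, data, len);
}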
/** * FreeRDP: A Remote Desktop Protocol Implementation * X11 Video Redirection * * Copyright 2010-2011 Vic Lee * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/ipc.h> #include <sys/shm.h> #include <X11/Xlib.h> #include <X11/Xutil.h> #include <X11/Xatom.h> #include <X11/extensions/XShm.h> #include <winpr/crt.h> #include <freerdp/utils/event.h> #include <freerdp/client/tsmf.h> #include "xf_tsmf.h" #ifdef WITH_XV #include <X11/extensions/Xv.h> #include <X11/extensions/Xvlib.h> typedef struct xf_xv_context xfXvContext; struct xf_xv_context { long xv_port; Atom xv_colorkey_atom; int xv_image_size; int xv_shmid; char* xv_shmaddr; UINT32* xv_pixfmts; }; #ifdef WITH_DEBUG_XV #define DEBUG_XV(fmt, ...) DEBUG_CLASS(XV, fmt, ## __VA_ARGS__) #else #define DEBUG_XV(fmt, ...) DEBUG_NULL(fmt, ## __VA_ARGS__) #endif void xf_tsmf_init(xfContext* xfc, long xv_port) { int ret; unsigned int i; unsigned int version; unsigned int release; unsigned int event_base; unsigned int error_base; unsigned int request_base; unsigned int num_adaptors; xfXvContext* xv; XvAdaptorInfo* ai; XvAttribute* attr; XvImageFormatValues* fo; xv = (xfXvContext*) malloc(sizeof(xfXvContext)); ZeroMemory(xv, sizeof(xfXvContext)); xfc->xv_context = xv; xv->xv_colorkey_atom = None; xv->xv_image_size = 0; xv->xv_port = xv_port; if (!XShmQueryExtension(xfc->display)) { DEBUG_XV("no shmem available."); return; } ret = XvQueryExtension(xfc->display, &version, &release, &request_base, &event_base, &error_base); if (ret != Success) { DEBUG_XV("XvQueryExtension failed %d.", ret); return; } DEBUG_XV("version %u release %u", version, release); ret = XvQueryAdaptors(xfc->display, DefaultRootWindow(xfc->display), &num_adaptors, &ai); if (ret != Success) { DEBUG_XV("XvQueryAdaptors failed %d.", ret); return; } for (i = 0; i < num_adaptors; i++) { DEBUG_XV("adapter port %ld-%ld (%s)", ai[i].base_id, ai[i].base_id + ai[i].num_ports - 1, ai[i].name); if (xv->xv_port == 0 && i == num_adaptors - 1) xv->xv_port = ai[i].base_id; } if (num_adaptors > 0) XvFreeAdaptorInfo(ai); if (xv->xv_port == 0) { DEBUG_XV("no adapter selected, video frames will not be processed."); return; } DEBUG_XV("selected %ld", xv->xv_port); attr = XvQueryPortAttributes(xfc->display, xv->xv_port, &ret); for (i = 0; i < (unsigned int)ret; i++) { if (strcmp(attr[i].name, "XV_COLORKEY") == 0) { xv->xv_colorkey_atom = XInternAtom(xfc->display, "XV_COLORKEY", FALSE); XvSetPortAttribute(xfc->display, xv->xv_port, xv->xv_colorkey_atom, attr[i].min_value + 1); break; } } XFree(attr); #ifdef WITH_DEBUG_XV fprintf(stderr, "xf_tsmf_init: pixel format "); #endif fo = XvListImageFormats(xfc->display, xv->xv_port, &ret); if (ret > 0) { xv->xv_pixfmts = (UINT32*) malloc((ret + 1) * sizeof(UINT32)); ZeroMemory(xv->xv_pixfmts, (ret + 1) * sizeof(UINT32)); for (i = 0; i < ret; i++) { xv->xv_pixfmts[i] = fo[i].id; #ifdef WITH_DEBUG_XV fprintf(stderr, "%c%c%c%c ", ((char*)(xv->xv_pixfmts + i))[0], 
((char*)(xv->xv_pixfmts + i))[1], ((char*)(xv->xv_pixfmts + i))[2], ((char*)(xv->xv_pixfmts + i))[3]); #endif } xv->xv_pixfmts[i] = 0; } XFree(fo); #ifdef WITH_DEBUG_XV fprintf(stderr, "\n"); #endif } void xf_tsmf_uninit(xfContext* xfc) { xfXvContext* xv = (xfXvContext*) xfc->xv_context; if (xv) { if (xv->xv_image_size > 0) { shmdt(xv->xv_shmaddr); shmctl(xv->xv_shmid, IPC_RMID, NULL); } if (xv->xv_pixfmts) { free(xv->xv_pixfmts); xv->xv_pixfmts = NULL; } free(xv); xfc->xv_context = NULL; } } static BOOL xf_tsmf_is_format_supported(xfXvContext* xv, UINT32 pixfmt) { int i; if (!xv->xv_pixfmts) return FALSE; for (i = 0; xv->xv_pixfmts[i]; i++) { if (xv->xv_pixfmts[i] == pixfmt) return TRUE; } return FALSE; } static void xf_process_tsmf_video_frame_event(xfContext* xfc, RDP_VIDEO_FRAME_EVENT* vevent) { int i; BYTE* data1; BYTE* data2; UINT32 pixfmt; UINT32 xvpixfmt; BOOL converti420yv12 = FALSE; XvImage * image; int colorkey = 0; XShmSegmentInfo shminfo; xfXvContext* xv = (xfXvContext*) xfc->xv_context; if (xv->xv_port == 0) return; /* In case the player is minimized */ if (vevent->x < -2048 || vevent->y < -2048 || vevent->num_visible_rects <= 0) return; if (xv->xv_colorkey_atom != None) { XvGetPortAttribute(xfc->display, xv->xv_port, xv->xv_colorkey_atom, &colorkey); XSetFunction(xfc->display, xfc->gc, GXcopy); XSetFillStyle(xfc->display, xfc->gc, FillSolid); XSetForeground(xfc->display, xfc->gc, colorkey); for (i = 0; i < vevent->num_visible_rects; i++) { XFillRectangle(xfc->display, xfc->window->handle, xfc->gc, vevent->x + vevent->visible_rects[i].x, vevent->y + vevent->visible_rects[i].y, vevent->visible_rects[i].width, vevent->visible_rects[i].height); } } else { XSetClipRectangles(xfc->display, xfc->gc, vevent->x, vevent->y, (XRectangle*) vevent->visible_rects, vevent->num_visible_rects, YXBanded); } pixfmt = vevent->frame_pixfmt; if (xf_tsmf_is_format_supported(xv, pixfmt)) { xvpixfmt = pixfmt; } else if (pixfmt == RDP_PIXFMT_I420 && xf_tsmf_is_format_supported(xv, RDP_PIXFMT_YV12)) { xvpixfmt = RDP_PIXFMT_YV12; converti420yv12 = TRUE; } else if (pixfmt == RDP_PIXFMT_YV12 && xf_tsmf_is_format_supported(xv, RDP_PIXFMT_I420)) { xvpixfmt = RDP_PIXFMT_I420; converti420yv12 = TRUE; } else { DEBUG_XV("pixel format 0x%X not supported by hardware.", pixfmt); return; } image = XvShmCreateImage(xfc->display, xv->xv_port, xvpixfmt, 0, vevent->frame_width, vevent->frame_height, &shminfo); if (xv->xv_image_size != image->data_size) { if (xv->xv_image_size > 0) { shmdt(xv->xv_shmaddr); shmctl(xv->xv_shmid, IPC_RMID, NULL); } xv->xv_image_size = image->data_size; xv->xv_shmid = shmget(IPC_PRIVATE, image->data_size, IPC_CREAT | 0777); xv->xv_shmaddr = shmat(xv->xv_shmid, 0, 0); } shminfo.shmid = xv->xv_shmid; shminfo.shmaddr = image->data = xv->xv_shmaddr; shminfo.readOnly = FALSE; if (!XShmAttach(xfc->display, &shminfo)) { XFree(image); DEBUG_XV("XShmAttach failed."); return; } /* The video driver may align each line to a different size and we need to convert our original image data. 
*/ switch (pixfmt) { case RDP_PIXFMT_I420: case RDP_PIXFMT_YV12: /* Y */ if (image->pitches[0] == vevent->frame_width) { memcpy(image->data + image->offsets[0], vevent->frame_data, vevent->frame_width * vevent->frame_height); } else { for (i = 0; i < vevent->frame_height; i++) { memcpy(image->data + image->offsets[0] + i * image->pitches[0], vevent->frame_data + i * vevent->frame_width, vevent->frame_width); } } /* UV */ /* Conversion between I420 and YV12 is to simply swap U and V */ if (converti420yv12 == FALSE) { data1 = vevent->frame_data + vevent->frame_width * vevent->frame_height; data2 = vevent->frame_data + vevent->frame_width * vevent->frame_height + vevent->frame_width * vevent->frame_height / 4; } else { data2 = vevent->frame_data + vevent->frame_width * vevent->frame_height; data1 = vevent->frame_data + vevent->frame_width * vevent->frame_height + vevent->frame_width * vevent->frame_height / 4; image->id = pixfmt == RDP_PIXFMT_I420 ? RDP_PIXFMT_YV12 : RDP_PIXFMT_I420; } if (image->pitches[1] * 2 == vevent->frame_width) { memcpy(image->data + image->offsets[1], data1, vevent->frame_width * vevent->frame_height / 4); memcpy(image->data + image->offsets[2], data2, vevent->frame_width * vevent->frame_height / 4); } else { for (i = 0; i < vevent->frame_height / 2; i++) { memcpy(image->data + image->offsets[1] + i * image->pitches[1], data1 + i * vevent->frame_width / 2, vevent->frame_width / 2); memcpy(image->data + image->offsets[2] + i * image->pitches[2], data2 + i * vevent->frame_width / 2, vevent->frame_width / 2); } } break; default: memcpy(image->data, vevent->frame_data, image->data_size <= vevent->frame_size ? image->data_size : vevent->frame_size); break; } XvShmPutImage(xfc->display, xv->xv_port, xfc->window->handle, xfc->gc, image, 0, 0, image->width, image->height, vevent->x, vevent->y, vevent->width, vevent->height, FALSE); if (xv->xv_colorkey_atom == None) XSetClipMask(xfc->display, xfc->gc, None); XSync(xfc->display, FALSE); XShmDetach(xfc->display, &shminfo); XFree(image); } static void xf_process_tsmf_redraw_event(xfContext* xfc, RDP_REDRAW_EVENT* revent) { XSetFunction(xfc->display, xfc->gc, GXcopy); XSetFillStyle(xfc->display, xfc->gc, FillSolid); XCopyArea(xfc->display, xfc->primary, xfc->window->handle, xfc->gc, revent->x, revent->y, revent->width, revent->height, revent->x, revent->y); } void xf_process_tsmf_event(xfContext* xfc, wMessage* event) { switch (GetMessageType(event->id)) { case TsmfChannel_VideoFrame: xf_process_tsmf_video_frame_event(xfc, (RDP_VIDEO_FRAME_EVENT*) event); break; case TsmfChannel_Redraw: xf_process_tsmf_redraw_event(xfc, (RDP_REDRAW_EVENT*) event); break; } } #else /* WITH_XV */ void xf_tsmf_init(xfContext* xfc, long xv_port) { } void xf_tsmf_uninit(xfContext* xfc) { } void xf_process_tsmf_event(xfContext* xfc, wMessage* event) { } #endif /* WITH_XV */
0359xiaodong/FreeRDP
client/X11/xf_tsmf.c
C
apache-2.0
10,083
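The frame-copy code above deals with two separate concerns: per-plane pitches chosen by the X video driver, and the fact that I420 and YV12 differ only in the order of the U and V planes. The following standalone sketch (not FreeRDP code) shows the plane layout and the swap for the simple case of tightly packed planes, which is exactly what the pitch handling above generalizes:

#include <stdint.h>
#include <string.h>

/* A w x h 4:2:0 frame packed without padding is laid out as one w*h luma
 * plane followed by two (w/2)*(h/2) chroma planes of w*h/4 bytes each.
 * I420 stores them as Y,U,V and YV12 as Y,V,U, so conversion is just a
 * swapped copy of the chroma planes. */
static void i420_to_yv12(const uint8_t *src, uint8_t *dst, int w, int h)
{
    size_t ysize = (size_t)w * h;
    size_t csize = ysize / 4;

    memcpy(dst, src, ysize);                          /* Y plane        */
    memcpy(dst + ysize, src + ysize + csize, csize);  /* dst V <- src V */
    memcpy(dst + ysize + csize, src + ysize, csize);  /* dst U <- src U */
}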
/*
 * @brief RDTSC implementation
 *
 * @date 22.10.2013
 */

#include <stdint.h>

#include <hal/cpu_info.h>

uint64_t get_cpu_counter(void) {
	uint32_t hi = 0, lo = 0;

	/* RDTSC returns the 64-bit timestamp counter in EDX:EAX. Tying the
	 * outputs to those registers ("=a"/"=d") keeps the compiler from
	 * allocating them to registers the instruction itself overwrites. */
	__asm__ volatile ("rdtsc" : "=a"(lo), "=d"(hi));

	return ((uint64_t) hi << 32) | lo;
}
gzoom13/embox
src/arch/x86/lib/cpu_performance/cpu_counter.c
C
bsd-2-clause
310
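A small consumer sketch for get_cpu_counter(): bracket a region of interest with two reads and look at the delta. It assumes the counter ticks at a constant rate for the duration of the measurement and that the prototype comes from the same hal/cpu_info.h header the implementation includes; the loop body is just filler work.

#include <stdint.h>
#include <stdio.h>

#include <hal/cpu_info.h>   /* assumed to declare get_cpu_counter() */

void measure_example(void)
{
    uint64_t start, end;
    volatile unsigned acc = 0;
    unsigned i;

    start = get_cpu_counter();
    for (i = 0; i < 1000000; i++)
        acc += i;                      /* work being measured */
    end = get_cpu_counter();

    printf("elapsed ~%llu cycles\n", (unsigned long long)(end - start));
}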
/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2006 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #if HAVE_CONFIG_H # include <config.h> #endif /* HAVE_CONFIG_H */ #include <stdio.h> #include <stdlib.h> #include <pthread.h> #include <netinet/in.h> #include <string.h> #include <infiniband/opcode.h> #include "mthca.h" #include "doorbell.h" enum { MTHCA_CQ_DOORBELL = 0x20 }; enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 }; #define MTHCA_TAVOR_CQ_DB_INC_CI (1 << 24) #define MTHCA_TAVOR_CQ_DB_REQ_NOT (2 << 24) #define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL (3 << 24) #define MTHCA_TAVOR_CQ_DB_SET_CI (4 << 24) #define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24) #define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL (1 << 24) #define MTHCA_ARBEL_CQ_DB_REQ_NOT (2 << 24) #define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24) enum { MTHCA_CQ_ENTRY_OWNER_SW = 0x00, MTHCA_CQ_ENTRY_OWNER_HW = 0x80, MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe }; enum { SYNDROME_LOCAL_LENGTH_ERR = 0x01, SYNDROME_LOCAL_QP_OP_ERR = 0x02, SYNDROME_LOCAL_EEC_OP_ERR = 0x03, SYNDROME_LOCAL_PROT_ERR = 0x04, SYNDROME_WR_FLUSH_ERR = 0x05, SYNDROME_MW_BIND_ERR = 0x06, SYNDROME_BAD_RESP_ERR = 0x10, SYNDROME_LOCAL_ACCESS_ERR = 0x11, SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, SYNDROME_REMOTE_ACCESS_ERR = 0x13, SYNDROME_REMOTE_OP_ERR = 0x14, SYNDROME_RETRY_EXC_ERR = 0x15, SYNDROME_RNR_RETRY_EXC_ERR = 0x16, SYNDROME_LOCAL_RDD_VIOL_ERR = 0x20, SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21, SYNDROME_REMOTE_ABORTED_ERR = 0x22, SYNDROME_INVAL_EECN_ERR = 0x23, SYNDROME_INVAL_EEC_STATE_ERR = 0x24 }; struct mthca_cqe { uint32_t my_qpn; uint32_t my_ee; uint32_t rqpn; uint16_t sl_g_mlpath; uint16_t rlid; uint32_t imm_etype_pkey_eec; uint32_t byte_cnt; uint32_t wqe; uint8_t opcode; uint8_t is_send; uint8_t reserved; uint8_t owner; }; struct mthca_err_cqe { uint32_t my_qpn; uint32_t reserved1[3]; uint8_t syndrome; uint8_t vendor_err; uint16_t db_cnt; uint32_t reserved2; uint32_t wqe; uint8_t opcode; uint8_t reserved3[2]; uint8_t owner; }; static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) { return cq->buf.buf + entry * MTHCA_CQ_ENTRY_SIZE; } static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i) { struct mthca_cqe 
*cqe = get_cqe(cq, i); return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; } static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq) { return cqe_sw(cq, cq->cons_index & cq->ibv_cq.cqe); } static inline void set_cqe_hw(struct mthca_cqe *cqe) { VALGRIND_MAKE_MEM_UNDEFINED(cqe, sizeof *cqe); cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; } /* * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index * should be correct before calling update_cons_index(). */ static inline void update_cons_index(struct mthca_cq *cq, int incr) { uint32_t doorbell[2]; if (mthca_is_memfree(cq->ibv_cq.context)) { *cq->set_ci_db = htonl(cq->cons_index); wmb(); } else { doorbell[0] = htonl(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn); doorbell[1] = htonl(incr - 1); mthca_write64(doorbell, to_mctx(cq->ibv_cq.context), MTHCA_CQ_DOORBELL); } } static void dump_cqe(void *cqe_ptr) { uint32_t *cqe = cqe_ptr; int i; for (i = 0; i < 8; ++i) printf(" [%2x] %08x\n", i * 4, ntohl(((uint32_t *) cqe)[i])); } static int handle_error_cqe(struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ibv_wc *wc, int *free_cqe) { int err; int dbd; uint32_t new_wqe; if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) { printf("local QP operation err " "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n", ntohl(cqe->my_qpn), ntohl(cqe->wqe), cq->cqn, cq->cons_index); dump_cqe(cqe); } /* * For completions in error, only work request ID, status, vendor error * (and freed resource count for RD) have to be set. */ switch (cqe->syndrome) { case SYNDROME_LOCAL_LENGTH_ERR: wc->status = IBV_WC_LOC_LEN_ERR; break; case SYNDROME_LOCAL_QP_OP_ERR: wc->status = IBV_WC_LOC_QP_OP_ERR; break; case SYNDROME_LOCAL_EEC_OP_ERR: wc->status = IBV_WC_LOC_EEC_OP_ERR; break; case SYNDROME_LOCAL_PROT_ERR: wc->status = IBV_WC_LOC_PROT_ERR; break; case SYNDROME_WR_FLUSH_ERR: wc->status = IBV_WC_WR_FLUSH_ERR; break; case SYNDROME_MW_BIND_ERR: wc->status = IBV_WC_MW_BIND_ERR; break; case SYNDROME_BAD_RESP_ERR: wc->status = IBV_WC_BAD_RESP_ERR; break; case SYNDROME_LOCAL_ACCESS_ERR: wc->status = IBV_WC_LOC_ACCESS_ERR; break; case SYNDROME_REMOTE_INVAL_REQ_ERR: wc->status = IBV_WC_REM_INV_REQ_ERR; break; case SYNDROME_REMOTE_ACCESS_ERR: wc->status = IBV_WC_REM_ACCESS_ERR; break; case SYNDROME_REMOTE_OP_ERR: wc->status = IBV_WC_REM_OP_ERR; break; case SYNDROME_RETRY_EXC_ERR: wc->status = IBV_WC_RETRY_EXC_ERR; break; case SYNDROME_RNR_RETRY_EXC_ERR: wc->status = IBV_WC_RNR_RETRY_EXC_ERR; break; case SYNDROME_LOCAL_RDD_VIOL_ERR: wc->status = IBV_WC_LOC_RDD_VIOL_ERR; break; case SYNDROME_REMOTE_INVAL_RD_REQ_ERR: wc->status = IBV_WC_REM_INV_RD_REQ_ERR; break; case SYNDROME_REMOTE_ABORTED_ERR: wc->status = IBV_WC_REM_ABORT_ERR; break; case SYNDROME_INVAL_EECN_ERR: wc->status = IBV_WC_INV_EECN_ERR; break; case SYNDROME_INVAL_EEC_STATE_ERR: wc->status = IBV_WC_INV_EEC_STATE_ERR; break; default: wc->status = IBV_WC_GENERAL_ERR; break; } wc->vendor_err = cqe->vendor_err; /* * Mem-free HCAs always generate one CQE per WQE, even in the * error case, so we don't have to check the doorbell count, etc. */ if (mthca_is_memfree(cq->ibv_cq.context)) return 0; err = mthca_free_err_wqe(qp, is_send, wqe_index, &dbd, &new_wqe); if (err) return err; /* * If we're at the end of the WQE chain, or we've used up our * doorbell count, free the CQE. Otherwise just update it for * the next poll operation. * * This doesn't apply to mem-free HCAs, which never use the * doorbell count field. In that case we always free the CQE. 
*/ if (mthca_is_memfree(cq->ibv_cq.context) || !(new_wqe & htonl(0x3f)) || (!cqe->db_cnt && dbd)) return 0; cqe->db_cnt = htons(ntohs(cqe->db_cnt) - dbd); cqe->wqe = new_wqe; cqe->syndrome = SYNDROME_WR_FLUSH_ERR; *free_cqe = 0; return 0; } static inline int mthca_poll_one(struct mthca_cq *cq, struct mthca_qp **cur_qp, int *freed, struct ibv_wc *wc) { struct mthca_wq *wq; struct mthca_cqe *cqe; struct mthca_srq *srq; uint32_t qpn; uint32_t wqe; int wqe_index; int is_error; int is_send; int free_cqe = 1; int err = 0; cqe = next_cqe_sw(cq); if (!cqe) return CQ_EMPTY; VALGRIND_MAKE_MEM_DEFINED(cqe, sizeof *cqe); /* * Make sure we read CQ entry contents after we've checked the * ownership bit. */ rmb(); qpn = ntohl(cqe->my_qpn); is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) == MTHCA_ERROR_CQE_OPCODE_MASK; is_send = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80; if (!*cur_qp || ntohl(cqe->my_qpn) != (*cur_qp)->ibv_qp.qp_num) { /* * We do not have to take the QP table lock here, * because CQs will be locked while QPs are removed * from the table. */ *cur_qp = mthca_find_qp(to_mctx(cq->ibv_cq.context), ntohl(cqe->my_qpn)); if (!*cur_qp) { err = CQ_POLL_ERR; goto out; } } wc->qp_num = (*cur_qp)->ibv_qp.qp_num; if (is_send) { wq = &(*cur_qp)->sq; wqe_index = ((ntohl(cqe->wqe) - (*cur_qp)->send_wqe_offset) >> wq->wqe_shift); wc->wr_id = (*cur_qp)->wrid[wqe_index + (*cur_qp)->rq.max]; } else if ((*cur_qp)->ibv_qp.srq) { srq = to_msrq((*cur_qp)->ibv_qp.srq); wqe = htonl(cqe->wqe); wq = NULL; wqe_index = wqe >> srq->wqe_shift; wc->wr_id = srq->wrid[wqe_index]; mthca_free_srq_wqe(srq, wqe_index); } else { int32_t wqe; wq = &(*cur_qp)->rq; wqe = ntohl(cqe->wqe); wqe_index = wqe >> wq->wqe_shift; /* * WQE addr == base - 1 might be reported by Sinai FW * 1.0.800 and Arbel FW 5.1.400 in receive completion * with error instead of (rq size - 1). This bug * should be fixed in later FW revisions. 
*/ if (wqe_index < 0) wqe_index = wq->max - 1; wc->wr_id = (*cur_qp)->wrid[wqe_index]; } if (wq) { if (wq->last_comp < wqe_index) wq->tail += wqe_index - wq->last_comp; else wq->tail += wqe_index + wq->max - wq->last_comp; wq->last_comp = wqe_index; } if (is_error) { err = handle_error_cqe(cq, *cur_qp, wqe_index, is_send, (struct mthca_err_cqe *) cqe, wc, &free_cqe); goto out; } if (is_send) { wc->wc_flags = 0; switch (cqe->opcode) { case MTHCA_OPCODE_RDMA_WRITE: wc->opcode = IBV_WC_RDMA_WRITE; break; case MTHCA_OPCODE_RDMA_WRITE_IMM: wc->opcode = IBV_WC_RDMA_WRITE; wc->wc_flags |= IBV_WC_WITH_IMM; break; case MTHCA_OPCODE_SEND: wc->opcode = IBV_WC_SEND; break; case MTHCA_OPCODE_SEND_IMM: wc->opcode = IBV_WC_SEND; wc->wc_flags |= IBV_WC_WITH_IMM; break; case MTHCA_OPCODE_RDMA_READ: wc->opcode = IBV_WC_RDMA_READ; wc->byte_len = ntohl(cqe->byte_cnt); break; case MTHCA_OPCODE_ATOMIC_CS: wc->opcode = IBV_WC_COMP_SWAP; wc->byte_len = ntohl(cqe->byte_cnt); break; case MTHCA_OPCODE_ATOMIC_FA: wc->opcode = IBV_WC_FETCH_ADD; wc->byte_len = ntohl(cqe->byte_cnt); break; case MTHCA_OPCODE_BIND_MW: wc->opcode = IBV_WC_BIND_MW; break; default: /* assume it's a send completion */ wc->opcode = IBV_WC_SEND; break; } } else { wc->byte_len = ntohl(cqe->byte_cnt); switch (cqe->opcode & 0x1f) { case IBV_OPCODE_SEND_LAST_WITH_IMMEDIATE: case IBV_OPCODE_SEND_ONLY_WITH_IMMEDIATE: wc->wc_flags = IBV_WC_WITH_IMM; wc->imm_data = cqe->imm_etype_pkey_eec; wc->opcode = IBV_WC_RECV; break; case IBV_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE: case IBV_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE: wc->wc_flags = IBV_WC_WITH_IMM; wc->imm_data = cqe->imm_etype_pkey_eec; wc->opcode = IBV_WC_RECV_RDMA_WITH_IMM; break; default: wc->wc_flags = 0; wc->opcode = IBV_WC_RECV; break; } wc->slid = ntohs(cqe->rlid); wc->sl = ntohs(cqe->sl_g_mlpath) >> 12; wc->src_qp = ntohl(cqe->rqpn) & 0xffffff; wc->dlid_path_bits = ntohs(cqe->sl_g_mlpath) & 0x7f; wc->pkey_index = ntohl(cqe->imm_etype_pkey_eec) >> 16; wc->wc_flags |= ntohs(cqe->sl_g_mlpath) & 0x80 ? IBV_WC_GRH : 0; } wc->status = IBV_WC_SUCCESS; out: if (free_cqe) { set_cqe_hw(cqe); ++(*freed); ++cq->cons_index; } return err; } int mthca_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc) { struct mthca_cq *cq = to_mcq(ibcq); struct mthca_qp *qp = NULL; int npolled; int err = CQ_OK; int freed = 0; pthread_spin_lock(&cq->lock); for (npolled = 0; npolled < ne; ++npolled) { err = mthca_poll_one(cq, &qp, &freed, wc + npolled); if (err != CQ_OK) break; } if (freed) { wmb(); update_cons_index(cq, freed); } pthread_spin_unlock(&cq->lock); return err == CQ_POLL_ERR ? err : npolled; } int mthca_tavor_arm_cq(struct ibv_cq *cq, int solicited) { uint32_t doorbell[2]; doorbell[0] = htonl((solicited ? MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : MTHCA_TAVOR_CQ_DB_REQ_NOT) | to_mcq(cq)->cqn); doorbell[1] = 0xffffffff; mthca_write64(doorbell, to_mctx(cq->context), MTHCA_CQ_DOORBELL); return 0; } int mthca_arbel_arm_cq(struct ibv_cq *ibvcq, int solicited) { struct mthca_cq *cq = to_mcq(ibvcq); uint32_t doorbell[2]; uint32_t sn; uint32_t ci; sn = cq->arm_sn & 3; ci = htonl(cq->cons_index); doorbell[0] = ci; doorbell[1] = htonl((cq->cqn << 8) | (2 << 5) | (sn << 3) | (solicited ? 1 : 2)); mthca_write_db_rec(doorbell, cq->arm_db); /* * Make sure that the doorbell record in host memory is * written before ringing the doorbell via PCI MMIO. */ wmb(); doorbell[0] = htonl((sn << 28) | (solicited ? 
MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL : MTHCA_ARBEL_CQ_DB_REQ_NOT) | cq->cqn); doorbell[1] = ci; mthca_write64(doorbell, to_mctx(ibvcq->context), MTHCA_CQ_DOORBELL); return 0; } void mthca_arbel_cq_event(struct ibv_cq *cq) { to_mcq(cq)->arm_sn++; } static inline int is_recv_cqe(struct mthca_cqe *cqe) { if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) == MTHCA_ERROR_CQE_OPCODE_MASK) return !(cqe->opcode & 0x01); else return !(cqe->is_send & 0x80); } void __mthca_cq_clean(struct mthca_cq *cq, uint32_t qpn, struct mthca_srq *srq) { struct mthca_cqe *cqe; uint32_t prod_index; int i, nfreed = 0; /* * First we need to find the current producer index, so we * know where to start cleaning from. It doesn't matter if HW * adds new entries after this loop -- the QP we're worried * about is already in RESET, so the new entries won't come * from our QP and therefore don't need to be checked. */ for (prod_index = cq->cons_index; cqe_sw(cq, prod_index & cq->ibv_cq.cqe); ++prod_index) if (prod_index == cq->cons_index + cq->ibv_cq.cqe) break; /* * Now sweep backwards through the CQ, removing CQ entries * that match our QP by copying older entries on top of them. */ while ((int) --prod_index - (int) cq->cons_index >= 0) { cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe); if (cqe->my_qpn == htonl(qpn)) { if (srq && is_recv_cqe(cqe)) mthca_free_srq_wqe(srq, ntohl(cqe->wqe) >> srq->wqe_shift); ++nfreed; } else if (nfreed) memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe), cqe, MTHCA_CQ_ENTRY_SIZE); } if (nfreed) { for (i = 0; i < nfreed; ++i) set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibv_cq.cqe)); wmb(); cq->cons_index += nfreed; update_cons_index(cq, nfreed); } } void mthca_cq_clean(struct mthca_cq *cq, uint32_t qpn, struct mthca_srq *srq) { pthread_spin_lock(&cq->lock); __mthca_cq_clean(cq, qpn, srq); pthread_spin_unlock(&cq->lock); } void mthca_cq_resize_copy_cqes(struct mthca_cq *cq, void *buf, int old_cqe) { int i; /* * In Tavor mode, the hardware keeps the consumer and producer * indices mod the CQ size. Since we might be making the CQ * bigger, we need to deal with the case where the producer * index wrapped around before the CQ was resized. */ if (!mthca_is_memfree(cq->ibv_cq.context) && old_cqe < cq->ibv_cq.cqe) { cq->cons_index &= old_cqe; if (cqe_sw(cq, old_cqe)) cq->cons_index -= old_cqe + 1; } for (i = cq->cons_index; cqe_sw(cq, i & old_cqe); ++i) memcpy(buf + (i & cq->ibv_cq.cqe) * MTHCA_CQ_ENTRY_SIZE, get_cqe(cq, i & old_cqe), MTHCA_CQ_ENTRY_SIZE); } int mthca_alloc_cq_buf(struct mthca_device *dev, struct mthca_buf *buf, int nent) { int i; if (mthca_alloc_buf(buf, align(nent * MTHCA_CQ_ENTRY_SIZE, dev->page_size), dev->page_size)) return -1; for (i = 0; i < nent; ++i) ((struct mthca_cqe *) buf->buf)[i].owner = MTHCA_CQ_ENTRY_OWNER_HW; return 0; }
dplbsd/soc2013
head/contrib/ofed/libmthca/src/cq.c
C
bsd-2-clause
16,469
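mthca_poll_cq() above is what libibverbs ends up calling when an application polls a completion queue on a Tavor/Arbel HCA. A generic consumer loop using only the public verbs API; the CQ is assumed to have been created elsewhere with ibv_create_cq():

#include <stdio.h>
#include <infiniband/verbs.h>

int drain_completions(struct ibv_cq *cq)
{
    struct ibv_wc wc[16];
    int n, i;

    /* ibv_poll_cq() dispatches to the provider's poll routine, here
     * mthca_poll_cq(), and returns the number of completions copied. */
    while ((n = ibv_poll_cq(cq, 16, wc)) > 0) {
        for (i = 0; i < n; i++) {
            if (wc[i].status != IBV_WC_SUCCESS) {
                fprintf(stderr, "wr_id %llu failed: %s\n",
                        (unsigned long long) wc[i].wr_id,
                        ibv_wc_status_str(wc[i].status));
                return -1;
            }
        }
    }
    return n;   /* 0 when the CQ is empty, negative on poll error */
}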
// RUN: %clang_builtins %s %librt -o %t && %run %t // REQUIRES: librt_has_absvdi2 //===-- absvdi2_test.c - Test __absvdi2 -----------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file tests __absvdi2 for the compiler_rt library. // //===----------------------------------------------------------------------===// #include "int_lib.h" #include <stdio.h> #include <stdlib.h> // Returns: absolute value // Effects: aborts if abs(x) < 0 COMPILER_RT_ABI di_int __absvdi2(di_int a); int test__absvdi2(di_int a) { di_int x = __absvdi2(a); di_int expected = a; if (expected < 0) expected = -expected; if (x != expected || expected < 0) printf("error in __absvdi2(0x%llX) = %lld, expected positive %lld\n", a, x, expected); return x != expected; } int main() { // if (test__absvdi2(0x8000000000000000LL)) // should abort // return 1; if (test__absvdi2(0x0000000000000000LL)) return 1; if (test__absvdi2(0x0000000000000001LL)) return 1; if (test__absvdi2(0x0000000000000002LL)) return 1; if (test__absvdi2(0x7FFFFFFFFFFFFFFELL)) return 1; if (test__absvdi2(0x7FFFFFFFFFFFFFFFLL)) return 1; if (test__absvdi2(0x8000000000000001LL)) return 1; if (test__absvdi2(0x8000000000000002LL)) return 1; if (test__absvdi2(0xFFFFFFFFFFFFFFFELL)) return 1; if (test__absvdi2(0xFFFFFFFFFFFFFFFFLL)) return 1; int i; for (i = 0; i < 10000; ++i) if (test__absvdi2(((di_int)rand() << 32) | rand())) return 1; return 0; }
endlessm/chromium-browser
third_party/llvm/compiler-rt/test/builtins/Unit/absvdi2_test.c
C
bsd-3-clause
1,888
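For reference while reading the test, a sketch of the usual branch-free formulation of a trapping absolute value (not compiler-rt's source): sign-extend the sign bit, XOR and subtract, and abort when the result is still negative, which can only happen for the one unrepresentable input, LLONG_MIN. It assumes arithmetic right shift of negative signed values, as the builtins themselves do.

#include <limits.h>
#include <stdlib.h>

static long long absv_ref(long long a)
{
    long long t = a >> (sizeof a * CHAR_BIT - 1);  /* 0 or -1 */
    long long r = (a ^ t) - t;

    if (r < 0)          /* only LLONG_MIN overflows */
        abort();
    return r;
}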
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery Copyright 2011 Linaro Limited This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" #include "offsets.h" #include "ex_tables.h" #include <signal.h> #define arm_exidx_step UNW_OBJ(arm_exidx_step) static inline int arm_exidx_step (struct cursor *c) { unw_word_t old_ip, old_cfa; uint8_t buf[32]; int ret; old_ip = c->dwarf.ip; old_cfa = c->dwarf.cfa; /* mark PC unsaved */ c->dwarf.loc[UNW_ARM_R15] = DWARF_NULL_LOC; if ((ret = tdep_find_proc_info (&c->dwarf, c->dwarf.ip, 1)) < 0) return ret; if (c->dwarf.pi.format != UNW_INFO_FORMAT_ARM_EXIDX) return -UNW_ENOINFO; ret = arm_exidx_extract (&c->dwarf, buf); if (ret == -UNW_ESTOPUNWIND) return 0; else if (ret < 0) return ret; ret = arm_exidx_decode (buf, ret, &c->dwarf); if (ret < 0) return ret; if (c->dwarf.ip == old_ip && c->dwarf.cfa == old_cfa) { Dprintf ("%s: ip and cfa unchanged; stopping here (ip=0x%lx)\n", __FUNCTION__, (long) c->dwarf.ip); return -UNW_EBADFRAME; } return (c->dwarf.ip == 0) ? 0 : 1; } PROTECTED int unw_handle_signal_frame (unw_cursor_t *cursor) { struct cursor *c = (struct cursor *) cursor; int ret; unw_word_t sc_addr, sp, sp_addr = c->dwarf.cfa; struct dwarf_loc sp_loc = DWARF_LOC (sp_addr, 0); if ((ret = dwarf_get (&c->dwarf, sp_loc, &sp)) < 0) return -UNW_EUNSPEC; /* Obtain signal frame type (non-RT or RT). */ ret = unw_is_signal_frame (cursor); /* Save the SP and PC to be able to return execution at this point later in time (unw_resume). */ c->sigcontext_sp = c->dwarf.cfa; c->sigcontext_pc = c->dwarf.ip; /* Since kernel version 2.6.18 the non-RT signal frame starts with a ucontext while the RT signal frame starts with a siginfo, followed by a sigframe whose first element is an ucontext. Prior 2.6.18 the non-RT signal frame starts with a sigcontext while the RT signal frame starts with two pointers followed by a siginfo and an ucontext. The first pointer points to the start of the siginfo structure and the second one to the ucontext structure. */ if (ret == 1) { /* Handle non-RT signal frames. Check if the first word on the stack is the magic number. */ if (sp == 0x5ac3c35a) { c->sigcontext_format = ARM_SCF_LINUX_SIGFRAME; sc_addr = sp_addr + LINUX_UC_MCONTEXT_OFF; } else { c->sigcontext_format = ARM_SCF_LINUX_OLD_SIGFRAME; sc_addr = sp_addr; } c->sigcontext_addr = sp_addr; } else if (ret == 2) { /* Handle RT signal frames. Check if the first word on the stack is a pointer to the siginfo structure. 
*/ if (sp == sp_addr + 8) { c->sigcontext_format = ARM_SCF_LINUX_OLD_RT_SIGFRAME; c->sigcontext_addr = sp_addr + 8 + sizeof (siginfo_t); } else { c->sigcontext_format = ARM_SCF_LINUX_RT_SIGFRAME; c->sigcontext_addr = sp_addr + sizeof (siginfo_t); } sc_addr = c->sigcontext_addr + LINUX_UC_MCONTEXT_OFF; } else return -UNW_EUNSPEC; /* Update the dwarf cursor. Set the location of the registers to the corresponding addresses of the uc_mcontext / sigcontext structure contents. */ c->dwarf.loc[UNW_ARM_R0] = DWARF_LOC (sc_addr + LINUX_SC_R0_OFF, 0); c->dwarf.loc[UNW_ARM_R1] = DWARF_LOC (sc_addr + LINUX_SC_R1_OFF, 0); c->dwarf.loc[UNW_ARM_R2] = DWARF_LOC (sc_addr + LINUX_SC_R2_OFF, 0); c->dwarf.loc[UNW_ARM_R3] = DWARF_LOC (sc_addr + LINUX_SC_R3_OFF, 0); c->dwarf.loc[UNW_ARM_R4] = DWARF_LOC (sc_addr + LINUX_SC_R4_OFF, 0); c->dwarf.loc[UNW_ARM_R5] = DWARF_LOC (sc_addr + LINUX_SC_R5_OFF, 0); c->dwarf.loc[UNW_ARM_R6] = DWARF_LOC (sc_addr + LINUX_SC_R6_OFF, 0); c->dwarf.loc[UNW_ARM_R7] = DWARF_LOC (sc_addr + LINUX_SC_R7_OFF, 0); c->dwarf.loc[UNW_ARM_R8] = DWARF_LOC (sc_addr + LINUX_SC_R8_OFF, 0); c->dwarf.loc[UNW_ARM_R9] = DWARF_LOC (sc_addr + LINUX_SC_R9_OFF, 0); c->dwarf.loc[UNW_ARM_R10] = DWARF_LOC (sc_addr + LINUX_SC_R10_OFF, 0); c->dwarf.loc[UNW_ARM_R11] = DWARF_LOC (sc_addr + LINUX_SC_FP_OFF, 0); c->dwarf.loc[UNW_ARM_R12] = DWARF_LOC (sc_addr + LINUX_SC_IP_OFF, 0); c->dwarf.loc[UNW_ARM_R13] = DWARF_LOC (sc_addr + LINUX_SC_SP_OFF, 0); c->dwarf.loc[UNW_ARM_R14] = DWARF_LOC (sc_addr + LINUX_SC_LR_OFF, 0); c->dwarf.loc[UNW_ARM_R15] = DWARF_LOC (sc_addr + LINUX_SC_PC_OFF, 0); /* Set SP/CFA and PC/IP. */ dwarf_get (&c->dwarf, c->dwarf.loc[UNW_ARM_R13], &c->dwarf.cfa); dwarf_get (&c->dwarf, c->dwarf.loc[UNW_ARM_R15], &c->dwarf.ip); return 1; } PROTECTED int unw_step (unw_cursor_t *cursor) { struct cursor *c = (struct cursor *) cursor; int ret = -UNW_EUNSPEC; Debug (1, "(cursor=%p)\n", c); /* Check if this is a signal frame. */ if (unw_is_signal_frame (cursor)) return unw_handle_signal_frame (cursor); #ifdef CONFIG_DEBUG_FRAME /* First, try DWARF-based unwinding. */ if (UNW_TRY_METHOD(UNW_ARM_METHOD_DWARF)) { ret = dwarf_step (&c->dwarf); Debug(1, "dwarf_step()=%d\n", ret); if (likely (ret > 0)) return 1; else if (unlikely (ret == -UNW_ESTOPUNWIND)) return ret; if (ret < 0 && ret != -UNW_ENOINFO) { Debug (2, "returning %d\n", ret); return ret; } } #endif /* CONFIG_DEBUG_FRAME */ /* Next, try extbl-based unwinding. */ if (UNW_TRY_METHOD (UNW_ARM_METHOD_EXIDX)) { ret = arm_exidx_step (c); if (ret > 0) return 1; if (ret == -UNW_ESTOPUNWIND || ret == 0) return ret; } /* Fall back on APCS frame parsing. Note: This won't work in case the ARM EABI is used. */ if (unlikely (ret < 0)) { if (UNW_TRY_METHOD(UNW_ARM_METHOD_FRAME)) { ret = UNW_ESUCCESS; /* DWARF unwinding failed, try to follow APCS/optimized APCS frame chain */ unw_word_t instr, i; Debug (13, "dwarf_step() failed (ret=%d), trying frame-chain\n", ret); dwarf_loc_t ip_loc, fp_loc; unw_word_t frame; /* Mark all registers unsaved, since we don't know where they are saved (if at all), except for the EBP and EIP. */ if (dwarf_get(&c->dwarf, c->dwarf.loc[UNW_ARM_R11], &frame) < 0) { return 0; } for (i = 0; i < DWARF_NUM_PRESERVED_REGS; ++i) { c->dwarf.loc[i] = DWARF_NULL_LOC; } if (frame) { if (dwarf_get(&c->dwarf, DWARF_LOC(frame, 0), &instr) < 0) { return 0; } instr -= 8; if (dwarf_get(&c->dwarf, DWARF_LOC(instr, 0), &instr) < 0) { return 0; } if ((instr & 0xFFFFD800) == 0xE92DD800) { /* Standard APCS frame. 
*/ ip_loc = DWARF_LOC(frame - 4, 0); fp_loc = DWARF_LOC(frame - 12, 0); } else { /* Codesourcery optimized normal frame. */ ip_loc = DWARF_LOC(frame, 0); fp_loc = DWARF_LOC(frame - 4, 0); } if (dwarf_get(&c->dwarf, ip_loc, &c->dwarf.ip) < 0) { return 0; } c->dwarf.loc[UNW_ARM_R12] = ip_loc; c->dwarf.loc[UNW_ARM_R11] = fp_loc; Debug(15, "ip=%lx\n", c->dwarf.ip); } else { ret = -UNW_ENOINFO; } } } return ret == -UNW_ENOINFO ? 0 : 1; }
adsharma/libunwind
src/arm/Gstep.c
C
mit
8,679
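unw_step() above is the per-frame engine; a typical consumer drives it in a loop after initializing a cursor from the current context. A standard local-unwind sketch using the public libunwind API (link with -lunwind):

#include <stdio.h>

#define UNW_LOCAL_ONLY
#include <libunwind.h>

void print_backtrace(void)
{
    unw_context_t uc;
    unw_cursor_t cursor;
    unw_word_t ip, sp;

    unw_getcontext(&uc);
    unw_init_local(&cursor, &uc);

    /* Each unw_step() runs the DWARF / EXIDX / APCS fallback chain above. */
    while (unw_step(&cursor) > 0) {
        unw_get_reg(&cursor, UNW_REG_IP, &ip);
        unw_get_reg(&cursor, UNW_REG_SP, &sp);
        printf("ip=0x%lx sp=0x%lx\n", (unsigned long) ip, (unsigned long) sp);
    }
}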
#if !defined(__mips_soft_float) && __mips >= 2 #include <math.h> float sqrtf(float x) { float r; __asm__("sqrt.s %0,%1" : "=f"(r) : "f"(x)); return r; } #else #include "../sqrtf.c" #endif
heatd/Onyx
musl/src/math/mips/sqrtf.c
C
mit
196
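A trivial harness for the hardware-backed sqrtf() above, useful for spot-checking that the sqrt.s path and the generic C fallback agree; the sampled values are arbitrary.

#include <math.h>
#include <stdio.h>

int main(void)
{
    float v;

    for (v = 0.0f; v <= 16.0f; v += 1.0f)
        printf("sqrtf(%4.1f) = %.6f\n", (double) v, (double) sqrtf(v));
    return 0;
}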
/* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2001-2020 Intel Corporation */ /* * 82542 Gigabit Ethernet Controller */ #include "e1000_api.h" STATIC s32 e1000_init_phy_params_82542(struct e1000_hw *hw); STATIC s32 e1000_init_nvm_params_82542(struct e1000_hw *hw); STATIC s32 e1000_init_mac_params_82542(struct e1000_hw *hw); STATIC s32 e1000_get_bus_info_82542(struct e1000_hw *hw); STATIC s32 e1000_reset_hw_82542(struct e1000_hw *hw); STATIC s32 e1000_init_hw_82542(struct e1000_hw *hw); STATIC s32 e1000_setup_link_82542(struct e1000_hw *hw); STATIC s32 e1000_led_on_82542(struct e1000_hw *hw); STATIC s32 e1000_led_off_82542(struct e1000_hw *hw); STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index); STATIC void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw); STATIC s32 e1000_read_mac_addr_82542(struct e1000_hw *hw); /** * e1000_init_phy_params_82542 - Init PHY func ptrs. * @hw: pointer to the HW structure **/ STATIC s32 e1000_init_phy_params_82542(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_init_phy_params_82542"); phy->type = e1000_phy_none; return ret_val; } /** * e1000_init_nvm_params_82542 - Init NVM func ptrs. * @hw: pointer to the HW structure **/ STATIC s32 e1000_init_nvm_params_82542(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; DEBUGFUNC("e1000_init_nvm_params_82542"); nvm->address_bits = 6; nvm->delay_usec = 50; nvm->opcode_bits = 3; nvm->type = e1000_nvm_eeprom_microwire; nvm->word_size = 64; /* Function Pointers */ nvm->ops.read = e1000_read_nvm_microwire; nvm->ops.release = e1000_stop_nvm; nvm->ops.write = e1000_write_nvm_microwire; nvm->ops.update = e1000_update_nvm_checksum_generic; nvm->ops.validate = e1000_validate_nvm_checksum_generic; return E1000_SUCCESS; } /** * e1000_init_mac_params_82542 - Init MAC func ptrs. * @hw: pointer to the HW structure **/ STATIC s32 e1000_init_mac_params_82542(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; DEBUGFUNC("e1000_init_mac_params_82542"); /* Set media type */ hw->phy.media_type = e1000_media_type_fiber; /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES; /* Function pointers */ /* bus type/speed/width */ mac->ops.get_bus_info = e1000_get_bus_info_82542; /* function id */ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci; /* reset */ mac->ops.reset_hw = e1000_reset_hw_82542; /* hw initialization */ mac->ops.init_hw = e1000_init_hw_82542; /* link setup */ mac->ops.setup_link = e1000_setup_link_82542; /* phy/fiber/serdes setup */ mac->ops.setup_physical_interface = e1000_setup_fiber_serdes_link_generic; /* check for link */ mac->ops.check_for_link = e1000_check_for_fiber_link_generic; /* multicast address update */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; /* writing VFTA */ mac->ops.write_vfta = e1000_write_vfta_generic; /* clearing VFTA */ mac->ops.clear_vfta = e1000_clear_vfta_generic; /* read mac address */ mac->ops.read_mac_addr = e1000_read_mac_addr_82542; /* set RAR */ mac->ops.rar_set = e1000_rar_set_82542; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_82542; mac->ops.led_off = e1000_led_off_82542; /* clear hardware counters */ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82542; /* link info */ mac->ops.get_link_up_info = e1000_get_speed_and_duplex_fiber_serdes_generic; return E1000_SUCCESS; } /** * e1000_init_function_pointers_82542 - Init func ptrs. 
* @hw: pointer to the HW structure * * Called to initialize all function pointers and parameters. **/ void e1000_init_function_pointers_82542(struct e1000_hw *hw) { DEBUGFUNC("e1000_init_function_pointers_82542"); hw->mac.ops.init_params = e1000_init_mac_params_82542; hw->nvm.ops.init_params = e1000_init_nvm_params_82542; hw->phy.ops.init_params = e1000_init_phy_params_82542; } /** * e1000_get_bus_info_82542 - Obtain bus information for adapter * @hw: pointer to the HW structure * * This will obtain information about the HW bus for which the * adapter is attached and stores it in the hw structure. **/ STATIC s32 e1000_get_bus_info_82542(struct e1000_hw *hw) { DEBUGFUNC("e1000_get_bus_info_82542"); hw->bus.type = e1000_bus_type_pci; hw->bus.speed = e1000_bus_speed_unknown; hw->bus.width = e1000_bus_width_unknown; return E1000_SUCCESS; } /** * e1000_reset_hw_82542 - Reset hardware * @hw: pointer to the HW structure * * This resets the hardware into a known state. **/ STATIC s32 e1000_reset_hw_82542(struct e1000_hw *hw) { struct e1000_bus_info *bus = &hw->bus; s32 ret_val = E1000_SUCCESS; u32 ctrl; DEBUGFUNC("e1000_reset_hw_82542"); if (hw->revision_id == E1000_REVISION_2) { DEBUGOUT("Disabling MWI on 82542 rev 2\n"); e1000_pci_clear_mwi(hw); } DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_WRITE_REG(hw, E1000_RCTL, 0); E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); E1000_WRITE_FLUSH(hw); /* * Delay to allow any outstanding PCI transactions to complete before * resetting the device */ msec_delay(10); ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGOUT("Issuing a global reset to 82542/82543 MAC\n"); E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); hw->nvm.ops.reload(hw); msec_delay(2); E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_READ_REG(hw, E1000_ICR); if (hw->revision_id == E1000_REVISION_2) { if (bus->pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_set_mwi(hw); } return ret_val; } /** * e1000_init_hw_82542 - Initialize hardware * @hw: pointer to the HW structure * * This inits the hardware readying it for operation. **/ STATIC s32 e1000_init_hw_82542(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_dev_spec_82542 *dev_spec = &hw->dev_spec._82542; s32 ret_val = E1000_SUCCESS; u32 ctrl; u16 i; DEBUGFUNC("e1000_init_hw_82542"); /* Disabling VLAN filtering */ E1000_WRITE_REG(hw, E1000_VET, 0); mac->ops.clear_vfta(hw); /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ if (hw->revision_id == E1000_REVISION_2) { DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); e1000_pci_clear_mwi(hw); E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); E1000_WRITE_FLUSH(hw); msec_delay(5); } /* Setup the receive address. */ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ if (hw->revision_id == E1000_REVISION_2) { E1000_WRITE_REG(hw, E1000_RCTL, 0); E1000_WRITE_FLUSH(hw); msec_delay(1); if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_set_mwi(hw); } /* Zero out the Multicast HASH table */ DEBUGOUT("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* * Set the PCI priority bit correctly in the CTRL register. This * determines if the adapter gives priority to receives, or if it * gives equal priority to transmits and receives. 
*/ if (dev_spec->dma_fairness) { ctrl = E1000_READ_REG(hw, E1000_CTRL); E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR); } /* Setup link and flow control */ ret_val = e1000_setup_link_82542(hw); /* * Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ e1000_clear_hw_cntrs_82542(hw); return ret_val; } /** * e1000_setup_link_82542 - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow * control. Calls the appropriate media-specific link configuration * function. Assuming the adapter has a valid link partner, a valid link * should be established. Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. **/ STATIC s32 e1000_setup_link_82542(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; DEBUGFUNC("e1000_setup_link_82542"); ret_val = e1000_set_default_fc_generic(hw); if (ret_val) goto out; hw->fc.requested_mode &= ~e1000_fc_tx_pause; if (mac->report_tx_early) hw->fc.requested_mode &= ~e1000_fc_rx_pause; /* * Save off the requested flow control mode for use later. Depending * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Call the necessary subroutine to configure the link. */ ret_val = mac->ops.setup_physical_interface(hw); if (ret_val) goto out; /* * Initialize the flow control address, type, and PAUSE timer * registers to their default values. This is done even if flow * control is disabled, because it does not hurt anything to * initialize these registers. */ DEBUGOUT("Initializing Flow Control address, type and timer regs\n"); E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); ret_val = e1000_set_fc_watermarks_generic(hw); out: return ret_val; } /** * e1000_led_on_82542 - Turn on SW controllable LED * @hw: pointer to the HW structure * * Turns the SW defined LED on. **/ STATIC s32 e1000_led_on_82542(struct e1000_hw *hw) { u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGFUNC("e1000_led_on_82542"); ctrl |= E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); return E1000_SUCCESS; } /** * e1000_led_off_82542 - Turn off SW controllable LED * @hw: pointer to the HW structure * * Turns the SW defined LED off. **/ STATIC s32 e1000_led_off_82542(struct e1000_hw *hw) { u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGFUNC("e1000_led_off_82542"); ctrl &= ~E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); return E1000_SUCCESS; } /** * e1000_rar_set_82542 - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register * * Sets the receive address array register at index to the address passed * in by addr. 
**/ STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; DEBUGFUNC("e1000_rar_set_82542"); /* * HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); /* If MAC address zero, no need to set the AV bit */ if (rar_low || rar_high) rar_high |= E1000_RAH_AV; E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); return E1000_SUCCESS; } /** * e1000_translate_register_82542 - Translate the proper register offset * @reg: e1000 register to be read * * Registers in 82542 are located in different offsets than other adapters * even though they function in the same manner. This function takes in * the name of the register to read and returns the correct offset for * 82542 silicon. **/ u32 e1000_translate_register_82542(u32 reg) { /* * Some of the 82542 registers are located at different * offsets than they are in newer adapters. * Despite the difference in location, the registers * function in the same manner. */ switch (reg) { case E1000_RA: reg = 0x00040; break; case E1000_RDTR: reg = 0x00108; break; case E1000_RDBAL(0): reg = 0x00110; break; case E1000_RDBAH(0): reg = 0x00114; break; case E1000_RDLEN(0): reg = 0x00118; break; case E1000_RDH(0): reg = 0x00120; break; case E1000_RDT(0): reg = 0x00128; break; case E1000_RDBAL(1): reg = 0x00138; break; case E1000_RDBAH(1): reg = 0x0013C; break; case E1000_RDLEN(1): reg = 0x00140; break; case E1000_RDH(1): reg = 0x00148; break; case E1000_RDT(1): reg = 0x00150; break; case E1000_FCRTH: reg = 0x00160; break; case E1000_FCRTL: reg = 0x00168; break; case E1000_MTA: reg = 0x00200; break; case E1000_TDBAL(0): reg = 0x00420; break; case E1000_TDBAH(0): reg = 0x00424; break; case E1000_TDLEN(0): reg = 0x00428; break; case E1000_TDH(0): reg = 0x00430; break; case E1000_TDT(0): reg = 0x00438; break; case E1000_TIDV: reg = 0x00440; break; case E1000_VFTA: reg = 0x00600; break; case E1000_TDFH: reg = 0x08010; break; case E1000_TDFT: reg = 0x08018; break; default: break; } return reg; } /** * e1000_clear_hw_cntrs_82542 - Clear device specific hardware counters * @hw: pointer to the HW structure * * Clears the hardware counters by reading the counter registers. **/ STATIC void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw) { DEBUGFUNC("e1000_clear_hw_cntrs_82542"); e1000_clear_hw_cntrs_base_generic(hw); E1000_READ_REG(hw, E1000_PRC64); E1000_READ_REG(hw, E1000_PRC127); E1000_READ_REG(hw, E1000_PRC255); E1000_READ_REG(hw, E1000_PRC511); E1000_READ_REG(hw, E1000_PRC1023); E1000_READ_REG(hw, E1000_PRC1522); E1000_READ_REG(hw, E1000_PTC64); E1000_READ_REG(hw, E1000_PTC127); E1000_READ_REG(hw, E1000_PTC255); E1000_READ_REG(hw, E1000_PTC511); E1000_READ_REG(hw, E1000_PTC1023); E1000_READ_REG(hw, E1000_PTC1522); } /** * e1000_read_mac_addr_82542 - Read device MAC address * @hw: pointer to the HW structure * * Reads the device MAC address from the EEPROM and stores the value. 
**/ s32 e1000_read_mac_addr_82542(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 offset, nvm_data, i; DEBUGFUNC("e1000_read_mac_addr"); for (i = 0; i < ETH_ADDR_LEN; i += 2) { offset = i >> 1; ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); goto out; } hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); } for (i = 0; i < ETH_ADDR_LEN; i++) hw->mac.addr[i] = hw->mac.perm_addr[i]; out: return ret_val; }
john-mcnamara-intel/dpdk
drivers/net/e1000/base/e1000_82542.c
C
mit
14,510
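The file above mostly populates function-pointer tables; the code that consumes them lives in the shared base driver. A hedged bring-up sketch showing the order in which those ops are normally invoked during probe; the real rte_em PMD does considerably more, and the helper name here is illustrative only:

#include "e1000_api.h"

static s32 e1000_82542_bring_up_sketch(struct e1000_hw *hw)
{
    s32 ret;

    e1000_init_function_pointers_82542(hw);  /* install the ops shown above */

    ret = hw->mac.ops.init_params(hw);       /* e1000_init_mac_params_82542 */
    if (ret)
        return ret;
    ret = hw->nvm.ops.init_params(hw);       /* e1000_init_nvm_params_82542 */
    if (ret)
        return ret;
    ret = hw->phy.ops.init_params(hw);       /* e1000_init_phy_params_82542 */
    if (ret)
        return ret;

    ret = hw->mac.ops.reset_hw(hw);          /* e1000_reset_hw_82542 */
    if (ret)
        return ret;
    ret = hw->mac.ops.read_mac_addr(hw);     /* e1000_read_mac_addr_82542 */
    if (ret)
        return ret;

    return hw->mac.ops.init_hw(hw);          /* e1000_init_hw_82542 */
}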
/* * linux/kernel/printk.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Modified to make sys_syslog() more flexible: added commands to * return the last 4k of kernel messages, regardless of whether * they've been read or not. Added option to suppress kernel printk's * to the console. Added hook for sending the console messages * elsewhere, in preparation for a serial line console (someday). * Ted Ts'o, 2/11/93. * Modified for sysctl support, 1/8/97, Chris Horn. * Fixed SMP synchronization, 08/08/99, Manfred Spraul * manfred@colorfullife.com * Rewrote bits to get rid of console_lock * 01Mar01 Andrew Morton */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/console.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/nmi.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/interrupt.h> /* For in_interrupt() */ #include <linux/delay.h> #include <linux/smp.h> #include <linux/security.h> #include <linux/bootmem.h> #include <linux/syscalls.h> #include <asm/uaccess.h> /* * Architectures can override it: */ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...) { } #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) /* printk's without a loglevel use this.. */ #define DEFAULT_MESSAGE_LOGLEVEL 4 /* KERN_WARNING */ /* We show everything that is MORE important than this.. */ #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ DECLARE_WAIT_QUEUE_HEAD(log_wait); int console_printk[4] = { DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */ DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */ }; /* * Low level drivers may need that to know if they can schedule in * their unblank() callback or not. So let's export it. */ int oops_in_progress; EXPORT_SYMBOL(oops_in_progress); /* * console_sem protects the console_drivers list, and also * provides serialisation for access to the entire console * driver system. */ static DECLARE_MUTEX(console_sem); struct console *console_drivers; EXPORT_SYMBOL_GPL(console_drivers); /* * This is used for debugging the mess that is the VT code by * keeping track if we have the console semaphore held. It's * definitely not the perfect debug tool (we don't know if _WE_ * hold it are racing, but it helps tracking those weird code * path in the console code where we end up in places I want * locked without the console sempahore held */ static int console_locked, console_suspended; /* * logbuf_lock protects log_buf, log_start, log_end, con_start and logged_chars * It is also used in interesting ways to provide interlocking in * release_console_sem(). */ static DEFINE_SPINLOCK(logbuf_lock); #define LOG_BUF_MASK (log_buf_len-1) #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK]) /* * The indices into log_buf are not constrained to log_buf_len - they * must be masked before subscripting */ static unsigned log_start; /* Index into log_buf: next char to be read by syslog() */ static unsigned con_start; /* Index into log_buf: next char to be sent to consoles */ static unsigned log_end; /* Index into log_buf: most-recently-written-char + 1 */ /* * Array of consoles built from command line options (console=) */ struct console_cmdline { char name[8]; /* Name of the driver */ int index; /* Minor dev. 
to use */ char *options; /* Options for the driver */ #ifdef CONFIG_A11Y_BRAILLE_CONSOLE char *brl_options; /* Options for braille driver */ #endif }; #define MAX_CMDLINECONSOLES 8 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; static int selected_console = -1; static int preferred_console = -1; int console_set_on_cmdline; EXPORT_SYMBOL(console_set_on_cmdline); /* Flag: console code may call schedule() */ static int console_may_schedule; #ifdef CONFIG_PRINTK static char __log_buf[__LOG_BUF_LEN]; static char *log_buf = __log_buf; static int log_buf_len = __LOG_BUF_LEN; static unsigned logged_chars; /* Number of chars produced since last read+clear operation */ static int __init log_buf_len_setup(char *str) { unsigned size = memparse(str, &str); unsigned long flags; if (size) size = roundup_pow_of_two(size); if (size > log_buf_len) { unsigned start, dest_idx, offset; char *new_log_buf; new_log_buf = alloc_bootmem(size); if (!new_log_buf) { printk(KERN_WARNING "log_buf_len: allocation failed\n"); goto out; } spin_lock_irqsave(&logbuf_lock, flags); log_buf_len = size; log_buf = new_log_buf; offset = start = min(con_start, log_start); dest_idx = 0; while (start != log_end) { log_buf[dest_idx] = __log_buf[start & (__LOG_BUF_LEN - 1)]; start++; dest_idx++; } log_start -= offset; con_start -= offset; log_end -= offset; spin_unlock_irqrestore(&logbuf_lock, flags); printk(KERN_NOTICE "log_buf_len: %d\n", log_buf_len); } out: return 1; } __setup("log_buf_len=", log_buf_len_setup); #ifdef CONFIG_BOOT_PRINTK_DELAY static unsigned int boot_delay; /* msecs delay after each printk during bootup */ static unsigned long long printk_delay_msec; /* per msec, based on boot_delay */ static int __init boot_delay_setup(char *str) { unsigned long lpj; unsigned long long loops_per_msec; lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */ loops_per_msec = (unsigned long long)lpj / 1000 * HZ; get_option(&str, &boot_delay); if (boot_delay > 10 * 1000) boot_delay = 0; printk_delay_msec = loops_per_msec; printk(KERN_DEBUG "boot_delay: %u, preset_lpj: %ld, lpj: %lu, " "HZ: %d, printk_delay_msec: %llu\n", boot_delay, preset_lpj, lpj, HZ, printk_delay_msec); return 1; } __setup("boot_delay=", boot_delay_setup); static void boot_delay_msec(void) { unsigned long long k; unsigned long timeout; if (boot_delay == 0 || system_state != SYSTEM_BOOTING) return; k = (unsigned long long)printk_delay_msec * boot_delay; timeout = jiffies + msecs_to_jiffies(boot_delay); while (k) { k--; cpu_relax(); /* * use (volatile) jiffies to prevent * compiler reduction; loop termination via jiffies * is secondary and may or may not happen. */ if (time_after(jiffies, timeout)) break; touch_nmi_watchdog(); } } #else static inline void boot_delay_msec(void) { } #endif /* * Commands to do_syslog: * * 0 -- Close the log. Currently a NOP. * 1 -- Open the log. Currently a NOP. * 2 -- Read from the log. * 3 -- Read all messages remaining in the ring buffer. * 4 -- Read and clear all messages remaining in the ring buffer * 5 -- Clear ring buffer. 
* 6 -- Disable printk's to console * 7 -- Enable printk's to console * 8 -- Set level of messages printed to console * 9 -- Return number of unread characters in the log buffer * 10 -- Return size of the log buffer */ int do_syslog(int type, char __user *buf, int len) { unsigned i, j, limit, count; int do_clear = 0; char c; int error = 0; error = security_syslog(type); if (error) return error; switch (type) { case 0: /* Close log */ break; case 1: /* Open log */ break; case 2: /* Read from log */ error = -EINVAL; if (!buf || len < 0) goto out; error = 0; if (!len) goto out; if (!access_ok(VERIFY_WRITE, buf, len)) { error = -EFAULT; goto out; } error = wait_event_interruptible(log_wait, (log_start - log_end)); if (error) goto out; i = 0; spin_lock_irq(&logbuf_lock); while (!error && (log_start != log_end) && i < len) { c = LOG_BUF(log_start); log_start++; spin_unlock_irq(&logbuf_lock); error = __put_user(c,buf); buf++; i++; cond_resched(); spin_lock_irq(&logbuf_lock); } spin_unlock_irq(&logbuf_lock); if (!error) error = i; break; case 4: /* Read/clear last kernel messages */ do_clear = 1; /* FALL THRU */ case 3: /* Read last kernel messages */ error = -EINVAL; if (!buf || len < 0) goto out; error = 0; if (!len) goto out; if (!access_ok(VERIFY_WRITE, buf, len)) { error = -EFAULT; goto out; } count = len; if (count > log_buf_len) count = log_buf_len; spin_lock_irq(&logbuf_lock); if (count > logged_chars) count = logged_chars; if (do_clear) logged_chars = 0; limit = log_end; /* * __put_user() could sleep, and while we sleep * printk() could overwrite the messages * we try to copy to user space. Therefore * the messages are copied in reverse. <manfreds> */ for (i = 0; i < count && !error; i++) { j = limit-1-i; if (j + log_buf_len < log_end) break; c = LOG_BUF(j); spin_unlock_irq(&logbuf_lock); error = __put_user(c,&buf[count-1-i]); cond_resched(); spin_lock_irq(&logbuf_lock); } spin_unlock_irq(&logbuf_lock); if (error) break; error = i; if (i != count) { int offset = count-error; /* buffer overflow during copy, correct user buffer. 
*/ for (i = 0; i < error; i++) { if (__get_user(c,&buf[i+offset]) || __put_user(c,&buf[i])) { error = -EFAULT; break; } cond_resched(); } } break; case 5: /* Clear ring buffer */ logged_chars = 0; break; case 6: /* Disable logging to console */ console_loglevel = minimum_console_loglevel; break; case 7: /* Enable logging to console */ console_loglevel = default_console_loglevel; break; case 8: /* Set level of messages printed to console */ error = -EINVAL; if (len < 1 || len > 8) goto out; if (len < minimum_console_loglevel) len = minimum_console_loglevel; console_loglevel = len; error = 0; break; case 9: /* Number of chars in the log buffer */ error = log_end - log_start; break; case 10: /* Size of the log buffer */ error = log_buf_len; break; default: error = -EINVAL; break; } out: return error; } SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) { return do_syslog(type, buf, len); } /* * Call the console drivers on a range of log_buf */ static void __call_console_drivers(unsigned start, unsigned end) { struct console *con; for (con = console_drivers; con; con = con->next) { if ((con->flags & CON_ENABLED) && con->write && (cpu_online(smp_processor_id()) || (con->flags & CON_ANYTIME))) con->write(con, &LOG_BUF(start), end - start); } } static int __read_mostly ignore_loglevel; static int __init ignore_loglevel_setup(char *str) { ignore_loglevel = 1; printk(KERN_INFO "debug: ignoring loglevel setting.\n"); return 0; } early_param("ignore_loglevel", ignore_loglevel_setup); /* * Write out chars from start to end - 1 inclusive */ static void _call_console_drivers(unsigned start, unsigned end, int msg_log_level) { if ((msg_log_level < console_loglevel || ignore_loglevel) && console_drivers && start != end) { if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) { /* wrapped write */ __call_console_drivers(start & LOG_BUF_MASK, log_buf_len); __call_console_drivers(0, end & LOG_BUF_MASK); } else { __call_console_drivers(start, end); } } } /* * Call the console drivers, asking them to write out * log_buf[start] to log_buf[end - 1]. * The console_sem must be held. */ static void call_console_drivers(unsigned start, unsigned end) { unsigned cur_index, start_print; static int msg_level = -1; BUG_ON(((int)(start - end)) > 0); cur_index = start; start_print = start; while (cur_index != end) { if (msg_level < 0 && ((end - cur_index) > 2) && LOG_BUF(cur_index + 0) == '<' && LOG_BUF(cur_index + 1) >= '0' && LOG_BUF(cur_index + 1) <= '7' && LOG_BUF(cur_index + 2) == '>') { msg_level = LOG_BUF(cur_index + 1) - '0'; cur_index += 3; start_print = cur_index; } while (cur_index != end) { char c = LOG_BUF(cur_index); cur_index++; if (c == '\n') { if (msg_level < 0) { /* * printk() has already given us loglevel tags in * the buffer. This code is here in case the * log buffer has wrapped right round and scribbled * on those tags */ msg_level = default_message_loglevel; } _call_console_drivers(start_print, cur_index, msg_level); msg_level = -1; start_print = cur_index; break; } } } _call_console_drivers(start_print, end, msg_level); } static void emit_log_char(char c) { LOG_BUF(log_end) = c; log_end++; if (log_end - log_start > log_buf_len) log_start = log_end - log_buf_len; if (log_end - con_start > log_buf_len) con_start = log_end - log_buf_len; if (logged_chars < log_buf_len) logged_chars++; } /* * Zap console related locks when oopsing. Only zap at most once * every 30 seconds, to leave time for slow consoles to print a * full oops.
*/ static void zap_locks(void) { static unsigned long oops_timestamp; if (time_after_eq(jiffies, oops_timestamp) && !time_after(jiffies, oops_timestamp + 30 * HZ)) return; oops_timestamp = jiffies; /* If a crash is occurring, make sure we can't deadlock */ spin_lock_init(&logbuf_lock); /* And make sure that we print immediately */ init_MUTEX(&console_sem); } #if defined(CONFIG_PRINTK_TIME) static int printk_time = 1; #else static int printk_time = 0; #endif module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR); /* Check if we have any console registered that can be called early in boot. */ static int have_callable_console(void) { struct console *con; for (con = console_drivers; con; con = con->next) if (con->flags & CON_ANYTIME) return 1; return 0; } /** * printk - print a kernel message * @fmt: format string * * This is printk(). It can be called from any context. We want it to work. * * We try to grab the console_sem. If we succeed, it's easy - we log the output and * call the console drivers. If we fail to get the semaphore we place the output * into the log buffer and return. The current holder of the console_sem will * notice the new output in release_console_sem() and will send it to the * consoles before releasing the semaphore. * * One effect of this deferred printing is that code which calls printk() and * then changes console_loglevel may break. This is because console_loglevel * is inspected when the actual printing occurs. * * See also: * printf(3) * * See the vsnprintf() documentation for format string extensions over C99. */ asmlinkage int printk(const char *fmt, ...) { va_list args; int r; va_start(args, fmt); r = vprintk(fmt, args); va_end(args); return r; } /* cpu currently holding logbuf_lock */ static volatile unsigned int printk_cpu = UINT_MAX; /* * Can we actually use the console at this time on this cpu? * * Console drivers may assume that per-cpu resources have * been allocated. So unless they're explicitly marked as * being able to cope (CON_ANYTIME) don't call them until * this CPU is officially up. */ static inline int can_use_console(unsigned int cpu) { return cpu_online(cpu) || have_callable_console(); } /* * Try to get console ownership to actually show the kernel * messages from a 'printk'. Return true (and with the * console semaphore held, and 'console_locked' set) if it * is successful, false otherwise. * * This gets called with the 'logbuf_lock' spinlock held and * interrupts disabled. It should return with 'logbuf_lock' * released but interrupts still disabled. */ static int acquire_console_semaphore_for_printk(unsigned int cpu) { int retval = 0; if (!try_acquire_console_sem()) { retval = 1; /* * If we can't use the console, we need to release * the console semaphore by hand to avoid flushing * the buffer. We need to hold the console semaphore * in order to do this test safely.
*/ if (!can_use_console(cpu)) { console_locked = 0; up(&console_sem); retval = 0; } } printk_cpu = UINT_MAX; spin_unlock(&logbuf_lock); return retval; } static const char recursion_bug_msg [] = KERN_CRIT "BUG: recent printk recursion!\n"; static int recursion_bug; static int new_text_line = 1; static char printk_buf[1024]; asmlinkage int vprintk(const char *fmt, va_list args) { int printed_len = 0; int current_log_level = default_message_loglevel; unsigned long flags; int this_cpu; char *p; boot_delay_msec(); preempt_disable(); /* This stops the holder of console_sem just where we want him */ raw_local_irq_save(flags); this_cpu = smp_processor_id(); /* * Ouch, printk recursed into itself! */ if (unlikely(printk_cpu == this_cpu)) { /* * If a crash is occurring during printk() on this CPU, * then try to get the crash message out but make sure * we can't deadlock. Otherwise just return to avoid the * recursion and return - but flag the recursion so that * it can be printed at the next appropriate moment: */ if (!oops_in_progress) { recursion_bug = 1; goto out_restore_irqs; } zap_locks(); } lockdep_off(); spin_lock(&logbuf_lock); printk_cpu = this_cpu; if (recursion_bug) { recursion_bug = 0; strcpy(printk_buf, recursion_bug_msg); printed_len = strlen(recursion_bug_msg); } /* Emit the output into the temporary buffer */ printed_len += vscnprintf(printk_buf + printed_len, sizeof(printk_buf) - printed_len, fmt, args); /* * Copy the output into log_buf. If the caller didn't provide * appropriate log level tags, we insert them here */ for (p = printk_buf; *p; p++) { if (new_text_line) { /* If a token, set current_log_level and skip over */ if (p[0] == '<' && p[1] >= '0' && p[1] <= '7' && p[2] == '>') { current_log_level = p[1] - '0'; p += 3; printed_len -= 3; } /* Always output the token */ emit_log_char('<'); emit_log_char(current_log_level + '0'); emit_log_char('>'); printed_len += 3; new_text_line = 0; if (printk_time) { /* Follow the token with the time */ char tbuf[50], *tp; unsigned tlen; unsigned long long t; unsigned long nanosec_rem; t = cpu_clock(printk_cpu); nanosec_rem = do_div(t, 1000000000); tlen = sprintf(tbuf, "[%5lu.%06lu] ", (unsigned long) t, nanosec_rem / 1000); for (tp = tbuf; tp < tbuf + tlen; tp++) emit_log_char(*tp); printed_len += tlen; } if (!*p) break; } emit_log_char(*p); if (*p == '\n') new_text_line = 1; } /* * Try to acquire and then immediately release the * console semaphore. The release will do all the * actual magic (print out buffers, wake up klogd, * etc). * * The acquire_console_semaphore_for_printk() function * will release 'logbuf_lock' regardless of whether it * actually gets the semaphore or not. */ if (acquire_console_semaphore_for_printk(this_cpu)) release_console_sem(); lockdep_on(); out_restore_irqs: raw_local_irq_restore(flags); preempt_enable(); return printed_len; } EXPORT_SYMBOL(printk); EXPORT_SYMBOL(vprintk); #else static void call_console_drivers(unsigned start, unsigned end) { } #endif static int __add_preferred_console(char *name, int idx, char *options, char *brl_options) { struct console_cmdline *c; int i; /* * See if this tty is not yet registered, and * if we have a slot free. 
*/ for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) if (strcmp(console_cmdline[i].name, name) == 0 && console_cmdline[i].index == idx) { if (!brl_options) selected_console = i; return 0; } if (i == MAX_CMDLINECONSOLES) return -E2BIG; if (!brl_options) selected_console = i; c = &console_cmdline[i]; strlcpy(c->name, name, sizeof(c->name)); c->options = options; #ifdef CONFIG_A11Y_BRAILLE_CONSOLE c->brl_options = brl_options; #endif c->index = idx; return 0; } /* * Set up a list of consoles. Called from init/main.c */ static int __init console_setup(char *str) { char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */ char *s, *options, *brl_options = NULL; int idx; #ifdef CONFIG_A11Y_BRAILLE_CONSOLE if (!memcmp(str, "brl,", 4)) { brl_options = ""; str += 4; } else if (!memcmp(str, "brl=", 4)) { brl_options = str + 4; str = strchr(brl_options, ','); if (!str) { printk(KERN_ERR "need port name after brl=\n"); return 1; } *(str++) = 0; } #endif /* * Decode str into name, index, options. */ if (str[0] >= '0' && str[0] <= '9') { strcpy(buf, "ttyS"); strncpy(buf + 4, str, sizeof(buf) - 5); } else { strncpy(buf, str, sizeof(buf) - 1); } buf[sizeof(buf) - 1] = 0; if ((options = strchr(str, ',')) != NULL) *(options++) = 0; #ifdef __sparc__ if (!strcmp(str, "ttya")) strcpy(buf, "ttyS0"); if (!strcmp(str, "ttyb")) strcpy(buf, "ttyS1"); #endif for (s = buf; *s; s++) if ((*s >= '0' && *s <= '9') || *s == ',') break; idx = simple_strtoul(s, NULL, 10); *s = 0; __add_preferred_console(buf, idx, options, brl_options); console_set_on_cmdline = 1; return 1; } __setup("console=", console_setup); /** * add_preferred_console - add a device to the list of preferred consoles. * @name: device name * @idx: device index * @options: options for this console * * The last preferred console added will be used for kernel messages * and stdin/out/err for init. Normally this is used by console_setup * above to handle user-supplied console arguments; however it can also * be used by arch-specific code either to override the user or more * commonly to provide a default console (ie from PROM variables) when * the user has not supplied one. */ int add_preferred_console(char *name, int idx, char *options) { return __add_preferred_console(name, idx, options, NULL); } int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options) { struct console_cmdline *c; int i; for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) if (strcmp(console_cmdline[i].name, name) == 0 && console_cmdline[i].index == idx) { c = &console_cmdline[i]; strlcpy(c->name, name_new, sizeof(c->name)); c->name[sizeof(c->name) - 1] = 0; c->options = options; c->index = idx_new; return i; } /* not found */ return -1; } int console_suspend_enabled = 1; EXPORT_SYMBOL(console_suspend_enabled); static int __init console_suspend_disable(char *str) { console_suspend_enabled = 0; return 1; } __setup("no_console_suspend", console_suspend_disable); /** * suspend_console - suspend the console subsystem * * This disables printk() while we go into suspend states */ void suspend_console(void) { if (!console_suspend_enabled) return; printk("Suspending console(s) (use no_console_suspend to debug)\n"); acquire_console_sem(); console_suspended = 1; up(&console_sem); } void resume_console(void) { if (!console_suspend_enabled) return; down(&console_sem); console_suspended = 0; release_console_sem(); } /** * acquire_console_sem - lock the console system for exclusive use. 
* * Acquires a semaphore which guarantees that the caller has * exclusive access to the console system and the console_drivers list. * * Can sleep, returns nothing. */ void acquire_console_sem(void) { BUG_ON(in_interrupt()); down(&console_sem); if (console_suspended) return; console_locked = 1; console_may_schedule = 1; } EXPORT_SYMBOL(acquire_console_sem); int try_acquire_console_sem(void) { if (down_trylock(&console_sem)) return -1; if (console_suspended) { up(&console_sem); return -1; } console_locked = 1; console_may_schedule = 0; return 0; } EXPORT_SYMBOL(try_acquire_console_sem); int is_console_locked(void) { return console_locked; } static DEFINE_PER_CPU(int, printk_pending); void printk_tick(void) { if (__get_cpu_var(printk_pending)) { __get_cpu_var(printk_pending) = 0; wake_up_interruptible(&log_wait); } } int printk_needs_cpu(int cpu) { return per_cpu(printk_pending, cpu); } void wake_up_klogd(void) { if (waitqueue_active(&log_wait)) __raw_get_cpu_var(printk_pending) = 1; } /** * release_console_sem - unlock the console system * * Releases the semaphore which the caller holds on the console system * and the console driver list. * * While the semaphore was held, console output may have been buffered * by printk(). If this is the case, release_console_sem() emits * the output prior to releasing the semaphore. * * If there is output waiting for klogd, we wake it up. * * release_console_sem() may be called from any context. */ void release_console_sem(void) { unsigned long flags; unsigned _con_start, _log_end; unsigned wake_klogd = 0; if (console_suspended) { up(&console_sem); return; } console_may_schedule = 0; for ( ; ; ) { spin_lock_irqsave(&logbuf_lock, flags); wake_klogd |= log_start - log_end; if (con_start == log_end) break; /* Nothing to print */ _con_start = con_start; _log_end = log_end; con_start = log_end; /* Flush */ spin_unlock(&logbuf_lock); stop_critical_timings(); /* don't trace print latency */ call_console_drivers(_con_start, _log_end); start_critical_timings(); local_irq_restore(flags); } console_locked = 0; up(&console_sem); spin_unlock_irqrestore(&logbuf_lock, flags); if (wake_klogd) wake_up_klogd(); } EXPORT_SYMBOL(release_console_sem); /** * console_conditional_schedule - yield the CPU if required * * If the console code is currently allowed to sleep, and * if this CPU should yield the CPU to another task, do * so here. * * Must be called within acquire_console_sem(). */ void __sched console_conditional_schedule(void) { if (console_may_schedule) cond_resched(); } EXPORT_SYMBOL(console_conditional_schedule); void console_print(const char *s) { printk(KERN_EMERG "%s", s); } EXPORT_SYMBOL(console_print); void console_unblank(void) { struct console *c; /* * console_unblank can no longer be called in interrupt context unless * oops_in_progress is set to 1.. 
*/ if (oops_in_progress) { if (down_trylock(&console_sem) != 0) return; } else acquire_console_sem(); console_locked = 1; console_may_schedule = 0; for (c = console_drivers; c != NULL; c = c->next) if ((c->flags & CON_ENABLED) && c->unblank) c->unblank(); release_console_sem(); } /* * Return the console tty driver structure and its associated index */ struct tty_driver *console_device(int *index) { struct console *c; struct tty_driver *driver = NULL; acquire_console_sem(); for (c = console_drivers; c != NULL; c = c->next) { if (!c->device) continue; driver = c->device(c, index); if (driver) break; } release_console_sem(); return driver; } /* * Prevent further output on the passed console device so that (for example) * serial drivers can disable console output before suspending a port, and can * re-enable output afterwards. */ void console_stop(struct console *console) { acquire_console_sem(); console->flags &= ~CON_ENABLED; release_console_sem(); } EXPORT_SYMBOL(console_stop); void console_start(struct console *console) { acquire_console_sem(); console->flags |= CON_ENABLED; release_console_sem(); } EXPORT_SYMBOL(console_start); /* * The console driver calls this routine during kernel initialization * to register the console printing procedure with printk() and to * print any messages that were printed by the kernel before the * console driver was initialized. */ void register_console(struct console *console) { int i; unsigned long flags; struct console *bootconsole = NULL; if (console_drivers) { if (console->flags & CON_BOOT) return; if (console_drivers->flags & CON_BOOT) bootconsole = console_drivers; } if (preferred_console < 0 || bootconsole || !console_drivers) preferred_console = selected_console; if (console->early_setup) console->early_setup(); /* * See if we want to use this console driver. If we * didn't select a console we take the first one * that registers here. */ if (preferred_console < 0) { if (console->index < 0) console->index = 0; if (console->setup == NULL || console->setup(console, NULL) == 0) { console->flags |= CON_ENABLED; if (console->device) { console->flags |= CON_CONSDEV; preferred_console = 0; } } } /* * See if this console matches one we selected on * the command line. */ for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) { if (strcmp(console_cmdline[i].name, console->name) != 0) continue; if (console->index >= 0 && console->index != console_cmdline[i].index) continue; if (console->index < 0) console->index = console_cmdline[i].index; #ifdef CONFIG_A11Y_BRAILLE_CONSOLE if (console_cmdline[i].brl_options) { console->flags |= CON_BRL; braille_register_console(console, console_cmdline[i].index, console_cmdline[i].options, console_cmdline[i].brl_options); return; } #endif if (console->setup && console->setup(console, console_cmdline[i].options) != 0) break; console->flags |= CON_ENABLED; console->index = console_cmdline[i].index; if (i == selected_console) { console->flags |= CON_CONSDEV; preferred_console = selected_console; } break; } if (!(console->flags & CON_ENABLED)) return; if (bootconsole && (console->flags & CON_CONSDEV)) { printk(KERN_INFO "console handover: boot [%s%d] -> real [%s%d]\n", bootconsole->name, bootconsole->index, console->name, console->index); unregister_console(bootconsole); console->flags &= ~CON_PRINTBUFFER; } else { printk(KERN_INFO "console [%s%d] enabled\n", console->name, console->index); } /* * Put this console in the list - keep the * preferred driver at the head of the list. 
*/ acquire_console_sem(); if ((console->flags & CON_CONSDEV) || console_drivers == NULL) { console->next = console_drivers; console_drivers = console; if (console->next) console->next->flags &= ~CON_CONSDEV; } else { console->next = console_drivers->next; console_drivers->next = console; } if (console->flags & CON_PRINTBUFFER) { /* * release_console_sem() will print out the buffered messages * for us. */ spin_lock_irqsave(&logbuf_lock, flags); con_start = log_start; spin_unlock_irqrestore(&logbuf_lock, flags); } release_console_sem(); } EXPORT_SYMBOL(register_console); int unregister_console(struct console *console) { struct console *a, *b; int res = 1; #ifdef CONFIG_A11Y_BRAILLE_CONSOLE if (console->flags & CON_BRL) return braille_unregister_console(console); #endif acquire_console_sem(); if (console_drivers == console) { console_drivers=console->next; res = 0; } else if (console_drivers) { for (a=console_drivers->next, b=console_drivers ; a; b=a, a=b->next) { if (a == console) { b->next = a->next; res = 0; break; } } } /* * If this isn't the last console and it has CON_CONSDEV set, we * need to set it on the next preferred console. */ if (console_drivers != NULL && console->flags & CON_CONSDEV) console_drivers->flags |= CON_CONSDEV; release_console_sem(); return res; } EXPORT_SYMBOL(unregister_console); static int __init disable_boot_consoles(void) { if (console_drivers != NULL) { if (console_drivers->flags & CON_BOOT) { printk(KERN_INFO "turn off boot console %s%d\n", console_drivers->name, console_drivers->index); return unregister_console(console_drivers); } } return 0; } late_initcall(disable_boot_consoles); #if defined CONFIG_PRINTK /* * printk rate limiting, lifted from the networking subsystem. * * This enforces a rate limit: not more than 10 kernel messages * every 5s to make a denial-of-service attack impossible. */ DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); int printk_ratelimit(void) { return __ratelimit(&printk_ratelimit_state); } EXPORT_SYMBOL(printk_ratelimit); /** * printk_timed_ratelimit - caller-controlled printk ratelimiting * @caller_jiffies: pointer to caller's state * @interval_msecs: minimum interval between prints * * printk_timed_ratelimit() returns true if more than @interval_msecs * milliseconds have elapsed since the last time printk_timed_ratelimit() * returned true. */ bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msecs) { if (*caller_jiffies == 0 || time_after(jiffies, *caller_jiffies)) { *caller_jiffies = jiffies + msecs_to_jiffies(interval_msecs); return true; } return false; } EXPORT_SYMBOL(printk_timed_ratelimit); #endif
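/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): how callers typically drive the two rate-limit helpers defined
 * above. The function name and the 5000 ms interval are hypothetical.
 */
#if 0	/* example only */
static void example_report_overrun(void)
{
	/* caller-owned timestamp; printk_timed_ratelimit() updates it */
	static unsigned long last;

	/* at most one message per 5000 ms from this call site */
	if (printk_timed_ratelimit(&last, 5000))
		printk(KERN_WARNING "example: buffer overrun (throttled)\n");

	/* or share the global budget: at most 10 messages every 5 seconds */
	if (printk_ratelimit())
		printk(KERN_DEBUG "example: within the global printk budget\n");
}
#endif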
glfernando/linux-kernel-ipc
kernel/printk.c
C
gpl-2.0
32,592
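The do_syslog() command set documented in the file above is reachable from user space via syslog(2), which glibc exposes as klogctl(). A minimal dmesg-style reader, assuming glibc and sufficient privilege (reading may require root depending on kernel configuration): it uses command 10 to size the buffer, then command 3 for a non-destructive read of everything still in the ring.

#include <stdio.h>
#include <stdlib.h>
#include <sys/klog.h>

int main(void)
{
	char *buf;
	int size, n;

	/* command 10: return the size of the kernel log buffer */
	size = klogctl(10, NULL, 0);
	if (size <= 0) {
		perror("klogctl(10)");
		return 1;
	}
	buf = malloc(size);
	if (!buf)
		return 1;
	/* command 3: read all messages remaining in the ring buffer */
	n = klogctl(3, buf, size);
	if (n < 0) {
		perror("klogctl(3)");
		free(buf);
		return 1;
	}
	fwrite(buf, 1, n, stdout);
	free(buf);
	return 0;
}

Each record arrives with the <N> loglevel token that vprintk() prepends, so a reader can filter on severity exactly as call_console_drivers() does.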
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/mpage.h> #include <linux/fs.h> #include <linux/writeback.h> #include <linux/swap.h> #include <linux/gfs2_ondisk.h> #include <linux/backing-dev.h> #include "gfs2.h" #include "incore.h" #include "bmap.h" #include "glock.h" #include "inode.h" #include "log.h" #include "meta_io.h" #include "quota.h" #include "trans.h" #include "rgrp.h" #include "super.h" #include "util.h" #include "glops.h" void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, unsigned int from, unsigned int to) { struct buffer_head *head = page_buffers(page); unsigned int bsize = head->b_size; struct buffer_head *bh; unsigned int start, end; for (bh = head, start = 0; bh != head || !start; bh = bh->b_this_page, start = end) { end = start + bsize; if (end <= from || start >= to) continue; if (gfs2_is_jdata(ip)) set_buffer_uptodate(bh); gfs2_trans_add_bh(ip->i_gl, bh, 0); } } /** * gfs2_get_block_noalloc - Fills in a buffer head with details about a block * @inode: The inode * @lblock: The block number to look up * @bh_result: The buffer head to return the result in * @create: Non-zero if we may add block to the file * * Returns: errno */ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock, struct buffer_head *bh_result, int create) { int error; error = gfs2_block_map(inode, lblock, bh_result, 0); if (error) return error; if (!buffer_mapped(bh_result)) return -EIO; return 0; } static int gfs2_get_block_direct(struct inode *inode, sector_t lblock, struct buffer_head *bh_result, int create) { return gfs2_block_map(inode, lblock, bh_result, 0); } /** * gfs2_writepage_common - Common bits of writepage * @page: The page to be written * @wbc: The writeback control * * Returns: 1 if writepage is ok, otherwise an error code or zero if no error. */ static int gfs2_writepage_common(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); loff_t i_size = i_size_read(inode); pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; unsigned offset; if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) goto out; if (current->journal_info) goto redirty; /* Is the page fully outside i_size? 
(truncate in progress) */ offset = i_size & (PAGE_CACHE_SIZE-1); if (page->index > end_index || (page->index == end_index && !offset)) { page->mapping->a_ops->invalidatepage(page, 0); goto out; } return 1; redirty: redirty_page_for_writepage(wbc, page); out: unlock_page(page); return 0; } /** * gfs2_writeback_writepage - Write page for writeback mappings * @page: The page * @wbc: The writeback control * */ static int gfs2_writeback_writepage(struct page *page, struct writeback_control *wbc) { int ret; ret = gfs2_writepage_common(page, wbc); if (ret <= 0) return ret; return nobh_writepage(page, gfs2_get_block_noalloc, wbc); } /** * gfs2_ordered_writepage - Write page for ordered data files * @page: The page to write * @wbc: The writeback control * */ static int gfs2_ordered_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); int ret; ret = gfs2_writepage_common(page, wbc); if (ret <= 0) return ret; if (!page_has_buffers(page)) { create_empty_buffers(page, inode->i_sb->s_blocksize, (1 << BH_Dirty)|(1 << BH_Uptodate)); } gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1); return block_write_full_page(page, gfs2_get_block_noalloc, wbc); } /** * __gfs2_jdata_writepage - The core of jdata writepage * @page: The page to write * @wbc: The writeback control * * This is shared between writepage and writepages and implements the * core of the writepage operation. If a transaction is required then * PageChecked will have been set and the transaction will have * already been started before this is called. */ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); if (PageChecked(page)) { ClearPageChecked(page); if (!page_has_buffers(page)) { create_empty_buffers(page, inode->i_sb->s_blocksize, (1 << BH_Dirty)|(1 << BH_Uptodate)); } gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1); } return block_write_full_page(page, gfs2_get_block_noalloc, wbc); } /** * gfs2_jdata_writepage - Write complete page * @page: Page to write * * Returns: errno * */ static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct gfs2_sbd *sdp = GFS2_SB(inode); int ret; int done_trans = 0; if (PageChecked(page)) { if (wbc->sync_mode != WB_SYNC_ALL) goto out_ignore; ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0); if (ret) goto out_ignore; done_trans = 1; } ret = gfs2_writepage_common(page, wbc); if (ret > 0) ret = __gfs2_jdata_writepage(page, wbc); if (done_trans) gfs2_trans_end(sdp); return ret; out_ignore: redirty_page_for_writepage(wbc, page); unlock_page(page); return 0; } /** * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk * @mapping: The mapping to write * @wbc: Write-back control * * For the data=writeback case we can already ignore buffer heads * and write whole extents at once. This is a big reduction in the * number of I/O requests we send and the bmap calls we make in this case. 
*/ static int gfs2_writeback_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc); } /** * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages * @mapping: The mapping * @wbc: The writeback control * @writepage: The writepage function to call for each page * @pvec: The vector of pages * @nr_pages: The number of pages to write * * Returns: non-zero if loop should terminate, zero otherwise */ static int gfs2_write_jdata_pagevec(struct address_space *mapping, struct writeback_control *wbc, struct pagevec *pvec, int nr_pages, pgoff_t end) { struct inode *inode = mapping->host; struct gfs2_sbd *sdp = GFS2_SB(inode); loff_t i_size = i_size_read(inode); pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; unsigned offset = i_size & (PAGE_CACHE_SIZE-1); unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); int i; int ret; ret = gfs2_trans_begin(sdp, nrblocks, nrblocks); if (ret < 0) return ret; for(i = 0; i < nr_pages; i++) { struct page *page = pvec->pages[i]; lock_page(page); if (unlikely(page->mapping != mapping)) { unlock_page(page); continue; } if (!wbc->range_cyclic && page->index > end) { ret = 1; unlock_page(page); continue; } if (wbc->sync_mode != WB_SYNC_NONE) wait_on_page_writeback(page); if (PageWriteback(page) || !clear_page_dirty_for_io(page)) { unlock_page(page); continue; } /* Is the page fully outside i_size? (truncate in progress) */ if (page->index > end_index || (page->index == end_index && !offset)) { page->mapping->a_ops->invalidatepage(page, 0); unlock_page(page); continue; } ret = __gfs2_jdata_writepage(page, wbc); if (ret || (--(wbc->nr_to_write) <= 0)) ret = 1; } gfs2_trans_end(sdp); return ret; } /** * gfs2_write_cache_jdata - Like write_cache_pages but different * @mapping: The mapping to write * @wbc: The writeback control * @writepage: The writepage function to call * @data: The data to pass to writepage * * The reason that we use our own function here is that we need to * start transactions before we grab page locks. This allows us * to get the ordering right. 
*/ static int gfs2_write_cache_jdata(struct address_space *mapping, struct writeback_control *wbc) { int ret = 0; int done = 0; struct pagevec pvec; int nr_pages; pgoff_t index; pgoff_t end; int scanned = 0; int range_whole = 0; pagevec_init(&pvec, 0); if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ end = -1; } else { index = wbc->range_start >> PAGE_CACHE_SHIFT; end = wbc->range_end >> PAGE_CACHE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; scanned = 1; } retry: while (!done && (index <= end) && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { scanned = 1; ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end); if (ret) done = 1; if (ret > 0) ret = 0; pagevec_release(&pvec); cond_resched(); } if (!scanned && !done) { /* * We hit the last page and there is more work to be done: wrap * back to the start of the file */ scanned = 1; index = 0; goto retry; } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = index; return ret; } /** * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk * @mapping: The mapping to write * @wbc: The writeback control * */ static int gfs2_jdata_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_sbd *sdp = GFS2_SB(mapping->host); int ret; ret = gfs2_write_cache_jdata(mapping, wbc); if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) { gfs2_log_flush(sdp, ip->i_gl); ret = gfs2_write_cache_jdata(mapping, wbc); } return ret; } /** * stuffed_readpage - Fill in a Linux page with stuffed file data * @ip: the inode * @page: the page * * Returns: errno */ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) { struct buffer_head *dibh; u64 dsize = i_size_read(&ip->i_inode); void *kaddr; int error; /* * Due to the order of unstuffing files and ->fault(), we can be * asked for a zero page in the case of a stuffed file being extended, * so we need to supply one here. It doesn't happen often. */ if (unlikely(page->index)) { zero_user(page, 0, PAGE_CACHE_SIZE); SetPageUptodate(page); return 0; } error = gfs2_meta_inode_buffer(ip, &dibh); if (error) return error; kaddr = kmap_atomic(page, KM_USER0); if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode))) dsize = (dibh->b_size - sizeof(struct gfs2_dinode)); memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); kunmap_atomic(kaddr, KM_USER0); flush_dcache_page(page); brelse(dibh); SetPageUptodate(page); return 0; } /** * __gfs2_readpage - readpage * @file: The file to read a page for * @page: The page to read * * This is the core of gfs2's readpage. It's used by the internal file * reading code, as in that case we already hold the glock. It's also * called by gfs2_readpage() once the required lock has been granted. * */ static int __gfs2_readpage(void *file, struct page *page) { struct gfs2_inode *ip = GFS2_I(page->mapping->host); struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); int error; if (gfs2_is_stuffed(ip)) { error = stuffed_readpage(ip, page); unlock_page(page); } else { error = mpage_readpage(page, gfs2_block_map); } if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) return -EIO; return error; } /** * gfs2_readpage - read a page of a file * @file: The file to read * @page: The page of the file * * This deals with the locking required.
We have to unlock and * relock the page in order to get the locking in the right * order. */ static int gfs2_readpage(struct file *file, struct page *page) { struct address_space *mapping = page->mapping; struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_holder gh; int error; unlock_page(page); gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); error = gfs2_glock_nq(&gh); if (unlikely(error)) goto out; error = AOP_TRUNCATED_PAGE; lock_page(page); if (page->mapping == mapping && !PageUptodate(page)) error = __gfs2_readpage(file, page); else unlock_page(page); gfs2_glock_dq(&gh); out: gfs2_holder_uninit(&gh); if (error && error != AOP_TRUNCATED_PAGE) lock_page(page); return error; } /** * gfs2_internal_read - read an internal file * @ip: The gfs2 inode * @ra_state: The readahead state (or NULL for no readahead) * @buf: The buffer to fill * @pos: The file position * @size: The amount to read * */ int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state, char *buf, loff_t *pos, unsigned size) { struct address_space *mapping = ip->i_inode.i_mapping; unsigned long index = *pos / PAGE_CACHE_SIZE; unsigned offset = *pos & (PAGE_CACHE_SIZE - 1); unsigned copied = 0; unsigned amt; struct page *page; void *p; do { amt = size - copied; if (offset + size > PAGE_CACHE_SIZE) amt = PAGE_CACHE_SIZE - offset; page = read_cache_page(mapping, index, __gfs2_readpage, NULL); if (IS_ERR(page)) return PTR_ERR(page); p = kmap_atomic(page, KM_USER0); memcpy(buf + copied, p + offset, amt); kunmap_atomic(p, KM_USER0); mark_page_accessed(page); page_cache_release(page); copied += amt; index++; offset = 0; } while(copied < size); (*pos) += size; return size; } /** * gfs2_readpages - Read a bunch of pages at once * * Some notes: * 1. This is only for readahead, so we can simply ignore anything * which is slightly inconvenient (such as locking conflicts between * the page lock and the glock) and return having done no I/O. It's * obviously not something we'd want to do on too regular a basis. * Any I/O we ignore at this time will be done via readpage later. * 2. We don't handle stuffed files here; we let readpage do the honours. * 3. mpage_readpages() does most of the heavy lifting in the common case. * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
*/ static int gfs2_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { struct inode *inode = mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_holder gh; int ret; gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); ret = gfs2_glock_nq(&gh); if (unlikely(ret)) goto out_uninit; if (!gfs2_is_stuffed(ip)) ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map); gfs2_glock_dq(&gh); out_uninit: gfs2_holder_uninit(&gh); if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) ret = -EIO; return ret; } /** * gfs2_write_begin - Begin to write to a file * @file: The file to write to * @mapping: The mapping in which to write * @pos: The file offset at which to start writing * @len: Length of the write * @flags: Various flags * @pagep: Pointer to return the page * @fsdata: Pointer to return fs data (unused by GFS2) * * Returns: errno */ static int gfs2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_sbd *sdp = GFS2_SB(mapping->host); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); unsigned int data_blocks = 0, ind_blocks = 0, rblocks; int alloc_required; int error = 0; struct gfs2_alloc *al = NULL; pgoff_t index = pos >> PAGE_CACHE_SHIFT; unsigned from = pos & (PAGE_CACHE_SIZE - 1); struct page *page; gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); error = gfs2_glock_nq(&ip->i_gh); if (unlikely(error)) goto out_uninit; if (&ip->i_inode == sdp->sd_rindex) { error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, &m_ip->i_gh); if (unlikely(error)) { gfs2_glock_dq(&ip->i_gh); goto out_uninit; } } alloc_required = gfs2_write_alloc_required(ip, pos, len); if (alloc_required || gfs2_is_jdata(ip)) gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); if (alloc_required) { al = gfs2_alloc_get(ip); if (!al) { error = -ENOMEM; goto out_unlock; } error = gfs2_quota_lock_check(ip); if (error) goto out_alloc_put; al->al_requested = data_blocks + ind_blocks; error = gfs2_inplace_reserve(ip); if (error) goto out_qunlock; } rblocks = RES_DINODE + ind_blocks; if (gfs2_is_jdata(ip)) rblocks += data_blocks ? 
data_blocks : 1; if (ind_blocks || data_blocks) rblocks += RES_STATFS + RES_QUOTA; if (&ip->i_inode == sdp->sd_rindex) rblocks += 2 * RES_STATFS; if (alloc_required) rblocks += gfs2_rg_blocks(al); error = gfs2_trans_begin(sdp, rblocks, PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); if (error) goto out_trans_fail; error = -ENOMEM; flags |= AOP_FLAG_NOFS; page = grab_cache_page_write_begin(mapping, index, flags); *pagep = page; if (unlikely(!page)) goto out_endtrans; if (gfs2_is_stuffed(ip)) { error = 0; if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) { error = gfs2_unstuff_dinode(ip, page); if (error == 0) goto prepare_write; } else if (!PageUptodate(page)) { error = stuffed_readpage(ip, page); } goto out; } prepare_write: error = __block_write_begin(page, from, len, gfs2_block_map); out: if (error == 0) return 0; unlock_page(page); page_cache_release(page); gfs2_trans_end(sdp); if (pos + len > ip->i_inode.i_size) gfs2_trim_blocks(&ip->i_inode); goto out_trans_fail; out_endtrans: gfs2_trans_end(sdp); out_trans_fail: if (alloc_required) { gfs2_inplace_release(ip); out_qunlock: gfs2_quota_unlock(ip); out_alloc_put: gfs2_alloc_put(ip); } out_unlock: if (&ip->i_inode == sdp->sd_rindex) { gfs2_glock_dq(&m_ip->i_gh); gfs2_holder_uninit(&m_ip->i_gh); } gfs2_glock_dq(&ip->i_gh); out_uninit: gfs2_holder_uninit(&ip->i_gh); return error; } /** * adjust_fs_space - Adjusts the free space available due to gfs2_grow * @inode: the rindex inode */ static void adjust_fs_space(struct inode *inode) { struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct buffer_head *m_bh, *l_bh; u64 fs_total, new_free; /* Total up the file system space, according to the latest rindex. */ fs_total = gfs2_ri_total(sdp); if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0) return; spin_lock(&sdp->sd_statfs_spin); gfs2_statfs_change_in(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); if (fs_total > (m_sc->sc_total + l_sc->sc_total)) new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); else new_free = 0; spin_unlock(&sdp->sd_statfs_spin); fs_warn(sdp, "File system extended by %llu blocks.\n", (unsigned long long)new_free); gfs2_statfs_change(sdp, new_free, new_free, 0); if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0) goto out; update_statfs(sdp, m_bh, l_bh); brelse(l_bh); out: brelse(m_bh); } /** * gfs2_stuffed_write_end - Write end for stuffed files * @inode: The inode * @dibh: The buffer_head containing the on-disk inode * @pos: The file position * @len: The length of the write * @copied: How much was actually copied by the VFS * @page: The page * * This copies the data from the page into the inode block after * the inode data structure itself. 
* * Returns: errno */ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, loff_t pos, unsigned len, unsigned copied, struct page *page) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); u64 to = pos + copied; void *kaddr; unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode); struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode))); kaddr = kmap_atomic(page, KM_USER0); memcpy(buf + pos, kaddr + pos, copied); memset(kaddr + pos + copied, 0, len - copied); flush_dcache_page(page); kunmap_atomic(kaddr, KM_USER0); if (!PageUptodate(page)) SetPageUptodate(page); unlock_page(page); page_cache_release(page); if (copied) { if (inode->i_size < to) i_size_write(inode, to); gfs2_dinode_out(ip, di); mark_inode_dirty(inode); } if (inode == sdp->sd_rindex) { adjust_fs_space(inode); ip->i_gh.gh_flags |= GL_NOCACHE; } brelse(dibh); gfs2_trans_end(sdp); if (inode == sdp->sd_rindex) { gfs2_glock_dq(&m_ip->i_gh); gfs2_holder_uninit(&m_ip->i_gh); } gfs2_glock_dq(&ip->i_gh); gfs2_holder_uninit(&ip->i_gh); return copied; } /** * gfs2_write_end * @file: The file to write to * @mapping: The address space to write to * @pos: The file position * @len: The length of the data * @copied: How much was actually copied by the VFS * @page: The page that has been written * @fsdata: The fsdata (unused in GFS2) * * The main write_end function for GFS2. We have a separate one for * stuffed files as they are slightly different, otherwise we just * put our locking around the VFS provided functions. * * Returns: errno */ static int gfs2_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct buffer_head *dibh; struct gfs2_alloc *al = ip->i_alloc; unsigned int from = pos & (PAGE_CACHE_SIZE - 1); unsigned int to = from + len; int ret; BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL); ret = gfs2_meta_inode_buffer(ip, &dibh); if (unlikely(ret)) { unlock_page(page); page_cache_release(page); goto failed; } gfs2_trans_add_bh(ip->i_gl, dibh, 1); if (gfs2_is_stuffed(ip)) return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page); if (!gfs2_is_writeback(ip)) gfs2_page_add_databufs(ip, page, from, to); ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); if (ret > 0) { gfs2_dinode_out(ip, dibh->b_data); mark_inode_dirty(inode); } if (inode == sdp->sd_rindex) { adjust_fs_space(inode); ip->i_gh.gh_flags |= GL_NOCACHE; } brelse(dibh); failed: gfs2_trans_end(sdp); if (al) { gfs2_inplace_release(ip); gfs2_quota_unlock(ip); gfs2_alloc_put(ip); } if (inode == sdp->sd_rindex) { gfs2_glock_dq(&m_ip->i_gh); gfs2_holder_uninit(&m_ip->i_gh); } gfs2_glock_dq(&ip->i_gh); gfs2_holder_uninit(&ip->i_gh); return ret; } /** * gfs2_set_page_dirty - Page dirtying function * @page: The page to dirty * * Returns: 1 if it dirtied the page, or 0 otherwise */ static int gfs2_set_page_dirty(struct page *page) { SetPageChecked(page); return __set_page_dirty_buffers(page); } /** * gfs2_bmap - Block map function * @mapping: Address space info * @lblock: The block to map * * Returns: The disk address for the block or 0 on hole or error */
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_holder i_gh; sector_t dblock = 0; int error; error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); if (error) return 0; if (!gfs2_is_stuffed(ip)) dblock = generic_block_bmap(mapping, lblock, gfs2_block_map); gfs2_glock_dq_uninit(&i_gh); return dblock; } static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh) { struct gfs2_bufdata *bd; lock_buffer(bh); gfs2_log_lock(sdp); clear_buffer_dirty(bh); bd = bh->b_private; if (bd) { if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh)) list_del_init(&bd->bd_le.le_list); else gfs2_remove_from_journal(bh, current->journal_info, 0); } bh->b_bdev = NULL; clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); gfs2_log_unlock(sdp); unlock_buffer(bh); } static void gfs2_invalidatepage(struct page *page, unsigned long offset) { struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); struct buffer_head *bh, *head; unsigned long pos = 0; BUG_ON(!PageLocked(page)); if (offset == 0) ClearPageChecked(page); if (!page_has_buffers(page)) goto out; bh = head = page_buffers(page); do { if (offset <= pos) gfs2_discard(sdp, bh); pos += bh->b_size; bh = bh->b_this_page; } while (bh != head); out: if (offset == 0) try_to_release_page(page, 0); } /** * gfs2_ok_for_dio - check that dio is valid on this file * @ip: The inode * @rw: READ or WRITE * @offset: The offset at which we are reading or writing * * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o) * 1 (to accept the i/o request) */ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset) { /* * Should we return an error here? I can't see that O_DIRECT for * a stuffed file makes any sense. For now we'll silently fall * back to buffered I/O */ if (gfs2_is_stuffed(ip)) return 0; if (offset >= i_size_read(&ip->i_inode)) return 0; return 1; } static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; int rv; /* * Deferred lock, even if it's a write, since we do no allocation * on this path. All we need to change is atime, and this lock mode * ensures that other nodes have flushed their buffered read caches * (i.e. their page cache entries for this inode). We do not, * unfortunately, have the option of only flushing a range like * the VFS does. */ gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh); rv = gfs2_glock_nq(&gh); if (rv) return rv; rv = gfs2_ok_for_dio(ip, rw, offset); if (rv != 1) goto out; /* dio not valid, fall back to buffered i/o */ rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, nr_segs, gfs2_get_block_direct, NULL, NULL, 0); out: gfs2_glock_dq_m(1, &gh); gfs2_holder_uninit(&gh); return rv; } /** * gfs2_releasepage - free the metadata associated with a page * @page: the page that's being released * @gfp_mask: passed from Linux VFS, ignored by us * * Call try_to_free_buffers() if the buffers in this page can be * released.
* * Returns: 0 */ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) { struct address_space *mapping = page->mapping; struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); struct buffer_head *bh, *head; struct gfs2_bufdata *bd; if (!page_has_buffers(page)) return 0; gfs2_log_lock(sdp); head = bh = page_buffers(page); do { if (atomic_read(&bh->b_count)) goto cannot_release; bd = bh->b_private; if (bd && bd->bd_ail) goto cannot_release; if (buffer_pinned(bh) || buffer_dirty(bh)) goto not_possible; bh = bh->b_this_page; } while(bh != head); gfs2_log_unlock(sdp); head = bh = page_buffers(page); do { gfs2_log_lock(sdp); bd = bh->b_private; if (bd) { gfs2_assert_warn(sdp, bd->bd_bh == bh); gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr)); if (!list_empty(&bd->bd_le.le_list)) { if (!buffer_pinned(bh)) list_del_init(&bd->bd_le.le_list); else bd = NULL; } if (bd) bd->bd_bh = NULL; bh->b_private = NULL; } gfs2_log_unlock(sdp); if (bd) kmem_cache_free(gfs2_bufdata_cachep, bd); bh = bh->b_this_page; } while (bh != head); return try_to_free_buffers(page); not_possible: /* Should never happen */ WARN_ON(buffer_dirty(bh)); WARN_ON(buffer_pinned(bh)); cannot_release: gfs2_log_unlock(sdp); return 0; } static const struct address_space_operations gfs2_writeback_aops = { .writepage = gfs2_writeback_writepage, .writepages = gfs2_writeback_writepages, .readpage = gfs2_readpage, .readpages = gfs2_readpages, .write_begin = gfs2_write_begin, .write_end = gfs2_write_end, .bmap = gfs2_bmap, .invalidatepage = gfs2_invalidatepage, .releasepage = gfs2_releasepage, .direct_IO = gfs2_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations gfs2_ordered_aops = { .writepage = gfs2_ordered_writepage, .readpage = gfs2_readpage, .readpages = gfs2_readpages, .write_begin = gfs2_write_begin, .write_end = gfs2_write_end, .set_page_dirty = gfs2_set_page_dirty, .bmap = gfs2_bmap, .invalidatepage = gfs2_invalidatepage, .releasepage = gfs2_releasepage, .direct_IO = gfs2_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations gfs2_jdata_aops = { .writepage = gfs2_jdata_writepage, .writepages = gfs2_jdata_writepages, .readpage = gfs2_readpage, .readpages = gfs2_readpages, .write_begin = gfs2_write_begin, .write_end = gfs2_write_end, .set_page_dirty = gfs2_set_page_dirty, .bmap = gfs2_bmap, .invalidatepage = gfs2_invalidatepage, .releasepage = gfs2_releasepage, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; void gfs2_set_aops(struct inode *inode) { struct gfs2_inode *ip = GFS2_I(inode); if (gfs2_is_writeback(ip)) inode->i_mapping->a_ops = &gfs2_writeback_aops; else if (gfs2_is_ordered(ip)) inode->i_mapping->a_ops = &gfs2_ordered_aops; else if (gfs2_is_jdata(ip)) inode->i_mapping->a_ops = &gfs2_jdata_aops; else BUG(); }
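/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * every "stuffed" test above reduces to one capacity rule -- file data may
 * live inline in the dinode block only while it fits after the on-disk
 * inode header, i.e. within sb_bsize - sizeof(struct gfs2_dinode) bytes.
 * This mirrors the unstuff test in gfs2_write_begin(); the helper name
 * is hypothetical.
 */
#if 0	/* example only */
static inline int gfs2_write_would_unstuff(unsigned int bsize, loff_t pos,
					   unsigned int len)
{
	/* mirrors the pos + len check made before gfs2_unstuff_dinode() */
	return pos + len > bsize - sizeof(struct gfs2_dinode);
}
#endif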
bcnice20/android-kernel-common
fs/gfs2/aops.c
C
gpl-2.0
30,429
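Which of the three address_space_operations tables gfs2_set_aops() installs follows from the inode's data-journaling mode. On mainline GFS2 the jdata mode is exposed per file through the generic inode-flags ioctl (what chattr +j toggles); a minimal user-space probe, assuming GFS2 maps the generic flag that way:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	int fd, flags = 0;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file-on-gfs2>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* assumption: GFS2 maps FS_JOURNAL_DATA_FL to its jdata inode flag */
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		close(fd);
		return 1;
	}
	printf("%s: %s\n", argv[1], (flags & FS_JOURNAL_DATA_FL) ?
	       "jdata (journaled data)" : "ordered/writeback data");
	close(fd);
	return 0;
}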
/* 8139too.c: A RealTek RTL-8139 Fast Ethernet driver for Linux. Maintained by Jeff Garzik <jgarzik@pobox.com> Copyright 2000-2002 Jeff Garzik Much code comes from Donald Becker's rtl8139.c driver, versions 1.13 and older. This driver was originally based on rtl8139.c version 1.07. Header of rtl8139.c version 1.13: -----<snip>----- Written 1997-2001 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. This driver is for boards based on the RTL8129 and RTL8139 PCI ethernet chips. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 Support and updates available at http://www.scyld.com/network/rtl8139.html Twister-tuning table provided by Kinston <shangh@realtek.com.tw>. -----<snip>----- This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Contributors: Donald Becker - he wrote the original driver, kudos to him! (but please don't e-mail him for support, this isn't his driver) Tigran Aivazian - bug fixes, skbuff free cleanup Martin Mares - suggestions for PCI cleanup David S. Miller - PCI DMA and softnet updates Ernst Gill - fixes ported from BSD driver Daniel Kobras - identified specific locations of posted MMIO write bugginess Gerard Sharp - bug fix, testing and feedback David Ford - Rx ring wrap fix Dan DeMaggio - swapped RTL8139 cards with me, and allowed me to find and fix a crucial bug on older chipsets. Donald Becker/Chris Butterworth/Marcus Westergren - Noticed various Rx packet size-related buglets. Santiago Garcia Mantinan - testing and feedback Jens David - 2.2.x kernel backports Martin Dennett - incredibly helpful insight on undocumented features of the 8139 chips Jean-Jacques Michel - bug fix Tobias Ringström - Rx interrupt status checking suggestion Andrew Morton - Clear blocked signals, avoid buffer overrun setting current->comm. Kalle Olavi Niemitalo - Wake-on-LAN ioctls Robert Kuebel - Save kernel thread from dying on any signal. 
Submitting bug reports: "rtl8139-diag -mmmaaavvveefN" output enable RTL8139_DEBUG below, and look at 'dmesg' or kernel log */ #define DRV_NAME "8139too" #define DRV_VERSION "0.9.28" #include <linux/module.h> #include <linux/kernel.h> #include <linux/compiler.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/completion.h> #include <linux/crc32.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/irq.h> #define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION #define PFX DRV_NAME ": " /* Default Message level */ #define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK) /* enable PIO instead of MMIO, if CONFIG_8139TOO_PIO is selected */ #ifdef CONFIG_8139TOO_PIO #define USE_IO_OPS 1 #endif /* define to 1, 2 or 3 to enable copious debugging info */ #define RTL8139_DEBUG 0 /* define to 1 to disable lightweight runtime debugging checks */ #undef RTL8139_NDEBUG #if RTL8139_DEBUG /* note: prints function name for you */ # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args) #else # define DPRINTK(fmt, args...) #endif #ifdef RTL8139_NDEBUG # define assert(expr) do {} while (0) #else # define assert(expr) \ if(unlikely(!(expr))) { \ printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ #expr,__FILE__,__FUNCTION__,__LINE__); \ } #endif /* A few user-configurable values. */ /* media options */ #define MAX_UNITS 8 static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ static int multicast_filter_limit = 32; /* bitmapped message enable number */ static int debug = -1; /* * Receive ring size * Warning: 64K ring has hardware issues and may lock up. */ #if defined(CONFIG_SH_DREAMCAST) #define RX_BUF_IDX 1 /* 16K ring */ #else #define RX_BUF_IDX 2 /* 32K ring */ #endif #define RX_BUF_LEN (8192 << RX_BUF_IDX) #define RX_BUF_PAD 16 #define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */ #if RX_BUF_LEN == 65536 #define RX_BUF_TOT_LEN RX_BUF_LEN #else #define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD) #endif /* Number of Tx descriptor registers. */ #define NUM_TX_DESC 4 /* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/ #define MAX_ETH_FRAME_SIZE 1536 /* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */ #define TX_BUF_SIZE MAX_ETH_FRAME_SIZE #define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC) /* PCI Tuning Parameters Threshold is bytes transferred to chip before transmission starts. */ #define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */ /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ #define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */ #define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */ #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define TX_RETRY 8 /* 0-15. retries = 16 + (TX_RETRY * 16) */ /* Operational parameters that usually are not changed. */ /* Time in jiffies before concluding the transmitter is hung. 
*/ #define TX_TIMEOUT (6*HZ) enum { HAS_MII_XCVR = 0x010000, HAS_CHIP_XCVR = 0x020000, HAS_LNK_CHNG = 0x040000, }; #define RTL_NUM_STATS 4 /* number of ETHTOOL_GSTATS u64's */ #define RTL_REGS_VER 1 /* version of reg. data in ETHTOOL_GREGS */ #define RTL_MIN_IO_SIZE 0x80 #define RTL8139B_IO_SIZE 256 #define RTL8129_CAPS HAS_MII_XCVR #define RTL8139_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG typedef enum { RTL8139 = 0, RTL8129, } board_t; /* indexed by board_t, above */ static const struct { const char *name; u32 hw_flags; } board_info[] __devinitdata = { { "RealTek RTL8139", RTL8139_CAPS }, { "RealTek RTL8129", RTL8129_CAPS }, }; static struct pci_device_id rtl8139_pci_tbl[] = { {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x1259, 0xa11e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x11db, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x1432, 0x9130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x02ac, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, #ifdef CONFIG_SH_SECUREEDGE5410 /* Bogus 8139 silicon reports 8129 without external PROM :-( */ {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, #endif #ifdef CONFIG_8139TOO_8129 {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 }, #endif /* some crazy cards report invalid vendor ids like * 0x0001 here. The other ids are valid and constant, * so we simply don't match on the main vendor id. */ {PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 }, {PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, RTL8139 }, {PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, RTL8139 }, {0,} }; MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl); static struct { const char str[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { { "early_rx" }, { "tx_buf_mapped" }, { "tx_timeouts" }, { "rx_lost_in_ring" }, }; /* The rest of these values should never change. */ /* Symbolic offsets to registers. */ enum RTL8139_registers { MAC0 = 0, /* Ethernet hardware address. */ MAR0 = 8, /* Multicast filter. */ TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */ TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */ RxBuf = 0x30, ChipCmd = 0x37, RxBufPtr = 0x38, RxBufAddr = 0x3A, IntrMask = 0x3C, IntrStatus = 0x3E, TxConfig = 0x40, RxConfig = 0x44, Timer = 0x48, /* A general-purpose counter. */ RxMissed = 0x4C, /* 24 bits valid, write clears. */ Cfg9346 = 0x50, Config0 = 0x51, Config1 = 0x52, FlashReg = 0x54, MediaStatus = 0x58, Config3 = 0x59, Config4 = 0x5A, /* absent on RTL-8139A */ HltClk = 0x5B, MultiIntr = 0x5C, TxSummary = 0x60, BasicModeCtrl = 0x62, BasicModeStatus = 0x64, NWayAdvert = 0x66, NWayLPAR = 0x68, NWayExpansion = 0x6A, /* Undocumented registers, but required for proper operation. */ FIFOTMS = 0x70, /* FIFO Control and test. 
*/ CSCR = 0x74, /* Chip Status and Configuration Register. */ PARA78 = 0x78, PARA7c = 0x7c, /* Magic transceiver parameter register. */ Config5 = 0xD8, /* absent on RTL-8139A */ }; enum ClearBitMasks { MultiIntrClear = 0xF000, ChipCmdClear = 0xE2, Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1), }; enum ChipCmdBits { CmdReset = 0x10, CmdRxEnb = 0x08, CmdTxEnb = 0x04, RxBufEmpty = 0x01, }; /* Interrupt register bits, using my own meaningful names. */ enum IntrStatusBits { PCIErr = 0x8000, PCSTimeout = 0x4000, RxFIFOOver = 0x40, RxUnderrun = 0x20, RxOverflow = 0x10, TxErr = 0x08, TxOK = 0x04, RxErr = 0x02, RxOK = 0x01, RxAckBits = RxFIFOOver | RxOverflow | RxOK, }; enum TxStatusBits { TxHostOwns = 0x2000, TxUnderrun = 0x4000, TxStatOK = 0x8000, TxOutOfWindow = 0x20000000, TxAborted = 0x40000000, TxCarrierLost = 0x80000000, }; enum RxStatusBits { RxMulticast = 0x8000, RxPhysical = 0x4000, RxBroadcast = 0x2000, RxBadSymbol = 0x0020, RxRunt = 0x0010, RxTooLong = 0x0008, RxCRCErr = 0x0004, RxBadAlign = 0x0002, RxStatusOK = 0x0001, }; /* Bits in RxConfig. */ enum rx_mode_bits { AcceptErr = 0x20, AcceptRunt = 0x10, AcceptBroadcast = 0x08, AcceptMulticast = 0x04, AcceptMyPhys = 0x02, AcceptAllPhys = 0x01, }; /* Bits in TxConfig. */ enum tx_config_bits { /* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */ TxIFGShift = 24, TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */ TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */ TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */ TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */ TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */ TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */ TxClearAbt = (1 << 0), /* Clear abort (WO) */ TxDMAShift = 8, /* DMA burst value (0-7) is shifted this many bits */ TxRetryShift = 4, /* TXRR value (0-15) is shifted this many bits */ TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */ }; /* Bits in Config1 */ enum Config1Bits { Cfg1_PM_Enable = 0x01, Cfg1_VPD_Enable = 0x02, Cfg1_PIO = 0x04, Cfg1_MMIO = 0x08, LWAKE = 0x10, /* not on 8139, 8139A */ Cfg1_Driver_Load = 0x20, Cfg1_LED0 = 0x40, Cfg1_LED1 = 0x80, SLEEP = (1 << 1), /* only on 8139, 8139A */ PWRDN = (1 << 0), /* only on 8139, 8139A */ }; /* Bits in Config3 */ enum Config3Bits { Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */ Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */ Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */ Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */ Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */ Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */ Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */ Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */ }; /* Bits in Config4 */ enum Config4Bits { LWPTN = (1 << 2), /* not on 8139, 8139A */ }; /* Bits in Config5 */ enum Config5Bits { Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */ Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */ Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */ Cfg5_FIFOAddrPtr = (1 << 3), /* Realtek internal SRAM testing */ Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */ Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */ Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */ }; enum RxConfigBits { /* rx fifo threshold */ RxCfgFIFOShift = 13, RxCfgFIFONone = (7 << RxCfgFIFOShift), /* Max DMA burst */ RxCfgDMAShift = 8, 
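/* Worked example (illustrative note, not part of the original source): with the
 * non-Dreamcast defaults above -- RX_BUF_IDX == 2, RX_FIFO_THRESH == 7 and
 * RX_DMA_BURST == 7 -- the rtl8139_rx_config value assembled further down is
 *   RxCfgRcv32K | RxNoWrap | (7 << RxCfgFIFOShift) | (7 << RxCfgDMAShift)
 *   = 0x1000 | 0x0080 | 0xE000 | 0x0700 = 0xF780,
 * i.e. a 32K ring with wrap disabled, the RxCfgFIFONone threshold and an
 * unlimited DMA burst. */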
RxCfgDMAUnlimited = (7 << RxCfgDMAShift), /* rx ring buffer length */ RxCfgRcv8K = 0, RxCfgRcv16K = (1 << 11), RxCfgRcv32K = (1 << 12), RxCfgRcv64K = (1 << 11) | (1 << 12), /* Disable packet wrap at end of Rx buffer. (not possible with 64k) */ RxNoWrap = (1 << 7), }; /* Twister tuning parameters from RealTek. Completely undocumented, but required to tune bad links on some boards. */ enum CSCRBits { CSCR_LinkOKBit = 0x0400, CSCR_LinkChangeBit = 0x0800, CSCR_LinkStatusBits = 0x0f000, CSCR_LinkDownOffCmd = 0x003c0, CSCR_LinkDownCmd = 0x0f3c0, }; enum Cfg9346Bits { Cfg9346_Lock = 0x00, Cfg9346_Unlock = 0xC0, }; typedef enum { CH_8139 = 0, CH_8139_K, CH_8139A, CH_8139A_G, CH_8139B, CH_8130, CH_8139C, CH_8100, CH_8100B_8139D, CH_8101, } chip_t; enum chip_flags { HasHltClk = (1 << 0), HasLWake = (1 << 1), }; #define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \ (b30<<30 | b29<<29 | b28<<28 | b27<<27 | b26<<26 | b23<<23 | b22<<22) #define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1) /* directly indexed by chip_t, above */ static const struct { const char *name; u32 version; /* from RTL8139C/RTL8139D docs */ u32 flags; } rtl_chip_info[] = { { "RTL-8139", HW_REVID(1, 0, 0, 0, 0, 0, 0), HasHltClk, }, { "RTL-8139 rev K", HW_REVID(1, 1, 0, 0, 0, 0, 0), HasHltClk, }, { "RTL-8139A", HW_REVID(1, 1, 1, 0, 0, 0, 0), HasHltClk, /* XXX undocumented? */ }, { "RTL-8139A rev G", HW_REVID(1, 1, 1, 0, 0, 1, 0), HasHltClk, /* XXX undocumented? */ }, { "RTL-8139B", HW_REVID(1, 1, 1, 1, 0, 0, 0), HasLWake, }, { "RTL-8130", HW_REVID(1, 1, 1, 1, 1, 0, 0), HasLWake, }, { "RTL-8139C", HW_REVID(1, 1, 1, 0, 1, 0, 0), HasLWake, }, { "RTL-8100", HW_REVID(1, 1, 1, 1, 0, 1, 0), HasLWake, }, { "RTL-8100B/8139D", HW_REVID(1, 1, 1, 0, 1, 0, 1), HasHltClk /* XXX undocumented? */ | HasLWake, }, { "RTL-8101", HW_REVID(1, 1, 1, 0, 1, 1, 1), HasLWake, }, }; struct rtl_extra_stats { unsigned long early_rx; unsigned long tx_buf_mapped; unsigned long tx_timeouts; unsigned long rx_lost_in_ring; }; struct rtl8139_private { void __iomem *mmio_addr; int drv_flags; struct pci_dev *pci_dev; u32 msg_enable; struct net_device_stats stats; unsigned char *rx_ring; unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */ unsigned int tx_flag; unsigned long cur_tx; unsigned long dirty_tx; unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */ unsigned char *tx_bufs; /* Tx bounce buffer region. */ dma_addr_t rx_ring_dma; dma_addr_t tx_bufs_dma; signed char phys[4]; /* MII device addresses. */ char twistie, twist_row, twist_col; /* Twister tune state. */ unsigned int watchdog_fired : 1; unsigned int default_port : 4; /* Last dev->if_port value. 
*/ unsigned int have_thread : 1; spinlock_t lock; spinlock_t rx_lock; chip_t chipset; u32 rx_config; struct rtl_extra_stats xstats; struct work_struct thread; struct mii_if_info mii; unsigned int regs_len; unsigned long fifo_copy_timeout; }; MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>"); MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); module_param(multicast_filter_limit, int, 0); module_param_array(media, int, NULL, 0); module_param_array(full_duplex, int, NULL, 0); module_param(debug, int, 0); MODULE_PARM_DESC (debug, "8139too bitmapped message enable number"); MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered multicast addresses"); MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps"); MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)"); static int read_eeprom (void __iomem *ioaddr, int location, int addr_len); static int rtl8139_open (struct net_device *dev); static int mdio_read (struct net_device *dev, int phy_id, int location); static void mdio_write (struct net_device *dev, int phy_id, int location, int val); static void rtl8139_start_thread(struct rtl8139_private *tp); static void rtl8139_tx_timeout (struct net_device *dev); static void rtl8139_init_ring (struct net_device *dev); static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev); static int rtl8139_poll(struct net_device *dev, int *budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void rtl8139_poll_controller(struct net_device *dev); #endif static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); static int rtl8139_close (struct net_device *dev); static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); static struct net_device_stats *rtl8139_get_stats (struct net_device *dev); static void rtl8139_set_rx_mode (struct net_device *dev); static void __set_rx_mode (struct net_device *dev); static void rtl8139_hw_start (struct net_device *dev); static void rtl8139_thread (void *_data); static void rtl8139_tx_timeout_task(void *_data); static const struct ethtool_ops rtl8139_ethtool_ops; /* write MMIO register, with flush */ /* Flush avoids rtl8139 bug w/ posted MMIO writes */ #define RTL_W8_F(reg, val8) do { iowrite8 ((val8), ioaddr + (reg)); ioread8 (ioaddr + (reg)); } while (0) #define RTL_W16_F(reg, val16) do { iowrite16 ((val16), ioaddr + (reg)); ioread16 (ioaddr + (reg)); } while (0) #define RTL_W32_F(reg, val32) do { iowrite32 ((val32), ioaddr + (reg)); ioread32 (ioaddr + (reg)); } while (0) #define MMIO_FLUSH_AUDIT_COMPLETE 1 #if MMIO_FLUSH_AUDIT_COMPLETE /* write MMIO register */ #define RTL_W8(reg, val8) iowrite8 ((val8), ioaddr + (reg)) #define RTL_W16(reg, val16) iowrite16 ((val16), ioaddr + (reg)) #define RTL_W32(reg, val32) iowrite32 ((val32), ioaddr + (reg)) #else /* write MMIO register, then flush */ #define RTL_W8 RTL_W8_F #define RTL_W16 RTL_W16_F #define RTL_W32 RTL_W32_F #endif /* MMIO_FLUSH_AUDIT_COMPLETE */ /* read MMIO register */ #define RTL_R8(reg) ioread8 (ioaddr + (reg)) #define RTL_R16(reg) ioread16 (ioaddr + (reg)) #define RTL_R32(reg) ((unsigned long) ioread32 (ioaddr + (reg))) static const u16 rtl8139_intr_mask = PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK; static const u16 rtl8139_norx_intr_mask = PCIErr | PCSTimeout | RxUnderrun | TxErr | TxOK | RxErr ; #if RX_BUF_IDX == 0 static const unsigned int rtl8139_rx_config = RxCfgRcv8K | RxNoWrap | (RX_FIFO_THRESH 
<< RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); #elif RX_BUF_IDX == 1 static const unsigned int rtl8139_rx_config = RxCfgRcv16K | RxNoWrap | (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); #elif RX_BUF_IDX == 2 static const unsigned int rtl8139_rx_config = RxCfgRcv32K | RxNoWrap | (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); #elif RX_BUF_IDX == 3 static const unsigned int rtl8139_rx_config = RxCfgRcv64K | (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); #else #error "Invalid configuration for 8139_RXBUF_IDX" #endif static const unsigned int rtl8139_tx_config = TxIFG96 | (TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift); static void __rtl8139_cleanup_dev (struct net_device *dev) { struct rtl8139_private *tp = netdev_priv(dev); struct pci_dev *pdev; assert (dev != NULL); assert (tp->pci_dev != NULL); pdev = tp->pci_dev; #ifdef USE_IO_OPS if (tp->mmio_addr) ioport_unmap (tp->mmio_addr); #else if (tp->mmio_addr) pci_iounmap (pdev, tp->mmio_addr); #endif /* USE_IO_OPS */ /* it's ok to call this even if we have no regions to free */ pci_release_regions (pdev); free_netdev(dev); pci_set_drvdata (pdev, NULL); } static void rtl8139_chip_reset (void __iomem *ioaddr) { int i; /* Soft reset the chip. */ RTL_W8 (ChipCmd, CmdReset); /* Check that the chip has finished the reset. */ for (i = 1000; i > 0; i--) { barrier(); if ((RTL_R8 (ChipCmd) & CmdReset) == 0) break; udelay (10); } } static int __devinit rtl8139_init_board (struct pci_dev *pdev, struct net_device **dev_out) { void __iomem *ioaddr; struct net_device *dev; struct rtl8139_private *tp; u8 tmp8; int rc, disable_dev_on_err = 0; unsigned int i; unsigned long pio_start, pio_end, pio_flags, pio_len; unsigned long mmio_start, mmio_end, mmio_flags, mmio_len; u32 version; assert (pdev != NULL); *dev_out = NULL; /* dev and priv zeroed in alloc_etherdev */ dev = alloc_etherdev (sizeof (*tp)); if (dev == NULL) { dev_err(&pdev->dev, "Unable to alloc new net device\n"); return -ENOMEM; } SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); tp->pci_dev = pdev; /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ rc = pci_enable_device (pdev); if (rc) goto err_out; pio_start = pci_resource_start (pdev, 0); pio_end = pci_resource_end (pdev, 0); pio_flags = pci_resource_flags (pdev, 0); pio_len = pci_resource_len (pdev, 0); mmio_start = pci_resource_start (pdev, 1); mmio_end = pci_resource_end (pdev, 1); mmio_flags = pci_resource_flags (pdev, 1); mmio_len = pci_resource_len (pdev, 1); /* set this immediately, we need to know before * we talk to the chip directly */ DPRINTK("PIO region size == 0x%02lX\n", pio_len); DPRINTK("MMIO region size == 0x%02lX\n", mmio_len); #ifdef USE_IO_OPS /* make sure PCI base addr 0 is PIO */ if (!(pio_flags & IORESOURCE_IO)) { dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n"); rc = -ENODEV; goto err_out; } /* check for weird/broken PCI region reporting */ if (pio_len < RTL_MIN_IO_SIZE) { dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n"); rc = -ENODEV; goto err_out; } #else /* make sure PCI base addr 1 is MMIO */ if (!(mmio_flags & IORESOURCE_MEM)) { dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n"); rc = -ENODEV; goto err_out; } if (mmio_len < RTL_MIN_IO_SIZE) { dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n"); rc = -ENODEV; goto err_out; } #endif rc = pci_request_regions (pdev, DRV_NAME); if (rc) goto err_out; disable_dev_on_err = 1; /* enable PCI bus-mastering */ pci_set_master (pdev); #ifdef USE_IO_OPS ioaddr = ioport_map(pio_start, pio_len); if (!ioaddr) { dev_err(&pdev->dev, "cannot map PIO, aborting\n"); rc = -EIO; goto err_out; } dev->base_addr = pio_start; tp->mmio_addr = ioaddr; tp->regs_len = pio_len; #else /* ioremap MMIO region */ ioaddr = pci_iomap(pdev, 1, 0); if (ioaddr == NULL) { dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); rc = -EIO; goto err_out; } dev->base_addr = (long) ioaddr; tp->mmio_addr = ioaddr; tp->regs_len = mmio_len; #endif /* USE_IO_OPS */ /* Bring old chips out of low-power mode. 
*/ RTL_W8 (HltClk, 'R'); /* check for missing/broken hardware */ if (RTL_R32 (TxConfig) == 0xFFFFFFFF) { dev_err(&pdev->dev, "Chip not responding, ignoring board\n"); rc = -EIO; goto err_out; } /* identify chip attached to board */ version = RTL_R32 (TxConfig) & HW_REVID_MASK; for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++) if (version == rtl_chip_info[i].version) { tp->chipset = i; goto match; } /* if unknown chip, assume array element #0, original RTL-8139 in this case */ dev_printk (KERN_DEBUG, &pdev->dev, "unknown chip version, assuming RTL-8139\n"); dev_printk (KERN_DEBUG, &pdev->dev, "TxConfig = 0x%lx\n", RTL_R32 (TxConfig)); tp->chipset = 0; match: DPRINTK ("chipset id (%d) == index %d, '%s'\n", version, i, rtl_chip_info[i].name); if (tp->chipset >= CH_8139B) { u8 new_tmp8 = tmp8 = RTL_R8 (Config1); DPRINTK("PCI PM wakeup\n"); if ((rtl_chip_info[tp->chipset].flags & HasLWake) && (tmp8 & LWAKE)) new_tmp8 &= ~LWAKE; new_tmp8 |= Cfg1_PM_Enable; if (new_tmp8 != tmp8) { RTL_W8 (Cfg9346, Cfg9346_Unlock); RTL_W8 (Config1, new_tmp8); RTL_W8 (Cfg9346, Cfg9346_Lock); } if (rtl_chip_info[tp->chipset].flags & HasLWake) { tmp8 = RTL_R8 (Config4); if (tmp8 & LWPTN) { RTL_W8 (Cfg9346, Cfg9346_Unlock); RTL_W8 (Config4, tmp8 & ~LWPTN); RTL_W8 (Cfg9346, Cfg9346_Lock); } } } else { DPRINTK("Old chip wakeup\n"); tmp8 = RTL_R8 (Config1); tmp8 &= ~(SLEEP | PWRDN); RTL_W8 (Config1, tmp8); } rtl8139_chip_reset (ioaddr); *dev_out = dev; return 0; err_out: __rtl8139_cleanup_dev (dev); if (disable_dev_on_err) pci_disable_device (pdev); return rc; } static int __devinit rtl8139_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev = NULL; struct rtl8139_private *tp; int i, addr_len, option; void __iomem *ioaddr; static int board_idx = -1; u8 pci_rev; assert (pdev != NULL); assert (ent != NULL); board_idx++; /* when we're built into the kernel, the driver version message * is only printed if at least one 8139 board has been found */ #ifndef MODULE { static int printed_version; if (!printed_version++) printk (KERN_INFO RTL8139_DRIVER_NAME "\n"); } #endif pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev); if (pdev->vendor == PCI_VENDOR_ID_REALTEK && pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev >= 0x20) { dev_info(&pdev->dev, "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n", pdev->vendor, pdev->device, pci_rev); dev_info(&pdev->dev, "Use the \"8139cp\" driver for improved performance and stability.\n"); } i = rtl8139_init_board (pdev, &dev); if (i < 0) return i; assert (dev != NULL); tp = netdev_priv(dev); ioaddr = tp->mmio_addr; assert (ioaddr != NULL); addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6; for (i = 0; i < 3; i++) ((u16 *) (dev->dev_addr))[i] = le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len)); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); /* The Rtl8139-specific entries in the device structure. 
*/ dev->open = rtl8139_open; dev->hard_start_xmit = rtl8139_start_xmit; dev->poll = rtl8139_poll; dev->weight = 64; dev->stop = rtl8139_close; dev->get_stats = rtl8139_get_stats; dev->set_multicast_list = rtl8139_set_rx_mode; dev->do_ioctl = netdev_ioctl; dev->ethtool_ops = &rtl8139_ethtool_ops; dev->tx_timeout = rtl8139_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = rtl8139_poll_controller; #endif /* note: the hardware is not capable of sg/csum/highdma, however * through the use of skb_copy_and_csum_dev we enable these * features */ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; dev->irq = pdev->irq; /* tp zeroed and aligned in alloc_etherdev */ tp = netdev_priv(dev); /* note: tp->chipset set in rtl8139_init_board */ tp->drv_flags = board_info[ent->driver_data].hw_flags; tp->mmio_addr = ioaddr; tp->msg_enable = (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1)); spin_lock_init (&tp->lock); spin_lock_init (&tp->rx_lock); INIT_WORK(&tp->thread, rtl8139_thread, dev); tp->mii.dev = dev; tp->mii.mdio_read = mdio_read; tp->mii.mdio_write = mdio_write; tp->mii.phy_id_mask = 0x3f; tp->mii.reg_num_mask = 0x1f; /* dev is fully set up and ready to use now */ DPRINTK("about to register device named %s (%p)...\n", dev->name, dev); i = register_netdev (dev); if (i) goto err_out; pci_set_drvdata (pdev, dev); printk (KERN_INFO "%s: %s at 0x%lx, " "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " "IRQ %d\n", dev->name, board_info[ent->driver_data].name, dev->base_addr, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], dev->irq); printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n", dev->name, rtl_chip_info[tp->chipset].name); /* Find the connected MII xcvrs. Doing this in open() would allow detecting external xcvrs later, but takes too much time. */ #ifdef CONFIG_8139TOO_8129 if (tp->drv_flags & HAS_MII_XCVR) { int phy, phy_idx = 0; for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) { int mii_status = mdio_read(dev, phy, 1); if (mii_status != 0xffff && mii_status != 0x0000) { u16 advertising = mdio_read(dev, phy, 4); tp->phys[phy_idx++] = phy; printk(KERN_INFO "%s: MII transceiver %d status 0x%4.4x " "advertising %4.4x.\n", dev->name, phy, mii_status, advertising); } } if (phy_idx == 0) { printk(KERN_INFO "%s: No MII transceivers found! Assuming SYM " "transceiver.\n", dev->name); tp->phys[0] = 32; } } else #endif tp->phys[0] = 32; tp->mii.phy_id = tp->phys[0]; /* The lower four bits are the media type. */ option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx]; if (option > 0) { tp->mii.full_duplex = (option & 0x210) ? 1 : 0; tp->default_port = option & 0xFF; if (tp->default_port) tp->mii.force_media = 1; } if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0) tp->mii.full_duplex = full_duplex[board_idx]; if (tp->mii.full_duplex) { printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name); /* Changing the MII-advertised media might prevent re-connection. */ tp->mii.force_media = 1; } if (tp->default_port) { printk(KERN_INFO "  Forcing %dMbps %s-duplex operation.\n", (option & 0x20 ? 100 : 10), (option & 0x10 ? "full" : "half")); mdio_write(dev, tp->phys[0], 0, ((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */ ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */ } /* Put the chip into low-power mode. */ if (rtl_chip_info[tp->chipset].flags & HasHltClk) RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. 
*/ return 0; err_out: __rtl8139_cleanup_dev (dev); pci_disable_device (pdev); return i; } static void __devexit rtl8139_remove_one (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); assert (dev != NULL); unregister_netdev (dev); __rtl8139_cleanup_dev (dev); pci_disable_device (pdev); } /* Serial EEPROM section. */ /* EEPROM_Ctrl bits. */ #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */ #define EE_CS 0x08 /* EEPROM chip select. */ #define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */ #define EE_WRITE_0 0x00 #define EE_WRITE_1 0x02 #define EE_DATA_READ 0x01 /* EEPROM chip data out. */ #define EE_ENB (0x80 | EE_CS) /* Delay between EEPROM clock transitions. No extra delay is needed with 33 MHz PCI, but 66 MHz may change this. */ #define eeprom_delay() (void)RTL_R32(Cfg9346) /* The EEPROM commands include the always-set leading bit. */ #define EE_WRITE_CMD (5) #define EE_READ_CMD (6) #define EE_ERASE_CMD (7) static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_len) { int i; unsigned retval = 0; int read_cmd = location | (EE_READ_CMD << addr_len); RTL_W8 (Cfg9346, EE_ENB & ~EE_CS); RTL_W8 (Cfg9346, EE_ENB); eeprom_delay (); /* Shift the read command bits out. */ for (i = 4 + addr_len; i >= 0; i--) { int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; RTL_W8 (Cfg9346, EE_ENB | dataval); eeprom_delay (); RTL_W8 (Cfg9346, EE_ENB | dataval | EE_SHIFT_CLK); eeprom_delay (); } RTL_W8 (Cfg9346, EE_ENB); eeprom_delay (); for (i = 16; i > 0; i--) { RTL_W8 (Cfg9346, EE_ENB | EE_SHIFT_CLK); eeprom_delay (); retval = (retval << 1) | ((RTL_R8 (Cfg9346) & EE_DATA_READ) ? 1 : 0); RTL_W8 (Cfg9346, EE_ENB); eeprom_delay (); } /* Terminate the EEPROM access. */ RTL_W8 (Cfg9346, ~EE_CS); eeprom_delay (); return retval; } /* MII serial management: mostly bogus for now. */ /* Read and write the MII management registers using software-generated serial MDIO protocol. The maximum data clock rate is 2.5 MHz. The minimum timing is usually met by back-to-back PCI I/O cycles, but we insert a delay to avoid "overclocking" issues. */ #define MDIO_DIR 0x80 #define MDIO_DATA_OUT 0x04 #define MDIO_DATA_IN 0x02 #define MDIO_CLK 0x01 #define MDIO_WRITE0 (MDIO_DIR) #define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT) #define mdio_delay() RTL_R8(Config4) static const char mii_2_8139_map[8] = { BasicModeCtrl, BasicModeStatus, 0, 0, NWayAdvert, NWayLPAR, NWayExpansion, 0 }; #ifdef CONFIG_8139TOO_8129 /* Synchronize the MII management interface by shifting 32 one bits out. */ static void mdio_sync (void __iomem *ioaddr) { int i; for (i = 32; i >= 0; i--) { RTL_W8 (Config4, MDIO_WRITE1); mdio_delay (); RTL_W8 (Config4, MDIO_WRITE1 | MDIO_CLK); mdio_delay (); } } #endif static int mdio_read (struct net_device *dev, int phy_id, int location) { struct rtl8139_private *tp = netdev_priv(dev); int retval = 0; #ifdef CONFIG_8139TOO_8129 void __iomem *ioaddr = tp->mmio_addr; int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; int i; #endif if (phy_id > 31) { /* Really a 8139. Use internal registers. */ void __iomem *ioaddr = tp->mmio_addr; return location < 8 && mii_2_8139_map[location] ? RTL_R16 (mii_2_8139_map[location]) : 0; } #ifdef CONFIG_8139TOO_8129 mdio_sync (ioaddr); /* Shift the read command bits out. */ for (i = 15; i >= 0; i--) { int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0; RTL_W8 (Config4, MDIO_DIR | dataval); mdio_delay (); RTL_W8 (Config4, MDIO_DIR | dataval | MDIO_CLK); mdio_delay (); } /* Read the two transition, 16 data, and wire-idle bits. 
*/ for (i = 19; i > 0; i--) { RTL_W8 (Config4, 0); mdio_delay (); retval = (retval << 1) | ((RTL_R8 (Config4) & MDIO_DATA_IN) ? 1 : 0); RTL_W8 (Config4, MDIO_CLK); mdio_delay (); } #endif return (retval >> 1) & 0xffff; } static void mdio_write (struct net_device *dev, int phy_id, int location, int value) { struct rtl8139_private *tp = netdev_priv(dev); #ifdef CONFIG_8139TOO_8129 void __iomem *ioaddr = tp->mmio_addr; int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value; int i; #endif if (phy_id > 31) { /* Really a 8139. Use internal registers. */ void __iomem *ioaddr = tp->mmio_addr; if (location == 0) { RTL_W8 (Cfg9346, Cfg9346_Unlock); RTL_W16 (BasicModeCtrl, value); RTL_W8 (Cfg9346, Cfg9346_Lock); } else if (location < 8 && mii_2_8139_map[location]) RTL_W16 (mii_2_8139_map[location], value); return; } #ifdef CONFIG_8139TOO_8129 mdio_sync (ioaddr); /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; RTL_W8 (Config4, dataval); mdio_delay (); RTL_W8 (Config4, dataval | MDIO_CLK); mdio_delay (); } /* Clear out extra bits. */ for (i = 2; i > 0; i--) { RTL_W8 (Config4, 0); mdio_delay (); RTL_W8 (Config4, MDIO_CLK); mdio_delay (); } #endif } static int rtl8139_open (struct net_device *dev) { struct rtl8139_private *tp = netdev_priv(dev); int retval; void __iomem *ioaddr = tp->mmio_addr; retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); if (retval) return retval; tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN, &tp->tx_bufs_dma); tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN, &tp->rx_ring_dma); if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { free_irq(dev->irq, dev); if (tp->tx_bufs) pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN, tp->tx_bufs, tp->tx_bufs_dma); if (tp->rx_ring) pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, tp->rx_ring, tp->rx_ring_dma); return -ENOMEM; } tp->mii.full_duplex = tp->mii.force_media; tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000; rtl8139_init_ring (dev); rtl8139_hw_start (dev); netif_start_queue (dev); if (netif_msg_ifup(tp)) printk(KERN_DEBUG "%s: rtl8139_open() ioaddr %#llx IRQ %d" " GP Pins %2.2x %s-duplex.\n", dev->name, (unsigned long long)pci_resource_start (tp->pci_dev, 1), dev->irq, RTL_R8 (MediaStatus), tp->mii.full_duplex ? "full" : "half"); rtl8139_start_thread(tp); return 0; } static void rtl_check_media (struct net_device *dev, unsigned int init_media) { struct rtl8139_private *tp = netdev_priv(dev); if (tp->phys[0] >= 0) { mii_check_media(&tp->mii, netif_msg_link(tp), init_media); } } /* Start the hardware at open or resume. */ static void rtl8139_hw_start (struct net_device *dev) { struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u32 i; u8 tmp; /* Bring old chips out of low-power mode. */ if (rtl_chip_info[tp->chipset].flags & HasHltClk) RTL_W8 (HltClk, 'R'); rtl8139_chip_reset (ioaddr); /* unlock Config[01234] and BMCR register writes */ RTL_W8_F (Cfg9346, Cfg9346_Unlock); /* Restore our idea of the MAC address. */ RTL_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0))); RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4))); /* Must enable Tx/Rx before setting transfer thresholds! 
*/ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys; RTL_W32 (RxConfig, tp->rx_config); RTL_W32 (TxConfig, rtl8139_tx_config); tp->cur_rx = 0; rtl_check_media (dev, 1); if (tp->chipset >= CH_8139B) { /* Disable magic packet scanning, which is enabled * when PM is enabled in Config1. It can be reenabled * via ETHTOOL_SWOL if desired. */ RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic); } DPRINTK("init buffer addresses\n"); /* Lock Config[01234] and BMCR register writes */ RTL_W8 (Cfg9346, Cfg9346_Lock); /* init Rx ring buffer DMA address */ RTL_W32_F (RxBuf, tp->rx_ring_dma); /* init Tx buffer DMA addresses */ for (i = 0; i < NUM_TX_DESC; i++) RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs)); RTL_W32 (RxMissed, 0); rtl8139_set_rx_mode (dev); /* no early-rx interrupts */ RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear); /* make sure RxTx has started */ tmp = RTL_R8 (ChipCmd); if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb))) RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); /* Enable all known interrupts by setting the interrupt mask. */ RTL_W16 (IntrMask, rtl8139_intr_mask); } /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ static void rtl8139_init_ring (struct net_device *dev) { struct rtl8139_private *tp = netdev_priv(dev); int i; tp->cur_rx = 0; tp->cur_tx = 0; tp->dirty_tx = 0; for (i = 0; i < NUM_TX_DESC; i++) tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE]; } /* This must be global for CONFIG_8139TOO_TUNE_TWISTER case */ static int next_tick = 3 * HZ; #ifndef CONFIG_8139TOO_TUNE_TWISTER static inline void rtl8139_tune_twister (struct net_device *dev, struct rtl8139_private *tp) {} #else enum TwisterParamVals { PARA78_default = 0x78fa8388, PARA7c_default = 0xcb38de43, /* param[0][3] */ PARA7c_xxx = 0xcb38de43, }; static const unsigned long param[4][4] = { {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43}, {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83} }; static void rtl8139_tune_twister (struct net_device *dev, struct rtl8139_private *tp) { int linkcase; void __iomem *ioaddr = tp->mmio_addr; /* This is a complicated state machine to configure the "twister" for impedance/echoes based on the cable length. All of this is magic and undocumented. */ switch (tp->twistie) { case 1: if (RTL_R16 (CSCR) & CSCR_LinkOKBit) { /* We have link beat, let us tune the twister. */ RTL_W16 (CSCR, CSCR_LinkDownOffCmd); tp->twistie = 2; /* Change to state 2. */ next_tick = HZ / 10; } else { /* Just put in some reasonable defaults for when beat returns. */ RTL_W16 (CSCR, CSCR_LinkDownCmd); RTL_W32 (FIFOTMS, 0x20); /* Turn on cable test mode. */ RTL_W32 (PARA78, PARA78_default); RTL_W32 (PARA7c, PARA7c_default); tp->twistie = 0; /* Bail from future actions. */ } break; case 2: /* Read how long it took to hear the echo. */ linkcase = RTL_R16 (CSCR) & CSCR_LinkStatusBits; if (linkcase == 0x7000) tp->twist_row = 3; else if (linkcase == 0x3000) tp->twist_row = 2; else if (linkcase == 0x1000) tp->twist_row = 1; else tp->twist_row = 0; tp->twist_col = 0; tp->twistie = 3; /* Change to state 3. */ next_tick = HZ / 10; break; case 3: /* Put out four tuning parameters, one per 100msec. */ if (tp->twist_col == 0) RTL_W16 (FIFOTMS, 0); RTL_W32 (PARA7c, param[(int) tp->twist_row] [(int) tp->twist_col]); next_tick = HZ / 10; if (++tp->twist_col >= 4) { /* For short cables we are done. 
For long cables (row == 3) check for mistune. */ tp->twistie = (tp->twist_row == 3) ? 4 : 0; } break; case 4: /* Special case for long cables: check for mistune. */ if ((RTL_R16 (CSCR) & CSCR_LinkStatusBits) == 0x7000) { tp->twistie = 0; break; } else { RTL_W32 (PARA7c, 0xfb38de03); tp->twistie = 5; next_tick = HZ / 10; } break; case 5: /* Retune for shorter cable (column 2). */ RTL_W32 (FIFOTMS, 0x20); RTL_W32 (PARA78, PARA78_default); RTL_W32 (PARA7c, PARA7c_default); RTL_W32 (FIFOTMS, 0x00); tp->twist_row = 2; tp->twist_col = 0; tp->twistie = 3; next_tick = HZ / 10; break; default: /* do nothing */ break; } } #endif /* CONFIG_8139TOO_TUNE_TWISTER */ static inline void rtl8139_thread_iter (struct net_device *dev, struct rtl8139_private *tp, void __iomem *ioaddr) { int mii_lpa; mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA); if (!tp->mii.force_media && mii_lpa != 0xffff) { int duplex = (mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == 0x0040; if (tp->mii.full_duplex != duplex) { tp->mii.full_duplex = duplex; if (mii_lpa) { printk (KERN_INFO "%s: Setting %s-duplex based on MII #%d link" " partner ability of %4.4x.\n", dev->name, tp->mii.full_duplex ? "full" : "half", tp->phys[0], mii_lpa); } else { printk(KERN_INFO"%s: media is unconnected, link down, or incompatible connection\n", dev->name); } #if 0 RTL_W8 (Cfg9346, Cfg9346_Unlock); RTL_W8 (Config1, tp->mii.full_duplex ? 0x60 : 0x20); RTL_W8 (Cfg9346, Cfg9346_Lock); #endif } } next_tick = HZ * 60; rtl8139_tune_twister (dev, tp); DPRINTK ("%s: Media selection tick, Link partner %4.4x.\n", dev->name, RTL_R16 (NWayLPAR)); DPRINTK ("%s: Other registers are IntMask %4.4x IntStatus %4.4x\n", dev->name, RTL_R16 (IntrMask), RTL_R16 (IntrStatus)); DPRINTK ("%s: Chip config %2.2x %2.2x.\n", dev->name, RTL_R8 (Config0), RTL_R8 (Config1)); } static void rtl8139_thread (void *_data) { struct net_device *dev = _data; struct rtl8139_private *tp = netdev_priv(dev); unsigned long thr_delay = next_tick; if (tp->watchdog_fired) { tp->watchdog_fired = 0; rtl8139_tx_timeout_task(_data); } else if (rtnl_trylock()) { rtl8139_thread_iter (dev, tp, tp->mmio_addr); rtnl_unlock (); } else { /* unlikely race. mitigate with fast poll. */ thr_delay = HZ / 2; } schedule_delayed_work(&tp->thread, thr_delay); } static void rtl8139_start_thread(struct rtl8139_private *tp) { tp->twistie = 0; if (tp->chipset == CH_8139_K) tp->twistie = 1; else if (tp->drv_flags & HAS_LNK_CHNG) return; tp->have_thread = 1; schedule_delayed_work(&tp->thread, next_tick); } static void rtl8139_stop_thread(struct rtl8139_private *tp) { if (tp->have_thread) { cancel_rearming_delayed_work(&tp->thread); tp->have_thread = 0; } else flush_scheduled_work(); } static inline void rtl8139_tx_clear (struct rtl8139_private *tp) { tp->cur_tx = 0; tp->dirty_tx = 0; /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ } static void rtl8139_tx_timeout_task (void *_data) { struct net_device *dev = _data; struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; int i; u8 tmp8; printk (KERN_DEBUG "%s: Transmit timeout, status %2.2x %4.4x %4.4x " "media %2.2x.\n", dev->name, RTL_R8 (ChipCmd), RTL_R16(IntrStatus), RTL_R16(IntrMask), RTL_R8(MediaStatus)); /* Emit info to figure out what went wrong. */ printk (KERN_DEBUG "%s: Tx queue start entry %ld dirty entry %ld.\n", dev->name, tp->cur_tx, tp->dirty_tx); for (i = 0; i < NUM_TX_DESC; i++) printk (KERN_DEBUG "%s: Tx descriptor %d is %8.8lx.%s\n", dev->name, i, RTL_R32 (TxStatus0 + (i * 4)), i == tp->dirty_tx % NUM_TX_DESC ? 
" (queue head)" : ""); tp->xstats.tx_timeouts++; /* disable Tx ASAP, if not already */ tmp8 = RTL_R8 (ChipCmd); if (tmp8 & CmdTxEnb) RTL_W8 (ChipCmd, CmdRxEnb); spin_lock_bh(&tp->rx_lock); /* Disable interrupts by clearing the interrupt mask. */ RTL_W16 (IntrMask, 0x0000); /* Stop a shared interrupt from scavenging while we are. */ spin_lock_irq(&tp->lock); rtl8139_tx_clear (tp); spin_unlock_irq(&tp->lock); /* ...and finally, reset everything */ if (netif_running(dev)) { rtl8139_hw_start (dev); netif_wake_queue (dev); } spin_unlock_bh(&tp->rx_lock); } static void rtl8139_tx_timeout (struct net_device *dev) { struct rtl8139_private *tp = netdev_priv(dev); if (!tp->have_thread) { INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev); schedule_delayed_work(&tp->thread, next_tick); } else tp->watchdog_fired = 1; } static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev) { struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned int entry; unsigned int len = skb->len; unsigned long flags; /* Calculate the next Tx descriptor entry. */ entry = tp->cur_tx % NUM_TX_DESC; /* Note: the chip doesn't have auto-pad! */ if (likely(len < TX_BUF_SIZE)) { if (len < ETH_ZLEN) memset(tp->tx_buf[entry], 0, ETH_ZLEN); skb_copy_and_csum_dev(skb, tp->tx_buf[entry]); dev_kfree_skb(skb); } else { dev_kfree_skb(skb); tp->stats.tx_dropped++; return 0; } spin_lock_irqsave(&tp->lock, flags); RTL_W32_F (TxStatus0 + (entry * sizeof (u32)), tp->tx_flag | max(len, (unsigned int)ETH_ZLEN)); dev->trans_start = jiffies; tp->cur_tx++; wmb(); if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) netif_stop_queue (dev); spin_unlock_irqrestore(&tp->lock, flags); if (netif_msg_tx_queued(tp)) printk (KERN_DEBUG "%s: Queued Tx packet size %u to slot %d.\n", dev->name, len, entry); return 0; } static void rtl8139_tx_interrupt (struct net_device *dev, struct rtl8139_private *tp, void __iomem *ioaddr) { unsigned long dirty_tx, tx_left; assert (dev != NULL); assert (ioaddr != NULL); dirty_tx = tp->dirty_tx; tx_left = tp->cur_tx - dirty_tx; while (tx_left > 0) { int entry = dirty_tx % NUM_TX_DESC; int txstatus; txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32))); if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted))) break; /* It still hasn't been Txed */ /* Note: TxCarrierLost is always asserted at 100mbps. */ if (txstatus & (TxOutOfWindow | TxAborted)) { /* There was an major error, log it. */ if (netif_msg_tx_err(tp)) printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", dev->name, txstatus); tp->stats.tx_errors++; if (txstatus & TxAborted) { tp->stats.tx_aborted_errors++; RTL_W32 (TxConfig, TxClearAbt); RTL_W16 (IntrStatus, TxErr); wmb(); } if (txstatus & TxCarrierLost) tp->stats.tx_carrier_errors++; if (txstatus & TxOutOfWindow) tp->stats.tx_window_errors++; } else { if (txstatus & TxUnderrun) { /* Add 64 to the Tx FIFO threshold. */ if (tp->tx_flag < 0x00300000) tp->tx_flag += 0x00020000; tp->stats.tx_fifo_errors++; } tp->stats.collisions += (txstatus >> 24) & 15; tp->stats.tx_bytes += txstatus & 0x7ff; tp->stats.tx_packets++; } dirty_tx++; tx_left--; } #ifndef RTL8139_NDEBUG if (tp->cur_tx - dirty_tx > NUM_TX_DESC) { printk (KERN_ERR "%s: Out-of-sync dirty pointer, %ld vs. %ld.\n", dev->name, dirty_tx, tp->cur_tx); dirty_tx += NUM_TX_DESC; } #endif /* RTL8139_NDEBUG */ /* only wake the queue if we did work, and the queue is stopped */ if (tp->dirty_tx != dirty_tx) { tp->dirty_tx = dirty_tx; mb(); netif_wake_queue (dev); } } /* TODO: clean this up! 
Rx reset need not be this intensive */ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev, struct rtl8139_private *tp, void __iomem *ioaddr) { u8 tmp8; #ifdef CONFIG_8139_OLD_RX_RESET int tmp_work; #endif if (netif_msg_rx_err (tp)) printk(KERN_DEBUG "%s: Ethernet frame had errors, status %8.8x.\n", dev->name, rx_status); tp->stats.rx_errors++; if (!(rx_status & RxStatusOK)) { if (rx_status & RxTooLong) { DPRINTK ("%s: Oversized Ethernet frame, status %4.4x!\n", dev->name, rx_status); /* A.C.: The chip hangs here. */ } if (rx_status & (RxBadSymbol | RxBadAlign)) tp->stats.rx_frame_errors++; if (rx_status & (RxRunt | RxTooLong)) tp->stats.rx_length_errors++; if (rx_status & RxCRCErr) tp->stats.rx_crc_errors++; } else { tp->xstats.rx_lost_in_ring++; } #ifndef CONFIG_8139_OLD_RX_RESET tmp8 = RTL_R8 (ChipCmd); RTL_W8 (ChipCmd, tmp8 & ~CmdRxEnb); RTL_W8 (ChipCmd, tmp8); RTL_W32 (RxConfig, tp->rx_config); tp->cur_rx = 0; #else /* Reset the receiver, based on RealTek recommendation. (Bug?) */ /* disable receive */ RTL_W8_F (ChipCmd, CmdTxEnb); tmp_work = 200; while (--tmp_work > 0) { udelay(1); tmp8 = RTL_R8 (ChipCmd); if (!(tmp8 & CmdRxEnb)) break; } if (tmp_work <= 0) printk (KERN_WARNING PFX "rx stop wait too long\n"); /* restart receive */ tmp_work = 200; while (--tmp_work > 0) { RTL_W8_F (ChipCmd, CmdRxEnb | CmdTxEnb); udelay(1); tmp8 = RTL_R8 (ChipCmd); if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb)) break; } if (tmp_work <= 0) printk (KERN_WARNING PFX "tx/rx enable wait too long\n"); /* and reinitialize all rx related registers */ RTL_W8_F (Cfg9346, Cfg9346_Unlock); /* Must enable Tx/Rx before setting transfer thresholds! */ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys; RTL_W32 (RxConfig, tp->rx_config); tp->cur_rx = 0; DPRINTK("init buffer addresses\n"); /* Lock Config[01234] and BMCR register writes */ RTL_W8 (Cfg9346, Cfg9346_Lock); /* init Rx ring buffer DMA address */ RTL_W32_F (RxBuf, tp->rx_ring_dma); /* A.C.: Reset the multicast list. 
*/ __set_rx_mode (dev); #endif } #if RX_BUF_IDX == 3 static __inline__ void wrap_copy(struct sk_buff *skb, const unsigned char *ring, u32 offset, unsigned int size) { u32 left = RX_BUF_LEN - offset; if (size > left) { memcpy(skb->data, ring + offset, left); memcpy(skb->data+left, ring, size - left); } else memcpy(skb->data, ring + offset, size); } #endif static void rtl8139_isr_ack(struct rtl8139_private *tp) { void __iomem *ioaddr = tp->mmio_addr; u16 status; status = RTL_R16 (IntrStatus) & RxAckBits; /* Clear out errors and receive interrupts */ if (likely(status != 0)) { if (unlikely(status & (RxFIFOOver | RxOverflow))) { tp->stats.rx_errors++; if (status & RxFIFOOver) tp->stats.rx_fifo_errors++; } RTL_W16_F (IntrStatus, RxAckBits); } } static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp, int budget) { void __iomem *ioaddr = tp->mmio_addr; int received = 0; unsigned char *rx_ring = tp->rx_ring; unsigned int cur_rx = tp->cur_rx; unsigned int rx_size = 0; DPRINTK ("%s: In rtl8139_rx(), current %4.4x BufAddr %4.4x," " free to %4.4x, Cmd %2.2x.\n", dev->name, (u16)cur_rx, RTL_R16 (RxBufAddr), RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd)); while (netif_running(dev) && received < budget && (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) { u32 ring_offset = cur_rx % RX_BUF_LEN; u32 rx_status; unsigned int pkt_size; struct sk_buff *skb; rmb(); /* read size+status of next frame from DMA ring buffer */ rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset)); rx_size = rx_status >> 16; pkt_size = rx_size - 4; if (netif_msg_rx_status(tp)) printk(KERN_DEBUG "%s: rtl8139_rx() status %4.4x, size %4.4x," " cur %4.4x.\n", dev->name, rx_status, rx_size, cur_rx); #if RTL8139_DEBUG > 2 { int i; DPRINTK ("%s: Frame contents ", dev->name); for (i = 0; i < 70; i++) printk (" %2.2x", rx_ring[ring_offset + i]); printk (".\n"); } #endif /* Packet copy from FIFO still in progress. * Theoretically, this should never happen * since EarlyRx is disabled. */ if (unlikely(rx_size == 0xfff0)) { if (!tp->fifo_copy_timeout) tp->fifo_copy_timeout = jiffies + 2; else if (time_after(jiffies, tp->fifo_copy_timeout)) { DPRINTK ("%s: hung FIFO. Reset.", dev->name); rx_size = 0; goto no_early_rx; } if (netif_msg_intr(tp)) { printk(KERN_DEBUG "%s: fifo copy in progress.", dev->name); } tp->xstats.early_rx++; break; } no_early_rx: tp->fifo_copy_timeout = 0; /* If Rx err or invalid rx_size/rx_status received * (which happens if we get lost in the ring), * Rx process gets reset, so we abort any further * Rx processing. */ if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) || (rx_size < 8) || (!(rx_status & RxStatusOK)))) { rtl8139_rx_err (rx_status, dev, tp, ioaddr); received = -1; goto out; } /* Malloc up new buffer, compatible with net-2e. */ /* Omit the four octet CRC from the length. */ skb = dev_alloc_skb (pkt_size + 2); if (likely(skb)) { skb->dev = dev; skb_reserve (skb, 2); /* 16 byte align the IP fields. 
*/ #if RX_BUF_IDX == 3 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); #else eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); #endif skb_put (skb, pkt_size); skb->protocol = eth_type_trans (skb, dev); dev->last_rx = jiffies; tp->stats.rx_bytes += pkt_size; tp->stats.rx_packets++; netif_receive_skb (skb); } else { if (net_ratelimit()) printk (KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); tp->stats.rx_dropped++; } received++; cur_rx = (cur_rx + rx_size + 4 + 3) & ~3; RTL_W16 (RxBufPtr, (u16) (cur_rx - 16)); rtl8139_isr_ack(tp); } if (unlikely(!received || rx_size == 0xfff0)) rtl8139_isr_ack(tp); #if RTL8139_DEBUG > 1 DPRINTK ("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x," " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx, RTL_R16 (RxBufAddr), RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd)); #endif tp->cur_rx = cur_rx; /* * The receive buffer should be mostly empty. * Tell NAPI to reenable the Rx irq. */ if (tp->fifo_copy_timeout) received = budget; out: return received; } static void rtl8139_weird_interrupt (struct net_device *dev, struct rtl8139_private *tp, void __iomem *ioaddr, int status, int link_changed) { DPRINTK ("%s: Abnormal interrupt, status %8.8x.\n", dev->name, status); assert (dev != NULL); assert (tp != NULL); assert (ioaddr != NULL); /* Update the error count. */ tp->stats.rx_missed_errors += RTL_R32 (RxMissed); RTL_W32 (RxMissed, 0); if ((status & RxUnderrun) && link_changed && (tp->drv_flags & HAS_LNK_CHNG)) { rtl_check_media(dev, 0); status &= ~RxUnderrun; } if (status & (RxUnderrun | RxErr)) tp->stats.rx_errors++; if (status & PCSTimeout) tp->stats.rx_length_errors++; if (status & RxUnderrun) tp->stats.rx_fifo_errors++; if (status & PCIErr) { u16 pci_cmd_status; pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status); pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status); printk (KERN_ERR "%s: PCI Bus error %4.4x.\n", dev->name, pci_cmd_status); } } static int rtl8139_poll(struct net_device *dev, int *budget) { struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; int orig_budget = min(*budget, dev->quota); int done = 1; spin_lock(&tp->rx_lock); if (likely(RTL_R16(IntrStatus) & RxAckBits)) { int work_done; work_done = rtl8139_rx(dev, tp, orig_budget); if (likely(work_done > 0)) { *budget -= work_done; dev->quota -= work_done; done = (work_done < orig_budget); } } if (done) { /* * Order is important since data can get interrupted * again when we think we are done. */ local_irq_disable(); RTL_W16_F(IntrMask, rtl8139_intr_mask); __netif_rx_complete(dev); local_irq_enable(); } spin_unlock(&tp->rx_lock); return !done; } /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) { struct net_device *dev = (struct net_device *) dev_instance; struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u16 status, ackstat; int link_changed = 0; /* avoid bogus "uninit" warning */ int handled = 0; spin_lock (&tp->lock); status = RTL_R16 (IntrStatus); /* shared irq? */ if (unlikely((status & rtl8139_intr_mask) == 0)) goto out; handled = 1; /* h/w no longer present (hotplug?) or major error, bail */ if (unlikely(status == 0xFFFF)) goto out; /* close possible races with dev_close */ if (unlikely(!netif_running(dev))) { RTL_W16 (IntrMask, 0); goto out; } /* Acknowledge all of the current interrupt sources ASAP, but first get an additional status bit from CSCR. 
*/ if (unlikely(status & RxUnderrun)) link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit; ackstat = status & ~(RxAckBits | TxErr); if (ackstat) RTL_W16 (IntrStatus, ackstat); /* Receive packets are processed by poll routine. If not running start it now. */ if (status & RxAckBits){ if (netif_rx_schedule_prep(dev)) { RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); __netif_rx_schedule (dev); } } /* Check uncommon events with one test. */ if (unlikely(status & (PCIErr | PCSTimeout | RxUnderrun | RxErr))) rtl8139_weird_interrupt (dev, tp, ioaddr, status, link_changed); if (status & (TxOK | TxErr)) { rtl8139_tx_interrupt (dev, tp, ioaddr); if (status & TxErr) RTL_W16 (IntrStatus, TxErr); } out: spin_unlock (&tp->lock); DPRINTK ("%s: exiting interrupt, intr_status=%#4.4x.\n", dev->name, RTL_R16 (IntrStatus)); return IRQ_RETVAL(handled); } #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling receive - used by netconsole and other diagnostic tools * to allow network i/o with interrupts disabled. */ static void rtl8139_poll_controller(struct net_device *dev) { disable_irq(dev->irq); rtl8139_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif static int rtl8139_close (struct net_device *dev) { struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; netif_stop_queue (dev); rtl8139_stop_thread(tp); if (netif_msg_ifdown(tp)) printk(KERN_DEBUG "%s: Shutting down ethercard, status was 0x%4.4x.\n", dev->name, RTL_R16 (IntrStatus)); spin_lock_irqsave (&tp->lock, flags); /* Stop the chip's Tx and Rx DMA processes. */ RTL_W8 (ChipCmd, 0); /* Disable interrupts by clearing the interrupt mask. */ RTL_W16 (IntrMask, 0); /* Update the error counts. */ tp->stats.rx_missed_errors += RTL_R32 (RxMissed); RTL_W32 (RxMissed, 0); spin_unlock_irqrestore (&tp->lock, flags); synchronize_irq (dev->irq); /* racy, but that's ok here */ free_irq (dev->irq, dev); rtl8139_tx_clear (tp); pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, tp->rx_ring, tp->rx_ring_dma); pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN, tp->tx_bufs, tp->tx_bufs_dma); tp->rx_ring = NULL; tp->tx_bufs = NULL; /* Green! Put the chip in low-power mode. */ RTL_W8 (Cfg9346, Cfg9346_Unlock); if (rtl_chip_info[tp->chipset].flags & HasHltClk) RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */ return 0; } /* Get the ethtool Wake-on-LAN settings. Assumes that wol points to kernel memory, *wol has been initialized as {ETHTOOL_GWOL}, and other threads or interrupts aren't messing with the 8139. */ static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8139_private *np = netdev_priv(dev); void __iomem *ioaddr = np->mmio_addr; spin_lock_irq(&np->lock); if (rtl_chip_info[np->chipset].flags & HasLWake) { u8 cfg3 = RTL_R8 (Config3); u8 cfg5 = RTL_R8 (Config5); wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; wol->wolopts = 0; if (cfg3 & Cfg3_LinkUp) wol->wolopts |= WAKE_PHY; if (cfg3 & Cfg3_Magic) wol->wolopts |= WAKE_MAGIC; /* (KON)FIXME: See how netdev_set_wol() handles the following constants. */ if (cfg5 & Cfg5_UWF) wol->wolopts |= WAKE_UCAST; if (cfg5 & Cfg5_MWF) wol->wolopts |= WAKE_MCAST; if (cfg5 & Cfg5_BWF) wol->wolopts |= WAKE_BCAST; } spin_unlock_irq(&np->lock); } /* Set the ethtool Wake-on-LAN settings. Return 0 or -errno. Assumes that wol points to kernel memory and other threads or interrupts aren't messing with the 8139. 
*/ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8139_private *np = netdev_priv(dev); void __iomem *ioaddr = np->mmio_addr; u32 support; u8 cfg3, cfg5; support = ((rtl_chip_info[np->chipset].flags & HasLWake) ? (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST) : 0); if (wol->wolopts & ~support) return -EINVAL; spin_lock_irq(&np->lock); cfg3 = RTL_R8 (Config3) & ~(Cfg3_LinkUp | Cfg3_Magic); if (wol->wolopts & WAKE_PHY) cfg3 |= Cfg3_LinkUp; if (wol->wolopts & WAKE_MAGIC) cfg3 |= Cfg3_Magic; RTL_W8 (Cfg9346, Cfg9346_Unlock); RTL_W8 (Config3, cfg3); RTL_W8 (Cfg9346, Cfg9346_Lock); cfg5 = RTL_R8 (Config5) & ~(Cfg5_UWF | Cfg5_MWF | Cfg5_BWF); /* (KON)FIXME: These are untested. We may have to set the CRC0, Wakeup0 and LSBCRC0 registers too, but I have no documentation. */ if (wol->wolopts & WAKE_UCAST) cfg5 |= Cfg5_UWF; if (wol->wolopts & WAKE_MCAST) cfg5 |= Cfg5_MWF; if (wol->wolopts & WAKE_BCAST) cfg5 |= Cfg5_BWF; RTL_W8 (Config5, cfg5); /* need not unlock via Cfg9346 */ spin_unlock_irq(&np->lock); return 0; } static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct rtl8139_private *np = netdev_priv(dev); strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); strcpy(info->bus_info, pci_name(np->pci_dev)); info->regdump_len = np->regs_len; } static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct rtl8139_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); mii_ethtool_gset(&np->mii, cmd); spin_unlock_irq(&np->lock); return 0; } static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct rtl8139_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); rc = mii_ethtool_sset(&np->mii, cmd); spin_unlock_irq(&np->lock); return rc; } static int rtl8139_nway_reset(struct net_device *dev) { struct rtl8139_private *np = netdev_priv(dev); return mii_nway_restart(&np->mii); } static u32 rtl8139_get_link(struct net_device *dev) { struct rtl8139_private *np = netdev_priv(dev); return mii_link_ok(&np->mii); } static u32 rtl8139_get_msglevel(struct net_device *dev) { struct rtl8139_private *np = netdev_priv(dev); return np->msg_enable; } static void rtl8139_set_msglevel(struct net_device *dev, u32 datum) { struct rtl8139_private *np = netdev_priv(dev); np->msg_enable = datum; } /* TODO: we are too slack to do reg dumping for pio, for now */ #ifdef CONFIG_8139TOO_PIO #define rtl8139_get_regs_len NULL #define rtl8139_get_regs NULL #else static int rtl8139_get_regs_len(struct net_device *dev) { struct rtl8139_private *np = netdev_priv(dev); return np->regs_len; } static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) { struct rtl8139_private *np = netdev_priv(dev); regs->version = RTL_REGS_VER; spin_lock_irq(&np->lock); memcpy_fromio(regbuf, np->mmio_addr, regs->len); spin_unlock_irq(&np->lock); } #endif /* CONFIG_8139TOO_PIO */ static int rtl8139_get_stats_count(struct net_device *dev) { return RTL_NUM_STATS; } static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct rtl8139_private *np = netdev_priv(dev); data[0] = np->xstats.early_rx; data[1] = np->xstats.tx_buf_mapped; data[2] = np->xstats.tx_timeouts; data[3] = np->xstats.rx_lost_in_ring; } static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data) { memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); } static const struct ethtool_ops 
rtl8139_ethtool_ops = { .get_drvinfo = rtl8139_get_drvinfo, .get_settings = rtl8139_get_settings, .set_settings = rtl8139_set_settings, .get_regs_len = rtl8139_get_regs_len, .get_regs = rtl8139_get_regs, .nway_reset = rtl8139_nway_reset, .get_link = rtl8139_get_link, .get_msglevel = rtl8139_get_msglevel, .set_msglevel = rtl8139_set_msglevel, .get_wol = rtl8139_get_wol, .set_wol = rtl8139_set_wol, .get_strings = rtl8139_get_strings, .get_stats_count = rtl8139_get_stats_count, .get_ethtool_stats = rtl8139_get_ethtool_stats, .get_perm_addr = ethtool_op_get_perm_addr, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct rtl8139_private *np = netdev_priv(dev); int rc; if (!netif_running(dev)) return -EINVAL; spin_lock_irq(&np->lock); rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL); spin_unlock_irq(&np->lock); return rc; } static struct net_device_stats *rtl8139_get_stats (struct net_device *dev) { struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; if (netif_running(dev)) { spin_lock_irqsave (&tp->lock, flags); tp->stats.rx_missed_errors += RTL_R32 (RxMissed); RTL_W32 (RxMissed, 0); spin_unlock_irqrestore (&tp->lock, flags); } return &tp->stats; } /* Set or clear the multicast filter for this adaptor. This routine is not state sensitive and need not be SMP locked. */ static void __set_rx_mode (struct net_device *dev) { struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u32 mc_filter[2]; /* Multicast hash filter */ int i, rx_mode; u32 tmp; DPRINTK ("%s: rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n", dev->name, dev->flags, RTL_R32 (RxConfig)); /* Note: do not reorder, GCC is clever about common statements. */ if (dev->flags & IFF_PROMISC) { rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else if ((dev->mc_count > multicast_filter_limit) || (dev->flags & IFF_ALLMULTI)) { /* Too many to filter perfectly -- accept all multicasts. */ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { struct dev_mc_list *mclist; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) { int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; } } /* We can safely update without stopping the chip. */ tmp = rtl8139_rx_config | rx_mode; if (tp->rx_config != tmp) { RTL_W32_F (RxConfig, tmp); tp->rx_config = tmp; } RTL_W32_F (MAR0 + 0, mc_filter[0]); RTL_W32_F (MAR0 + 4, mc_filter[1]); } static void rtl8139_set_rx_mode (struct net_device *dev) { unsigned long flags; struct rtl8139_private *tp = netdev_priv(dev); spin_lock_irqsave (&tp->lock, flags); __set_rx_mode(dev); spin_unlock_irqrestore (&tp->lock, flags); } #ifdef CONFIG_PM static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata (pdev); struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; pci_save_state (pdev); if (!netif_running (dev)) return 0; netif_device_detach (dev); spin_lock_irqsave (&tp->lock, flags); /* Disable interrupts, stop Tx and Rx. */ RTL_W16 (IntrMask, 0); RTL_W8 (ChipCmd, 0); /* Update the error counts. 
*/ tp->stats.rx_missed_errors += RTL_R32 (RxMissed); RTL_W32 (RxMissed, 0); spin_unlock_irqrestore (&tp->lock, flags); pci_set_power_state (pdev, PCI_D3hot); return 0; } static int rtl8139_resume (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); pci_restore_state (pdev); if (!netif_running (dev)) return 0; pci_set_power_state (pdev, PCI_D0); rtl8139_init_ring (dev); rtl8139_hw_start (dev); netif_device_attach (dev); return 0; } #endif /* CONFIG_PM */ static struct pci_driver rtl8139_pci_driver = { .name = DRV_NAME, .id_table = rtl8139_pci_tbl, .probe = rtl8139_init_one, .remove = __devexit_p(rtl8139_remove_one), #ifdef CONFIG_PM .suspend = rtl8139_suspend, .resume = rtl8139_resume, #endif /* CONFIG_PM */ }; static int __init rtl8139_init_module (void) { /* when we're a module, we always print a version message, * even if no 8139 board is found. */ #ifdef MODULE printk (KERN_INFO RTL8139_DRIVER_NAME "\n"); #endif return pci_register_driver(&rtl8139_pci_driver); } static void __exit rtl8139_cleanup_module (void) { pci_unregister_driver (&rtl8139_pci_driver); } module_init(rtl8139_init_module); module_exit(rtl8139_cleanup_module);
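/*
 * Editor's illustrative sketch -- not part of 8139too.c: the multicast
 * path in __set_rx_mode() above hashes each multicast MAC with
 * ether_crc() and uses the top 6 bits of the CRC to select one of the
 * 64 bits in the two 32-bit MAR filter words (MAR0+0 / MAR0+4).  The
 * stand-alone program below mirrors that computation so it can be run
 * in user space; ether_crc_sketch() and the sample address are
 * assumptions made for this example, not driver symbols.
 */
#include <stdio.h>
#include <stdint.h>

/* CRC-32 (poly 0x04c11db7) with an MSB-first shift register, feeding
 * each address octet LSB first -- intended to mirror the kernel's
 * ether_crc() behaviour. */
static uint32_t ether_crc_sketch(int length, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (--length >= 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ octet) & 1) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	/* hypothetical multicast address, used only for illustration */
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	int bit_nr = ether_crc_sketch(6, mac) >> 26;	/* top 6 CRC bits */

	/* bit_nr >> 5 picks MAR word 0 or 1, bit_nr & 31 picks the bit,
	 * exactly as mc_filter[] is filled in __set_rx_mode(); the chip
	 * offers 64 hash bits because MAR0..MAR7 form one 64-bit filter. */
	printf("MAR word %d, bit %d\n", bit_nr >> 5, bit_nr & 31);
	return 0;
}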
milaq/linux-hpc
drivers/net/8139too.c
C
gpl-2.0
70963
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Generic socket support routines. Memory allocators, socket lock/release * handler for protocols to use and generic option handler. * * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Florian La Roche, <flla@stud.uni-sb.de> * Alan Cox, <A.Cox@swansea.ac.uk> * * Fixes: * Alan Cox : Numerous verify_area() problems * Alan Cox : Connecting on a connecting socket * now returns an error for tcp. * Alan Cox : sock->protocol is set correctly. * and is not sometimes left as 0. * Alan Cox : connect handles icmp errors on a * connect properly. Unfortunately there * is a restart syscall nasty there. I * can't match BSD without hacking the C * library. Ideas urgently sought! * Alan Cox : Disallow bind() to addresses that are * not ours - especially broadcast ones!! * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost) * Alan Cox : sock_wfree/sock_rfree don't destroy sockets, * instead they leave that for the DESTROY timer. * Alan Cox : Clean up error flag in accept * Alan Cox : TCP ack handling is buggy, the DESTROY timer * was buggy. Put a remove_sock() in the handler * for memory when we hit 0. Also altered the timer * code. The ACK stuff can wait and needs major * TCP layer surgery. * Alan Cox : Fixed TCP ack bug, removed remove sock * and fixed timer/inet_bh race. * Alan Cox : Added zapped flag for TCP * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing. * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so... * Rick Sladkey : Relaxed UDP rules for matching packets. * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support * Pauline Middelink : identd support * Alan Cox : Fixed connect() taking signals I think. * Alan Cox : SO_LINGER supported * Alan Cox : Error reporting fixes * Anonymous : inet_create tidied up (sk->reuse setting) * Alan Cox : inet sockets don't set sk->type! * Alan Cox : Split socket option code * Alan Cox : Callbacks * Alan Cox : Nagle flag for Charles & Johannes stuff * Alex : Removed restriction on inet fioctl * Alan Cox : Splitting INET from NET core * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt() * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code * Alan Cox : Split IP from generic code * Alan Cox : New kfree_skbmem() * Alan Cox : Make SO_DEBUG superuser only. * Alan Cox : Allow anyone to clear SO_DEBUG * (compatibility fix) * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput. * Alan Cox : Allocator for a socket is settable. * Alan Cox : SO_ERROR includes soft errors. * Alan Cox : Allow NULL arguments on some SO_ opts * Alan Cox : Generic socket allocation to make hooks * easier (suggested by Craig Metz). * Michael Pall : SO_ERROR returns positive errno again * Steve Whitehouse: Added default destructor to free * protocol private data. * Steve Whitehouse: Added various other default routines * common to several socket families. * Chris Evans : Call suser() check last on F_SETOWN * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER. 
* Andi Kleen : Add sock_kmalloc()/sock_kfree_s() * Andi Kleen : Fix write_space callback * Chris Evans : Security fixes - signedness again * Arnaldo C. Melo : cleanups, use skb_queue_purge * * To Fix: * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/capability.h> #include <linux/errno.h> #include <linux/errqueue.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/tcp.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/user_namespace.h> #include <linux/static_key.h> #include <linux/memcontrol.h> #include <linux/prefetch.h> #include <asm/uaccess.h> #include <linux/netdevice.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/net_namespace.h> #include <net/request_sock.h> #include <net/sock.h> #include <linux/net_tstamp.h> #include <net/xfrm.h> #include <linux/ipsec.h> #include <net/cls_cgroup.h> #include <net/netprio_cgroup.h> #include <linux/sock_diag.h> #include <linux/filter.h> #include <net/sock_reuseport.h> #include <trace/events/sock.h> #ifdef CONFIG_INET #include <net/tcp.h> #endif #include <net/busy_poll.h> static DEFINE_MUTEX(proto_list_mutex); static LIST_HEAD(proto_list); /** * sk_ns_capable - General socket capability test * @sk: Socket to use a capability on or through * @user_ns: The user namespace of the capability to use * @cap: The capability to use * * Test to see if the opener of the socket had when the socket was * created and the current process has the capability @cap in the user * namespace @user_ns. */ bool sk_ns_capable(const struct sock *sk, struct user_namespace *user_ns, int cap) { return file_ns_capable(sk->sk_socket->file, user_ns, cap) && ns_capable(user_ns, cap); } EXPORT_SYMBOL(sk_ns_capable); /** * sk_capable - Socket global capability test * @sk: Socket to use a capability on or through * @cap: The global capability to use * * Test to see if the opener of the socket had when the socket was * created and the current process has the capability @cap in all user * namespaces. */ bool sk_capable(const struct sock *sk, int cap) { return sk_ns_capable(sk, &init_user_ns, cap); } EXPORT_SYMBOL(sk_capable); /** * sk_net_capable - Network namespace socket capability test * @sk: Socket to use a capability on or through * @cap: The capability to use * * Test to see if the opener of the socket had when the socket was created * and the current process has the capability @cap over the network namespace * the socket is a member of. */ bool sk_net_capable(const struct sock *sk, int cap) { return sk_ns_capable(sk, sock_net(sk)->user_ns, cap); } EXPORT_SYMBOL(sk_net_capable); /* * Each address family might have different locking rules, so we have * one slock key per address family: */ static struct lock_class_key af_family_keys[AF_MAX]; static struct lock_class_key af_family_slock_keys[AF_MAX]; /* * Make lock validator output more readable. 
(we pre-construct these * strings build-time, so that runtime initialization of socket * locks is fast): */ static const char *const af_family_key_strings[AF_MAX+1] = { "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK", "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" , "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" , "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" , "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" , "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" , "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" , "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" , "sk_lock-AF_MAX" }; static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK", "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" , "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" , "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" , "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" , "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" , "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" , "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , "slock-27" , "slock-28" , "slock-AF_CAN" , "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" , "slock-AF_MAX" }; static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" , "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" , "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" , "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" , "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" , "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , "clock-27" , "clock-28" , "clock-AF_CAN" , "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" , "clock-AF_MAX" }; /* * sk_callback_lock locking rules are per-address-family, * so split the lock classes by using a per-AF key: */ static struct lock_class_key af_callback_keys[AF_MAX]; /* Take into consideration the size of the struct sk_buff overhead in the * determination of these values, since that is non-constant across * platforms. This makes socket queueing behavior and performance * not depend upon such differences. */ #define _SK_MEM_PACKETS 256 #define _SK_MEM_OVERHEAD SKB_TRUESIZE(256) #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) /* Run time adjustable parameters. 
*/ __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX; EXPORT_SYMBOL(sysctl_wmem_max); __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX; EXPORT_SYMBOL(sysctl_rmem_max); __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; /* Maximal space eaten by iovec or ancillary data plus some space */ int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); EXPORT_SYMBOL(sysctl_optmem_max); int sysctl_tstamp_allow_data __read_mostly = 1; struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE; EXPORT_SYMBOL_GPL(memalloc_socks); /** * sk_set_memalloc - sets %SOCK_MEMALLOC * @sk: socket to set it on * * Set %SOCK_MEMALLOC on a socket for access to emergency reserves. * It's the responsibility of the admin to adjust min_free_kbytes * to meet the requirements */ void sk_set_memalloc(struct sock *sk) { sock_set_flag(sk, SOCK_MEMALLOC); sk->sk_allocation |= __GFP_MEMALLOC; static_key_slow_inc(&memalloc_socks); } EXPORT_SYMBOL_GPL(sk_set_memalloc); void sk_clear_memalloc(struct sock *sk) { sock_reset_flag(sk, SOCK_MEMALLOC); sk->sk_allocation &= ~__GFP_MEMALLOC; static_key_slow_dec(&memalloc_socks); /* * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward * progress of swapping. SOCK_MEMALLOC may be cleared while * it has rmem allocations due to the last swapfile being deactivated * but there is a risk that the socket is unusable due to exceeding * the rmem limits. Reclaim the reserves and obey rmem limits again. */ sk_mem_reclaim(sk); } EXPORT_SYMBOL_GPL(sk_clear_memalloc); int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) { int ret; unsigned long pflags = current->flags; /* these should have been dropped before queueing */ BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); current->flags |= PF_MEMALLOC; ret = sk->sk_backlog_rcv(sk, skb); tsk_restore_flags(current, pflags, PF_MEMALLOC); return ret; } EXPORT_SYMBOL(__sk_backlog_rcv); static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) { struct timeval tv; if (optlen < sizeof(tv)) return -EINVAL; if (copy_from_user(&tv, optval, sizeof(tv))) return -EFAULT; if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC) return -EDOM; if (tv.tv_sec < 0) { static int warned __read_mostly; *timeo_p = 0; if (warned < 10 && net_ratelimit()) { warned++; pr_info("%s: `%s' (pid %d) tries to set negative timeout\n", __func__, current->comm, task_pid_nr(current)); } return 0; } *timeo_p = MAX_SCHEDULE_TIMEOUT; if (tv.tv_sec == 0 && tv.tv_usec == 0) return 0; if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1)) *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ); return 0; } static void sock_warn_obsolete_bsdism(const char *name) { static int warned; static char warncomm[TASK_COMM_LEN]; if (strcmp(warncomm, current->comm) && warned < 5) { strcpy(warncomm, current->comm); pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n", warncomm, name); warned++; } } static bool sock_needs_netstamp(const struct sock *sk) { switch (sk->sk_family) { case AF_UNSPEC: case AF_UNIX: return false; default: return true; } } static void sock_disable_timestamp(struct sock *sk, unsigned long flags) { if (sk->sk_flags & flags) { sk->sk_flags &= ~flags; if (sock_needs_netstamp(sk) && !(sk->sk_flags & SK_FLAGS_TIMESTAMP)) net_disable_timestamp(); } } int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { unsigned long flags; struct sk_buff_head *list = &sk->sk_receive_queue; if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { atomic_inc(&sk->sk_drops); 
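/* receive queue already at or over sk_rcvbuf: the drop was counted above, so trace the event and reject the skb */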
trace_sock_rcvqueue_full(sk, skb); return -ENOMEM; } if (!sk_rmem_schedule(sk, skb, skb->truesize)) { atomic_inc(&sk->sk_drops); return -ENOBUFS; } skb->dev = NULL; skb_set_owner_r(skb, sk); /* we escape from rcu protected region, make sure we dont leak * a norefcounted dst */ skb_dst_force(skb); spin_lock_irqsave(&list->lock, flags); sock_skb_set_dropcount(sk, skb); __skb_queue_tail(list, skb); spin_unlock_irqrestore(&list->lock, flags); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); return 0; } EXPORT_SYMBOL(__sock_queue_rcv_skb); int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err; err = sk_filter(sk, skb); if (err) return err; return __sock_queue_rcv_skb(sk, skb); } EXPORT_SYMBOL(sock_queue_rcv_skb); int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, unsigned int trim_cap, bool refcounted) { int rc = NET_RX_SUCCESS; if (sk_filter_trim_cap(sk, skb, trim_cap)) goto discard_and_relse; skb->dev = NULL; if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { atomic_inc(&sk->sk_drops); goto discard_and_relse; } if (nested) bh_lock_sock_nested(sk); else bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { /* * trylock + unlock semantics: */ mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); rc = sk_backlog_rcv(sk, skb); mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { bh_unlock_sock(sk); atomic_inc(&sk->sk_drops); goto discard_and_relse; } bh_unlock_sock(sk); out: if (refcounted) sock_put(sk); return rc; discard_and_relse: kfree_skb(skb); goto out; } EXPORT_SYMBOL(__sk_receive_skb); struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) { struct dst_entry *dst = __sk_dst_get(sk); if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { sk_tx_queue_clear(sk); RCU_INIT_POINTER(sk->sk_dst_cache, NULL); dst_release(dst); return NULL; } return dst; } EXPORT_SYMBOL(__sk_dst_check); struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) { struct dst_entry *dst = sk_dst_get(sk); if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { sk_dst_reset(sk); dst_release(dst); return NULL; } return dst; } EXPORT_SYMBOL(sk_dst_check); static int sock_setbindtodevice(struct sock *sk, char __user *optval, int optlen) { int ret = -ENOPROTOOPT; #ifdef CONFIG_NETDEVICES struct net *net = sock_net(sk); char devname[IFNAMSIZ]; int index; /* Sorry... */ ret = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_RAW)) goto out; ret = -EINVAL; if (optlen < 0) goto out; /* Bind this socket to a particular device like "eth0", * as specified in the passed interface name. If the * name is "" or the option length is zero the socket * is not bound. 
*/ if (optlen > IFNAMSIZ - 1) optlen = IFNAMSIZ - 1; memset(devname, 0, sizeof(devname)); ret = -EFAULT; if (copy_from_user(devname, optval, optlen)) goto out; index = 0; if (devname[0] != '\0') { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_name_rcu(net, devname); if (dev) index = dev->ifindex; rcu_read_unlock(); ret = -ENODEV; if (!dev) goto out; } lock_sock(sk); sk->sk_bound_dev_if = index; sk_dst_reset(sk); release_sock(sk); ret = 0; out: #endif return ret; } static int sock_getbindtodevice(struct sock *sk, char __user *optval, int __user *optlen, int len) { int ret = -ENOPROTOOPT; #ifdef CONFIG_NETDEVICES struct net *net = sock_net(sk); char devname[IFNAMSIZ]; if (sk->sk_bound_dev_if == 0) { len = 0; goto zero; } ret = -EINVAL; if (len < IFNAMSIZ) goto out; ret = netdev_get_name(net, devname, sk->sk_bound_dev_if); if (ret) goto out; len = strlen(devname) + 1; ret = -EFAULT; if (copy_to_user(optval, devname, len)) goto out; zero: ret = -EFAULT; if (put_user(len, optlen)) goto out; ret = 0; out: #endif return ret; } static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) { if (valbool) sock_set_flag(sk, bit); else sock_reset_flag(sk, bit); } bool sk_mc_loop(struct sock *sk) { if (dev_recursion_level()) return false; if (!sk) return true; switch (sk->sk_family) { case AF_INET: return inet_sk(sk)->mc_loop; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: return inet6_sk(sk)->mc_loop; #endif } WARN_ON(1); return true; } EXPORT_SYMBOL(sk_mc_loop); /* * This is meant for all protocols to use and covers goings on * at the socket level. Everything here is generic. */ int sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; int val; int valbool; struct linger ling; int ret = 0; /* * Options without arguments */ if (optname == SO_BINDTODEVICE) return sock_setbindtodevice(sk, optval, optlen); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; valbool = val ? 1 : 0; lock_sock(sk); switch (optname) { case SO_DEBUG: if (val && !capable(CAP_NET_ADMIN)) ret = -EACCES; else sock_valbool_flag(sk, SOCK_DBG, valbool); break; case SO_REUSEADDR: sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); break; case SO_REUSEPORT: sk->sk_reuseport = valbool; break; case SO_TYPE: case SO_PROTOCOL: case SO_DOMAIN: case SO_ERROR: ret = -ENOPROTOOPT; break; case SO_DONTROUTE: sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); break; case SO_BROADCAST: sock_valbool_flag(sk, SOCK_BROADCAST, valbool); break; case SO_SNDBUF: /* Don't error on this BSD doesn't and if you think * about it this is right. Otherwise apps have to * play 'guess the biggest size' games. RCVBUF/SNDBUF * are treated in BSD as hints */ val = min_t(u32, val, sysctl_wmem_max); set_sndbuf: sk->sk_userlocks |= SOCK_SNDBUF_LOCK; sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); /* Wake up sending tasks if we upped the value. */ sk->sk_write_space(sk); break; case SO_SNDBUFFORCE: if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } goto set_sndbuf; case SO_RCVBUF: /* Don't error on this BSD doesn't and if you think * about it this is right. Otherwise apps have to * play 'guess the biggest size' games. RCVBUF/SNDBUF * are treated in BSD as hints */ val = min_t(u32, val, sysctl_rmem_max); set_rcvbuf: sk->sk_userlocks |= SOCK_RCVBUF_LOCK; /* * We double it on the way in to account for * "struct sk_buff" etc. overhead. 
Applications * assume that the SO_RCVBUF setting they make will * allow that much actual data to be received on that * socket. * * Applications are unaware that "struct sk_buff" and * other overheads allocate from the receive buffer * during socket buffer allocation. * * And after considering the possible alternatives, * returning the value we actually used in getsockopt * is the most desirable behavior. */ sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); break; case SO_RCVBUFFORCE: if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } goto set_rcvbuf; case SO_KEEPALIVE: #ifdef CONFIG_INET if (sk->sk_protocol == IPPROTO_TCP && sk->sk_type == SOCK_STREAM) tcp_set_keepalive(sk, valbool); #endif sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); break; case SO_OOBINLINE: sock_valbool_flag(sk, SOCK_URGINLINE, valbool); break; case SO_NO_CHECK: sk->sk_no_check_tx = valbool; break; case SO_PRIORITY: if ((val >= 0 && val <= 6) || ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) sk->sk_priority = val; else ret = -EPERM; break; case SO_LINGER: if (optlen < sizeof(ling)) { ret = -EINVAL; /* 1003.1g */ break; } if (copy_from_user(&ling, optval, sizeof(ling))) { ret = -EFAULT; break; } if (!ling.l_onoff) sock_reset_flag(sk, SOCK_LINGER); else { #if (BITS_PER_LONG == 32) if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ) sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; else #endif sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; sock_set_flag(sk, SOCK_LINGER); } break; case SO_BSDCOMPAT: sock_warn_obsolete_bsdism("setsockopt"); break; case SO_PASSCRED: if (valbool) set_bit(SOCK_PASSCRED, &sock->flags); else clear_bit(SOCK_PASSCRED, &sock->flags); break; case SO_TIMESTAMP: case SO_TIMESTAMPNS: if (valbool) { if (optname == SO_TIMESTAMP) sock_reset_flag(sk, SOCK_RCVTSTAMPNS); else sock_set_flag(sk, SOCK_RCVTSTAMPNS); sock_set_flag(sk, SOCK_RCVTSTAMP); sock_enable_timestamp(sk, SOCK_TIMESTAMP); } else { sock_reset_flag(sk, SOCK_RCVTSTAMP); sock_reset_flag(sk, SOCK_RCVTSTAMPNS); } break; case SO_TIMESTAMPING: if (val & ~SOF_TIMESTAMPING_MASK) { ret = -EINVAL; break; } if (val & SOF_TIMESTAMPING_OPT_ID && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { if (sk->sk_protocol == IPPROTO_TCP && sk->sk_type == SOCK_STREAM) { if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) { ret = -EINVAL; break; } sk->sk_tskey = tcp_sk(sk)->snd_una; } else { sk->sk_tskey = 0; } } if (val & SOF_TIMESTAMPING_OPT_STATS && !(val & SOF_TIMESTAMPING_OPT_TSONLY)) { ret = -EINVAL; break; } sk->sk_tsflags = val; if (val & SOF_TIMESTAMPING_RX_SOFTWARE) sock_enable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE); else sock_disable_timestamp(sk, (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)); break; case SO_RCVLOWAT: if (val < 0) val = INT_MAX; sk->sk_rcvlowat = val ? 
: 1; break; case SO_RCVTIMEO: ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen); break; case SO_SNDTIMEO: ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen); break; case SO_ATTACH_FILTER: ret = -EINVAL; if (optlen == sizeof(struct sock_fprog)) { struct sock_fprog fprog; ret = -EFAULT; if (copy_from_user(&fprog, optval, sizeof(fprog))) break; ret = sk_attach_filter(&fprog, sk); } break; case SO_ATTACH_BPF: ret = -EINVAL; if (optlen == sizeof(u32)) { u32 ufd; ret = -EFAULT; if (copy_from_user(&ufd, optval, sizeof(ufd))) break; ret = sk_attach_bpf(ufd, sk); } break; case SO_ATTACH_REUSEPORT_CBPF: ret = -EINVAL; if (optlen == sizeof(struct sock_fprog)) { struct sock_fprog fprog; ret = -EFAULT; if (copy_from_user(&fprog, optval, sizeof(fprog))) break; ret = sk_reuseport_attach_filter(&fprog, sk); } break; case SO_ATTACH_REUSEPORT_EBPF: ret = -EINVAL; if (optlen == sizeof(u32)) { u32 ufd; ret = -EFAULT; if (copy_from_user(&ufd, optval, sizeof(ufd))) break; ret = sk_reuseport_attach_bpf(ufd, sk); } break; case SO_DETACH_FILTER: ret = sk_detach_filter(sk); break; case SO_LOCK_FILTER: if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool) ret = -EPERM; else sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool); break; case SO_PASSSEC: if (valbool) set_bit(SOCK_PASSSEC, &sock->flags); else clear_bit(SOCK_PASSSEC, &sock->flags); break; case SO_MARK: if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) ret = -EPERM; else sk->sk_mark = val; break; case SO_RXQ_OVFL: sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); break; case SO_WIFI_STATUS: sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); break; case SO_PEEK_OFF: if (sock->ops->set_peek_off) ret = sock->ops->set_peek_off(sk, val); else ret = -EOPNOTSUPP; break; case SO_NOFCS: sock_valbool_flag(sk, SOCK_NOFCS, valbool); break; case SO_SELECT_ERR_QUEUE: sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); break; #ifdef CONFIG_NET_RX_BUSY_POLL case SO_BUSY_POLL: /* allow unprivileged users to decrease the value */ if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN)) ret = -EPERM; else { if (val < 0) ret = -EINVAL; else sk->sk_ll_usec = val; } break; #endif case SO_MAX_PACING_RATE: sk->sk_max_pacing_rate = val; sk->sk_pacing_rate = min(sk->sk_pacing_rate, sk->sk_max_pacing_rate); break; case SO_INCOMING_CPU: sk->sk_incoming_cpu = val; break; case SO_CNX_ADVICE: if (val == 1) dst_negative_advice(sk); break; default: ret = -ENOPROTOOPT; break; } release_sock(sk); return ret; } EXPORT_SYMBOL(sock_setsockopt); static void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred) { ucred->pid = pid_vnr(pid); ucred->uid = ucred->gid = -1; if (cred) { struct user_namespace *current_ns = current_user_ns(); ucred->uid = from_kuid_munged(current_ns, cred->euid); ucred->gid = from_kgid_munged(current_ns, cred->egid); } } int sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; union { int val; struct linger ling; struct timeval tm; } v; int lv = sizeof(int); int len; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; memset(&v, 0, sizeof(v)); switch (optname) { case SO_DEBUG: v.val = sock_flag(sk, SOCK_DBG); break; case SO_DONTROUTE: v.val = sock_flag(sk, SOCK_LOCALROUTE); break; case SO_BROADCAST: v.val = sock_flag(sk, SOCK_BROADCAST); break; case SO_SNDBUF: v.val = sk->sk_sndbuf; break; case SO_RCVBUF: v.val = sk->sk_rcvbuf; break; case SO_REUSEADDR: v.val = sk->sk_reuse; break; case SO_REUSEPORT: v.val = sk->sk_reuseport; break; case 
SO_KEEPALIVE: v.val = sock_flag(sk, SOCK_KEEPOPEN); break; case SO_TYPE: v.val = sk->sk_type; break; case SO_PROTOCOL: v.val = sk->sk_protocol; break; case SO_DOMAIN: v.val = sk->sk_family; break; case SO_ERROR: v.val = -sock_error(sk); if (v.val == 0) v.val = xchg(&sk->sk_err_soft, 0); break; case SO_OOBINLINE: v.val = sock_flag(sk, SOCK_URGINLINE); break; case SO_NO_CHECK: v.val = sk->sk_no_check_tx; break; case SO_PRIORITY: v.val = sk->sk_priority; break; case SO_LINGER: lv = sizeof(v.ling); v.ling.l_onoff = sock_flag(sk, SOCK_LINGER); v.ling.l_linger = sk->sk_lingertime / HZ; break; case SO_BSDCOMPAT: sock_warn_obsolete_bsdism("getsockopt"); break; case SO_TIMESTAMP: v.val = sock_flag(sk, SOCK_RCVTSTAMP) && !sock_flag(sk, SOCK_RCVTSTAMPNS); break; case SO_TIMESTAMPNS: v.val = sock_flag(sk, SOCK_RCVTSTAMPNS); break; case SO_TIMESTAMPING: v.val = sk->sk_tsflags; break; case SO_RCVTIMEO: lv = sizeof(struct timeval); if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { v.tm.tv_sec = 0; v.tm.tv_usec = 0; } else { v.tm.tv_sec = sk->sk_rcvtimeo / HZ; v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ; } break; case SO_SNDTIMEO: lv = sizeof(struct timeval); if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { v.tm.tv_sec = 0; v.tm.tv_usec = 0; } else { v.tm.tv_sec = sk->sk_sndtimeo / HZ; v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ; } break; case SO_RCVLOWAT: v.val = sk->sk_rcvlowat; break; case SO_SNDLOWAT: v.val = 1; break; case SO_PASSCRED: v.val = !!test_bit(SOCK_PASSCRED, &sock->flags); break; case SO_PEERCRED: { struct ucred peercred; if (len > sizeof(peercred)) len = sizeof(peercred); cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); if (copy_to_user(optval, &peercred, len)) return -EFAULT; goto lenout; } case SO_PEERNAME: { char address[128]; if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2)) return -ENOTCONN; if (lv < len) return -EINVAL; if (copy_to_user(optval, address, len)) return -EFAULT; goto lenout; } /* Dubious BSD thing... Probably nobody even uses it, but * the UNIX standard wants it for whatever reason... -DaveM */ case SO_ACCEPTCONN: v.val = sk->sk_state == TCP_LISTEN; break; case SO_PASSSEC: v.val = !!test_bit(SOCK_PASSSEC, &sock->flags); break; case SO_PEERSEC: return security_socket_getpeersec_stream(sock, optval, optlen, len); case SO_MARK: v.val = sk->sk_mark; break; case SO_RXQ_OVFL: v.val = sock_flag(sk, SOCK_RXQ_OVFL); break; case SO_WIFI_STATUS: v.val = sock_flag(sk, SOCK_WIFI_STATUS); break; case SO_PEEK_OFF: if (!sock->ops->set_peek_off) return -EOPNOTSUPP; v.val = sk->sk_peek_off; break; case SO_NOFCS: v.val = sock_flag(sk, SOCK_NOFCS); break; case SO_BINDTODEVICE: return sock_getbindtodevice(sk, optval, optlen, len); case SO_GET_FILTER: len = sk_get_filter(sk, (struct sock_filter __user *)optval, len); if (len < 0) return len; goto lenout; case SO_LOCK_FILTER: v.val = sock_flag(sk, SOCK_FILTER_LOCKED); break; case SO_BPF_EXTENSIONS: v.val = bpf_tell_extensions(); break; case SO_SELECT_ERR_QUEUE: v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); break; #ifdef CONFIG_NET_RX_BUSY_POLL case SO_BUSY_POLL: v.val = sk->sk_ll_usec; break; #endif case SO_MAX_PACING_RATE: v.val = sk->sk_max_pacing_rate; break; case SO_INCOMING_CPU: v.val = sk->sk_incoming_cpu; break; default: /* We implement the SO_SNDLOWAT etc to not be settable * (1003.1g 7). */ return -ENOPROTOOPT; } if (len > lv) len = lv; if (copy_to_user(optval, &v, len)) return -EFAULT; lenout: if (put_user(len, optlen)) return -EFAULT; return 0; } /* * Initialize an sk_lock. 
* * (We also register the sk_lock with the lock validator.) */ static inline void sock_lock_init(struct sock *sk) { sock_lock_init_class_and_name(sk, af_family_slock_key_strings[sk->sk_family], af_family_slock_keys + sk->sk_family, af_family_key_strings[sk->sk_family], af_family_keys + sk->sk_family); } /* * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet, * even temporarly, because of RCU lookups. sk_node should also be left as is. * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end */ static void sock_copy(struct sock *nsk, const struct sock *osk) { #ifdef CONFIG_SECURITY_NETWORK void *sptr = nsk->sk_security; #endif memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin)); memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end)); #ifdef CONFIG_SECURITY_NETWORK nsk->sk_security = sptr; security_sk_clone(osk, nsk); #endif } static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, int family) { struct sock *sk; struct kmem_cache *slab; slab = prot->slab; if (slab != NULL) { sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); if (!sk) return sk; if (priority & __GFP_ZERO) sk_prot_clear_nulls(sk, prot->obj_size); } else sk = kmalloc(prot->obj_size, priority); if (sk != NULL) { kmemcheck_annotate_bitfield(sk, flags); if (security_sk_alloc(sk, family, priority)) goto out_free; if (!try_module_get(prot->owner)) goto out_free_sec; sk_tx_queue_clear(sk); } return sk; out_free_sec: security_sk_free(sk); out_free: if (slab != NULL) kmem_cache_free(slab, sk); else kfree(sk); return NULL; } static void sk_prot_free(struct proto *prot, struct sock *sk) { struct kmem_cache *slab; struct module *owner; owner = prot->owner; slab = prot->slab; cgroup_sk_free(&sk->sk_cgrp_data); mem_cgroup_sk_free(sk); security_sk_free(sk); if (slab != NULL) kmem_cache_free(slab, sk); else kfree(sk); module_put(owner); } /** * sk_alloc - All socket objects are allocated here * @net: the applicable net namespace * @family: protocol family * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) * @prot: struct proto associated with this new sock instance * @kern: is this to be a kernel socket? */ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern) { struct sock *sk; sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); if (sk) { sk->sk_family = family; /* * See comment in struct sock definition to understand * why we need sk_prot_creator -acme */ sk->sk_prot = sk->sk_prot_creator = prot; sock_lock_init(sk); sk->sk_net_refcnt = kern ? 0 : 1; if (likely(sk->sk_net_refcnt)) get_net(net); sock_net_set(sk, net); atomic_set(&sk->sk_wmem_alloc, 1); mem_cgroup_sk_alloc(sk); cgroup_sk_alloc(&sk->sk_cgrp_data); sock_update_classid(&sk->sk_cgrp_data); sock_update_netprioidx(&sk->sk_cgrp_data); } return sk; } EXPORT_SYMBOL(sk_alloc); /* Sockets having SOCK_RCU_FREE will call this function after one RCU * grace period. This is the case for UDP sockets and TCP listeners. 
*/ static void __sk_destruct(struct rcu_head *head) { struct sock *sk = container_of(head, struct sock, sk_rcu); struct sk_filter *filter; if (sk->sk_destruct) sk->sk_destruct(sk); filter = rcu_dereference_check(sk->sk_filter, atomic_read(&sk->sk_wmem_alloc) == 0); if (filter) { sk_filter_uncharge(sk, filter); RCU_INIT_POINTER(sk->sk_filter, NULL); } if (rcu_access_pointer(sk->sk_reuseport_cb)) reuseport_detach_sock(sk); sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); if (atomic_read(&sk->sk_omem_alloc)) pr_debug("%s: optmem leakage (%d bytes) detected\n", __func__, atomic_read(&sk->sk_omem_alloc)); if (sk->sk_peer_cred) put_cred(sk->sk_peer_cred); put_pid(sk->sk_peer_pid); if (likely(sk->sk_net_refcnt)) put_net(sock_net(sk)); sk_prot_free(sk->sk_prot_creator, sk); } void sk_destruct(struct sock *sk) { if (sock_flag(sk, SOCK_RCU_FREE)) call_rcu(&sk->sk_rcu, __sk_destruct); else __sk_destruct(&sk->sk_rcu); } static void __sk_free(struct sock *sk) { if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt)) sock_diag_broadcast_destroy(sk); else sk_destruct(sk); } void sk_free(struct sock *sk) { /* * We subtract one from sk_wmem_alloc and can know if * some packets are still in some tx queue. * If not null, sock_wfree() will call __sk_free(sk) later */ if (atomic_dec_and_test(&sk->sk_wmem_alloc)) __sk_free(sk); } EXPORT_SYMBOL(sk_free); /** * sk_clone_lock - clone a socket, and lock its clone * @sk: the socket to clone * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) * * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) */ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) { struct sock *newsk; bool is_charged = true; newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family); if (newsk != NULL) { struct sk_filter *filter; sock_copy(newsk, sk); /* SANITY */ if (likely(newsk->sk_net_refcnt)) get_net(sock_net(newsk)); sk_node_init(&newsk->sk_node); sock_lock_init(newsk); bh_lock_sock(newsk); newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; newsk->sk_backlog.len = 0; atomic_set(&newsk->sk_rmem_alloc, 0); /* * sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */ atomic_set(&newsk->sk_wmem_alloc, 1); atomic_set(&newsk->sk_omem_alloc, 0); skb_queue_head_init(&newsk->sk_receive_queue); skb_queue_head_init(&newsk->sk_write_queue); rwlock_init(&newsk->sk_callback_lock); lockdep_set_class_and_name(&newsk->sk_callback_lock, af_callback_keys + newsk->sk_family, af_family_clock_key_strings[newsk->sk_family]); newsk->sk_dst_cache = NULL; newsk->sk_wmem_queued = 0; newsk->sk_forward_alloc = 0; atomic_set(&newsk->sk_drops, 0); newsk->sk_send_head = NULL; newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; sock_reset_flag(newsk, SOCK_DONE); skb_queue_head_init(&newsk->sk_error_queue); filter = rcu_dereference_protected(newsk->sk_filter, 1); if (filter != NULL) /* though it's an empty new sock, the charging may fail * if sysctl_optmem_max was changed between creation of * original socket and cloning */ is_charged = sk_filter_charge(newsk, filter); if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { /* It is still raw copy of parent, so invalidate * destructor and make plain sk_free() */ newsk->sk_destruct = NULL; bh_unlock_sock(newsk); sk_free(newsk); newsk = NULL; goto out; } RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); newsk->sk_err = 0; newsk->sk_err_soft = 0; newsk->sk_priority = 0; newsk->sk_incoming_cpu = raw_smp_processor_id(); atomic64_set(&newsk->sk_cookie, 0); mem_cgroup_sk_alloc(newsk); 
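/* set up cgroup socket data for the clone as well */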
cgroup_sk_alloc(&newsk->sk_cgrp_data); /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) */ smp_wmb(); atomic_set(&newsk->sk_refcnt, 2); /* * Increment the counter in the same struct proto as the master * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that * is the same as sk->sk_prot->socks, as this field was copied * with memcpy). * * This _changes_ the previous behaviour, where * tcp_create_openreq_child always was incrementing the * equivalent to tcp_prot->socks (inet_sock_nr), so this have * to be taken into account in all callers. -acme */ sk_refcnt_debug_inc(newsk); sk_set_socket(newsk, NULL); newsk->sk_wq = NULL; if (newsk->sk_prot->sockets_allocated) sk_sockets_allocated_inc(newsk); if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP) net_enable_timestamp(); } out: return newsk; } EXPORT_SYMBOL_GPL(sk_clone_lock); void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { u32 max_segs = 1; sk_dst_set(sk, dst); sk->sk_route_caps = dst->dev->features; if (sk->sk_route_caps & NETIF_F_GSO) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; sk->sk_route_caps &= ~sk->sk_route_nocaps; if (sk_can_gso(sk)) { if (dst->header_len) { sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; sk->sk_gso_max_size = dst->dev->gso_max_size; max_segs = max_t(u32, dst->dev->gso_max_segs, 1); } } sk->sk_gso_max_segs = max_segs; } EXPORT_SYMBOL_GPL(sk_setup_caps); /* * Simple resource managers for sockets. */ /* * Write buffer destructor automatically called from kfree_skb. */ void sock_wfree(struct sk_buff *skb) { struct sock *sk = skb->sk; unsigned int len = skb->truesize; if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { /* * Keep a reference on sk_wmem_alloc, this will be released * after sk_write_space() call */ atomic_sub(len - 1, &sk->sk_wmem_alloc); sk->sk_write_space(sk); len = 1; } /* * if sk_wmem_alloc reaches 0, we must finish what sk_free() * could not do because of in-flight packets */ if (atomic_sub_and_test(len, &sk->sk_wmem_alloc)) __sk_free(sk); } EXPORT_SYMBOL(sock_wfree); /* This variant of sock_wfree() is used by TCP, * since it sets SOCK_USE_WRITE_QUEUE. */ void __sock_wfree(struct sk_buff *skb) { struct sock *sk = skb->sk; if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)) __sk_free(sk); } void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) { skb_orphan(skb); skb->sk = sk; #ifdef CONFIG_INET if (unlikely(!sk_fullsock(sk))) { skb->destructor = sock_edemux; sock_hold(sk); return; } #endif skb->destructor = sock_wfree; skb_set_hash_from_sk(skb, sk); /* * We used to take a refcount on sk, but following operation * is enough to guarantee sk_free() wont free this sock until * all in-flight packets are completed */ atomic_add(skb->truesize, &sk->sk_wmem_alloc); } EXPORT_SYMBOL(skb_set_owner_w); /* This helper is used by netem, as it can hold packets in its * delay queue. We want to allow the owner socket to send more * packets, as if they were already TX completed by a typical driver. * But we also want to keep skb->sk set because some packet schedulers * rely on it (sch_fq for example). So we set skb->truesize to a small * amount (1) and decrease sk_wmem_alloc accordingly. */ void skb_orphan_partial(struct sk_buff *skb) { /* If this skb is a TCP pure ACK or already went here, * we have nothing to do. 2 is already a very small truesize. 
*/ if (skb->truesize <= 2) return; /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc, * so we do not completely orphan skb, but transfert all * accounted bytes but one, to avoid unexpected reorders. */ if (skb->destructor == sock_wfree #ifdef CONFIG_INET || skb->destructor == tcp_wfree #endif ) { atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc); skb->truesize = 1; } else { skb_orphan(skb); } } EXPORT_SYMBOL(skb_orphan_partial); /* * Read buffer destructor automatically called from kfree_skb. */ void sock_rfree(struct sk_buff *skb) { struct sock *sk = skb->sk; unsigned int len = skb->truesize; atomic_sub(len, &sk->sk_rmem_alloc); sk_mem_uncharge(sk, len); } EXPORT_SYMBOL(sock_rfree); /* * Buffer destructor for skbs that are not used directly in read or write * path, e.g. for error handler skbs. Automatically called from kfree_skb. */ void sock_efree(struct sk_buff *skb) { sock_put(skb->sk); } EXPORT_SYMBOL(sock_efree); kuid_t sock_i_uid(struct sock *sk) { kuid_t uid; read_lock_bh(&sk->sk_callback_lock); uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; read_unlock_bh(&sk->sk_callback_lock); return uid; } EXPORT_SYMBOL(sock_i_uid); unsigned long sock_i_ino(struct sock *sk) { unsigned long ino; read_lock_bh(&sk->sk_callback_lock); ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; read_unlock_bh(&sk->sk_callback_lock); return ino; } EXPORT_SYMBOL(sock_i_ino); /* * Allocate a skb from the socket's send buffer. */ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, gfp_t priority) { if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { struct sk_buff *skb = alloc_skb(size, priority); if (skb) { skb_set_owner_w(skb, sk); return skb; } } return NULL; } EXPORT_SYMBOL(sock_wmalloc); /* * Allocate a memory block from the socket's option memory buffer. */ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) { if ((unsigned int)size <= sysctl_optmem_max && atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { void *mem; /* First do the add, to avoid the race if kmalloc * might sleep. */ atomic_add(size, &sk->sk_omem_alloc); mem = kmalloc(size, priority); if (mem) return mem; atomic_sub(size, &sk->sk_omem_alloc); } return NULL; } EXPORT_SYMBOL(sock_kmalloc); /* Free an option memory block. Note, we actually want the inline * here as this allows gcc to detect the nullify and fold away the * condition entirely. */ static inline void __sock_kfree_s(struct sock *sk, void *mem, int size, const bool nullify) { if (WARN_ON_ONCE(!mem)) return; if (nullify) kzfree(mem); else kfree(mem); atomic_sub(size, &sk->sk_omem_alloc); } void sock_kfree_s(struct sock *sk, void *mem, int size) { __sock_kfree_s(sk, mem, size, false); } EXPORT_SYMBOL(sock_kfree_s); void sock_kzfree_s(struct sock *sk, void *mem, int size) { __sock_kfree_s(sk, mem, size, true); } EXPORT_SYMBOL(sock_kzfree_s); /* It is almost wait_for_tcp_memory minus release_sock/lock_sock. I think, these locks should be removed for datagram sockets. 
*/ static long sock_wait_for_wmem(struct sock *sk, long timeo) { DEFINE_WAIT(wait); sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); for (;;) { if (!timeo) break; if (signal_pending(current)) break; set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) break; if (sk->sk_shutdown & SEND_SHUTDOWN) break; if (sk->sk_err) break; timeo = schedule_timeout(timeo); } finish_wait(sk_sleep(sk), &wait); return timeo; } /* * Generic send/receive buffer handlers */ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, unsigned long data_len, int noblock, int *errcode, int max_page_order) { struct sk_buff *skb; long timeo; int err; timeo = sock_sndtimeo(sk, noblock); for (;;) { err = sock_error(sk); if (err != 0) goto failure; err = -EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) goto failure; if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) break; sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); err = -EAGAIN; if (!timeo) goto failure; if (signal_pending(current)) goto interrupted; timeo = sock_wait_for_wmem(sk, timeo); } skb = alloc_skb_with_frags(header_len, data_len, max_page_order, errcode, sk->sk_allocation); if (skb) skb_set_owner_w(skb, sk); return skb; interrupted: err = sock_intr_errno(timeo); failure: *errcode = err; return NULL; } EXPORT_SYMBOL(sock_alloc_send_pskb); struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, int noblock, int *errcode) { return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0); } EXPORT_SYMBOL(sock_alloc_send_skb); int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, struct sockcm_cookie *sockc) { u32 tsflags; switch (cmsg->cmsg_type) { case SO_MARK: if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) return -EINVAL; sockc->mark = *(u32 *)CMSG_DATA(cmsg); break; case SO_TIMESTAMPING: if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) return -EINVAL; tsflags = *(u32 *)CMSG_DATA(cmsg); if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK) return -EINVAL; sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; sockc->tsflags |= tsflags; break; /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */ case SCM_RIGHTS: case SCM_CREDENTIALS: break; default: return -EINVAL; } return 0; } EXPORT_SYMBOL(__sock_cmsg_send); int sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct sockcm_cookie *sockc) { struct cmsghdr *cmsg; int ret; for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; if (cmsg->cmsg_level != SOL_SOCKET) continue; ret = __sock_cmsg_send(sk, msg, cmsg, sockc); if (ret) return ret; } return 0; } EXPORT_SYMBOL(sock_cmsg_send); /* On 32bit arches, an skb frag is limited to 2^15 */ #define SKB_FRAG_PAGE_ORDER get_order(32768) /** * skb_page_frag_refill - check that a page_frag contains enough room * @sz: minimum size of the fragment we want to get * @pfrag: pointer to page_frag * @gfp: priority for memory allocation * * Note: While this allocator tries to use high order pages, there is * no guarantee that allocations succeed. Therefore, @sz MUST be * less or equal than PAGE_SIZE. 
*/ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp) { if (pfrag->page) { if (page_ref_count(pfrag->page) == 1) { pfrag->offset = 0; return true; } if (pfrag->offset + sz <= pfrag->size) return true; put_page(pfrag->page); } pfrag->offset = 0; if (SKB_FRAG_PAGE_ORDER) { /* Avoid direct reclaim but allow kswapd to wake */ pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY, SKB_FRAG_PAGE_ORDER); if (likely(pfrag->page)) { pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER; return true; } } pfrag->page = alloc_page(gfp); if (likely(pfrag->page)) { pfrag->size = PAGE_SIZE; return true; } return false; } EXPORT_SYMBOL(skb_page_frag_refill); bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) { if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) return true; sk_enter_memory_pressure(sk); sk_stream_moderate_sndbuf(sk); return false; } EXPORT_SYMBOL(sk_page_frag_refill); static void __lock_sock(struct sock *sk) __releases(&sk->sk_lock.slock) __acquires(&sk->sk_lock.slock) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, TASK_UNINTERRUPTIBLE); spin_unlock_bh(&sk->sk_lock.slock); schedule(); spin_lock_bh(&sk->sk_lock.slock); if (!sock_owned_by_user(sk)) break; } finish_wait(&sk->sk_lock.wq, &wait); } static void __release_sock(struct sock *sk) __releases(&sk->sk_lock.slock) __acquires(&sk->sk_lock.slock) { struct sk_buff *skb, *next; while ((skb = sk->sk_backlog.head) != NULL) { sk->sk_backlog.head = sk->sk_backlog.tail = NULL; spin_unlock_bh(&sk->sk_lock.slock); do { next = skb->next; prefetch(next); WARN_ON_ONCE(skb_dst_is_noref(skb)); skb->next = NULL; sk_backlog_rcv(sk, skb); cond_resched(); skb = next; } while (skb != NULL); spin_lock_bh(&sk->sk_lock.slock); } /* * Doing the zeroing here guarantee we can not loop forever * while a wild producer attempts to flood us. */ sk->sk_backlog.len = 0; } void __sk_flush_backlog(struct sock *sk) { spin_lock_bh(&sk->sk_lock.slock); __release_sock(sk); spin_unlock_bh(&sk->sk_lock.slock); } /** * sk_wait_data - wait for data to arrive at sk_receive_queue * @sk: sock to wait on * @timeo: for how long * @skb: last skb seen on sk_receive_queue * * Now socket state including sk->sk_err is changed only under lock, * hence we may omit checks after joining wait queue. * We check receive queue before schedule() only as optimization; * it is very likely that release_sock() added new data. */ int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb) { DEFINE_WAIT_FUNC(wait, woken_wake_function); int rc; add_wait_queue(sk_sleep(sk), &wait); sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); remove_wait_queue(sk_sleep(sk), &wait); return rc; } EXPORT_SYMBOL(sk_wait_data); /** * __sk_mem_raise_allocated - increase memory_allocated * @sk: socket * @size: memory size to allocate * @amt: pages to allocate * @kind: allocation type * * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc */ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) { struct proto *prot = sk->sk_prot; long allocated = sk_memory_allocated_add(sk, amt); if (mem_cgroup_sockets_enabled && sk->sk_memcg && !mem_cgroup_charge_skmem(sk->sk_memcg, amt)) goto suppress_allocation; /* Under limit. */ if (allocated <= sk_prot_mem_limits(sk, 0)) { sk_leave_memory_pressure(sk); return 1; } /* Under pressure. 
*/ if (allocated > sk_prot_mem_limits(sk, 1)) sk_enter_memory_pressure(sk); /* Over hard limit. */ if (allocated > sk_prot_mem_limits(sk, 2)) goto suppress_allocation; /* guarantee minimum buffer size under pressure */ if (kind == SK_MEM_RECV) { if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0]) return 1; } else { /* SK_MEM_SEND */ if (sk->sk_type == SOCK_STREAM) { if (sk->sk_wmem_queued < prot->sysctl_wmem[0]) return 1; } else if (atomic_read(&sk->sk_wmem_alloc) < prot->sysctl_wmem[0]) return 1; } if (sk_has_memory_pressure(sk)) { int alloc; if (!sk_under_memory_pressure(sk)) return 1; alloc = sk_sockets_allocated_read_positive(sk); if (sk_prot_mem_limits(sk, 2) > alloc * sk_mem_pages(sk->sk_wmem_queued + atomic_read(&sk->sk_rmem_alloc) + sk->sk_forward_alloc)) return 1; } suppress_allocation: if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { sk_stream_moderate_sndbuf(sk); /* Fail only if socket is _under_ its sndbuf. * In this case we cannot block, so that we have to fail. */ if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) return 1; } trace_sock_exceed_buf_limit(sk, prot, allocated); sk_memory_allocated_sub(sk, amt); if (mem_cgroup_sockets_enabled && sk->sk_memcg) mem_cgroup_uncharge_skmem(sk->sk_memcg, amt); return 0; } EXPORT_SYMBOL(__sk_mem_raise_allocated); /** * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated * @sk: socket * @size: memory size to allocate * @kind: allocation type * * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means * rmem allocation. This function assumes that protocols which have * memory_pressure use sk_wmem_queued as write buffer accounting. */ int __sk_mem_schedule(struct sock *sk, int size, int kind) { int ret, amt = sk_mem_pages(size); sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT; ret = __sk_mem_raise_allocated(sk, size, amt, kind); if (!ret) sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT; return ret; } EXPORT_SYMBOL(__sk_mem_schedule); /** * __sk_mem_reduce_allocated - reclaim memory_allocated * @sk: socket * @amount: number of quanta * * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc */ void __sk_mem_reduce_allocated(struct sock *sk, int amount) { sk_memory_allocated_sub(sk, amount); if (mem_cgroup_sockets_enabled && sk->sk_memcg) mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); if (sk_under_memory_pressure(sk) && (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) sk_leave_memory_pressure(sk); } EXPORT_SYMBOL(__sk_mem_reduce_allocated); /** * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated * @sk: socket * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple) */ void __sk_mem_reclaim(struct sock *sk, int amount) { amount >>= SK_MEM_QUANTUM_SHIFT; sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT; __sk_mem_reduce_allocated(sk, amount); } EXPORT_SYMBOL(__sk_mem_reclaim); int sk_set_peek_off(struct sock *sk, int val) { if (val < 0) return -EINVAL; sk->sk_peek_off = val; return 0; } EXPORT_SYMBOL_GPL(sk_set_peek_off); /* * Set of default routines for initialising struct proto_ops when * the protocol does not support a particular function. In certain * cases where it makes no sense for a protocol to have a "do nothing" * function, some default processing is provided. 
*/ int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_bind); int sock_no_connect(struct socket *sock, struct sockaddr *saddr, int len, int flags) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_connect); int sock_no_socketpair(struct socket *sock1, struct socket *sock2) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_socketpair); int sock_no_accept(struct socket *sock, struct socket *newsock, int flags) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_accept); int sock_no_getname(struct socket *sock, struct sockaddr *saddr, int *len, int peer) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_getname); unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt) { return 0; } EXPORT_SYMBOL(sock_no_poll); int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_ioctl); int sock_no_listen(struct socket *sock, int backlog) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_listen); int sock_no_shutdown(struct socket *sock, int how) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_shutdown); int sock_no_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_setsockopt); int sock_no_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_getsockopt); int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_sendmsg); int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_recvmsg); int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) { /* Mirror missing mmap method error code */ return -ENODEV; } EXPORT_SYMBOL(sock_no_mmap); ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { ssize_t res; struct msghdr msg = {.msg_flags = flags}; struct kvec iov; char *kaddr = kmap(page); iov.iov_base = kaddr + offset; iov.iov_len = size; res = kernel_sendmsg(sock, &msg, &iov, 1, size); kunmap(page); return res; } EXPORT_SYMBOL(sock_no_sendpage); /* * Default Socket Callbacks */ static void sock_def_wakeup(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_all(&wq->wait); rcu_read_unlock(); } static void sock_def_error_report(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_poll(&wq->wait, POLLERR); sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); rcu_read_unlock(); } static void sock_def_readable(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } static void sock_def_write_space(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); /* Do not wake up a writer until he can make "significant" * progress. 
--DaveM */ if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | POLLWRNORM | POLLWRBAND); /* Should agree with poll, otherwise some programs break */ if (sock_writeable(sk)) sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); } static void sock_def_destruct(struct sock *sk) { } void sk_send_sigurg(struct sock *sk) { if (sk->sk_socket && sk->sk_socket->file) if (send_sigurg(&sk->sk_socket->file->f_owner)) sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); } EXPORT_SYMBOL(sk_send_sigurg); void sk_reset_timer(struct sock *sk, struct timer_list* timer, unsigned long expires) { if (!mod_timer(timer, expires)) sock_hold(sk); } EXPORT_SYMBOL(sk_reset_timer); void sk_stop_timer(struct sock *sk, struct timer_list* timer) { if (del_timer(timer)) __sock_put(sk); } EXPORT_SYMBOL(sk_stop_timer); void sock_init_data(struct socket *sock, struct sock *sk) { skb_queue_head_init(&sk->sk_receive_queue); skb_queue_head_init(&sk->sk_write_queue); skb_queue_head_init(&sk->sk_error_queue); sk->sk_send_head = NULL; init_timer(&sk->sk_timer); sk->sk_allocation = GFP_KERNEL; sk->sk_rcvbuf = sysctl_rmem_default; sk->sk_sndbuf = sysctl_wmem_default; sk->sk_state = TCP_CLOSE; sk_set_socket(sk, sock); sock_set_flag(sk, SOCK_ZAPPED); if (sock) { sk->sk_type = sock->type; sk->sk_wq = sock->wq; sock->sk = sk; sk->sk_uid = SOCK_INODE(sock)->i_uid; } else { sk->sk_wq = NULL; sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0); } rwlock_init(&sk->sk_callback_lock); lockdep_set_class_and_name(&sk->sk_callback_lock, af_callback_keys + sk->sk_family, af_family_clock_key_strings[sk->sk_family]); sk->sk_state_change = sock_def_wakeup; sk->sk_data_ready = sock_def_readable; sk->sk_write_space = sock_def_write_space; sk->sk_error_report = sock_def_error_report; sk->sk_destruct = sock_def_destruct; sk->sk_frag.page = NULL; sk->sk_frag.offset = 0; sk->sk_peek_off = -1; sk->sk_peer_pid = NULL; sk->sk_peer_cred = NULL; sk->sk_write_pending = 0; sk->sk_rcvlowat = 1; sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; sk->sk_stamp = ktime_set(-1L, 0); #ifdef CONFIG_NET_RX_BUSY_POLL sk->sk_napi_id = 0; sk->sk_ll_usec = sysctl_net_busy_read; #endif sk->sk_max_pacing_rate = ~0U; sk->sk_pacing_rate = ~0U; sk->sk_incoming_cpu = -1; /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) */ smp_wmb(); atomic_set(&sk->sk_refcnt, 1); atomic_set(&sk->sk_drops, 0); } EXPORT_SYMBOL(sock_init_data); void lock_sock_nested(struct sock *sk, int subclass) { might_sleep(); spin_lock_bh(&sk->sk_lock.slock); if (sk->sk_lock.owned) __lock_sock(sk); sk->sk_lock.owned = 1; spin_unlock(&sk->sk_lock.slock); /* * The sk_lock has mutex_lock() semantics here: */ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); local_bh_enable(); } EXPORT_SYMBOL(lock_sock_nested); void release_sock(struct sock *sk) { spin_lock_bh(&sk->sk_lock.slock); if (sk->sk_backlog.tail) __release_sock(sk); /* Warning : release_cb() might need to release sk ownership, * ie call sock_release_ownership(sk) before us. 
*/ if (sk->sk_prot->release_cb) sk->sk_prot->release_cb(sk); sock_release_ownership(sk); if (waitqueue_active(&sk->sk_lock.wq)) wake_up(&sk->sk_lock.wq); spin_unlock_bh(&sk->sk_lock.slock); } EXPORT_SYMBOL(release_sock); /** * lock_sock_fast - fast version of lock_sock * @sk: socket * * This version should be used for very small section, where process wont block * return false if fast path is taken * sk_lock.slock locked, owned = 0, BH disabled * return true if slow path is taken * sk_lock.slock unlocked, owned = 1, BH enabled */ bool lock_sock_fast(struct sock *sk) { might_sleep(); spin_lock_bh(&sk->sk_lock.slock); if (!sk->sk_lock.owned) /* * Note : We must disable BH */ return false; __lock_sock(sk); sk->sk_lock.owned = 1; spin_unlock(&sk->sk_lock.slock); /* * The sk_lock has mutex_lock() semantics here: */ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); local_bh_enable(); return true; } EXPORT_SYMBOL(lock_sock_fast); int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) { struct timeval tv; if (!sock_flag(sk, SOCK_TIMESTAMP)) sock_enable_timestamp(sk, SOCK_TIMESTAMP); tv = ktime_to_timeval(sk->sk_stamp); if (tv.tv_sec == -1) return -ENOENT; if (tv.tv_sec == 0) { sk->sk_stamp = ktime_get_real(); tv = ktime_to_timeval(sk->sk_stamp); } return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0; } EXPORT_SYMBOL(sock_get_timestamp); int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) { struct timespec ts; if (!sock_flag(sk, SOCK_TIMESTAMP)) sock_enable_timestamp(sk, SOCK_TIMESTAMP); ts = ktime_to_timespec(sk->sk_stamp); if (ts.tv_sec == -1) return -ENOENT; if (ts.tv_sec == 0) { sk->sk_stamp = ktime_get_real(); ts = ktime_to_timespec(sk->sk_stamp); } return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0; } EXPORT_SYMBOL(sock_get_timestampns); void sock_enable_timestamp(struct sock *sk, int flag) { if (!sock_flag(sk, flag)) { unsigned long previous_flags = sk->sk_flags; sock_set_flag(sk, flag); /* * we just set one of the two flags which require net * time stamping, but time stamping might have been on * already because of the other one */ if (sock_needs_netstamp(sk) && !(previous_flags & SK_FLAGS_TIMESTAMP)) net_enable_timestamp(); } } int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, int type) { struct sock_exterr_skb *serr; struct sk_buff *skb; int copied, err; err = -EAGAIN; skb = sock_dequeue_err_skb(sk); if (skb == NULL) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free_skb; sock_recv_timestamp(msg, sk, skb); serr = SKB_EXT_ERR(skb); put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee); msg->msg_flags |= MSG_ERRQUEUE; err = copied; out_free_skb: kfree_skb(skb); out: return err; } EXPORT_SYMBOL(sock_recv_errqueue); /* * Get a socket option on an socket. * * FIX: POSIX 1003.1g is very ambiguous here. It states that * asynchronous errors should be reported by getsockopt. We assume * this means if you specify SO_ERROR (otherwise whats the point of it). 
*/ int sock_common_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_getsockopt); #ifdef CONFIG_COMPAT int compat_sock_common_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; if (sk->sk_prot->compat_getsockopt != NULL) return sk->sk_prot->compat_getsockopt(sk, level, optname, optval, optlen); return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(compat_sock_common_getsockopt); #endif int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; int addr_len = 0; int err; err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, flags & ~MSG_DONTWAIT, &addr_len); if (err >= 0) msg->msg_namelen = addr_len; return err; } EXPORT_SYMBOL(sock_common_recvmsg); /* * Set socket options on an inet socket. */ int sock_common_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_setsockopt); #ifdef CONFIG_COMPAT int compat_sock_common_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; if (sk->sk_prot->compat_setsockopt != NULL) return sk->sk_prot->compat_setsockopt(sk, level, optname, optval, optlen); return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(compat_sock_common_setsockopt); #endif void sk_common_release(struct sock *sk) { if (sk->sk_prot->destroy) sk->sk_prot->destroy(sk); /* * Observation: when sock_common_release is called, processes have * no access to socket. But net still has. * Step one, detach it from networking: * * A. Remove from hash tables. */ sk->sk_prot->unhash(sk); /* * In this point socket cannot receive new packets, but it is possible * that some packets are in flight because some CPU runs receiver and * did hash table lookup before we unhashed socket. They will achieve * receive queue and will be purged by socket destructor. * * Also we still have packets pending on receive queue and probably, * our own packets waiting in device queues. sock_destroy will drain * receive queue, but transmitted packets will delay socket destruction * until the last reference will be released. */ sock_orphan(sk); xfrm_sk_free_policy(sk); sk_refcnt_debug_release(sk); if (sk->sk_frag.page) { put_page(sk->sk_frag.page); sk->sk_frag.page = NULL; } sock_put(sk); } EXPORT_SYMBOL(sk_common_release); #ifdef CONFIG_PROC_FS #define PROTO_INUSE_NR 64 /* should be enough for the first time */ struct prot_inuse { int val[PROTO_INUSE_NR]; }; static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR); #ifdef CONFIG_NET_NS void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) { __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val); } EXPORT_SYMBOL_GPL(sock_prot_inuse_add); int sock_prot_inuse_get(struct net *net, struct proto *prot) { int cpu, idx = prot->inuse_idx; int res = 0; for_each_possible_cpu(cpu) res += per_cpu_ptr(net->core.inuse, cpu)->val[idx]; return res >= 0 ? res : 0; } EXPORT_SYMBOL_GPL(sock_prot_inuse_get); static int __net_init sock_inuse_init_net(struct net *net) { net->core.inuse = alloc_percpu(struct prot_inuse); return net->core.inuse ? 
0 : -ENOMEM; } static void __net_exit sock_inuse_exit_net(struct net *net) { free_percpu(net->core.inuse); } static struct pernet_operations net_inuse_ops = { .init = sock_inuse_init_net, .exit = sock_inuse_exit_net, }; static __init int net_inuse_init(void) { if (register_pernet_subsys(&net_inuse_ops)) panic("Cannot initialize net inuse counters"); return 0; } core_initcall(net_inuse_init); #else static DEFINE_PER_CPU(struct prot_inuse, prot_inuse); void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) { __this_cpu_add(prot_inuse.val[prot->inuse_idx], val); } EXPORT_SYMBOL_GPL(sock_prot_inuse_add); int sock_prot_inuse_get(struct net *net, struct proto *prot) { int cpu, idx = prot->inuse_idx; int res = 0; for_each_possible_cpu(cpu) res += per_cpu(prot_inuse, cpu).val[idx]; return res >= 0 ? res : 0; } EXPORT_SYMBOL_GPL(sock_prot_inuse_get); #endif static void assign_proto_idx(struct proto *prot) { prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { pr_err("PROTO_INUSE_NR exhausted\n"); return; } set_bit(prot->inuse_idx, proto_inuse_idx); } static void release_proto_idx(struct proto *prot) { if (prot->inuse_idx != PROTO_INUSE_NR - 1) clear_bit(prot->inuse_idx, proto_inuse_idx); } #else static inline void assign_proto_idx(struct proto *prot) { } static inline void release_proto_idx(struct proto *prot) { } #endif static void req_prot_cleanup(struct request_sock_ops *rsk_prot) { if (!rsk_prot) return; kfree(rsk_prot->slab_name); rsk_prot->slab_name = NULL; kmem_cache_destroy(rsk_prot->slab); rsk_prot->slab = NULL; } static int req_prot_init(const struct proto *prot) { struct request_sock_ops *rsk_prot = prot->rsk_prot; if (!rsk_prot) return 0; rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name); if (!rsk_prot->slab_name) return -ENOMEM; rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, rsk_prot->obj_size, 0, prot->slab_flags, NULL); if (!rsk_prot->slab) { pr_crit("%s: Can't create request sock SLAB cache!\n", prot->name); return -ENOMEM; } return 0; } int proto_register(struct proto *prot, int alloc_slab) { if (alloc_slab) { prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, SLAB_HWCACHE_ALIGN | prot->slab_flags, NULL); if (prot->slab == NULL) { pr_crit("%s: Can't create sock SLAB cache!\n", prot->name); goto out; } if (req_prot_init(prot)) goto out_free_request_sock_slab; if (prot->twsk_prot != NULL) { prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name); if (prot->twsk_prot->twsk_slab_name == NULL) goto out_free_request_sock_slab; prot->twsk_prot->twsk_slab = kmem_cache_create(prot->twsk_prot->twsk_slab_name, prot->twsk_prot->twsk_obj_size, 0, prot->slab_flags, NULL); if (prot->twsk_prot->twsk_slab == NULL) goto out_free_timewait_sock_slab_name; } } mutex_lock(&proto_list_mutex); list_add(&prot->node, &proto_list); assign_proto_idx(prot); mutex_unlock(&proto_list_mutex); return 0; out_free_timewait_sock_slab_name: kfree(prot->twsk_prot->twsk_slab_name); out_free_request_sock_slab: req_prot_cleanup(prot->rsk_prot); kmem_cache_destroy(prot->slab); prot->slab = NULL; out: return -ENOBUFS; } EXPORT_SYMBOL(proto_register); void proto_unregister(struct proto *prot) { mutex_lock(&proto_list_mutex); release_proto_idx(prot); list_del(&prot->node); mutex_unlock(&proto_list_mutex); kmem_cache_destroy(prot->slab); prot->slab = NULL; req_prot_cleanup(prot->rsk_prot); if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) { 
kmem_cache_destroy(prot->twsk_prot->twsk_slab); kfree(prot->twsk_prot->twsk_slab_name); prot->twsk_prot->twsk_slab = NULL; } } EXPORT_SYMBOL(proto_unregister); #ifdef CONFIG_PROC_FS static void *proto_seq_start(struct seq_file *seq, loff_t *pos) __acquires(proto_list_mutex) { mutex_lock(&proto_list_mutex); return seq_list_start_head(&proto_list, *pos); } static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &proto_list, pos); } static void proto_seq_stop(struct seq_file *seq, void *v) __releases(proto_list_mutex) { mutex_unlock(&proto_list_mutex); } static char proto_method_implemented(const void *method) { return method == NULL ? 'n' : 'y'; } static long sock_prot_memory_allocated(struct proto *proto) { return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L; } static char *sock_prot_memory_pressure(struct proto *proto) { return proto->memory_pressure != NULL ? proto_memory_pressure(proto) ? "yes" : "no" : "NI"; } static void proto_seq_printf(struct seq_file *seq, struct proto *proto) { seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", proto->name, proto->obj_size, sock_prot_inuse_get(seq_file_net(seq), proto), sock_prot_memory_allocated(proto), sock_prot_memory_pressure(proto), proto->max_header, proto->slab == NULL ? "no" : "yes", module_name(proto->owner), proto_method_implemented(proto->close), proto_method_implemented(proto->connect), proto_method_implemented(proto->disconnect), proto_method_implemented(proto->accept), proto_method_implemented(proto->ioctl), proto_method_implemented(proto->init), proto_method_implemented(proto->destroy), proto_method_implemented(proto->shutdown), proto_method_implemented(proto->setsockopt), proto_method_implemented(proto->getsockopt), proto_method_implemented(proto->sendmsg), proto_method_implemented(proto->recvmsg), proto_method_implemented(proto->sendpage), proto_method_implemented(proto->bind), proto_method_implemented(proto->backlog_rcv), proto_method_implemented(proto->hash), proto_method_implemented(proto->unhash), proto_method_implemented(proto->get_port), proto_method_implemented(proto->enter_memory_pressure)); } static int proto_seq_show(struct seq_file *seq, void *v) { if (v == &proto_list) seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", "protocol", "size", "sockets", "memory", "press", "maxhdr", "slab", "module", "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n"); else proto_seq_printf(seq, list_entry(v, struct proto, node)); return 0; } static const struct seq_operations proto_seq_ops = { .start = proto_seq_start, .next = proto_seq_next, .stop = proto_seq_stop, .show = proto_seq_show, }; static int proto_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &proto_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations proto_seq_fops = { .owner = THIS_MODULE, .open = proto_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static __net_init int proto_init_net(struct net *net) { if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops)) return -ENOMEM; return 0; } static __net_exit void proto_exit_net(struct net *net) { remove_proc_entry("protocols", net->proc_net); } static __net_initdata struct pernet_operations proto_net_ops = { .init = proto_init_net, .exit = proto_exit_net, }; static int __init proto_init(void) { return register_pernet_subsys(&proto_net_ops); } 
subsys_initcall(proto_init); #endif /* PROC_FS */
tgraf/net-next
net/core/sock.c
C
gpl-2.0
78,117
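The accounting helpers in the file above (__sk_mem_schedule(), __sk_mem_reclaim()) charge socket memory in whole SK_MEM_QUANTUM-sized units rather than in bytes. Below is a minimal userspace sketch of that rounding, assuming a 4 KiB quantum; sk_mem_pages() and forward_alloc here are local stand-ins for the kernel's macros and fields, not the real implementation.

/* Standalone sketch of the SK_MEM_QUANTUM accounting used by
 * __sk_mem_schedule()/__sk_mem_reclaim() above.  Assumes a 4 KiB
 * quantum; the helpers are local stand-ins for the kernel macros. */
#include <stdio.h>

#define SK_MEM_QUANTUM        4096
#define SK_MEM_QUANTUM_SHIFT  12

/* Round a byte count up to whole quanta (mirrors sk_mem_pages()). */
static int sk_mem_pages(int size)
{
        return (size + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

int main(void)
{
        int forward_alloc = 0;          /* bytes pre-charged to the socket */
        int size = 1500;                /* e.g. an incoming skb's truesize */

        /* Charge: reserve whole quanta, as __sk_mem_schedule() does. */
        int amt = sk_mem_pages(size);
        forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
        printf("charged %d bytes for a %d byte request (%d quanta)\n",
               amt << SK_MEM_QUANTUM_SHIFT, size, amt);

        /* Reclaim: only whole quanta are returned; any sub-quantum
         * remainder stays charged, as in __sk_mem_reclaim(). */
        int amount = forward_alloc;
        amount >>= SK_MEM_QUANTUM_SHIFT;
        forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
        printf("reclaimed %d quanta, %d bytes left charged\n",
               amount, forward_alloc);
        return 0;
}

Roughly speaking, the quantum exists so that the per-socket prepaid reserve absorbs small charges and the shared memory_allocated counter is only touched in whole-quantum steps.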
/* * focuser driver. * * Copyright (C) 2010 LGE Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/delay.h> #include <linux/fs.h> #include <linux/i2c.h> #include <linux/miscdevice.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <media/ar0832_focuser.h> // LGE_CHANGE_S X2_ICS [byun.youngki@lge.com], 2012-05-03, < recover to GB version > #include <media/ar0832_main.h> // LGE_CHANGE_E X2_ICS [byun.youngki@lge.com], 2012-05-03, < recover to GB version > #define POS_LOW 0//50 #define POS_HIGH 255 #define SETTLETIME_MS 100 #define FOCAL_LENGTH (3.5f) #define FNUMBER (2.8f) #define FPOS_COUNT 1024 DEFINE_MUTEX(star_focuser_lock); #define DW9716_MAX_RETRIES (3) static int ar0832_focuser_write(struct i2c_client *client, u16 value) { int count; struct i2c_msg msg[1]; unsigned char data[2]; int retry = 0; if (!client->adapter) return -ENODEV; data[0] = (u8) ((value >> 4) & 0x3F); data[1] = (u8) ((value & 0xF) << 4); data[1] = (data[1] &0xF0) |0x05; // Slew rate control (8 steps, 50us) msg[0].addr = client->addr; msg[0].flags = 0; msg[0].len = ARRAY_SIZE(data); msg[0].buf = data; // pr_info("%s: focuser set position = %d, 0x%x\n", __func__, value, *(u16*)data); do { count = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); if (count == ARRAY_SIZE(msg)) return 0; retry++; pr_err("ar0832_focuser: i2c transfer failed, retrying %x\n", value); msleep(3); }while (retry <= DW9716_MAX_RETRIES); return -EIO; } static int ar0832_focuser_write_helper(struct ar0832_focuser_info *info, u16 value) { int ret; switch(info->camera_mode){ case Main: case LeftOnly: ret = ar0832_focuser_write(info->i2c_client, value); break; case Stereo: ret = ar0832_focuser_write(info->i2c_client, value); ret = ar0832_focuser_write(info->i2c_client_right, value); break; case RightOnly: ret = ar0832_focuser_write(info->i2c_client_right, value); break; default : return -1; } return ret; } static int ar0832_focuser_set_position(struct ar0832_focuser_info *info, u32 position) { if (position < info->config.pos_low || position > info->config.pos_high) return -EINVAL; return ar0832_focuser_write(info->i2c_client, position); } static long ar0832_focuser_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ar0832_focuser_info *info = file->private_data; int ret; switch (cmd) { case AR0832_FOCUSER_IOCTL_GET_CONFIG: { if (copy_to_user((void __user *) arg, &info->config, sizeof(info->config))) { pr_err("%s: 0x%x\n", __func__, __LINE__); return -EFAULT; } break; } case AR0832_FOCUSER_IOCTL_SET_POSITION: mutex_lock(&star_focuser_lock); ret = ar0832_focuser_set_position(info, (u32) arg); mutex_unlock(&star_focuser_lock); return ret; case AR0832_FOCUSER_IOCTL_SET_MODE: info->camera_mode =(enum StereoCameraMode)arg; break; default: return -EINVAL; } return 0; } static struct ar0832_focuser_info *info = NULL; static int ar0832_focuser_open(struct inode *inode, struct file *file) { pr_info("ar0832_focuser: open!\n"); file->private_data = info; if (info->regulator) regulator_enable(info->regulator); return 0; } int ar0832_focuser_release(struct inode *inode, struct file *file) { pr_info("ar0832_focuser: release!\n"); if (info->regulator) regulator_disable(info->regulator); file->private_data = NULL; return 0; } static const struct file_operations ar0832_focuser_fileops = { .owner = THIS_MODULE, .open = ar0832_focuser_open, 
.unlocked_ioctl = ar0832_focuser_ioctl, .release = ar0832_focuser_release, }; static struct miscdevice ar0832_focuser_device = { .minor = MISC_DYNAMIC_MINOR, .name = "ar0832_focuser", .fops = &ar0832_focuser_fileops, }; static int ar0832_focuser_probe(struct i2c_client *client, const struct i2c_device_id *id) { int err; pr_info("ar0832_focuser: probing sensor.\n"); info = kzalloc(sizeof(struct ar0832_focuser_info), GFP_KERNEL); if (!info) { pr_err("ar0832_focuser: Unable to allocate memory!\n"); return -ENOMEM; } err = misc_register(&ar0832_focuser_device); if (err) { pr_err("ar0832_focuser: Unable to register misc device!\n"); kfree(info); return err; } //WBT#196353 : don'use the regulator. the power turn on when camera turn on. /* info->regulator = regulator_get(&client->dev, "p_cam_avdd"); if (IS_ERR_OR_NULL(info->regulator)) { dev_err(&client->dev, "unable to get regulator %s\n", dev_name(&client->dev)); info->regulator = NULL; } else { regulator_enable(info->regulator); } */ info->regulator = 0; info->i2c_client = client; info->config.settle_time = SETTLETIME_MS; info->config.focal_length = FOCAL_LENGTH; info->config.fnumber = FNUMBER; info->config.pos_low = POS_LOW; info->config.pos_high = POS_HIGH; i2c_set_clientdata(client, info); return 0; } static int ar0832_focuser_remove(struct i2c_client *client) { struct ar0832_focuser_info *info; info = i2c_get_clientdata(client); misc_deregister(&ar0832_focuser_device); kfree(info); return 0; } static const struct i2c_device_id ar0832_focuser_id[] = { { "ar0832_focuser", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, ar0832_focuser_id); static struct i2c_driver ar0832_focuser_i2c_driver = { .driver = { .name = "ar0832_focuser", .owner = THIS_MODULE, }, .probe = ar0832_focuser_probe, .remove = ar0832_focuser_remove, .id_table = ar0832_focuser_id, }; static int __init ar0832_focuser_init(void) { pr_info("ar0832_focuser sensor driver loading\n"); i2c_add_driver(&ar0832_focuser_i2c_driver); return 0; } static void __exit ar0832_focuser_exit(void) { i2c_del_driver(&ar0832_focuser_i2c_driver); } module_init(ar0832_focuser_init); module_exit(ar0832_focuser_exit);
carburano/KingOfBirds_Kernel
drivers/media/video/tegra/ar0832_focuser.c
C
gpl-2.0
6,171
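ar0832_focuser_write() in the file above splits the focuser position across two I2C bytes and folds a slew-rate code into the low nibble of the second byte. The standalone sketch below reproduces just that packing so the wire format is easier to see; the 0x05 slew-rate value and the bit positions are taken from the driver, while pack_position() and the sample positions are illustrative only.

/* Userspace sketch of the byte packing done in ar0832_focuser_write():
 * a 10-bit DAC position goes out as two bytes, with the low nibble of
 * the second byte used for slew-rate control (0x05 in the driver). */
#include <stdio.h>
#include <stdint.h>

static void pack_position(uint16_t value, uint8_t data[2])
{
        data[0] = (value >> 4) & 0x3F;            /* position bits 9..4 */
        data[1] = (uint8_t)((value & 0xF) << 4);  /* position bits 3..0 */
        data[1] = (data[1] & 0xF0) | 0x05;        /* slew-rate nibble   */
}

int main(void)
{
        uint8_t data[2];
        uint16_t positions[] = { 0, 128, 255, 1023 };

        for (unsigned i = 0; i < sizeof(positions) / sizeof(positions[0]); i++) {
                pack_position(positions[i], data);
                printf("pos %4u -> 0x%02X 0x%02X\n",
                       (unsigned)positions[i], data[0], data[1]);
        }
        return 0;
}

Note that the driver itself only uses positions 0..255 (POS_LOW..POS_HIGH), although the packing can carry a full 10-bit value.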
/* * arch/arm/mach-tegra/pm-t3.c * * Tegra3 SOC-specific power and cluster management * * Copyright (c) 2009-2011, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/irq.h> #include <mach/gpio.h> #include <mach/iomap.h> #include <mach/irqs.h> #include <asm/cpu_pm.h> #include <asm/hardware/gic.h> #include <trace/events/power.h> #include "clock.h" #include "cpuidle.h" #include "pm.h" #include "sleep.h" #include "tegra3_emc.h" #include "dvfs.h" #ifdef CONFIG_TEGRA_CLUSTER_CONTROL #define CAR_CCLK_BURST_POLICY \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x20) #define CAR_SUPER_CCLK_DIVIDER \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x24) #define CAR_CCLKG_BURST_POLICY \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x368) #define CAR_SUPER_CCLKG_DIVIDER \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x36C) #define CAR_CCLKLP_BURST_POLICY \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x370) #define PLLX_DIV2_BYPASS_LP (1<<16) #define CAR_SUPER_CCLKLP_DIVIDER \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x374) #define CAR_BOND_OUT_V \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390) #define CAR_BOND_OUT_V_CPU_G (1<<0) #define CAR_BOND_OUT_V_CPU_LP (1<<1) #define CAR_CLK_ENB_V_SET \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x440) #define CAR_CLK_ENB_V_CPU_G (1<<0) #define CAR_CLK_ENB_V_CPU_LP (1<<1) #define CAR_RST_CPUG_CMPLX_SET \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x450) #define CAR_RST_CPUG_CMPLX_CLR \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x454) #define CAR_RST_CPULP_CMPLX_SET \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x458) #define CAR_RST_CPULP_CMPLX_CLR \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x45C) #define CAR_CLK_CPUG_CMPLX_SET \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x460) #define CAR_CLK_CPUG_CMPLX_CLR \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x464) #define CAR_CLK_CPULP_CMPLX_SET \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x468) #define CAR_CLK_CPULP_CMPLX_CLR \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x46C) #define CPU_CLOCK(cpu) (0x1<<(8+cpu)) #define CPU_RESET(cpu) (0x1111ul<<(cpu)) static int cluster_switch_prolog_clock(unsigned int flags) { u32 reg; u32 CclkBurstPolicy; u32 SuperCclkDivier; /* Read the bond out register containing the G and LP CPUs. */ reg = readl(CAR_BOND_OUT_V); /* Sync G-PLLX divider bypass with LP (no effect on G, just to prevent LP settings overwrite by save/restore code */ CclkBurstPolicy = ~PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKG_BURST_POLICY); CclkBurstPolicy |= PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKLP_BURST_POLICY); writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY); /* Switching to G? */ if (flags & TEGRA_POWER_CLUSTER_G) { /* Do the G CPUs exist? 
*/ if (reg & CAR_BOND_OUT_V_CPU_G) return -ENXIO; /* Keep G CPU clock policy set by upper laayer, with the exception of the transition via LP1 */ if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) { /* In LP1 power mode come up on CLKM (oscillator) */ CclkBurstPolicy = readl(CAR_CCLKG_BURST_POLICY); CclkBurstPolicy &= ~0xF; SuperCclkDivier = 0; writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY); writel(SuperCclkDivier, CAR_SUPER_CCLKG_DIVIDER); } /* Hold G CPUs 1-3 in reset after the switch */ reg = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3); writel(reg, CAR_RST_CPUG_CMPLX_SET); /* Take G CPU 0 out of reset after the switch */ reg = CPU_RESET(0); writel(reg, CAR_RST_CPUG_CMPLX_CLR); /* Disable the clocks on G CPUs 1-3 after the switch */ reg = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3); writel(reg, CAR_CLK_CPUG_CMPLX_SET); /* Enable the clock on G CPU 0 after the switch */ reg = CPU_CLOCK(0); writel(reg, CAR_CLK_CPUG_CMPLX_CLR); /* Enable the G CPU complex clock after the switch */ reg = CAR_CLK_ENB_V_CPU_G; writel(reg, CAR_CLK_ENB_V_SET); } /* Switching to LP? */ else if (flags & TEGRA_POWER_CLUSTER_LP) { /* Does the LP CPU exist? */ if (reg & CAR_BOND_OUT_V_CPU_LP) return -ENXIO; /* Keep LP CPU clock policy set by upper layer, with the exception of the transition via LP1 */ if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) { /* In LP1 power mode come up on CLKM (oscillator) */ CclkBurstPolicy = readl(CAR_CCLKLP_BURST_POLICY); CclkBurstPolicy &= ~0xF; SuperCclkDivier = 0; writel(CclkBurstPolicy, CAR_CCLKLP_BURST_POLICY); writel(SuperCclkDivier, CAR_SUPER_CCLKLP_DIVIDER); } /* Take the LP CPU ut of reset after the switch */ reg = CPU_RESET(0); writel(reg, CAR_RST_CPULP_CMPLX_CLR); /* Enable the clock on the LP CPU after the switch */ reg = CPU_CLOCK(0); writel(reg, CAR_CLK_CPULP_CMPLX_CLR); /* Enable the LP CPU complex clock after the switch */ reg = CAR_CLK_ENB_V_CPU_LP; writel(reg, CAR_CLK_ENB_V_SET); } return 0; } void tegra_cluster_switch_prolog(unsigned int flags) { unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK; unsigned int current_cluster = is_lp_cluster() ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G; u32 reg; /* Read the flow controler CSR register and clear the CPU switch and immediate flags. If an actual CPU switch is to be performed, re-write the CSR register with the desired values. */ reg = readl(FLOW_CTRL_CPU_CSR(0)); reg &= ~(FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE | FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER); /* Program flow controller for immediate wake if requested */ if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE) reg |= FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE; /* Do nothing if no switch actions requested */ if (!target_cluster) goto done; if ((current_cluster != target_cluster) || (flags & TEGRA_POWER_CLUSTER_FORCE)) { if (current_cluster != target_cluster) { // Set up the clocks for the target CPU. if (cluster_switch_prolog_clock(flags)) { /* The target CPU does not exist */ goto done; } /* Set up the flow controller to switch CPUs. */ reg |= FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER; } } done: writel(reg, FLOW_CTRL_CPU_CSR(0)); } static void cluster_switch_epilog_actlr(void) { u32 actlr; /* TLB maintenance broadcast bit (FW) is stubbed out on LP CPU (reads as zero, writes ignored). Hence, it is not preserved across G=>LP=>G switch by CPU save/restore code, but SMP bit is restored correctly. Synchronize these two bits here after LP=>G transition. Note that only CPU0 core is powered on before and after the switch. See also bug 807595. 
*/ __asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); if (actlr & (0x1 << 6)) { actlr |= 0x1; __asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr)); } } static void cluster_switch_epilog_gic(void) { unsigned int max_irq, i; void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE); /* Reprogram the interrupt affinity because the on the LP CPU, the interrupt distributor affinity regsiters are stubbed out by ARM (reads as zero, writes ignored). So when the LP CPU context save code runs, the affinity registers will read as all zero. This causes all interrupts to be effectively disabled when back on the G CPU because they aren't routable to any CPU. See bug 667720 for details. */ max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f; max_irq = (max_irq + 1) * 32; for (i = 32; i < max_irq; i += 4) { u32 val = 0x01010101; #ifdef CONFIG_GIC_SET_MULTIPLE_CPUS unsigned int irq; for (irq = i; irq < (i + 4); irq++) { struct cpumask mask; struct irq_desc *desc = irq_to_desc(irq); if (desc && desc->affinity_hint && desc->irq_data.affinity) { if (cpumask_and(&mask, desc->affinity_hint, desc->irq_data.affinity)) val |= (*cpumask_bits(&mask) & 0xff) << ((irq & 3) * 8); } } #endif writel(val, gic_base + GIC_DIST_TARGET + i * 4 / 4); } } void tegra_cluster_switch_epilog(unsigned int flags) { u32 reg; /* Make sure the switch and immediate flags are cleared in the flow controller to prevent undesirable side-effects for future users of the flow controller. */ reg = readl(FLOW_CTRL_CPU_CSR(0)); reg &= ~(FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE | FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER); writel(reg, FLOW_CTRL_CPU_CSR(0)); /* Perform post-switch LP=>G clean-up */ if (!is_lp_cluster()) { cluster_switch_epilog_actlr(); cluster_switch_epilog_gic(); } #if DEBUG_CLUSTER_SWITCH { /* FIXME: clock functions below are taking mutex */ struct clk *c = tegra_get_clock_by_name( is_lp_cluster() ? "cpu_lp" : "cpu_g"); DEBUG_CLUSTER(("%s: %s freq %lu\r\n", __func__, is_lp_cluster() ? "LP" : "G", clk_get_rate(c))); } #endif } int tegra_cluster_control(unsigned int us, unsigned int flags) { static ktime_t last_g2lp; unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK; unsigned int current_cluster = is_lp_cluster() ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G; unsigned long irq_flags; if ((target_cluster == TEGRA_POWER_CLUSTER_MASK) || !target_cluster) return -EINVAL; if (num_online_cpus() > 1) return -EBUSY; if ((current_cluster == target_cluster) && !(flags & TEGRA_POWER_CLUSTER_FORCE)) return -EEXIST; if (target_cluster == TEGRA_POWER_CLUSTER_G) if (!is_g_cluster_present()) return -EPERM; trace_power_start(POWER_PSTATE, target_cluster, 0); if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE) us = 0; DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__, (flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2, is_lp_cluster() ? "LP" : "G", (target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP", (flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "", (flags & TEGRA_POWER_CLUSTER_FORCE) ? 
"force" : "", us)); local_irq_save(irq_flags); if (current_cluster != target_cluster && !timekeeping_suspended) { ktime_t now = ktime_get(); if (target_cluster == TEGRA_POWER_CLUSTER_G) { s64 t = ktime_to_us(ktime_sub(now, last_g2lp)); s64 t_off = tegra_cpu_power_off_time(); if (t_off > t) udelay((unsigned int)(t_off - t)); tegra_dvfs_rail_on(tegra_cpu_rail, now); } else { last_g2lp = now; tegra_dvfs_rail_off(tegra_cpu_rail, now); } } if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) { if (us) tegra_lp2_set_trigger(us); tegra_cluster_switch_prolog(flags); tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags); tegra_cluster_switch_epilog(flags); if (us) tegra_lp2_set_trigger(0); } else { tegra_set_cpu_in_lp2(0); cpu_pm_enter(); tegra_idle_lp2_last(0, flags); cpu_pm_exit(); tegra_clear_cpu_in_lp2(0); } local_irq_restore(irq_flags); DEBUG_CLUSTER(("%s: %s\r\n", __func__, is_lp_cluster() ? "LP" : "G")); return 0; } #endif #ifdef CONFIG_PM_SLEEP void tegra_lp0_suspend_mc(void) { /* Since memory frequency after LP0 is restored to boot rate mc timing is saved during init, not on entry to LP0. Keep this hook just in case, anyway */ } void tegra_lp0_resume_mc(void) { tegra_mc_timing_restore(); } void tegra_exit_lp_mode(void) { unsigned int flags; printk("tegra_exit_lp_mod+\n"); if (is_lp_cluster()) { flags = TEGRA_POWER_CLUSTER_G; flags |= TEGRA_POWER_CLUSTER_IMMEDIATE; tegra_cluster_control(0, flags); } printk("tegra_exit_lp_mod-\n"); } void tegra_enter_lp_mode(void) { unsigned int flags; printk("tegra_enter_lp_mod+\n"); if (!is_lp_cluster()) { flags = TEGRA_POWER_CLUSTER_LP; flags |= TEGRA_POWER_CLUSTER_IMMEDIATE; tegra_cluster_control(0, flags); } printk("tegra_enter_lp_mod-\n"); } void tegra_lp0_cpu_mode(bool enter) { static bool entered_on_g = false; unsigned int flags; if (enter) entered_on_g = !is_lp_cluster(); if (entered_on_g) { flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G; flags |= TEGRA_POWER_CLUSTER_IMMEDIATE; tegra_cluster_control(0, flags); } } #endif
Galaxian-Soup/android_kernel_asus_tf300t
arch/arm/mach-tegra/pm-t3.c
C
gpl-2.0
12,299
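cluster_switch_prolog_clock() in the file above manipulates per-CPU reset and clock-enable bits through the CPU_RESET()/CPU_CLOCK() macros, where CPU_RESET(cpu) replicates one bit into each of four reset-domain nibbles. The small host-side sketch below just prints the masks those macros produce for CPUs 1-3, which is what the code writes to CAR_RST_CPUG_CMPLX_SET and CAR_CLK_CPUG_CMPLX_SET; the macro definitions are copied from the file, everything else is illustrative.

/* Sketch of the per-CPU mask arithmetic used by
 * cluster_switch_prolog_clock(): CPU_RESET(cpu) sets one bit in each of
 * four reset-domain nibbles, CPU_CLOCK(cpu) one clock-enable bit. */
#include <stdio.h>

#define CPU_CLOCK(cpu) (0x1u << (8 + (cpu)))
#define CPU_RESET(cpu) (0x1111ul << (cpu))

int main(void)
{
        /* Hold G CPUs 1-3 in reset, as done before a cluster switch. */
        unsigned long rst = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3);
        /* Gate the clocks of G CPUs 1-3. */
        unsigned int clk = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3);

        printf("RST_CPUG_CMPLX_SET mask: 0x%05lx\n", rst);   /* 0x0eeee */
        printf("CLK_CPUG_CMPLX_SET mask: 0x%04x\n", clk);    /* 0x0e00  */
        return 0;
}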
/* * Copyright (c) 2006-2021, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2006-03-13 Bernard the first version */ #include <rtthread.h> #include <sep4020.h> /** * @addtogroup SEP4020 */ /*@{*/ /** * This function will initialize the thread stack * * @param tentry the entry point of the thread * @param parameter the parameter passed to the entry * @param stack_addr the starting stack address * @param texit the function that will be called when the thread exits * * @return stack address */ rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter, rt_uint8_t *stack_addr, void *texit) { rt_uint32_t *stk; stack_addr += sizeof(rt_uint32_t); stack_addr = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)stack_addr, 8); stk = (rt_uint32_t *)stack_addr; *(--stk) = (rt_uint32_t)tentry; /* entry point */ *(--stk) = (rt_uint32_t)texit; /* lr */ *(--stk) = 0xdeadbeef; /* r12 */ *(--stk) = 0xdeadbeef; /* r11 */ *(--stk) = 0xdeadbeef; /* r10 */ *(--stk) = 0xdeadbeef; /* r9 */ *(--stk) = 0xdeadbeef; /* r8 */ *(--stk) = 0xdeadbeef; /* r7 */ *(--stk) = 0xdeadbeef; /* r6 */ *(--stk) = 0xdeadbeef; /* r5 */ *(--stk) = 0xdeadbeef; /* r4 */ *(--stk) = 0xdeadbeef; /* r3 */ *(--stk) = 0xdeadbeef; /* r2 */ *(--stk) = 0xdeadbeef; /* r1 */ *(--stk) = (rt_uint32_t)parameter; /* r0 : argument */ *(--stk) = Mode_SVC; /* cpsr */ *(--stk) = Mode_SVC; /* spsr */ /* return the task's current stack address */ return (rt_uint8_t *)stk; } /*@}*/
grissiom/rt-thread
libcpu/arm/sep4020/stack.c
C
gpl-2.0
1,888
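rt_hw_stack_init() above builds the first context frame a new thread is switched into. The host-side sketch below lays down the same 17-word frame with placeholder addresses so the slot order (pc, lr, r12..r1, r0, cpsr, spsr) is visible; the MODE_SVC value (0x13) is an assumption about what Mode_SVC expands to in sep4020.h, and the code is plain C for any host, not RT-Thread.

/* Host-side sketch of the initial frame rt_hw_stack_init() lays down.
 * The placeholder addresses and MODE_SVC value are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define MODE_SVC 0x13u   /* assumed value of Mode_SVC (ARM SVC mode bits) */

int main(void)
{
        uint32_t frame[18];
        uint32_t *stk = frame + 18;     /* pretend this is the stack top */

        *(--stk) = 0x30001000u;         /* entry point (pc), placeholder */
        *(--stk) = 0x30002000u;         /* lr: thread-exit hook, placeholder */
        for (int r = 12; r >= 1; r--)
                *(--stk) = 0xdeadbeef;  /* r12..r1 scratch pattern */
        *(--stk) = 0x12345678u;         /* r0: the thread's parameter */
        *(--stk) = MODE_SVC;            /* cpsr */
        *(--stk) = MODE_SVC;            /* spsr */

        printf("frame is %td words deep; new stack pointer = &frame[%td]\n",
               frame + 18 - stk, stk - frame);
        return 0;
}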
/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ /*** This file is part of systemd. Copyright 2010 Lennart Poettering systemd is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. systemd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with systemd; If not, see <http://www.gnu.org/licenses/>. ***/ #include <unistd.h> #include <stdio.h> #include <errno.h> #include <string.h> #include <stdlib.h> #ifdef HAVE_SELINUX #include <selinux/selinux.h> #endif #include "selinux-setup.h" #include "selinux-util.h" #include "label.h" #include "mount-setup.h" #include "macro.h" #include "util.h" #include "log.h" #ifdef HAVE_SELINUX static int null_log(int type, const char *fmt, ...) { return 0; } #endif int selinux_setup(bool *loaded_policy) { #ifdef HAVE_SELINUX int enforce = 0; usec_t before_load, after_load; security_context_t con; int r; union selinux_callback cb; bool initialized = false; assert(loaded_policy); /* Turn off all of SELinux' own logging, we want to do that */ cb.func_log = null_log; selinux_set_callback(SELINUX_CB_LOG, cb); /* Don't load policy in the initrd if we don't appear to have * it. For the real root, we check below if we've already * loaded policy, and return gracefully. */ if (in_initrd() && access(selinux_path(), F_OK) < 0) return 0; /* Already initialized by somebody else? */ r = getcon_raw(&con); if (r == 0) { initialized = !streq(con, "kernel"); freecon(con); } /* Make sure we have no fds open while loading the policy and * transitioning */ log_close(); /* Now load the policy */ before_load = now(CLOCK_MONOTONIC); r = selinux_init_load_policy(&enforce); if (r == 0) { char timespan[FORMAT_TIMESPAN_MAX]; char *label; retest_selinux(); /* Transition to the new context */ r = label_get_create_label_from_exe(SYSTEMD_BINARY_PATH, &label); if (r < 0 || label == NULL) { log_open(); log_error("Failed to compute init label, ignoring."); } else { r = setcon(label); log_open(); if (r < 0) log_error("Failed to transition into init label '%s', ignoring.", label); label_free(label); } after_load = now(CLOCK_MONOTONIC); log_info("Successfully loaded SELinux policy in %s.", format_timespan(timespan, sizeof(timespan), after_load - before_load, 0)); *loaded_policy = true; } else { log_open(); if (enforce > 0) { if (!initialized) { log_error("Failed to load SELinux policy. Freezing."); return -EIO; } log_warning("Failed to load new SELinux policy. Continuing with old policy."); } else log_debug("Unable to load SELinux policy. Ignoring."); } #endif return 0; }
flonatel/systemd-pne1
src/core/selinux-setup.c
C
gpl-2.0
3,884
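selinux_setup() above brackets the policy load with monotonic timestamps (before_load/after_load) and logs the elapsed time. The snippet below shows the same pattern using plain clock_gettime(CLOCK_MONOTONIC) in place of systemd's now()/format_timespan() helpers; the usleep() call merely stands in for the expensive operation being timed.

/* Sketch of the policy-load timing pattern: take a monotonic timestamp
 * before and after the expensive call and report the delta in usec. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_usec(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

int main(void)
{
        long long before_load = now_usec();

        usleep(50 * 1000);              /* stand-in for the policy load */

        long long after_load = now_usec();
        printf("loaded in %lld us\n", after_load - before_load);
        return 0;
}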
/* This testcase is part of GDB, the GNU debugger. Contributed by Intel Corp. <keven.boell@intel.com> Copyright 2014-2017 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ int func (int n) { int vla[n], i; for (i = 0; i < n; i++) vla[i] = i; return n; /* vla-filled */ } int main (void) { func (5); return 0; }
totalspectrum/binutils-propeller
gdb/testsuite/gdb.mi/vla.c
C
gpl-2.0
982
/* * aQuantia Corporation Network Driver * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. */ /* File hw_atl_utils.c: Definition of common functions for Atlantic hardware * abstraction layer. */ #include "../aq_hw.h" #include "../aq_hw_utils.h" #include "../aq_pci_func.h" #include "../aq_ring.h" #include "../aq_vec.h" #include "hw_atl_utils.h" #include "hw_atl_llh.h" #include <linux/random.h> #define HW_ATL_UCP_0X370_REG 0x0370U #define HW_ATL_FW_SM_RAM 0x2U #define HW_ATL_MPI_CONTROL_ADR 0x0368U #define HW_ATL_MPI_STATE_ADR 0x036CU #define HW_ATL_MPI_STATE_MSK 0x00FFU #define HW_ATL_MPI_STATE_SHIFT 0U #define HW_ATL_MPI_SPEED_MSK 0xFFFFU #define HW_ATL_MPI_SPEED_SHIFT 16U static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt) { int err = 0; AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM) == 1U, 1U, 10000U); if (err < 0) { bool is_locked; reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); if (!is_locked) { err = -ETIME; goto err_exit; } } aq_hw_write_reg(self, 0x00000208U, a); for (++cnt; --cnt;) { u32 i = 0U; aq_hw_write_reg(self, 0x00000200U, 0x00008000U); for (i = 1024U; (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { } *(p++) = aq_hw_read_reg(self, 0x0000020CU); } reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); err_exit: return err; } static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt) { int err = 0; bool is_locked; is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); if (!is_locked) { err = -ETIME; goto err_exit; } aq_hw_write_reg(self, 0x00000208U, a); for (++cnt; --cnt;) { u32 i = 0U; aq_hw_write_reg(self, 0x0000020CU, *(p++)); aq_hw_write_reg(self, 0x00000200U, 0xC000U); for (i = 1024U; (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { } } reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); err_exit: return err; } static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual) { int err = 0; const u32 dw_major_mask = 0xff000000U; const u32 dw_minor_mask = 0x00ffffffU; err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0; if (err < 0) goto err_exit; err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ? 
-EOPNOTSUPP : 0; err_exit: return err; } static int hw_atl_utils_init_ucp(struct aq_hw_s *self, struct aq_hw_caps_s *aq_hw_caps) { int err = 0; if (!aq_hw_read_reg(self, 0x370U)) { unsigned int rnd = 0U; unsigned int ucp_0x370 = 0U; get_random_bytes(&rnd, sizeof(unsigned int)); ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd); aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); } reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U); /* check 10 times by 1ms */ AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr = aq_hw_read_reg(self, 0x360U)), 1000U, 10U); err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected, aq_hw_read_reg(self, 0x18U)); if (err < 0) pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n", AQ_CFG_DRV_NAME, aq_hw_caps->fw_ver_expected, aq_hw_read_reg(self, 0x18U)); return err; } #define HW_ATL_RPC_CONTROL_ADR 0x0338U #define HW_ATL_RPC_STATE_ADR 0x033CU struct aq_hw_atl_utils_fw_rpc_tid_s { union { u32 val; struct { u16 tid; u16 len; }; }; }; #define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL) static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) { int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; if (!IS_CHIP_FEATURE(MIPS)) { err = -1; goto err_exit; } err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr, (u32 *)(void *)&PHAL_ATLANTIC->rpc, (rpc_size + sizeof(u32) - sizeof(u8)) / sizeof(u32)); if (err < 0) goto err_exit; sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid); sw.len = (u16)rpc_size; aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val); err_exit: return err; } static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, struct hw_aq_atl_utils_fw_rpc **rpc) { int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; struct aq_hw_atl_utils_fw_rpc_tid_s fw; do { sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR); PHAL_ATLANTIC->rpc_tid = sw.tid; AQ_HW_WAIT_FOR(sw.tid == (fw.val = aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR), fw.tid), 1000U, 100U); if (err < 0) goto err_exit; if (fw.len == 0xFFFFU) { err = hw_atl_utils_fw_rpc_call(self, sw.len); if (err < 0) goto err_exit; } } while (sw.tid != fw.tid || 0xFFFFU == fw.len); if (err < 0) goto err_exit; if (rpc) { if (fw.len) { err = hw_atl_utils_fw_downld_dwords(self, PHAL_ATLANTIC->rpc_addr, (u32 *)(void *) &PHAL_ATLANTIC->rpc, (fw.len + sizeof(u32) - sizeof(u8)) / sizeof(u32)); if (err < 0) goto err_exit; } *rpc = &PHAL_ATLANTIC->rpc; } err_exit: return err; } static int hw_atl_utils_mpi_create(struct aq_hw_s *self, struct aq_hw_caps_s *aq_hw_caps) { int err = 0; err = hw_atl_utils_init_ucp(self, aq_hw_caps); if (err < 0) goto err_exit; err = hw_atl_utils_fw_rpc_init(self); if (err < 0) goto err_exit; err_exit: return err; } void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, struct hw_aq_atl_utils_mbox *pmbox) { int err = 0; err = hw_atl_utils_fw_downld_dwords(self, PHAL_ATLANTIC->mbox_addr, (u32 *)(void *)pmbox, sizeof(*pmbox) / sizeof(u32)); if (err < 0) goto err_exit; if (pmbox != &PHAL_ATLANTIC->mbox) memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox)); if (IS_CHIP_FEATURE(REVISION_A0)) { unsigned int mtu = self->aq_nic_cfg ? 
self->aq_nic_cfg->mtu : 1514U; pmbox->stats.ubrc = pmbox->stats.uprc * mtu; pmbox->stats.ubtc = pmbox->stats.uptc * mtu; pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc); } else { pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self); } err_exit:; } int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, enum hal_atl_utils_fw_state_e state) { u32 ucp_0x368 = 0; ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state; aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368); return 0; } void hw_atl_utils_mpi_set(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state, u32 speed) { int err = 0; u32 transaction_id = 0; if (state == MPI_RESET) { hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); transaction_id = PHAL_ATLANTIC->mbox.transaction_id; AQ_HW_WAIT_FOR(transaction_id != (hw_atl_utils_mpi_read_stats (self, &PHAL_ATLANTIC->mbox), PHAL_ATLANTIC->mbox.transaction_id), 1000U, 100U); if (err < 0) goto err_exit; } err = hw_atl_utils_mpi_set_speed(self, speed, state); err_exit:; } int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) { u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; struct aq_hw_link_status_s *link_status = &self->aq_link_status; if (!link_speed_mask) { link_status->mbps = 0U; } else { switch (link_speed_mask) { case HAL_ATLANTIC_RATE_10G: link_status->mbps = 10000U; break; case HAL_ATLANTIC_RATE_5G: case HAL_ATLANTIC_RATE_5GSR: link_status->mbps = 5000U; break; case HAL_ATLANTIC_RATE_2GS: link_status->mbps = 2500U; break; case HAL_ATLANTIC_RATE_1G: link_status->mbps = 1000U; break; case HAL_ATLANTIC_RATE_100M: link_status->mbps = 100U; break; default: return -EBUSY; } } return 0; } int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, struct aq_hw_caps_s *aq_hw_caps, u8 *mac) { int err = 0; u32 h = 0U; u32 l = 0U; u32 mac_addr[2]; self->mmio = aq_pci_func_get_mmio(self->aq_pci_func); hw_atl_utils_hw_chip_features_init(self, &PHAL_ATLANTIC_A0->chip_features); err = hw_atl_utils_mpi_create(self, aq_hw_caps); if (err < 0) goto err_exit; if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) { unsigned int rnd = 0; unsigned int ucp_0x370 = 0; get_random_bytes(&rnd, sizeof(unsigned int)); ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd); aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); } err = hw_atl_utils_fw_downld_dwords(self, aq_hw_read_reg(self, 0x00000374U) + (40U * 4U), mac_addr, AQ_DIMOF(mac_addr)); if (err < 0) { mac_addr[0] = 0U; mac_addr[1] = 0U; err = 0; } else { mac_addr[0] = __swab32(mac_addr[0]); mac_addr[1] = __swab32(mac_addr[1]); } ether_addr_copy(mac, (u8 *)mac_addr); if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) { /* chip revision */ l = 0xE3000000U | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) | (0x00 << 16); h = 0x8001300EU; mac[5] = (u8)(0xFFU & l); l >>= 8; mac[4] = (u8)(0xFFU & l); l >>= 8; mac[3] = (u8)(0xFFU & l); l >>= 8; mac[2] = (u8)(0xFFU & l); mac[1] = (u8)(0xFFU & h); h >>= 8; mac[0] = (u8)(0xFFU & h); } err_exit: return err; } unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps) { unsigned int ret = 0U; switch (mbps) { case 100U: ret = 5U; break; case 1000U: ret = 4U; break; case 2500U: ret = 3U; break; case 5000U: ret = 1U; break; case 10000U: ret = 0U; break; default: break; } return ret; } void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) { u32 chip_features = 0U; u32 val = reg_glb_mif_id_get(self); u32 mif_rev = val & 0xFFU; if ((3U & mif_rev) == 1U) { chip_features |= 
HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | HAL_ATLANTIC_UTILS_CHIP_MIPS; } else if ((3U & mif_rev) == 2U) { chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | HAL_ATLANTIC_UTILS_CHIP_MIPS | HAL_ATLANTIC_UTILS_CHIP_TPO2 | HAL_ATLANTIC_UTILS_CHIP_RPF2; } *p = chip_features; } int hw_atl_utils_hw_deinit(struct aq_hw_s *self) { hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U); return 0; } int hw_atl_utils_hw_set_power(struct aq_hw_s *self, unsigned int power_state) { hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U); return 0; } int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, u64 *data, unsigned int *p_count) { struct hw_atl_stats_s *stats = NULL; int i = 0; hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); stats = &PHAL_ATLANTIC->mbox.stats; data[i] = stats->uprc + stats->mprc + stats->bprc; data[++i] = stats->uprc; data[++i] = stats->mprc; data[++i] = stats->bprc; data[++i] = stats->erpt; data[++i] = stats->uptc + stats->mptc + stats->bptc; data[++i] = stats->uptc; data[++i] = stats->mptc; data[++i] = stats->bptc; data[++i] = stats->ubrc; data[++i] = stats->ubtc; data[++i] = stats->mbrc; data[++i] = stats->mbtc; data[++i] = stats->bbrc; data[++i] = stats->bbtc; data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self); data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self); data[++i] = stats_rx_dma_good_octet_counterlsw_get(self); data[++i] = stats_tx_dma_good_octet_counterlsw_get(self); data[++i] = stats->dpc; if (p_count) *p_count = ++i; return 0; } static const u32 hw_atl_utils_hw_mac_regs[] = { 0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U, 0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U, 0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U, 0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U, 0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U, 0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U, 0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U, 0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U, 0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U, 0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U, 0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U, 0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U, 0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U, 0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U, 0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U, 0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U, 0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU, 0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU, 0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U, 0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U, 0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U, 0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U, }; int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff) { unsigned int i = 0U; for (i = 0; i < aq_hw_caps->mac_regs_count; i++) regs_buff[i] = aq_hw_read_reg(self, hw_atl_utils_hw_mac_regs[i]); return 0; } int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) { *fw_version = aq_hw_read_reg(self, 0x18U); return 0; }
animalcreek/linux
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
C
gpl-2.0
13,329
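hw_atl_utils_ver_match() in the file above accepts a firmware version only when the major byte matches exactly and the minor part is at least the expected one. Below is a self-contained restatement of that check; ver_match() and its plain -1 return value are local stand-ins for the driver's -EOPNOTSUPP convention, and the sample version numbers are made up.

/* Sketch of the firmware version check in hw_atl_utils_ver_match():
 * the major byte (bits 31..24) must match exactly, the minor part may
 * only be newer than or equal to the expected one. */
#include <stdio.h>
#include <stdint.h>

static int ver_match(uint32_t expected, uint32_t actual)
{
        const uint32_t major_mask = 0xff000000u;
        const uint32_t minor_mask = 0x00ffffffu;

        if (major_mask & (expected ^ actual))
                return -1;              /* different major: reject */
        if ((minor_mask & expected) > (minor_mask & actual))
                return -1;              /* older minor: reject */
        return 0;
}

int main(void)
{
        printf("%d\n", ver_match(0x01050006u, 0x0105000Au)); /*  0: ok          */
        printf("%d\n", ver_match(0x01050006u, 0x01050001u)); /* -1: minor too old */
        printf("%d\n", ver_match(0x01050006u, 0x02050006u)); /* -1: major differs */
        return 0;
}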
/* $Id: gazel.c,v 2.19.2.4 2004/01/14 16:04:48 keil Exp $ * * low level stuff for Gazel isdn cards * * Author BeWan Systems * based on source code from Karsten Keil * Copyright by BeWan Systems * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/config.h> #include <linux/init.h> #include "hisax.h" #include "isac.h" #include "hscx.h" #include "isdnl1.h" #include "ipac.h" #include <linux/pci.h> extern const char *CardType[]; static const char *gazel_revision = "$Revision: 2.19.2.4 $"; #define R647 1 #define R685 2 #define R753 3 #define R742 4 #define PLX_CNTRL 0x50 /* registre de controle PLX */ #define RESET_GAZEL 0x4 #define RESET_9050 0x40000000 #define PLX_INCSR 0x4C /* registre d'IT du 9050 */ #define INT_ISAC_EN 0x8 /* 1 = enable IT isac */ #define INT_ISAC 0x20 /* 1 = IT isac en cours */ #define INT_HSCX_EN 0x1 /* 1 = enable IT hscx */ #define INT_HSCX 0x4 /* 1 = IT hscx en cours */ #define INT_PCI_EN 0x40 /* 1 = enable IT PCI */ #define INT_IPAC_EN 0x3 /* enable IT ipac */ #define byteout(addr,val) outb(val,addr) #define bytein(addr) inb(addr) static inline u_char readreg(unsigned int adr, u_short off) { return bytein(adr + off); } static inline void writereg(unsigned int adr, u_short off, u_char data) { byteout(adr + off, data); } static inline void read_fifo(unsigned int adr, u_char * data, int size) { insb(adr, data, size); } static void write_fifo(unsigned int adr, u_char * data, int size) { outsb(adr, data, size); } static inline u_char readreg_ipac(unsigned int adr, u_short off) { register u_char ret; byteout(adr, off); ret = bytein(adr + 4); return ret; } static inline void writereg_ipac(unsigned int adr, u_short off, u_char data) { byteout(adr, off); byteout(adr + 4, data); } static inline void read_fifo_ipac(unsigned int adr, u_short off, u_char * data, int size) { byteout(adr, off); insb(adr + 4, data, size); } static void write_fifo_ipac(unsigned int adr, u_short off, u_char * data, int size) { byteout(adr, off); outsb(adr + 4, data, size); } /* Interface functions */ static u_char ReadISAC(struct IsdnCardState *cs, u_char offset) { u_short off2 = offset; switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); case R685: return (readreg(cs->hw.gazel.isac, off2)); case R753: case R742: return (readreg_ipac(cs->hw.gazel.ipac, 0x80 + off2)); } return 0; } static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) { u_short off2 = offset; switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); case R685: writereg(cs->hw.gazel.isac, off2, value); break; case R753: case R742: writereg_ipac(cs->hw.gazel.ipac, 0x80 + off2, value); break; } } static void ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size) { switch (cs->subtyp) { case R647: case R685: read_fifo(cs->hw.gazel.isacfifo, data, size); break; case R753: case R742: read_fifo_ipac(cs->hw.gazel.ipac, 0x80, data, size); break; } } static void WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size) { switch (cs->subtyp) { case R647: case R685: write_fifo(cs->hw.gazel.isacfifo, data, size); break; case R753: case R742: write_fifo_ipac(cs->hw.gazel.ipac, 0x80, data, size); break; } } static void ReadHSCXfifo(struct IsdnCardState *cs, int hscx, u_char * data, int size) { switch (cs->subtyp) { case R647: case R685: read_fifo(cs->hw.gazel.hscxfifo[hscx], data, size); break; case R753: case R742: read_fifo_ipac(cs->hw.gazel.ipac, hscx * 
0x40, data, size); break; } } static void WriteHSCXfifo(struct IsdnCardState *cs, int hscx, u_char * data, int size) { switch (cs->subtyp) { case R647: case R685: write_fifo(cs->hw.gazel.hscxfifo[hscx], data, size); break; case R753: case R742: write_fifo_ipac(cs->hw.gazel.ipac, hscx * 0x40, data, size); break; } } static u_char ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset) { u_short off2 = offset; switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); case R685: return (readreg(cs->hw.gazel.hscx[hscx], off2)); case R753: case R742: return (readreg_ipac(cs->hw.gazel.ipac, hscx * 0x40 + off2)); } return 0; } static void WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) { u_short off2 = offset; switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); case R685: writereg(cs->hw.gazel.hscx[hscx], off2, value); break; case R753: case R742: writereg_ipac(cs->hw.gazel.ipac, hscx * 0x40 + off2, value); break; } } /* * fast interrupt HSCX stuff goes here */ #define READHSCX(cs, nr, reg) ReadHSCX(cs, nr, reg) #define WRITEHSCX(cs, nr, reg, data) WriteHSCX(cs, nr, reg, data) #define READHSCXFIFO(cs, nr, ptr, cnt) ReadHSCXfifo(cs, nr, ptr, cnt) #define WRITEHSCXFIFO(cs, nr, ptr, cnt) WriteHSCXfifo(cs, nr, ptr, cnt) #include "hscx_irq.c" static irqreturn_t gazel_interrupt(int intno, void *dev_id, struct pt_regs *regs) { #define MAXCOUNT 5 struct IsdnCardState *cs = dev_id; u_char valisac, valhscx; int count = 0; u_long flags; spin_lock_irqsave(&cs->lock, flags); do { valhscx = ReadHSCX(cs, 1, HSCX_ISTA); if (valhscx) hscx_int_main(cs, valhscx); valisac = ReadISAC(cs, ISAC_ISTA); if (valisac) isac_interrupt(cs, valisac); count++; } while ((valhscx || valisac) && (count < MAXCOUNT)); WriteHSCX(cs, 0, HSCX_MASK, 0xFF); WriteHSCX(cs, 1, HSCX_MASK, 0xFF); WriteISAC(cs, ISAC_MASK, 0xFF); WriteISAC(cs, ISAC_MASK, 0x0); WriteHSCX(cs, 0, HSCX_MASK, 0x0); WriteHSCX(cs, 1, HSCX_MASK, 0x0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static irqreturn_t gazel_interrupt_ipac(int intno, void *dev_id, struct pt_regs *regs) { struct IsdnCardState *cs = dev_id; u_char ista, val; int count = 0; u_long flags; spin_lock_irqsave(&cs->lock, flags); ista = ReadISAC(cs, IPAC_ISTA - 0x80); do { if (ista & 0x0f) { val = ReadHSCX(cs, 1, HSCX_ISTA); if (ista & 0x01) val |= 0x01; if (ista & 0x04) val |= 0x02; if (ista & 0x08) val |= 0x04; if (val) { hscx_int_main(cs, val); } } if (ista & 0x20) { val = 0xfe & ReadISAC(cs, ISAC_ISTA); if (val) { isac_interrupt(cs, val); } } if (ista & 0x10) { val = 0x01; isac_interrupt(cs, val); } ista = ReadISAC(cs, IPAC_ISTA - 0x80); count++; } while ((ista & 0x3f) && (count < MAXCOUNT)); WriteISAC(cs, IPAC_MASK - 0x80, 0xFF); WriteISAC(cs, IPAC_MASK - 0x80, 0xC0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void release_io_gazel(struct IsdnCardState *cs) { unsigned int i; switch (cs->subtyp) { case R647: for (i = 0x0000; i < 0xC000; i += 0x1000) release_region(i + cs->hw.gazel.hscx[0], 16); release_region(0xC000 + cs->hw.gazel.hscx[0], 1); break; case R685: release_region(cs->hw.gazel.hscx[0], 0x100); release_region(cs->hw.gazel.cfg_reg, 0x80); break; case R753: release_region(cs->hw.gazel.ipac, 0x8); release_region(cs->hw.gazel.cfg_reg, 0x80); break; case R742: release_region(cs->hw.gazel.ipac, 8); break; } } static int reset_gazel(struct IsdnCardState *cs) { unsigned long plxcntrl, addr = cs->hw.gazel.cfg_reg; switch (cs->subtyp) { case R647: writereg(addr, 0, 0); 
HZDELAY(10); writereg(addr, 0, 1); HZDELAY(2); break; case R685: plxcntrl = inl(addr + PLX_CNTRL); plxcntrl |= (RESET_9050 + RESET_GAZEL); outl(plxcntrl, addr + PLX_CNTRL); plxcntrl &= ~(RESET_9050 + RESET_GAZEL); HZDELAY(4); outl(plxcntrl, addr + PLX_CNTRL); HZDELAY(10); outb(INT_ISAC_EN + INT_HSCX_EN + INT_PCI_EN, addr + PLX_INCSR); break; case R753: plxcntrl = inl(addr + PLX_CNTRL); plxcntrl |= (RESET_9050 + RESET_GAZEL); outl(plxcntrl, addr + PLX_CNTRL); plxcntrl &= ~(RESET_9050 + RESET_GAZEL); WriteISAC(cs, IPAC_POTA2 - 0x80, 0x20); HZDELAY(4); outl(plxcntrl, addr + PLX_CNTRL); HZDELAY(10); WriteISAC(cs, IPAC_POTA2 - 0x80, 0x00); WriteISAC(cs, IPAC_ACFG - 0x80, 0xff); WriteISAC(cs, IPAC_AOE - 0x80, 0x0); WriteISAC(cs, IPAC_MASK - 0x80, 0xff); WriteISAC(cs, IPAC_CONF - 0x80, 0x1); outb(INT_IPAC_EN + INT_PCI_EN, addr + PLX_INCSR); WriteISAC(cs, IPAC_MASK - 0x80, 0xc0); break; case R742: WriteISAC(cs, IPAC_POTA2 - 0x80, 0x20); HZDELAY(4); WriteISAC(cs, IPAC_POTA2 - 0x80, 0x00); WriteISAC(cs, IPAC_ACFG - 0x80, 0xff); WriteISAC(cs, IPAC_AOE - 0x80, 0x0); WriteISAC(cs, IPAC_MASK - 0x80, 0xff); WriteISAC(cs, IPAC_CONF - 0x80, 0x1); WriteISAC(cs, IPAC_MASK - 0x80, 0xc0); break; } return (0); } static int Gazel_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_gazel(cs); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_RELEASE: release_io_gazel(cs); return (0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); inithscxisac(cs, 1); if ((cs->subtyp==R647)||(cs->subtyp==R685)) { int i; for (i=0;i<(2+MAX_WAITING_CALLS);i++) { cs->bcs[i].hw.hscx.tsaxr0 = 0x1f; cs->bcs[i].hw.hscx.tsaxr1 = 0x23; } } spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_TEST: return (0); } return (0); } static int reserve_regions(struct IsdnCard *card, struct IsdnCardState *cs) { unsigned int i, j, base = 0, adr = 0, len = 0; switch (cs->subtyp) { case R647: base = cs->hw.gazel.hscx[0]; if (!request_region(adr = (0xC000 + base), len = 1, "gazel")) goto error; for (i = 0x0000; i < 0xC000; i += 0x1000) { if (!request_region(adr = (i + base), len = 16, "gazel")) goto error; } if (i != 0xC000) { for (j = 0; j < i; j+= 0x1000) release_region(j + base, 16); release_region(0xC000 + base, 1); goto error; } break; case R685: if (!request_region(adr = cs->hw.gazel.hscx[0], len = 0x100, "gazel")) goto error; if (!request_region(adr = cs->hw.gazel.cfg_reg, len = 0x80, "gazel")) { release_region(cs->hw.gazel.hscx[0],0x100); goto error; } break; case R753: if (!request_region(adr = cs->hw.gazel.ipac, len = 0x8, "gazel")) goto error; if (!request_region(adr = cs->hw.gazel.cfg_reg, len = 0x80, "gazel")) { release_region(cs->hw.gazel.ipac, 8); goto error; } break; case R742: if (!request_region(adr = cs->hw.gazel.ipac, len = 0x8, "gazel")) goto error; break; } return 0; error: printk(KERN_WARNING "Gazel: %s io ports 0x%x-0x%x already in use\n", CardType[cs->typ], adr, adr + len); return 1; } static int __init setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs) { printk(KERN_INFO "Gazel: ISA PnP card automatic recognition\n"); // we got an irq parameter, assume it is an ISA card // R742 decodes address even in not started... 
// R647 returns FF if not present or not started // eventually needs improvement if (readreg_ipac(card->para[1], IPAC_ID) == 1) cs->subtyp = R742; else cs->subtyp = R647; setup_isac(cs); cs->hw.gazel.cfg_reg = card->para[1] + 0xC000; cs->hw.gazel.ipac = card->para[1]; cs->hw.gazel.isac = card->para[1] + 0x8000; cs->hw.gazel.hscx[0] = card->para[1]; cs->hw.gazel.hscx[1] = card->para[1] + 0x4000; cs->irq = card->para[0]; cs->hw.gazel.isacfifo = cs->hw.gazel.isac; cs->hw.gazel.hscxfifo[0] = cs->hw.gazel.hscx[0]; cs->hw.gazel.hscxfifo[1] = cs->hw.gazel.hscx[1]; switch (cs->subtyp) { case R647: printk(KERN_INFO "Gazel: Card ISA R647/R648 found\n"); cs->dc.isac.adf2 = 0x87; printk(KERN_INFO "Gazel: config irq:%d isac:0x%X cfg:0x%X\n", cs->irq, cs->hw.gazel.isac, cs->hw.gazel.cfg_reg); printk(KERN_INFO "Gazel: hscx A:0x%X hscx B:0x%X\n", cs->hw.gazel.hscx[0], cs->hw.gazel.hscx[1]); break; case R742: printk(KERN_INFO "Gazel: Card ISA R742 found\n"); test_and_set_bit(HW_IPAC, &cs->HW_Flags); printk(KERN_INFO "Gazel: config irq:%d ipac:0x%X\n", cs->irq, cs->hw.gazel.ipac); break; } return (0); } static struct pci_dev *dev_tel __initdata = NULL; static int __init setup_gazelpci(struct IsdnCardState *cs) { u_int pci_ioaddr0 = 0, pci_ioaddr1 = 0; u_char pci_irq = 0, found; u_int nbseek, seekcard; printk(KERN_WARNING "Gazel: PCI card automatic recognition\n"); found = 0; seekcard = PCI_DEVICE_ID_PLX_R685; for (nbseek = 0; nbseek < 4; nbseek++) { if ((dev_tel = pci_find_device(PCI_VENDOR_ID_PLX, seekcard, dev_tel))) { if (pci_enable_device(dev_tel)) return 1; pci_irq = dev_tel->irq; pci_ioaddr0 = pci_resource_start(dev_tel, 1); pci_ioaddr1 = pci_resource_start(dev_tel, 2); found = 1; } if (found) break; else { switch (seekcard) { case PCI_DEVICE_ID_PLX_R685: seekcard = PCI_DEVICE_ID_PLX_R753; break; case PCI_DEVICE_ID_PLX_R753: seekcard = PCI_DEVICE_ID_PLX_DJINN_ITOO; break; case PCI_DEVICE_ID_PLX_DJINN_ITOO: seekcard = PCI_DEVICE_ID_PLX_OLITEC; break; } } } if (!found) { printk(KERN_WARNING "Gazel: No PCI card found\n"); return (1); } if (!pci_irq) { printk(KERN_WARNING "Gazel: No IRQ for PCI card found\n"); return 1; } cs->hw.gazel.pciaddr[0] = pci_ioaddr0; cs->hw.gazel.pciaddr[1] = pci_ioaddr1; setup_isac(cs); pci_ioaddr1 &= 0xfffe; cs->hw.gazel.cfg_reg = pci_ioaddr0 & 0xfffe; cs->hw.gazel.ipac = pci_ioaddr1; cs->hw.gazel.isac = pci_ioaddr1 + 0x80; cs->hw.gazel.hscx[0] = pci_ioaddr1; cs->hw.gazel.hscx[1] = pci_ioaddr1 + 0x40; cs->hw.gazel.isacfifo = cs->hw.gazel.isac; cs->hw.gazel.hscxfifo[0] = cs->hw.gazel.hscx[0]; cs->hw.gazel.hscxfifo[1] = cs->hw.gazel.hscx[1]; cs->irq = pci_irq; cs->irq_flags |= SA_SHIRQ; switch (seekcard) { case PCI_DEVICE_ID_PLX_R685: printk(KERN_INFO "Gazel: Card PCI R685 found\n"); cs->subtyp = R685; cs->dc.isac.adf2 = 0x87; printk(KERN_INFO "Gazel: config irq:%d isac:0x%X cfg:0x%X\n", cs->irq, cs->hw.gazel.isac, cs->hw.gazel.cfg_reg); printk(KERN_INFO "Gazel: hscx A:0x%X hscx B:0x%X\n", cs->hw.gazel.hscx[0], cs->hw.gazel.hscx[1]); break; case PCI_DEVICE_ID_PLX_R753: case PCI_DEVICE_ID_PLX_DJINN_ITOO: case PCI_DEVICE_ID_PLX_OLITEC: printk(KERN_INFO "Gazel: Card PCI R753 found\n"); cs->subtyp = R753; test_and_set_bit(HW_IPAC, &cs->HW_Flags); printk(KERN_INFO "Gazel: config irq:%d ipac:0x%X cfg:0x%X\n", cs->irq, cs->hw.gazel.ipac, cs->hw.gazel.cfg_reg); break; } return (0); } int __init setup_gazel(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; char tmp[64]; u_char val; strcpy(tmp, gazel_revision); printk(KERN_INFO "Gazel: Driver Revision %s\n", HiSax_getrev(tmp));
if (cs->typ != ISDN_CTYPE_GAZEL) return (0); if (card->para[0]) { if (setup_gazelisa(card, cs)) return (0); } else { #ifdef CONFIG_PCI if (setup_gazelpci(cs)) return (0); #else printk(KERN_WARNING "Gazel: Card PCI requested and NO_PCI_BIOS, unable to config\n"); return (0); #endif /* CONFIG_PCI */ } if (reserve_regions(card, cs)) { return (0); } if (reset_gazel(cs)) { printk(KERN_WARNING "Gazel: wrong IRQ\n"); release_io_gazel(cs); return (0); } cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->BC_Read_Reg = &ReadHSCX; cs->BC_Write_Reg = &WriteHSCX; cs->BC_Send_Data = &hscx_fill_fifo; cs->cardmsg = &Gazel_card_msg; switch (cs->subtyp) { case R647: case R685: cs->irq_func = &gazel_interrupt; ISACVersion(cs, "Gazel:"); if (HscxVersion(cs, "Gazel:")) { printk(KERN_WARNING "Gazel: wrong HSCX versions check IO address\n"); release_io_gazel(cs); return (0); } break; case R742: case R753: cs->irq_func = &gazel_interrupt_ipac; val = ReadISAC(cs, IPAC_ID - 0x80); printk(KERN_INFO "Gazel: IPAC version %x\n", val); break; } return (1); }
zrafa/linuxkernel
linux-2.6.17.new/drivers/isdn/hisax/gazel.c
C
gpl-2.0
16,305
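The IPAC-based variants above (R742/R753) reach chip registers indirectly: readreg_ipac()/writereg_ipac() first latch the register offset at the base port, then move data through a window at base + 4. A minimal stand-alone sketch of that indexed-access pattern, with the I/O ports replaced by a hypothetical in-memory device (struct fake_ipac and all names below are illustrative) so it runs without hardware or kernel headers:

#include <stdio.h>

/* Simulated device: an address latch at offset 0 and a data window at
 * offset 4, backed by a register file.  The real driver talks to I/O
 * ports via byteout()/bytein() (outb/inb) instead. */
struct fake_ipac {
    unsigned char latch;
    unsigned char regs[256];
};

static void port_out(struct fake_ipac *c, unsigned off, unsigned char v)
{
    if (off == 0)
        c->latch = v;              /* select a register */
    else if (off == 4)
        c->regs[c->latch] = v;     /* write through the data window */
}

static unsigned char port_in(struct fake_ipac *c, unsigned off)
{
    return (off == 4) ? c->regs[c->latch] : 0xff;
}

/* Same two-step shape as readreg_ipac()/writereg_ipac() above. */
static void ipac_write(struct fake_ipac *c, unsigned char off, unsigned char data)
{
    port_out(c, 0, off);           /* latch the offset at the base port */
    port_out(c, 4, data);          /* then write at base + 4 */
}

static unsigned char ipac_read(struct fake_ipac *c, unsigned char off)
{
    port_out(c, 0, off);
    return port_in(c, 4);
}

int main(void)
{
    struct fake_ipac chip = { 0, { 0 } };
    ipac_write(&chip, 0x80, 0x42);   /* ISAC registers sit at 0x80+ in the driver */
    printf("reg 0x80 = 0x%02x\n", ipac_read(&chip, 0x80));
    return 0;
}

The two-step protocol is why the driver serializes all IPAC access: a second caller touching the base port between the latch write and the data access would redirect the transfer.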
/* * arch/x86/kernel/nmi-selftest.c * * Testsuite for NMI: IPIs * * Started by Don Zickus: * (using lib/locking-selftest.c as a guide) * * Copyright (C) 2011 Red Hat, Inc., Don Zickus <dzickus@redhat.com> */ #include <linux/smp.h> #include <linux/cpumask.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/percpu.h> #include <asm/apic.h> #include <asm/nmi.h> #define SUCCESS 0 #define FAILURE 1 #define TIMEOUT 2 static int __initdata nmi_fail; /* check to see if NMI IPIs work on this machine */ static DECLARE_BITMAP(nmi_ipi_mask, NR_CPUS) __initdata; static int __initdata testcase_total; static int __initdata testcase_successes; static int __initdata expected_testcase_failures; static int __initdata unexpected_testcase_failures; static int __initdata unexpected_testcase_unknowns; static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs) { unexpected_testcase_unknowns++; return NMI_HANDLED; } static void __init init_nmi_testsuite(void) { /* trap all the unknown NMIs we may generate */ register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); } static void __init cleanup_nmi_testsuite(void) { unregister_nmi_handler(NMI_UNKNOWN, "nmi_selftest_unk"); } static int __init test_nmi_ipi_callback(unsigned int val, struct pt_regs *regs) { int cpu = raw_smp_processor_id(); if (cpumask_test_and_clear_cpu(cpu, to_cpumask(nmi_ipi_mask))) return NMI_HANDLED; return NMI_DONE; } static void __init test_nmi_ipi(struct cpumask *mask) { unsigned long timeout; if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, NMI_FLAG_FIRST, "nmi_selftest")) { nmi_fail = FAILURE; return; } /* sync above data before sending NMI */ wmb(); apic->send_IPI_mask(mask, NMI_VECTOR); /* Don't wait longer than a second */ timeout = USEC_PER_SEC; while (!cpumask_empty(mask) && timeout--) udelay(1); /* What happens if we timeout, do we still unregister?? */ unregister_nmi_handler(NMI_LOCAL, "nmi_selftest"); if (!timeout) nmi_fail = TIMEOUT; return; } static void __init remote_ipi(void) { cpumask_copy(to_cpumask(nmi_ipi_mask), cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), to_cpumask(nmi_ipi_mask)); if (!cpumask_empty(to_cpumask(nmi_ipi_mask))) test_nmi_ipi(to_cpumask(nmi_ipi_mask)); } static void __init local_ipi(void) { cpumask_clear(to_cpumask(nmi_ipi_mask)); cpumask_set_cpu(smp_processor_id(), to_cpumask(nmi_ipi_mask)); test_nmi_ipi(to_cpumask(nmi_ipi_mask)); } static void __init reset_nmi(void) { nmi_fail = 0; } static void __init dotest(void (*testcase_fn)(void), int expected) { testcase_fn(); /* * Filter out expected failures: */ if (nmi_fail != expected) { unexpected_testcase_failures++; if (nmi_fail == FAILURE) printk(KERN_CONT "FAILED |"); else if (nmi_fail == TIMEOUT) printk(KERN_CONT "TIMEOUT|"); else printk(KERN_CONT "ERROR |"); dump_stack(); } else { testcase_successes++; printk(KERN_CONT " ok |"); } testcase_total++; reset_nmi(); } static inline void __init print_testname(const char *testname) { printk("%12s:", testname); } void __init nmi_selftest(void) { init_nmi_testsuite(); /* * Run the testsuite: */ printk("----------------\n"); printk("| NMI testsuite:\n"); printk("--------------------\n"); print_testname("remote IPI"); dotest(remote_ipi, SUCCESS); printk(KERN_CONT "\n"); print_testname("local IPI"); dotest(local_ipi, SUCCESS); printk(KERN_CONT "\n"); cleanup_nmi_testsuite(); if (unexpected_testcase_failures) { printk("--------------------\n"); printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! 
|\n", unexpected_testcase_failures, testcase_total); printk("-----------------------------------------------------------------\n"); } else if (expected_testcase_failures && testcase_successes) { printk("--------------------\n"); printk("%3d out of %3d testcases failed, as expected. |\n", expected_testcase_failures, testcase_total); printk("----------------------------------------------------\n"); } else if (expected_testcase_failures && !testcase_successes) { printk("--------------------\n"); printk("All %3d testcases failed, as expected. |\n", expected_testcase_failures); printk("----------------------------------------\n"); } else { printk("--------------------\n"); printk("Good, all %3d testcases passed! |\n", testcase_successes); printk("---------------------------------\n"); } }
danielgpalmer/linux-picosam9g45
arch/x86/kernel/nmi_selftest.c
C
gpl-2.0
4,533
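The dotest() harness above filters expected failures: each testcase records its outcome in a global (nmi_fail), and only a mismatch with the expected code counts against the run. A user-space sketch of the same pattern, with illustrative names and dummy testcases not taken from the kernel:

#include <stdio.h>

enum { SUCCESS = 0, FAILURE = 1, TIMEOUT = 2 };

static int test_result;            /* stands in for nmi_fail */
static int total, unexpected;

static void dotest(const char *name, void (*fn)(void), int expected)
{
    test_result = SUCCESS;
    fn();
    if (test_result != expected) { /* only unexpected outcomes are failures */
        unexpected++;
        printf("%12s: %s\n", name,
               test_result == TIMEOUT ? "TIMEOUT" : "FAILED");
    } else {
        printf("%12s: ok\n", name);
    }
    total++;
}

static void always_ok(void)   { test_result = SUCCESS; }
static void always_slow(void) { test_result = TIMEOUT; }

int main(void)
{
    dotest("trivial", always_ok, SUCCESS);
    dotest("known-slow", always_slow, TIMEOUT);  /* expected, so not counted */
    printf("%d unexpected failures out of %d testcases\n", unexpected, total);
    return 0;
}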
/* * drivers/gpu/drm/omapdrm/omap_plane.c * * Copyright (C) 2011 Texas Instruments * Author: Rob Clark <rob.clark@linaro.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include "drm_flip_work.h" #include "omap_drv.h" #include "omap_dmm_tiler.h" /* some hackery because omapdss has an 'enum omap_plane' (which would be * better named omap_plane_id).. and compiler seems unhappy about having * both a 'struct omap_plane' and 'enum omap_plane' */ #define omap_plane _omap_plane /* * plane funcs */ struct callback { void (*fxn)(void *); void *arg; }; #define to_omap_plane(x) container_of(x, struct omap_plane, base) struct omap_plane { struct drm_plane base; int id; /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */ const char *name; struct omap_overlay_info info; struct omap_drm_apply apply; /* position/orientation of scanout within the fb: */ struct omap_drm_window win; bool enabled; /* last fb that we pinned: */ struct drm_framebuffer *pinned_fb; uint32_t nformats; uint32_t formats[32]; struct omap_drm_irq error_irq; /* for deferring bo unpin's until next post_apply(): */ struct drm_flip_work unpin_work; // XXX maybe get rid of this and handle vblank in crtc too? struct callback apply_done_cb; }; static void unpin_worker(struct drm_flip_work *work, void *val) { struct omap_plane *omap_plane = container_of(work, struct omap_plane, unpin_work); struct drm_device *dev = omap_plane->base.dev; /* * omap_framebuffer_pin/unpin are always called from priv->wq, * so there's no need for locking here. */ omap_framebuffer_unpin(val); mutex_lock(&dev->mode_config.mutex); drm_framebuffer_unreference(val); mutex_unlock(&dev->mode_config.mutex); } /* update which fb (if any) is pinned for scanout */ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb) { struct omap_plane *omap_plane = to_omap_plane(plane); struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb; if (pinned_fb != fb) { int ret = 0; DBG("%p -> %p", pinned_fb, fb); if (fb) { drm_framebuffer_reference(fb); ret = omap_framebuffer_pin(fb); } if (pinned_fb) drm_flip_work_queue(&omap_plane->unpin_work, pinned_fb); if (ret) { dev_err(plane->dev->dev, "could not swap %p -> %p\n", omap_plane->pinned_fb, fb); drm_framebuffer_unreference(fb); omap_plane->pinned_fb = NULL; return ret; } omap_plane->pinned_fb = fb; } return 0; } static void omap_plane_pre_apply(struct omap_drm_apply *apply) { struct omap_plane *omap_plane = container_of(apply, struct omap_plane, apply); struct omap_drm_window *win = &omap_plane->win; struct drm_plane *plane = &omap_plane->base; struct drm_device *dev = plane->dev; struct omap_overlay_info *info = &omap_plane->info; struct drm_crtc *crtc = plane->crtc; enum omap_channel channel; bool enabled = omap_plane->enabled && crtc; bool ilace, replication; int ret; DBG("%s, enabled=%d", omap_plane->name, enabled); /* if fb has changed, pin new fb: */ update_pin(plane, enabled ? 
plane->fb : NULL); if (!enabled) { dispc_ovl_enable(omap_plane->id, false); return; } channel = omap_crtc_channel(crtc); /* update scanout: */ omap_framebuffer_update_scanout(plane->fb, win, info); DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width, info->out_height, info->screen_width); DBG("%d,%d %pad %pad", info->pos_x, info->pos_y, &info->paddr, &info->p_uv_addr); /* TODO: */ ilace = false; replication = false; dispc_ovl_set_channel_out(omap_plane->id, channel); /* and finally, update omapdss: */ ret = dispc_ovl_setup(omap_plane->id, info, replication, omap_crtc_timings(crtc), false); if (ret) { dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret); return; } dispc_ovl_enable(omap_plane->id, true); } static void omap_plane_post_apply(struct omap_drm_apply *apply) { struct omap_plane *omap_plane = container_of(apply, struct omap_plane, apply); struct drm_plane *plane = &omap_plane->base; struct omap_drm_private *priv = plane->dev->dev_private; struct omap_overlay_info *info = &omap_plane->info; struct callback cb; cb = omap_plane->apply_done_cb; omap_plane->apply_done_cb.fxn = NULL; drm_flip_work_commit(&omap_plane->unpin_work, priv->wq); if (cb.fxn) cb.fxn(cb.arg); if (omap_plane->enabled) { omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y, info->out_width, info->out_height); } } static int apply(struct drm_plane *plane) { if (plane->crtc) { struct omap_plane *omap_plane = to_omap_plane(plane); return omap_crtc_apply(plane->crtc, &omap_plane->apply); } return 0; } int omap_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h, void (*fxn)(void *), void *arg) { struct omap_plane *omap_plane = to_omap_plane(plane); struct omap_drm_window *win = &omap_plane->win; int i; /* * Check whether this plane supports the fb pixel format. * I don't think this should really be needed, but it looks like the * drm core only checks the format for planes, not for the crtc. So * when setting the format for crtc, without this check we would * get an error later when trying to program the color format into the * HW. 
*/ for (i = 0; i < plane->format_count; i++) if (fb->pixel_format == plane->format_types[i]) break; if (i == plane->format_count) { DBG("Invalid pixel format %s", drm_get_format_name(fb->pixel_format)); return -EINVAL; } win->crtc_x = crtc_x; win->crtc_y = crtc_y; win->crtc_w = crtc_w; win->crtc_h = crtc_h; /* src values are in Q16 fixed point, convert to integer: */ win->src_x = src_x >> 16; win->src_y = src_y >> 16; win->src_w = src_w >> 16; win->src_h = src_h >> 16; if (fxn) { /* omap_crtc should ensure that a new page flip * isn't permitted while there is one pending: */ BUG_ON(omap_plane->apply_done_cb.fxn); omap_plane->apply_done_cb.fxn = fxn; omap_plane->apply_done_cb.arg = arg; } plane->fb = fb; plane->crtc = crtc; return apply(plane); } static int omap_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { struct omap_plane *omap_plane = to_omap_plane(plane); omap_plane->enabled = true; if (plane->fb) drm_framebuffer_unreference(plane->fb); drm_framebuffer_reference(fb); /* omap_plane_mode_set() takes adjusted src */ switch (omap_plane->win.rotation & 0xf) { case BIT(DRM_ROTATE_90): case BIT(DRM_ROTATE_270): swap(src_w, src_h); break; } return omap_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h, NULL, NULL); } static int omap_plane_disable(struct drm_plane *plane) { struct omap_plane *omap_plane = to_omap_plane(plane); omap_plane->win.rotation = BIT(DRM_ROTATE_0); return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF); } static void omap_plane_destroy(struct drm_plane *plane) { struct omap_plane *omap_plane = to_omap_plane(plane); DBG("%s", omap_plane->name); omap_irq_unregister(plane->dev, &omap_plane->error_irq); omap_plane_disable(plane); drm_plane_cleanup(plane); drm_flip_work_cleanup(&omap_plane->unpin_work); kfree(omap_plane); } int omap_plane_dpms(struct drm_plane *plane, int mode) { struct omap_plane *omap_plane = to_omap_plane(plane); bool enabled = (mode == DRM_MODE_DPMS_ON); int ret = 0; if (enabled != omap_plane->enabled) { omap_plane->enabled = enabled; ret = apply(plane); } return ret; } /* helper to install properties which are common to planes and crtcs */ void omap_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj) { struct drm_device *dev = plane->dev; struct omap_drm_private *priv = dev->dev_private; struct drm_property *prop; if (priv->has_dmm) { prop = priv->rotation_prop; if (!prop) { const struct drm_prop_enum_list props[] = { { DRM_ROTATE_0, "rotate-0" }, { DRM_ROTATE_90, "rotate-90" }, { DRM_ROTATE_180, "rotate-180" }, { DRM_ROTATE_270, "rotate-270" }, { DRM_REFLECT_X, "reflect-x" }, { DRM_REFLECT_Y, "reflect-y" }, }; prop = drm_property_create_bitmask(dev, 0, "rotation", props, ARRAY_SIZE(props)); if (prop == NULL) return; priv->rotation_prop = prop; } drm_object_attach_property(obj, prop, 0); } prop = priv->zorder_prop; if (!prop) { prop = drm_property_create_range(dev, 0, "zorder", 0, 3); if (prop == NULL) return; priv->zorder_prop = prop; } drm_object_attach_property(obj, prop, 0); } int omap_plane_set_property(struct drm_plane *plane, struct drm_property *property, uint64_t val) { struct omap_plane *omap_plane = to_omap_plane(plane); struct omap_drm_private *priv = plane->dev->dev_private; int ret = -EINVAL; if (property == priv->rotation_prop) { DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val); 
omap_plane->win.rotation = val; ret = apply(plane); } else if (property == priv->zorder_prop) { DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val); omap_plane->info.zorder = val; ret = apply(plane); } return ret; } static const struct drm_plane_funcs omap_plane_funcs = { .update_plane = omap_plane_update, .disable_plane = omap_plane_disable, .destroy = omap_plane_destroy, .set_property = omap_plane_set_property, }; static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus) { struct omap_plane *omap_plane = container_of(irq, struct omap_plane, error_irq); DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_plane->name, irqstatus); } static const char *plane_names[] = { [OMAP_DSS_GFX] = "gfx", [OMAP_DSS_VIDEO1] = "vid1", [OMAP_DSS_VIDEO2] = "vid2", [OMAP_DSS_VIDEO3] = "vid3", }; static const uint32_t error_irqs[] = { [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW, [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW, [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW, [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW, }; /* initialize plane */ struct drm_plane *omap_plane_init(struct drm_device *dev, int id, bool private_plane) { struct omap_drm_private *priv = dev->dev_private; struct drm_plane *plane = NULL; struct omap_plane *omap_plane; struct omap_overlay_info *info; int ret; DBG("%s: priv=%d", plane_names[id], private_plane); omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL); if (!omap_plane) goto fail; ret = drm_flip_work_init(&omap_plane->unpin_work, 16, "unpin", unpin_worker); if (ret) { dev_err(dev->dev, "could not allocate unpin FIFO\n"); goto fail; } omap_plane->nformats = omap_framebuffer_get_formats( omap_plane->formats, ARRAY_SIZE(omap_plane->formats), dss_feat_get_supported_color_modes(id)); omap_plane->id = id; omap_plane->name = plane_names[id]; plane = &omap_plane->base; omap_plane->apply.pre_apply = omap_plane_pre_apply; omap_plane->apply.post_apply = omap_plane_post_apply; omap_plane->error_irq.irqmask = error_irqs[id]; omap_plane->error_irq.irq = omap_plane_error_irq; omap_irq_register(dev, &omap_plane->error_irq); drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs, omap_plane->formats, omap_plane->nformats, private_plane); omap_plane_install_properties(plane, &plane->base); /* get our starting configuration, set defaults for parameters * we don't currently use, etc: */ info = &omap_plane->info; info->rotation_type = OMAP_DSS_ROT_DMA; info->rotation = OMAP_DSS_ROT_0; info->global_alpha = 0xff; info->mirror = 0; /* Set defaults depending on whether we are a CRTC or overlay * layer. * TODO add ioctl to give userspace an API to change this.. this * will come in a subsequent patch. */ if (private_plane) omap_plane->info.zorder = 0; else omap_plane->info.zorder = id; return plane; fail: if (plane) omap_plane_destroy(plane); return NULL; }
ninjablocks/kernel-VAR-SOM-AMxx
drivers/gpu/drm/omapdrm/omap_plane.c
C
gpl-2.0
12,702
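omap_plane_mode_set() above receives source coordinates in Q16 (16.16 fixed point), as noted in its "src values are in Q16 fixed point" comment, and truncates them to integers with a right shift. A small sketch of that convention; to_q16/from_q16 are hypothetical helper names, not DRM API:

#include <stdint.h>
#include <stdio.h>

/* Pack/unpack a pixel count in 16.16 fixed point. */
static uint32_t to_q16(uint32_t pixels) { return pixels << 16; }
static uint32_t from_q16(uint32_t q16)  { return q16 >> 16; } /* truncates, as in win->src_w = src_w >> 16 */

int main(void)
{
    uint32_t src_w = to_q16(1280) | 0x8000;   /* 1280 + 32768/65536 = 1280.5 */
    printf("src_w = %u + %u/65536 -> %u whole pixels\n",
           (unsigned)(src_w >> 16), (unsigned)(src_w & 0xffff),
           (unsigned)from_q16(src_w));
    return 0;
}

The fixed-point form lets the core pass sub-pixel source rectangles to planes that can scale; hardware that cannot, like this one, simply drops the fractional 16 bits.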
#include "git-compat-util.h" #include "cache.h" #include "branch.h" #include "refs.h" #include "remote.h" #include "commit.h" #include "worktree.h" struct tracking { struct refspec spec; char *src; const char *remote; int matches; }; static int find_tracked_branch(struct remote *remote, void *priv) { struct tracking *tracking = priv; if (!remote_find_tracking(remote, &tracking->spec)) { if (++tracking->matches == 1) { tracking->src = tracking->spec.src; tracking->remote = remote->name; } else { free(tracking->spec.src); if (tracking->src) { free(tracking->src); tracking->src = NULL; } } tracking->spec.src = NULL; } return 0; } static int should_setup_rebase(const char *origin) { switch (autorebase) { case AUTOREBASE_NEVER: return 0; case AUTOREBASE_LOCAL: return origin == NULL; case AUTOREBASE_REMOTE: return origin != NULL; case AUTOREBASE_ALWAYS: return 1; } return 0; } void install_branch_config(int flag, const char *local, const char *origin, const char *remote) { const char *shortname = NULL; struct strbuf key = STRBUF_INIT; int rebasing = should_setup_rebase(origin); if (skip_prefix(remote, "refs/heads/", &shortname) && !strcmp(local, shortname) && !origin) { warning(_("Not setting branch %s as its own upstream."), local); return; } strbuf_addf(&key, "branch.%s.remote", local); git_config_set(key.buf, origin ? origin : "."); strbuf_reset(&key); strbuf_addf(&key, "branch.%s.merge", local); git_config_set(key.buf, remote); if (rebasing) { strbuf_reset(&key); strbuf_addf(&key, "branch.%s.rebase", local); git_config_set(key.buf, "true"); } strbuf_release(&key); if (flag & BRANCH_CONFIG_VERBOSE) { if (shortname) { if (origin) printf_ln(rebasing ? _("Branch %s set up to track remote branch %s from %s by rebasing.") : _("Branch %s set up to track remote branch %s from %s."), local, shortname, origin); else printf_ln(rebasing ? _("Branch %s set up to track local branch %s by rebasing.") : _("Branch %s set up to track local branch %s."), local, shortname); } else { if (origin) printf_ln(rebasing ? _("Branch %s set up to track remote ref %s by rebasing.") : _("Branch %s set up to track remote ref %s."), local, remote); else printf_ln(rebasing ? _("Branch %s set up to track local ref %s by rebasing.") : _("Branch %s set up to track local ref %s."), local, remote); } } } /* * This is called when new_ref is branched off of orig_ref, and tries * to infer the settings for branch.<new_ref>.{remote,merge} from the * config. */ static int setup_tracking(const char *new_ref, const char *orig_ref, enum branch_track track, int quiet) { struct tracking tracking; int config_flags = quiet ? 0 : BRANCH_CONFIG_VERBOSE; memset(&tracking, 0, sizeof(tracking)); tracking.spec.dst = (char *)orig_ref; if (for_each_remote(find_tracked_branch, &tracking)) return 1; if (!tracking.matches) switch (track) { case BRANCH_TRACK_ALWAYS: case BRANCH_TRACK_EXPLICIT: case BRANCH_TRACK_OVERRIDE: break; default: return 1; } if (tracking.matches > 1) return error(_("Not tracking: ambiguous information for ref %s"), orig_ref); install_branch_config(config_flags, new_ref, tracking.remote, tracking.src ? 
tracking.src : orig_ref); free(tracking.src); return 0; } int read_branch_desc(struct strbuf *buf, const char *branch_name) { char *v = NULL; struct strbuf name = STRBUF_INIT; strbuf_addf(&name, "branch.%s.description", branch_name); if (git_config_get_string(name.buf, &v)) { strbuf_release(&name); return -1; } strbuf_addstr(buf, v); free(v); strbuf_release(&name); return 0; } int validate_new_branchname(const char *name, struct strbuf *ref, int force, int attr_only) { if (strbuf_check_branch_ref(ref, name)) die(_("'%s' is not a valid branch name."), name); if (!ref_exists(ref->buf)) return 0; else if (!force && !attr_only) die(_("A branch named '%s' already exists."), ref->buf + strlen("refs/heads/")); if (!attr_only) { const char *head; unsigned char sha1[20]; head = resolve_ref_unsafe("HEAD", 0, sha1, NULL); if (!is_bare_repository() && head && !strcmp(head, ref->buf)) die(_("Cannot force update the current branch.")); } return 1; } static int check_tracking_branch(struct remote *remote, void *cb_data) { char *tracking_branch = cb_data; struct refspec query; memset(&query, 0, sizeof(struct refspec)); query.dst = tracking_branch; return !remote_find_tracking(remote, &query); } static int validate_remote_tracking_branch(char *ref) { return !for_each_remote(check_tracking_branch, ref); } static const char upstream_not_branch[] = N_("Cannot setup tracking information; starting point '%s' is not a branch."); static const char upstream_missing[] = N_("the requested upstream branch '%s' does not exist"); static const char upstream_advice[] = N_("\n" "If you are planning on basing your work on an upstream\n" "branch that already exists at the remote, you may need to\n" "run \"git fetch\" to retrieve it.\n" "\n" "If you are planning to push out a new local branch that\n" "will track its remote counterpart, you may want to use\n" "\"git push -u\" to set the upstream config as you push."); void create_branch(const char *head, const char *name, const char *start_name, int force, int reflog, int clobber_head, int quiet, enum branch_track track) { struct commit *commit; unsigned char sha1[20]; char *real_ref, msg[PATH_MAX + 20]; struct strbuf ref = STRBUF_INIT; int forcing = 0; int dont_change_ref = 0; int explicit_tracking = 0; if (track == BRANCH_TRACK_EXPLICIT || track == BRANCH_TRACK_OVERRIDE) explicit_tracking = 1; if (validate_new_branchname(name, &ref, force, track == BRANCH_TRACK_OVERRIDE || clobber_head)) { if (!force) dont_change_ref = 1; else forcing = 1; } real_ref = NULL; if (get_sha1(start_name, sha1)) { if (explicit_tracking) { if (advice_set_upstream_failure) { error(_(upstream_missing), start_name); advise(_(upstream_advice)); exit(1); } die(_(upstream_missing), start_name); } die(_("Not a valid object name: '%s'."), start_name); } switch (dwim_ref(start_name, strlen(start_name), sha1, &real_ref)) { case 0: /* Not branching from any existing branch */ if (explicit_tracking) die(_(upstream_not_branch), start_name); break; case 1: /* Unique completion -- good, only if it is a real branch */ if (!starts_with(real_ref, "refs/heads/") && validate_remote_tracking_branch(real_ref)) { if (explicit_tracking) die(_(upstream_not_branch), start_name); else real_ref = NULL; } break; default: die(_("Ambiguous object name: '%s'."), start_name); break; } if ((commit = lookup_commit_reference(sha1)) == NULL) die(_("Not a valid branch point: '%s'."), start_name); hashcpy(sha1, commit->object.oid.hash); if (forcing) snprintf(msg, sizeof msg, "branch: Reset to %s", start_name); else if 
(!dont_change_ref) snprintf(msg, sizeof msg, "branch: Created from %s", start_name); if (reflog) log_all_ref_updates = 1; if (!dont_change_ref) { struct ref_transaction *transaction; struct strbuf err = STRBUF_INIT; transaction = ref_transaction_begin(&err); if (!transaction || ref_transaction_update(transaction, ref.buf, sha1, forcing ? NULL : null_sha1, 0, msg, &err) || ref_transaction_commit(transaction, &err)) die("%s", err.buf); ref_transaction_free(transaction); strbuf_release(&err); } if (real_ref && track) setup_tracking(ref.buf + 11, real_ref, track, quiet); strbuf_release(&ref); free(real_ref); } void remove_branch_state(void) { unlink(git_path_cherry_pick_head()); unlink(git_path_revert_head()); unlink(git_path_merge_head()); unlink(git_path_merge_rr()); unlink(git_path_merge_msg()); unlink(git_path_merge_mode()); unlink(git_path_squash_msg()); } void die_if_checked_out(const char *branch) { char *existing; existing = find_shared_symref("HEAD", branch); if (existing) { skip_prefix(branch, "refs/heads/", &branch); die(_("'%s' is already checked out at '%s'"), branch, existing); } }
formorer/pkg-cgit
git/branch.c
C
gpl-2.0
8,331
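install_branch_config() above persists tracking information under the branch.<name>.remote, branch.<name>.merge, and (when rebasing is configured) branch.<name>.rebase keys, using "." as the remote for a local upstream. A plain-C sketch of the key construction; snprintf stands in for git's strbuf_addf + git_config_set, and show_branch_config is an illustrative name:

#include <stdio.h>

static void show_branch_config(const char *local, const char *origin,
                               const char *merge_ref, int rebase)
{
    char key[256];

    snprintf(key, sizeof(key), "branch.%s.remote", local);
    printf("%s = %s\n", key, origin ? origin : ".");  /* "." = this repository */

    snprintf(key, sizeof(key), "branch.%s.merge", local);
    printf("%s = %s\n", key, merge_ref);              /* full ref on the remote */

    if (rebase) {
        snprintf(key, sizeof(key), "branch.%s.rebase", local);
        printf("%s = true\n", key);
    }
}

int main(void)
{
    /* roughly the config that tracking a remote branch would write: */
    show_branch_config("topic", "origin", "refs/heads/master", 0);
    return 0;
}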
/*--------------------------------------------------------------------*/ /*--- Signal-related libc stuff. m_libcsignal.c ---*/ /*--------------------------------------------------------------------*/ /* This file is part of Valgrind, a dynamic binary instrumentation framework. Copyright (C) 2000-2017 Julian Seward jseward@acm.org This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. The GNU General Public License is contained in the file COPYING. */ #include "pub_core_basics.h" #include "pub_core_debuglog.h" #include "pub_core_vki.h" #include "pub_core_vkiscnums.h" #include "pub_core_libcbase.h" #include "pub_core_libcassert.h" #include "pub_core_syscall.h" #include "pub_core_libcsignal.h" /* self */ #if !defined(VGO_solaris) # define _VKI_MAXSIG (_VKI_NSIG - 1) #endif STATIC_ASSERT((_VKI_MAXSIG % _VKI_NSIG_BPW) != 0); /* IMPORTANT: on Darwin it is essential to use the _nocancel versions of syscalls rather than the vanilla version, if a _nocancel version is available. See docs/internals/Darwin-notes.txt for the reason why. */ /* sigemptyset, sigfullset, sigaddset and sigdelset return 0 on success and -1 on error. */ /* In the sigset routines below, be aware that _VKI_NSIG_BPW can be either 32 or 64, and hence the sig[] words can either be 32- or 64-bits. And which they are it doesn't necessarily follow from the host word size. */ /* Functions VG_(isemptysigset) and VG_(isfullsigset) check only bits that represent valid signals (i.e. signals <= _VKI_MAXSIG). The same applies for the comparison in VG_(iseqsigset). This is important because when a signal set is received from an operating system then bits which represent signals > _VKI_MAXSIG can have unexpected values for Valgrind. This is mainly specific to the Solaris kernel which clears these bits. */ Int VG_(sigfillset)( vki_sigset_t* set ) { Int i; if (set == NULL) return -1; for (i = 0; i < _VKI_NSIG_WORDS; i++) set->sig[i] = ~0; return 0; } Int VG_(sigemptyset)( vki_sigset_t* set ) { Int i; if (set == NULL) return -1; for (i = 0; i < _VKI_NSIG_WORDS; i++) set->sig[i] = 0; return 0; } Bool VG_(isemptysigset)( const vki_sigset_t* set ) { Int i; vg_assert(set != NULL); for (i = 0; i < _VKI_NSIG_WORDS; i++) { if (_VKI_NSIG_BPW * (i + 1) <= (_VKI_MAXSIG + 1)) { /* Full word check. */ if (set->sig[i] != 0) return False; } else { /* Partial word check. */ ULong mask = ((ULong)1UL << (_VKI_MAXSIG % _VKI_NSIG_BPW)) - 1; if ((set->sig[i] & mask) != 0) return False; break; } } return True; } Bool VG_(isfullsigset)( const vki_sigset_t* set ) { Int i; vg_assert(set != NULL); for (i = 0; i < _VKI_NSIG_WORDS; i++) { if (_VKI_NSIG_BPW * (i + 1) <= (_VKI_MAXSIG + 1)) { /* Full word check. */ if (set->sig[i] != ~0) return False; } else { /* Partial word check. 
*/ ULong mask = ((ULong)1UL << (_VKI_MAXSIG % _VKI_NSIG_BPW)) - 1; if ((set->sig[i] & mask) != mask) return False; break; } } return True; } Bool VG_(iseqsigset)( const vki_sigset_t* set1, const vki_sigset_t* set2 ) { Int i; vg_assert(set1 != NULL && set2 != NULL); for (i = 0; i < _VKI_NSIG_WORDS; i++) { if (_VKI_NSIG_BPW * (i + 1) <= (_VKI_MAXSIG + 1)) { /* Full word comparison. */ if (set1->sig[i] != set2->sig[i]) return False; } else { /* Partial word comparison. */ ULong mask = ((ULong)1UL << (_VKI_MAXSIG % _VKI_NSIG_BPW)) - 1; if ((set1->sig[i] & mask) != (set2->sig[i] & mask)) return False; break; } } return True; } Int VG_(sigaddset)( vki_sigset_t* set, Int signum ) { if (set == NULL) return -1; if (signum < 1 || signum > _VKI_NSIG) return -1; signum--; set->sig[signum / _VKI_NSIG_BPW] |= (1ULL << (signum % _VKI_NSIG_BPW)); return 0; } Int VG_(sigdelset)( vki_sigset_t* set, Int signum ) { if (set == NULL) return -1; if (signum < 1 || signum > _VKI_NSIG) return -1; signum--; set->sig[signum / _VKI_NSIG_BPW] &= ~(1ULL << (signum % _VKI_NSIG_BPW)); return 0; } Int VG_(sigismember) ( const vki_sigset_t* set, Int signum ) { if (set == NULL) return 0; if (signum < 1 || signum > _VKI_NSIG) return 0; signum--; if (1 & ((set->sig[signum / _VKI_NSIG_BPW]) >> (signum % _VKI_NSIG_BPW))) return 1; else return 0; } /* Add all signals in src to dst. */ void VG_(sigaddset_from_set)( vki_sigset_t* dst, const vki_sigset_t* src ) { Int i; vg_assert(dst != NULL && src != NULL); for (i = 0; i < _VKI_NSIG_WORDS; i++) dst->sig[i] |= src->sig[i]; } /* Remove all signals in src from dst. */ void VG_(sigdelset_from_set)( vki_sigset_t* dst, const vki_sigset_t* src ) { Int i; vg_assert(dst != NULL && src != NULL); for (i = 0; i < _VKI_NSIG_WORDS; i++) dst->sig[i] &= ~(src->sig[i]); } /* dst = dst `intersect` src. */ void VG_(sigintersectset)( vki_sigset_t* dst, const vki_sigset_t* src ) { Int i; vg_assert(dst != NULL && src != NULL); for (i = 0; i < _VKI_NSIG_WORDS; i++) dst->sig[i] &= src->sig[i]; } /* dst = ~src */ void VG_(sigcomplementset)( vki_sigset_t* dst, const vki_sigset_t* src ) { Int i; vg_assert(dst != NULL && src != NULL); for (i = 0; i < _VKI_NSIG_WORDS; i++) dst->sig[i] = ~ src->sig[i]; } /* The functions sigaction, sigprocmask, sigpending and sigsuspend return 0 on success and -1 on error. */ Int VG_(sigprocmask)( Int how, const vki_sigset_t* set, vki_sigset_t* oldset) { # if defined(VGO_linux) || defined(VGO_solaris) # if defined(__NR_rt_sigprocmask) SysRes res = VG_(do_syscall4)(__NR_rt_sigprocmask, how, (UWord)set, (UWord)oldset, _VKI_NSIG_WORDS * sizeof(UWord)); # else SysRes res = VG_(do_syscall3)(__NR_sigprocmask, how, (UWord)set, (UWord)oldset); # endif # elif defined(VGO_darwin) /* On Darwin, __NR_sigprocmask appears to affect the entire process, not just this thread. Hence need to use __NR___pthread_sigmask instead. */ SysRes res = VG_(do_syscall3)(__NR___pthread_sigmask, how, (UWord)set, (UWord)oldset); # else # error "Unknown OS" # endif return sr_isError(res) ? -1 : 0; } #if defined(VGO_darwin) /* A helper function for sigaction on Darwin. 
*/ static void darwin_signal_demux(void* a1, UWord a2, UWord a3, void* a4, void* a5) { VG_(debugLog)(2, "libcsignal", "PRE demux sig, a2 = %lu, signo = %lu\n", a2, a3); if (a2 == 1) ((void(*)(int))a1) (a3); else ((void(*)(int,void*,void*))a1) (a3,a4,a5); VG_(debugLog)(2, "libcsignal", "POST demux sig, a2 = %lu, signo = %lu\n", a2, a3); VG_(do_syscall2)(__NR_sigreturn, (UWord)a5, 0x1E); /* NOTREACHED */ __asm__ __volatile__("ud2"); } #endif Int VG_(sigaction) ( Int signum, const vki_sigaction_toK_t* act, vki_sigaction_fromK_t* oldact) { # if defined(VGO_linux) /* Normal case: vki_sigaction_toK_t and vki_sigaction_fromK_t are identical types. */ SysRes res = VG_(do_syscall4)(__NR_rt_sigaction, signum, (UWord)act, (UWord)oldact, _VKI_NSIG_WORDS * sizeof(UWord)); return sr_isError(res) ? -1 : 0; # elif defined(VGO_darwin) /* If we're passing a new action to the kernel, make a copy of the new action, install our own sa_tramp field in it, and ignore whatever we were provided with. This is OK because all the sigaction requests come from m_signals, and are not directly what the client program requested, so there is no chance that we will inadvertently ignore the sa_tramp field requested by the client. (In fact m_signals does ignore it when building signal frames for the client, but that's a completely different matter). If we're receiving an old action from the kernel, be very paranoid and make sure the kernel doesn't trash bits of memory that we don't expect it to. */ SysRes res; vki_sigaction_toK_t actCopy; struct { ULong before[2]; vki_sigaction_fromK_t oa; ULong after[2]; } oldactCopy; vki_sigaction_toK_t* real_act; vki_sigaction_fromK_t* real_oldact; real_act = act ? &actCopy : NULL; real_oldact = oldact ? &oldactCopy.oa : NULL; VG_(memset)(&oldactCopy, 0x55, sizeof(oldactCopy)); if (real_act) { *real_act = *act; real_act->sa_tramp = (void*)&darwin_signal_demux; } res = VG_(do_syscall3)(__NR_sigaction, signum, (UWord)real_act, (UWord)real_oldact); if (real_oldact) { vg_assert(oldactCopy.before[0] == 0x5555555555555555ULL); vg_assert(oldactCopy.before[1] == 0x5555555555555555ULL); vg_assert(oldactCopy.after[0] == 0x5555555555555555ULL); vg_assert(oldactCopy.after[1] == 0x5555555555555555ULL); *oldact = *real_oldact; } return sr_isError(res) ? -1 : 0; # elif defined(VGO_solaris) /* vki_sigaction_toK_t and vki_sigaction_fromK_t are identical types. */ SysRes res = VG_(do_syscall3)(__NR_sigaction, signum, (UWord)act, (UWord)oldact); return sr_isError(res) ? -1 : 0; # else # error "Unsupported OS" # endif } /* See explanation in pub_core_libcsignal.h. */ void VG_(convert_sigaction_fromK_to_toK)( const vki_sigaction_fromK_t* fromK, /*OUT*/vki_sigaction_toK_t* toK ) { # if defined(VGO_linux) || defined(VGO_solaris) *toK = *fromK; # elif defined(VGO_darwin) toK->ksa_handler = fromK->ksa_handler; toK->sa_tramp = NULL; /* the cause of all the difficulty */ toK->sa_mask = fromK->sa_mask; toK->sa_flags = fromK->sa_flags; # else # error "Unsupported OS" # endif } Int VG_(kill)( Int pid, Int signo ) { # if defined(VGO_linux) || defined(VGO_solaris) SysRes res = VG_(do_syscall2)(__NR_kill, pid, signo); # elif defined(VGO_darwin) SysRes res = VG_(do_syscall3)(__NR_kill, pid, signo, 1/*posix-compliant*/); # else # error "Unsupported OS" # endif return sr_isError(res) ? 
-1 : 0; } Int VG_(tkill)( Int lwpid, Int signo ) { # if defined(__NR_tkill) SysRes res = VG_(mk_SysRes_Error)(VKI_ENOSYS); res = VG_(do_syscall2)(__NR_tkill, lwpid, signo); if (sr_isError(res) && sr_Err(res) == VKI_ENOSYS) res = VG_(do_syscall2)(__NR_kill, lwpid, signo); return sr_isError(res) ? -1 : 0; # elif defined(VGO_darwin) // Note that the __pthread_kill syscall takes a Mach thread, not a pthread. SysRes res; res = VG_(do_syscall2)(__NR___pthread_kill, lwpid, signo); return sr_isError(res) ? -1 : 0; # elif defined(VGO_solaris) SysRes res; # if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL) # if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID) res = VG_(do_syscall6)(__NR_lwp_sigqueue, 0, lwpid, signo, 0, VKI_SI_LWP, 0); # else res = VG_(do_syscall5)(__NR_lwp_sigqueue, lwpid, signo, 0, VKI_SI_LWP, 0); # endif # else res = VG_(do_syscall2)(__NR_lwp_kill, lwpid, signo); # endif return sr_isError(res) ? -1 : 0; # else # error "Unsupported plat" # endif } /* ---------------------- sigtimedwait_zero ----------------------- */ /* A cut-down version of POSIX sigtimedwait: poll for pending signals mentioned in the sigset_t, and if any are present, select one arbitrarily, return its number (which must be > 0), and put auxiliary info about it in the siginfo_t, and make it not-pending-any-more. If none are pending, return zero. The _zero refers to the fact that there is zero timeout, so if no signals are pending it returns immediately. Perhaps a better name would be 'sigpoll'. Returns -1 on error, 0 if no signals pending, and n > 0 if signal n was selected. The Linux implementation is trivial: do the corresponding syscall. The Darwin implementation is horrible and probably broken in a dozen obscure ways. I suspect it's only thread-safe because V forces single-threadedness. */ /* ---------- sigtimedwait_zero: Linux ----------- */ #if defined(VGO_linux) Int VG_(sigtimedwait_zero)( const vki_sigset_t *set, vki_siginfo_t *info ) { static const struct vki_timespec zero = { 0, 0 }; SysRes res = VG_(do_syscall4)(__NR_rt_sigtimedwait, (UWord)set, (UWord)info, (UWord)&zero, sizeof(*set)); return sr_isError(res) ? -1 : sr_Res(res); } /* ---------- sigtimedwait_zero: Darwin ----------- */ #elif defined(VGO_darwin) //static void show_set ( HChar* str, const vki_sigset_t* set ) { // Int i; // VG_(printf)("%s { ", str); // for (i = 1; i <= _VKI_NSIG; i++) { // if (VG_(sigismember)(set, i)) // VG_(printf)("%u ", i); // } // VG_(printf)("}\n"); //} /* The general idea is: - use sigpending to find out which signals are pending - choose one - temporarily set its handler to sigtimedwait_zero_handler - use sigsuspend atomically unblock it and wait for the signal. Upon return, sigsuspend restores the signal mask to what it was to start with. - Restore the handler for the signal to whatever it was before. */ /* A signal handler which does nothing (it doesn't need to). It does however check that it's not handing a sync signal for which returning is meaningless. */ static void sigtimedwait_zero_handler ( Int sig ) { /* XXX this is wrong -- get rid of these. 
We could get _any_ signal here */ vg_assert(sig != VKI_SIGILL); vg_assert(sig != VKI_SIGSEGV); vg_assert(sig != VKI_SIGBUS); vg_assert(sig != VKI_SIGTRAP); /* do nothing */ } Int VG_(sigtimedwait_zero)( const vki_sigset_t *set, vki_siginfo_t *info ) { const Bool debug = False; Int i, ir; SysRes sr; vki_sigset_t pending, blocked, allbutone; vki_sigaction_toK_t sa, saved_sa2; vki_sigaction_fromK_t saved_sa; //show_set("STWZ: looking for", set); /* Find out what's pending: Darwin sigpending */ sr = VG_(do_syscall1)(__NR_sigpending, (UWord)&pending); vg_assert(!sr_isError(sr)); /* don't try for signals not in 'set' */ /* pending = pending `intersect` set */ VG_(sigintersectset)(&pending, (const vki_sigset_t*)set); /* don't try for signals not blocked at the moment */ ir = VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &blocked); vg_assert(ir == 0); /* pending = pending `intersect` blocked */ VG_(sigintersectset)(&pending, &blocked); /* decide which signal we're going to snarf */ for (i = 1; i < _VKI_NSIG; i++) if (VG_(sigismember)(&pending,i)) break; if (i == _VKI_NSIG) return 0; if (debug) VG_(debugLog)(0, "libcsignal", "sigtimedwait_zero: snarfing signal %d\n", i ); /* fetch signal i. pre: i is blocked and pending pre: we are the only thread running */ /* Set up alternative signal handler */ VG_(sigfillset)(&sa.sa_mask); sa.ksa_handler = &sigtimedwait_zero_handler; sa.sa_flags = 0; ir = VG_(sigaction)(i, &sa, &saved_sa); vg_assert(ir == 0); /* Switch signal masks and wait for the signal. This should happen immediately, since we've already established it is pending and blocked. */ VG_(sigfillset)(&allbutone); VG_(sigdelset)(&allbutone, i); /* Note: pass the sig mask by value here, not reference (!) */ vg_assert(_VKI_NSIG_WORDS == 1); sr = VG_(do_syscall3)(__NR_sigsuspend_nocancel, (UWord)allbutone.sig[0], 0,0); if (debug) VG_(debugLog)(0, "libcsignal", "sigtimedwait_zero: sigsuspend got " "res: %s %#lx\n", sr_isError(sr) ? "FAIL" : "SUCCESS", sr_isError(sr) ? sr_Err(sr) : sr_Res(sr)); vg_assert(sr_isError(sr)); vg_assert(sr_Err(sr) == VKI_EINTR); /* Restore signal's handler to whatever it was before */ VG_(convert_sigaction_fromK_to_toK)( &saved_sa, &saved_sa2 ); ir = VG_(sigaction)(i, &saved_sa2, NULL); vg_assert(ir == 0); /* This is bogus - we could get more info from the sighandler. */ VG_(memset)( info, 0, sizeof(*info) ); info->si_signo = i; return i; } #elif defined(VGO_solaris) Int VG_(sigtimedwait_zero)( const vki_sigset_t *set, vki_siginfo_t *info ) { /* Trivial as on Linux. */ static const struct vki_timespec zero = { 0, 0 }; SysRes res = VG_(do_syscall3)(__NR_sigtimedwait, (UWord)set, (UWord)info, (UWord)&zero); return sr_isError(res) ? -1 : sr_Res(res); } #else # error "Unknown OS" #endif /*--------------------------------------------------------------------*/ /*--- end ---*/ /*--------------------------------------------------------------------*/
bmerry/datagrind
coregrind/m_libcsignal.c
C
gpl-2.0
17,995
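VG_(sigaddset) and its siblings above map a 1-based signal number onto a multi-word bitmask: subtract one, index the word with signum / _VKI_NSIG_BPW, and shift by signum % _VKI_NSIG_BPW. The same arithmetic in a stand-alone sketch; the type and constant names are mine, with 64 signals over 32-bit words for concreteness:

#include <stdint.h>
#include <stdio.h>

#define NSIG   64
#define BPW    32                  /* bits per word, like _VKI_NSIG_BPW */
#define NWORDS (NSIG / BPW)

typedef struct { uint32_t sig[NWORDS]; } sigset_sketch;

static int sketch_addset(sigset_sketch *set, int signum)
{
    if (signum < 1 || signum > NSIG)
        return -1;                 /* same range check as the original */
    signum--;                      /* signals are 1-based, bits 0-based */
    set->sig[signum / BPW] |= UINT32_C(1) << (signum % BPW);
    return 0;
}

static int sketch_ismember(const sigset_sketch *set, int signum)
{
    if (signum < 1 || signum > NSIG)
        return 0;
    signum--;
    return (set->sig[signum / BPW] >> (signum % BPW)) & 1;
}

int main(void)
{
    sigset_sketch s = { { 0 } };
    sketch_addset(&s, 33);         /* signal 33 lands in bit 0 of word 1 */
    printf("33 in set: %d, word1 = 0x%08x\n",
           sketch_ismember(&s, 33), s.sig[1]);
    return 0;
}

This is also why the partial-word masks in VG_(isemptysigset) and friends matter: bits above _VKI_MAXSIG exist in the words but do not correspond to real signals.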
/* refchg.f -- translated by f2c (version 19980913). You must link the resulting object file with the libraries: -lf2c -lm (in that order) */ #include "f2c.h" /* Table of constant values */ static integer c__2 = 2; /* $Procedure REFCHG (Reference frame Change) */ /* Subroutine */ int refchg_(integer *frame1, integer *frame2, doublereal *et, doublereal *rotate) { /* System generated locals */ integer i__1, i__2, i__3, i__4, i__5, i__6, i__7; /* Builtin functions */ integer s_rnge(char *, integer, char *, integer); /* Local variables */ integer node; logical done; integer cent, this__; extern /* Subroutine */ int zznofcon_(doublereal *, integer *, integer *, integer *, integer *, char *, ftnlen); integer i__, j, frame[10]; extern /* Subroutine */ int chkin_(char *, ftnlen), ident_(doublereal *); integer class__; logical found; integer relto; extern /* Subroutine */ int xpose_(doublereal *, doublereal *), zzrxr_( doublereal *, integer *, doublereal *); extern logical failed_(void); integer cmnode; extern integer isrchi_(integer *, integer *, integer *); integer clssid; extern /* Subroutine */ int frinfo_(integer *, integer *, integer *, integer *, logical *); logical gotone; char errmsg[1840]; extern /* Subroutine */ int chkout_(char *, ftnlen), setmsg_(char *, ftnlen), errint_(char *, integer *, ftnlen), sigerr_(char *, ftnlen), rotget_(integer *, doublereal *, doublereal *, integer *, logical *); extern logical return_(void); doublereal tmprot[9] /* was [3][3] */; integer inc, get; doublereal rot[126] /* was [3][3][14] */; integer put; doublereal rot2[18] /* was [3][3][2] */; /* $ Abstract */ /* Return the transformation matrix from one */ /* frame to another. */ /* $ Disclaimer */ /* THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE */ /* CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S. */ /* GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE */ /* ADMINISTRATION (NASA). THE SOFTWARE IS TECHNOLOGY AND SOFTWARE */ /* PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS" */ /* TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY */ /* WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A */ /* PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC */ /* SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE */ /* SOFTWARE AND RELATED MATERIALS, HOWEVER USED. */ /* IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA */ /* BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT */ /* LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, */ /* INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS, */ /* REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE */ /* REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY. */ /* RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF */ /* THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY */ /* CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE */ /* ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE. */ /* $ Required_Reading */ /* None. */ /* $ Keywords */ /* FRAMES */ /* $ Declarations */ /* $ Disclaimer */ /* THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE */ /* CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S. */ /* GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE */ /* ADMINISTRATION (NASA). THE SOFTWARE IS TECHNOLOGY AND SOFTWARE */ /* PUBLICLY AVAILABLE UNDER U.S. 
EXPORT LAWS AND IS PROVIDED "AS-IS" */ /* TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY */ /* WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A */ /* PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC */ /* SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE */ /* SOFTWARE AND RELATED MATERIALS, HOWEVER USED. */ /* IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA */ /* BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT */ /* LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, */ /* INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS, */ /* REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE */ /* REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY. */ /* RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF */ /* THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY */ /* CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE */ /* ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE. */ /* Include File: SPICELIB Error Handling Parameters */ /* errhnd.inc Version 2 18-JUN-1997 (WLT) */ /* The size of the long error message was */ /* reduced from 25*80 to 23*80 so that it */ /* will be accepted by the Microsoft Power Station */ /* FORTRAN compiler which has an upper bound */ /* of 1900 for the length of a character string. */ /* errhnd.inc Version 1 29-JUL-1997 (NJB) */ /* Maximum length of the long error message: */ /* Maximum length of the short error message: */ /* End Include File: SPICELIB Error Handling Parameters */ /* $ Abstract */ /* The parameters below form an enumerated list of the recognized */ /* frame types. They are: INERTL, PCK, CK, TK, DYN. The meanings */ /* are outlined below. */ /* $ Disclaimer */ /* THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE */ /* CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S. */ /* GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE */ /* ADMINISTRATION (NASA). THE SOFTWARE IS TECHNOLOGY AND SOFTWARE */ /* PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS" */ /* TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY */ /* WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A */ /* PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC */ /* SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE */ /* SOFTWARE AND RELATED MATERIALS, HOWEVER USED. */ /* IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA */ /* BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT */ /* LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, */ /* INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS, */ /* REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE */ /* REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY. */ /* RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF */ /* THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY */ /* CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE */ /* ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE. */ /* $ Parameters */ /* INERTL an inertial frame that is listed in the routine */ /* CHGIRF and that requires no external file to */ /* compute the transformation from or to any other */ /* inertial frame. */ /* PCK is a frame that is specified relative to some */ /* INERTL frame and that has an IAU model that */ /* may be retrieved from the PCK system via a call */ /* to the routine TISBOD. */ /* CK is a frame defined by a C-kernel. 
*/ /* TK is a "text kernel" frame. These frames are offset */ /* from their associated "relative" frames by a */ /* constant rotation. */ /* DYN is a "dynamic" frame. These currently are */ /* parameterized, built-in frames where the full frame */ /* definition depends on parameters supplied via a */ /* frame kernel. */ /* ALL indicates any of the above classes. This parameter */ /* is used in APIs that fetch information about frames */ /* of a specified class. */ /* $ Author_and_Institution */ /* N.J. Bachman (JPL) */ /* W.L. Taber (JPL) */ /* $ Literature_References */ /* None. */ /* $ Version */ /* - SPICELIB Version 4.0.0, 08-MAY-2012 (NJB) */ /* The parameter ALL was added to support frame fetch APIs. */ /* - SPICELIB Version 3.0.0, 28-MAY-2004 (NJB) */ /* The parameter DYN was added to support the dynamic frame class. */ /* - SPICELIB Version 2.0.0, 12-DEC-1996 (WLT) */ /* Various unused frames types were removed and the */ /* frame time TK was added. */ /* - SPICELIB Version 1.0.0, 10-DEC-1995 (WLT) */ /* -& */ /* End of INCLUDE file frmtyp.inc */ /* $ Brief_I/O */ /* VARIABLE I/O DESCRIPTION */ /* -------- --- -------------------------------------------------- */ /* FRAME1 I the frame id-code for some reference frame */ /* FRAME2 I the frame id-code for some reference frame */ /* ET I an epoch in TDB seconds past J2000. */ /* ROTATE O a rotation matrix */ /* $ Detailed_Input */ /* FRAME1 is the frame id-code in which some positions */ /* are known. */ /* FRAME2 is the frame id-code for some frame in which you */ /* would like to represent positions. */ /* ET is the epoch at which to compute the transformation */ /* matrix. This epoch should be in TDB seconds past */ /* the ephemeris epoch of J2000. */ /* $ Detailed_Output */ /* ROTATE is a 3 x 3 rotaion matrix that can be used to */ /* transform positions relative to the frame */ /* correspsonding to frame FRAME2 to positions relative */ /* to the frame FRAME2. More explicitely, if POS is */ /* the position of some object relative to the */ /* reference frame of FRAME1 then POS2 is the position */ /* of the same object relative to FRAME2 where POS2 is */ /* computed via the subroutine call below */ /* CALL MXV ( ROTATE, POS, POS2 ) */ /* $ Parameters */ /* None. */ /* $ Exceptions */ /* 1) If either of the reference frames is unrecognized, the error */ /* SPICE(UNKNOWNFRAME) will be signalled. */ /* 2) If the auxillary information needed to compute a non-inertial */ /* frame is not available an error will be diagnosed and signalled */ /* by a routine in the call tree of this routine. */ /* $ Files */ /* None. */ /* $ Particulars */ /* This routine allows you to compute the rotation matrix */ /* between two reference frames. */ /* $ Examples */ /* Suppose that you have a position POS1 at epoch ET */ /* relative to FRAME1 and wish to determine its representation */ /* POS2 relative to FRAME2. The following subroutine calls */ /* would suffice to make this rotation. */ /* CALL REFCHG ( FRAME1, FRAME2, ET, ROTATE ) */ /* CALL MXV ( ROTATE, POS1, POS2 ) */ /* $ Restrictions */ /* None. */ /* $ Literature_References */ /* None. */ /* $ Author_and_Institution */ /* W.L. Taber (JPL) */ /* $ Version */ /* - SPICELIB Version 2.0.0, 14-DEC-2008 (NJB) */ /* Upgraded long error message associated with frame */ /* connection failure. */ /* - SPICELIB Version 1.2.0, 26-APR-2004 (NJB) */ /* Another typo was corrected in the long error message, and */ /* in a comment. 
*/ /* - SPICELIB Version 1.1.0, 23-MAY-2000 (WLT) */ /* A typo was corrected in the long error message. */ /* - SPICELIB Version 1.0.0, 9-JUL-1998 (WLT) */ /* -& */ /* $ Index_Entries */ /* Rotate positions from one frame to another */ /* -& */ /* SPICE functions */ /* Local Paramters */ /* The root of all reference frames is J2000 (Frame ID = 1). */ /* Local Variables */ /* ROT contains the rotations from FRAME1 to FRAME2 */ /* ROT(1...3,1...3,I) has the rotation from FRAME(I) */ /* to FRAME(I+1). We make extra room in ROT because we */ /* plan to add rotations beyond the obvious chain from */ /* FRAME1 to a root node. */ /* ROT2 is used to store intermediate rotation from */ /* FRAME2 to some node in the chain from FRAME1 to PCK or */ /* INERTL frames. */ /* FRAME contains the frames we transform from in going from */ /* FRAME1 to FRAME2. FRAME(1) = FRAME1 by construction. */ /* NODE counts the number of rotations needed to go */ /* from FRAME1 to FRAME2. */ /* Standard SPICE error handling. */ if (return_()) { return 0; } chkin_("REFCHG", (ftnlen)6); /* Do the obvious thing first. If FRAME1 and FRAME2 are the */ /* same then we simply return the identity matrix. */ if (*frame1 == *frame2) { ident_(rotate); chkout_("REFCHG", (ftnlen)6); return 0; } /* Now perform the obvious check to make sure that both */ /* frames are recognized. */ frinfo_(frame1, &cent, &class__, &clssid, &found); if (! found) { setmsg_("The number # is not a recognized id-code for a reference fr" "ame. ", (ftnlen)64); errint_("#", frame1, (ftnlen)1); sigerr_("SPICE(UNKNOWNFRAME)", (ftnlen)19); chkout_("REFCHG", (ftnlen)6); return 0; } frinfo_(frame2, &cent, &class__, &clssid, &found); if (! found) { setmsg_("The number # is not a recognized id-code for a reference fr" "ame. ", (ftnlen)64); errint_("#", frame2, (ftnlen)1); sigerr_("SPICE(UNKNOWNFRAME)", (ftnlen)19); chkout_("REFCHG", (ftnlen)6); return 0; } node = 1; frame[(i__1 = node - 1) < 10 && 0 <= i__1 ? i__1 : s_rnge("frame", i__1, "refchg_", (ftnlen)287)] = *frame1; found = TRUE_; /* Follow the chain of rotations until we run into */ /* one that rotates to J2000 (frame id = 1) or we hit FRAME2. */ while(frame[(i__1 = node - 1) < 10 && 0 <= i__1 ? i__1 : s_rnge("frame", i__1, "refchg_", (ftnlen)293)] != 1 && node < 10 && frame[(i__2 = node - 1) < 10 && 0 <= i__2 ? i__2 : s_rnge("frame", i__2, "refc" "hg_", (ftnlen)293)] != *frame2 && found) { /* Find out what rotation is available for this */ /* frame. */ rotget_(&frame[(i__1 = node - 1) < 10 && 0 <= i__1 ? i__1 : s_rnge( "frame", i__1, "refchg_", (ftnlen)301)], et, &rot[(i__2 = ( node * 3 + 1) * 3 - 12) < 126 && 0 <= i__2 ? i__2 : s_rnge( "rot", i__2, "refchg_", (ftnlen)301)], &frame[(i__3 = node) < 10 && 0 <= i__3 ? i__3 : s_rnge("frame", i__3, "refchg_", ( ftnlen)301)], &found); if (found) { /* We found a rotation matrix. ROT(1,1,NODE) */ /* now contains the rotation from FRAME(NODE) */ /* to FRAME(NODE+1). We need to look up the information */ /* for the next NODE. */ ++node; } } done = frame[(i__1 = node - 1) < 10 && 0 <= i__1 ? i__1 : s_rnge("frame", i__1, "refchg_", (ftnlen)317)] == 1 || frame[(i__2 = node - 1) < 10 && 0 <= i__2 ? i__2 : s_rnge("frame", i__2, "refchg_", (ftnlen) 317)] == *frame2 || ! found; while(! done) { /* The only way to get to this point is to have run out of */ /* room in the array of reference frame rotation */ /* buffers. We will now build the rotation from */ /* the previous NODE to whatever the next node in the */ /* chain is. 
We'll do this until we get to one of the */ /* root classes or we run into FRAME2. */ rotget_(&frame[(i__1 = node - 1) < 10 && 0 <= i__1 ? i__1 : s_rnge( "frame", i__1, "refchg_", (ftnlen)331)], et, &rot[(i__2 = ( node * 3 + 1) * 3 - 12) < 126 && 0 <= i__2 ? i__2 : s_rnge( "rot", i__2, "refchg_", (ftnlen)331)], &relto, &found); if (found) { /* Recall that ROT(1,1,NODE-1) contains the rotation */ /* from FRAME(NODE-1) to FRAME(NODE). We are going to replace */ /* FRAME(NODE) with the frame indicated by RELTO. This means */ /* that ROT(1,1,NODE-1) should be replaced with the */ /* rotation from FRAME(NODE) to RELTO. */ frame[(i__1 = node - 1) < 10 && 0 <= i__1 ? i__1 : s_rnge("frame", i__1, "refchg_", (ftnlen)342)] = relto; zzrxr_(&rot[(i__1 = ((node - 1) * 3 + 1) * 3 - 12) < 126 && 0 <= i__1 ? i__1 : s_rnge("rot", i__1, "refchg_", (ftnlen)343)] , &c__2, tmprot); for (i__ = 1; i__ <= 3; ++i__) { for (j = 1; j <= 3; ++j) { rot[(i__1 = i__ + (j + (node - 1) * 3) * 3 - 13) < 126 && 0 <= i__1 ? i__1 : s_rnge("rot", i__1, "refchg_", (ftnlen)347)] = tmprot[(i__2 = i__ + j * 3 - 4) < 9 && 0 <= i__2 ? i__2 : s_rnge("tmprot", i__2, "refchg_", (ftnlen)347)]; } } } /* We are done if the class of the last frame is J2000 */ /* or if the last frame is FRAME2 or if we simply couldn't get */ /* another rotation. */ done = frame[(i__1 = node - 1) < 10 && 0 <= i__1 ? i__1 : s_rnge( "frame", i__1, "refchg_", (ftnlen)357)] == 1 || frame[(i__2 = node - 1) < 10 && 0 <= i__2 ? i__2 : s_rnge("frame", i__2, "refchg_", (ftnlen)357)] == *frame2 || ! found; } /* Right now we have the following situation. We have in hand */ /* a collection of rotations between frames. (Assuming */ /* that is that NODE .GT. 1. If NODE .EQ. 1 then we have */ /* no rotations computed yet. */ /* ROT(1...3, 1...3, 1 ) rotates FRAME1 to FRAME(2) */ /* ROT(1...3, 1...3, 2 ) rotates FRAME(2) to FRAME(3) */ /* ROT(1...3, 1...3, 3 ) rotates FRAME(3) to FRAME(4) */ /* . */ /* . */ /* . */ /* ROT(1...3, 1...3, NODE-1 ) rotates FRAME(NODE-1) */ /* to FRAME(NODE) */ /* One of the following situations is true. */ /* 1) FRAME(NODE) is the root of all frames, J2000. */ /* 2) FRAME(NODE) is the same as FRAME2 */ /* 3) There is no rotation from FRAME(NODE) to another */ /* more fundamental frame. The chain of rotations */ /* from FRAME1 stops at FRAME(NODE). This means that the */ /* "frame atlas" is incomplete because we can't get to the */ /* root frame. */ /* We now have to do essentially the same thing for FRAME2. */ if (frame[(i__1 = node - 1) < 10 && 0 <= i__1 ? i__1 : s_rnge("frame", i__1, "refchg_", (ftnlen)395)] == *frame2) { /* We can handle this one immediately with the private routine */ /* ZZRXR which multiplies a series of matrices. */ i__1 = node - 1; zzrxr_(rot, &i__1, rotate); chkout_("REFCHG", (ftnlen)6); return 0; } /* We didn't luck out above. So we follow the chain of */ /* rotation for FRAME2. Note that at the moment the */ /* chain of rotations from FRAME2 to other frames */ /* does not share a node in the chain for FRAME1. */ /* ( GOTONE = .FALSE. ) . */ this__ = *frame2; gotone = FALSE_; /* First see if there is any chain to follow. */ done = this__ == 1; /* Set up the matrices ROT2(,,1) and ROT(,,2) and set up */ /* PUT and GET pointers so that we know where to GET the partial */ /* rotation from and where to PUT partial results. */ if (! done) { put = 1; get = 1; inc = 1; } /* Follow the chain of rotations until we run into */ /* one that rotates to the root frame or we land in the */ /* chain of nodes for FRAME1. 
*/ /* Note that this time we will simply keep track of the full */ /* rotation from FRAME2 to the last node. */ while(! done) { /* Find out what rotation is available for this */ /* frame. */ if (this__ == *frame2) { /* This is the first pass, just put the rotation */ /* directly into ROT2(,,PUT). */ rotget_(&this__, et, &rot2[(i__1 = (put * 3 + 1) * 3 - 12) < 18 && 0 <= i__1 ? i__1 : s_rnge("rot2", i__1, "refchg_", ( ftnlen)452)], &relto, &found); if (found) { this__ = relto; get = put; put += inc; inc = -inc; cmnode = isrchi_(&this__, &node, frame); gotone = cmnode > 0; } } else { /* Fetch the rotation into a temporary spot TMPROT */ rotget_(&this__, et, tmprot, &relto, &found); if (found) { /* Next multiply TMPROT on the right by the last partial */ /* product (in ROT2(,,GET) ). We do this in line. */ for (i__ = 1; i__ <= 3; ++i__) { for (j = 1; j <= 3; ++j) { rot2[(i__1 = i__ + (j + put * 3) * 3 - 13) < 18 && 0 <= i__1 ? i__1 : s_rnge("rot2", i__1, "refch" "g_", (ftnlen)478)] = tmprot[(i__2 = i__ - 1) < 9 && 0 <= i__2 ? i__2 : s_rnge("tmprot", i__2, "refchg_", (ftnlen)478)] * rot2[(i__3 = (j + get * 3) * 3 - 12) < 18 && 0 <= i__3 ? i__3 : s_rnge("rot2", i__3, "refchg_", ( ftnlen)478)] + tmprot[(i__4 = i__ + 2) < 9 && 0 <= i__4 ? i__4 : s_rnge("tmprot", i__4, "refchg_", (ftnlen)478)] * rot2[(i__5 = (j + get * 3) * 3 - 11) < 18 && 0 <= i__5 ? i__5 : s_rnge("rot2", i__5, "refchg_", (ftnlen)478)] + tmprot[(i__6 = i__ + 5) < 9 && 0 <= i__6 ? i__6 : s_rnge("tmprot", i__6, "refchg_", ( ftnlen)478)] * rot2[(i__7 = (j + get * 3) * 3 - 10) < 18 && 0 <= i__7 ? i__7 : s_rnge("rot2" , i__7, "refchg_", (ftnlen)478)]; } } /* Adjust GET and PUT so that GET points to the slots */ /* where we just stored the result of our multiply and */ /* so that PUT points to the next available storage */ /* locations. */ get = put; put += inc; inc = -inc; this__ = relto; cmnode = isrchi_(&this__, &node, frame); gotone = cmnode > 0; } } /* See if we have a common node and determine whether or not */ /* we are done with this loop. */ done = this__ == 1 || gotone || ! found; } /* There are two possible scenarios. Either the chain of */ /* rotations from FRAME2 ran into a node in the chain for */ /* FRAME1 or it didn't. (The common node might very well be */ /* the root node.) If we didn't run into a common one, then */ /* the two chains don't intersect and there is no way to */ /* get from FRAME1 to FRAME2. */ if (! gotone) { zznofcon_(et, frame1, &frame[(i__1 = node - 1) < 10 && 0 <= i__1 ? i__1 : s_rnge("frame", i__1, "refchg_", (ftnlen)525)], frame2, &this__, errmsg, (ftnlen)1840); if (failed_()) { /* We were unable to create the error message. This */ /* unfortunate situation could arise if a frame kernel */ /* is corrupted. */ chkout_("REFCHG", (ftnlen)6); return 0; } /* The normal case: signal an error with a descriptive long */ /* error message. */ setmsg_(errmsg, (ftnlen)1840); sigerr_("SPICE(NOFRAMECONNECT)", (ftnlen)21); chkout_("REFCHG", (ftnlen)6); return 0; } /* Recall that we have the following. */ /* ROT(1...3, 1...3, 1 ) rotates FRAME(1) to FRAME(2) */ /* ROT(1...3, 1...3, 2 ) rotates FRAME(2) to FRAME(3) */ /* ROT(1...3, 1...3, 3 ) rotates FRAME(3) to FRAME(4) */ /* ROT(1...3, 1...3, CMNODE-1) rotates FRAME(CMNODE-1) */ /* to FRAME(CMNODE) */ /* and that ROT2(1,1,GET) rotates from FRAME2 to CMNODE. */ /* Hence the inverse of ROT2(1,1,GET) rotates from CMNODE */ /* to FRAME2. */ /* If we compute the inverse of ROT2 and store it in */ /* the next available slot of ROT (.i.e. 
ROT(1,1,CMNODE) */ /* we can simply apply our custom routine that multiplies a */ /* sequence of rotation matrices together to get the */ /* result from FRAME1 to FRAME2. */ xpose_(&rot2[(i__1 = (get * 3 + 1) * 3 - 12) < 18 && 0 <= i__1 ? i__1 : s_rnge("rot2", i__1, "refchg_", (ftnlen)568)], &rot[(i__2 = ( cmnode * 3 + 1) * 3 - 12) < 126 && 0 <= i__2 ? i__2 : s_rnge( "rot", i__2, "refchg_", (ftnlen)568)]); zzrxr_(rot, &cmnode, rotate); chkout_("REFCHG", (ftnlen)6); return 0; } /* refchg_ */
darioizzo/pykep
src/third_party/cspice/refchg.c
C
gpl-3.0
24,383
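The routine above reduces a frame change to two pieces of arithmetic: accumulate a chain of 3 x 3 rotations by repeated matrix multiplication (what the private routine ZZRXR does over ROT), and invert a rotation by transposing it (what XPOSE does to ROT2 before the final product). A minimal standalone sketch of that arithmetic follows; mxm3, xpose3 and chain3 are illustrative names, not CSPICE entry points.

#include <stdio.h>
#include <string.h>

/* out = a * b for 3x3 matrices. */
static void mxm3(double a[3][3], double b[3][3], double out[3][3])
{
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++) {
            out[i][j] = 0.0;
            for (int k = 0; k < 3; k++)
                out[i][j] += a[i][k] * b[k][j];
        }
}

/* A rotation matrix is orthogonal, so its inverse is its transpose. */
static void xpose3(double m[3][3], double out[3][3])
{
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
            out[i][j] = m[j][i];
}

/* Fold rot[0..n-1], where rot[i] maps frame i to frame i+1, into one
 * matrix mapping frame 0 to frame n (ZZRXR-style accumulation). */
static void chain3(double rot[][3][3], int n, double out[3][3])
{
    double tmp[3][3];
    memcpy(out, rot[0], 9 * sizeof(double));
    for (int i = 1; i < n; i++) {
        mxm3(rot[i], out, tmp);       /* prepend the next link in the chain */
        memcpy(out, tmp, 9 * sizeof(double));
    }
}

int main(void)
{
    /* Two 90-degree rotations about Z compose to a 180-degree rotation. */
    double chainm[2][3][3] = {
        { {0, -1, 0}, {1, 0, 0}, {0, 0, 1} },
        { {0, -1, 0}, {1, 0, 0}, {0, 0, 1} },
    };
    double r[3][3], inv[3][3];
    chain3(chainm, 2, r);
    xpose3(r, inv);                   /* rotation from frame n back to frame 0 */
    printf("r[0][0] = %.0f (expect -1)\n", r[0][0]);
    return 0;
}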
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * Copyright by The HDF Group.                                               *
 * Copyright by the Board of Trustees of the University of Illinois.         *
 * All rights reserved.                                                      *
 *                                                                           *
 * This file is part of HDF5.  The full HDF5 copyright notice, including     *
 * terms governing use, modification, and redistribution, is contained in    *
 * the COPYING file, which can be found at the root of the source code       *
 * distribution tree, or in https://www.hdfgroup.org/licenses.               *
 * If you do not have access to either file, you may request a copy from     *
 * help@hdfgroup.org.                                                        *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include "H5Fmodule.h" /* This source code file is part of the H5F module */

/* Packages needed by this file... */
#include "H5private.h"  /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5Fpkg.h"     /* File access */

/* PRIVATE PROTOTYPES */

/*-------------------------------------------------------------------------
 * Function:    H5F_fake_alloc
 *
 * Purpose:     Allocate a "fake" file structure, for various routines to
 *              use for encoding/decoding data structures using internal API
 *              routines that need a file structure, but don't ultimately
 *              depend on having a "real" file.
 *
 * Return:      Success:        Pointer to 'faked up' file structure
 *              Failure:        NULL
 *
 * Programmer:  Quincey Koziol
 *              Oct 2, 2006
 *
 *-------------------------------------------------------------------------
 */
H5F_t *
H5F_fake_alloc(uint8_t sizeof_size)
{
    H5F_t *f         = NULL; /* Pointer to fake file struct */
    H5F_t *ret_value = NULL; /* Return value */

    FUNC_ENTER_NOAPI(NULL)

    /* Allocate faked file struct */
    if (NULL == (f = H5FL_CALLOC(H5F_t)))
        HGOTO_ERROR(H5E_FILE, H5E_NOSPACE, NULL, "can't allocate top file structure")
    if (NULL == (f->shared = H5FL_CALLOC(H5F_shared_t)))
        HGOTO_ERROR(H5E_FILE, H5E_NOSPACE, NULL, "can't allocate shared file structure")

    /* Only set fields necessary for clients */
    if (sizeof_size == 0)
        f->shared->sizeof_size = H5F_OBJ_SIZE_SIZE;
    else
        f->shared->sizeof_size = sizeof_size;

    /* Set return value */
    ret_value = f;

done:
    if (!ret_value)
        H5F_fake_free(f);

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F_fake_alloc() */

/*-------------------------------------------------------------------------
 * Function:    H5F_fake_free
 *
 * Purpose:     Free a "fake" file structure.
 *
 * Return:      Success:        non-negative
 *              Failure:        negative
 *
 * Programmer:  Quincey Koziol
 *              Oct 2, 2006
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5F_fake_free(H5F_t *f)
{
    FUNC_ENTER_NOAPI_NOINIT_NOERR

    /* Free faked file struct */
    if (f) {
        /* Destroy shared file struct */
        if (f->shared)
            f->shared = H5FL_FREE(H5F_shared_t, f->shared);
        f = H5FL_FREE(H5F_t, f);
    } /* end if */

    FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5F_fake_free() */
su2code/SU2
externals/cgns/hdf5/H5Ffake.c
C
lgpl-2.1
3,425
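H5F_fake_alloc is a compact example of the calloc-then-goto-cleanup idiom: build a nested object, set only the fields downstream code reads, and route every failure through one teardown that tolerates partially built state. Below is a library-free sketch of the same shape; fake_file_t and DEFAULT_SIZEOF_SIZE are stand-ins (in the real code the default width comes from H5F_OBJ_SIZE_SIZE).

#include <stdint.h>
#include <stdlib.h>

typedef struct { uint8_t sizeof_size; } fake_shared_t;
typedef struct { fake_shared_t *shared; } fake_file_t;

#define DEFAULT_SIZEOF_SIZE 8   /* assumed default width, not the HDF5 constant */

/* Safe on NULL and on half-built objects, so the error path can call it. */
static void fake_free(fake_file_t *f)
{
    if (f) {
        free(f->shared);
        free(f);
    }
}

static fake_file_t *fake_alloc(uint8_t sizeof_size)
{
    fake_file_t *f = calloc(1, sizeof(*f));
    if (!f)
        goto error;
    if (!(f->shared = calloc(1, sizeof(*f->shared))))
        goto error;
    /* Set only the fields clients actually read; 0 selects the default. */
    f->shared->sizeof_size = sizeof_size ? sizeof_size : DEFAULT_SIZEOF_SIZE;
    return f;
error:
    fake_free(f);
    return NULL;
}

int main(void)
{
    fake_file_t *f = fake_alloc(0);   /* 0 picks the default width */
    int ok = f && f->shared->sizeof_size == DEFAULT_SIZEOF_SIZE;
    fake_free(f);
    return ok ? 0 : 1;
}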
/* * General DV muxer/demuxer * Copyright (c) 2003 Roman Shaposhnik * * Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth * of DV technical info. * * Raw DV format * Copyright (c) 2002 Fabrice Bellard * * 50 Mbps (DVCPRO50) support * Copyright (c) 2006 Daniel Maas <dmaas@maasdigital.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <time.h> #include <stdarg.h> #include "avformat.h" #include "internal.h" #include "libavcodec/dv_profile.h" #include "libavcodec/dvdata.h" #include "dv.h" #include "libavutil/fifo.h" #include "libavutil/mathematics.h" #include "libavutil/intreadwrite.h" #include "libavutil/opt.h" #include "libavutil/timecode.h" #define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio struct DVMuxContext { AVClass *av_class; const DVprofile* sys; /* current DV profile, e.g.: 525/60, 625/50 */ int n_ast; /* number of stereo audio streams (up to 2) */ AVStream *ast[2]; /* stereo audio streams */ AVFifoBuffer *audio_data[2]; /* FIFO for storing excessive amounts of PCM */ int frames; /* current frame number */ int64_t start_time; /* recording start time */ int has_audio; /* frame under contruction has audio */ int has_video; /* frame under contruction has video */ uint8_t frame_buf[DV_MAX_FRAME_SIZE]; /* frame under contruction */ AVTimecode tc; /* timecode context */ }; static const int dv_aaux_packs_dist[12][9] = { { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, }; static int dv_audio_frame_size(const DVprofile* sys, int frame) { return sys->audio_samples_dist[frame % (sizeof(sys->audio_samples_dist) / sizeof(sys->audio_samples_dist[0]))]; } static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* buf, ...) 
{ struct tm tc; time_t ct; uint32_t timecode; va_list ap; buf[0] = (uint8_t)pack_id; switch (pack_id) { case dv_timecode: timecode = av_timecode_get_smpte_from_framenum(&c->tc, c->frames); timecode |= 1<<23 | 1<<15 | 1<<7 | 1<<6; // biphase and binary group flags AV_WB32(buf + 1, timecode); break; case dv_audio_source: /* AAUX source pack */ va_start(ap, buf); buf[1] = (1 << 7) | /* locked mode -- SMPTE only supports locked mode */ (1 << 6) | /* reserved -- always 1 */ (dv_audio_frame_size(c->sys, c->frames) - c->sys->audio_min_samples[0]); /* # of samples */ buf[2] = (0 << 7) | /* multi-stereo */ (0 << 5) | /* #of audio channels per block: 0 -- 1 channel */ (0 << 4) | /* pair bit: 0 -- one pair of channels */ !!va_arg(ap, int); /* audio mode */ buf[3] = (1 << 7) | /* res */ (1 << 6) | /* multi-language flag */ (c->sys->dsf << 5) | /* system: 60fields/50fields */ (c->sys->n_difchan & 2); /* definition: 0 -- 25Mbps, 2 -- 50Mbps */ buf[4] = (1 << 7) | /* emphasis: 1 -- off */ (0 << 6) | /* emphasis time constant: 0 -- reserved */ (0 << 3) | /* frequency: 0 -- 48kHz, 1 -- 44,1kHz, 2 -- 32kHz */ 0; /* quantization: 0 -- 16bit linear, 1 -- 12bit nonlinear */ va_end(ap); break; case dv_audio_control: buf[1] = (0 << 6) | /* copy protection: 0 -- unrestricted */ (1 << 4) | /* input source: 1 -- digital input */ (3 << 2) | /* compression: 3 -- no information */ 0; /* misc. info/SMPTE emphasis off */ buf[2] = (1 << 7) | /* recording start point: 1 -- no */ (1 << 6) | /* recording end point: 1 -- no */ (1 << 3) | /* recording mode: 1 -- original */ 7; buf[3] = (1 << 7) | /* direction: 1 -- forward */ (c->sys->pix_fmt == PIX_FMT_YUV420P ? 0x20 : /* speed */ c->sys->ltc_divisor * 4); buf[4] = (1 << 7) | /* reserved -- always 1 */ 0x7f; /* genre category */ break; case dv_audio_recdate: case dv_video_recdate: /* VAUX recording date */ ct = c->start_time + av_rescale_rnd(c->frames, c->sys->time_base.num, c->sys->time_base.den, AV_ROUND_DOWN); ff_brktimegm(ct, &tc); buf[1] = 0xff; /* ds, tm, tens of time zone, units of time zone */ /* 0xff is very likely to be "unknown" */ buf[2] = (3 << 6) | /* reserved -- always 1 */ ((tc.tm_mday / 10) << 4) | /* Tens of day */ (tc.tm_mday % 10); /* Units of day */ buf[3] = /* we set high 4 bits to 0, shouldn't we set them to week? */ ((tc.tm_mon / 10) << 4) | /* Tens of month */ (tc.tm_mon % 10); /* Units of month */ buf[4] = (((tc.tm_year % 100) / 10) << 4) | /* Tens of year */ (tc.tm_year % 10); /* Units of year */ break; case dv_audio_rectime: /* AAUX recording time */ case dv_video_rectime: /* VAUX recording time */ ct = c->start_time + av_rescale_rnd(c->frames, c->sys->time_base.num, c->sys->time_base.den, AV_ROUND_DOWN); ff_brktimegm(ct, &tc); buf[1] = (3 << 6) | /* reserved -- always 1 */ 0x3f; /* tens of frame, units of frame: 0x3f - "unknown" ? 
*/ buf[2] = (1 << 7) | /* reserved -- always 1 */ ((tc.tm_sec / 10) << 4) | /* Tens of seconds */ (tc.tm_sec % 10); /* Units of seconds */ buf[3] = (1 << 7) | /* reserved -- always 1 */ ((tc.tm_min / 10) << 4) | /* Tens of minutes */ (tc.tm_min % 10); /* Units of minutes */ buf[4] = (3 << 6) | /* reserved -- always 1 */ ((tc.tm_hour / 10) << 4) | /* Tens of hours */ (tc.tm_hour % 10); /* Units of hours */ break; default: buf[1] = buf[2] = buf[3] = buf[4] = 0xff; } return 5; } static void dv_inject_audio(DVMuxContext *c, int channel, uint8_t* frame_ptr) { int i, j, d, of, size; size = 4 * dv_audio_frame_size(c->sys, c->frames); frame_ptr += channel * c->sys->difseg_size * 150 * 80; for (i = 0; i < c->sys->difseg_size; i++) { frame_ptr += 6 * 80; /* skip DIF segment header */ for (j = 0; j < 9; j++) { dv_write_pack(dv_aaux_packs_dist[i][j], c, &frame_ptr[3], i >= c->sys->difseg_size/2); for (d = 8; d < 80; d+=2) { of = c->sys->audio_shuffle[i][j] + (d - 8)/2 * c->sys->audio_stride; if (of*2 >= size) continue; frame_ptr[d] = *av_fifo_peek2(c->audio_data[channel], of*2+1); // FIXME: maybe we have to admit frame_ptr[d+1] = *av_fifo_peek2(c->audio_data[channel], of*2); // that DV is a big-endian PCM } frame_ptr += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */ } } } static void dv_inject_metadata(DVMuxContext *c, uint8_t* frame) { int j, k; uint8_t* buf; for (buf = frame; buf < frame + c->sys->frame_size; buf += 150 * 80) { /* DV subcode: 2nd and 3d DIFs */ for (j = 80; j < 80 * 3; j += 80) { for (k = 6; k < 6 * 8; k += 8) dv_write_pack(dv_timecode, c, &buf[j+k]); if (((long)(buf-frame)/(c->sys->frame_size/(c->sys->difseg_size*c->sys->n_difchan))%c->sys->difseg_size) > 5) { /* FIXME: is this really needed ? */ dv_write_pack(dv_video_recdate, c, &buf[j+14]); dv_write_pack(dv_video_rectime, c, &buf[j+22]); dv_write_pack(dv_video_recdate, c, &buf[j+38]); dv_write_pack(dv_video_rectime, c, &buf[j+46]); } } /* DV VAUX: 4th, 5th and 6th 3DIFs */ for (j = 80*3 + 3; j < 80*6; j += 80) { dv_write_pack(dv_video_recdate, c, &buf[j+5*2]); dv_write_pack(dv_video_rectime, c, &buf[j+5*3]); dv_write_pack(dv_video_recdate, c, &buf[j+5*11]); dv_write_pack(dv_video_rectime, c, &buf[j+5*12]); } } } /* * The following 3 functions constitute our interface to the world */ static int dv_assemble_frame(DVMuxContext *c, AVStream* st, uint8_t* data, int data_size, uint8_t** frame) { int i, reqasize; *frame = &c->frame_buf[0]; reqasize = 4 * dv_audio_frame_size(c->sys, c->frames); switch (st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: /* FIXME: we have to have more sensible approach than this one */ if (c->has_video) av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient audio data or severe sync problem.\n", c->frames); memcpy(*frame, data, c->sys->frame_size); c->has_video = 1; break; case AVMEDIA_TYPE_AUDIO: for (i = 0; i < c->n_ast && st != c->ast[i]; i++); /* FIXME: we have to have more sensible approach than this one */ if (av_fifo_size(c->audio_data[i]) + data_size >= 100*MAX_AUDIO_FRAME_SIZE) av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient video data or severe sync problem.\n", c->frames); av_fifo_generic_write(c->audio_data[i], data, data_size, NULL); /* Let us see if we've got enough audio for one DV frame. */ c->has_audio |= ((reqasize <= av_fifo_size(c->audio_data[i])) << i); break; default: break; } /* Let us see if we have enough data to construct one DV frame. 
*/ if (c->has_video == 1 && c->has_audio + 1 == 1 << c->n_ast) { dv_inject_metadata(c, *frame); c->has_audio = 0; for (i=0; i < c->n_ast; i++) { dv_inject_audio(c, i, *frame); av_fifo_drain(c->audio_data[i], reqasize); c->has_audio |= ((reqasize <= av_fifo_size(c->audio_data[i])) << i); } c->has_video = 0; c->frames++; return c->sys->frame_size; } return 0; } static DVMuxContext* dv_init_mux(AVFormatContext* s) { DVMuxContext *c = s->priv_data; AVStream *vst = NULL; AVDictionaryEntry *t; int i; /* we support at most 1 video and 2 audio streams */ if (s->nb_streams > 3) return NULL; c->n_ast = 0; c->ast[0] = c->ast[1] = NULL; /* We have to sort out where audio and where video stream is */ for (i=0; i<s->nb_streams; i++) { switch (s->streams[i]->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: if (vst) return NULL; vst = s->streams[i]; break; case AVMEDIA_TYPE_AUDIO: if (c->n_ast > 1) return NULL; c->ast[c->n_ast++] = s->streams[i]; break; default: goto bail_out; } } /* Some checks -- DV format is very picky about its incoming streams */ if (!vst || vst->codec->codec_id != AV_CODEC_ID_DVVIDEO) goto bail_out; for (i=0; i<c->n_ast; i++) { if (c->ast[i] && (c->ast[i]->codec->codec_id != AV_CODEC_ID_PCM_S16LE || c->ast[i]->codec->sample_rate != 48000 || c->ast[i]->codec->channels != 2)) goto bail_out; } c->sys = avpriv_dv_codec_profile(vst->codec); if (!c->sys) goto bail_out; if ((c->n_ast > 1) && (c->sys->n_difchan < 2)) { /* only 1 stereo pair is allowed in 25Mbps mode */ goto bail_out; } /* Ok, everything seems to be in working order */ c->frames = 0; c->has_audio = 0; c->has_video = 0; if (t = av_dict_get(s->metadata, "creation_time", NULL, 0)) c->start_time = ff_iso8601_to_unix_time(t->value); for (i=0; i < c->n_ast; i++) { if (c->ast[i] && !(c->audio_data[i]=av_fifo_alloc(100*MAX_AUDIO_FRAME_SIZE))) { while (i > 0) { i--; av_fifo_free(c->audio_data[i]); } goto bail_out; } } return c; bail_out: return NULL; } static void dv_delete_mux(DVMuxContext *c) { int i; for (i=0; i < c->n_ast; i++) av_fifo_free(c->audio_data[i]); } static int dv_write_header(AVFormatContext *s) { AVRational rate; DVMuxContext *dvc = s->priv_data; AVDictionaryEntry *tcr = av_dict_get(s->metadata, "timecode", NULL, 0); if (!dv_init_mux(s)) { av_log(s, AV_LOG_ERROR, "Can't initialize DV format!\n" "Make sure that you supply exactly two streams:\n" " video: 25fps or 29.97fps, audio: 2ch/48kHz/PCM\n" " (50Mbps allows an optional second audio stream)\n"); return -1; } rate.num = dvc->sys->ltc_divisor; rate.den = 1; if (!tcr) { // no global timecode, look into the streams int i; for (i = 0; i < s->nb_streams; i++) { tcr = av_dict_get(s->streams[i]->metadata, "timecode", NULL, 0); if (tcr) break; } } if (tcr) return av_timecode_init_from_string(&dvc->tc, rate, tcr->value, s); return av_timecode_init(&dvc->tc, rate, 0, 0, s); } static int dv_write_packet(struct AVFormatContext *s, AVPacket *pkt) { uint8_t* frame; int fsize; fsize = dv_assemble_frame(s->priv_data, s->streams[pkt->stream_index], pkt->data, pkt->size, &frame); if (fsize > 0) { avio_write(s->pb, frame, fsize); avio_flush(s->pb); } return 0; } /* * We might end up with some extra A/V data without matching counterpart. * E.g. video data without enough audio to write the complete frame. * Currently we simply drop the last frame. 
I don't know whether this * is the best strategy of all */ static int dv_write_trailer(struct AVFormatContext *s) { dv_delete_mux(s->priv_data); return 0; } AVOutputFormat ff_dv_muxer = { .name = "dv", .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"), .extensions = "dv", .priv_data_size = sizeof(DVMuxContext), .audio_codec = AV_CODEC_ID_PCM_S16LE, .video_codec = AV_CODEC_ID_DVVIDEO, .write_header = dv_write_header, .write_packet = dv_write_packet, .write_trailer = dv_write_trailer, };
leighpauls/k2cro4
third_party/ffmpeg/libavformat/dvenc.c
C
bsd-3-clause
16,245
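The muxer's central discipline lives in dv_assemble_frame: PCM is parked in a FIFO and nothing is emitted until one video picture and a full frame's worth of audio are both on hand. Here is a toy model of that gating with a flat buffer standing in for AVFifoBuffer; fifo_t and try_emit_frame are illustrative, not FFmpeg APIs.

#include <stdio.h>
#include <string.h>

#define FIFO_CAP 4096

typedef struct { unsigned char buf[FIFO_CAP]; size_t len; } fifo_t;

static int fifo_write(fifo_t *f, const unsigned char *p, size_t n)
{
    if (f->len + n > FIFO_CAP)
        return -1;                 /* overflow: caller must drop or resync */
    memcpy(f->buf + f->len, p, n);
    f->len += n;
    return 0;
}

static void fifo_drain(fifo_t *f, size_t n)
{
    memmove(f->buf, f->buf + n, f->len - n);
    f->len -= n;
}

/* Emit only when a picture has arrived AND the FIFO holds at least one
 * frame's worth of audio (req bytes); otherwise keep buffering. */
static int try_emit_frame(int has_video, fifo_t *audio, size_t req)
{
    if (!has_video || audio->len < req)
        return 0;
    fifo_drain(audio, req);        /* consume exactly one frame of PCM */
    return 1;                      /* caller writes the assembled frame now */
}

int main(void)
{
    static fifo_t audio;
    unsigned char pcm[1920] = {0};
    fifo_write(&audio, pcm, sizeof pcm);
    printf("emit=%d\n", try_emit_frame(1, &audio, sizeof pcm)); /* emit=1 */
    return 0;
}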
/* Capstone Disassembly Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2013-2015 */

#ifdef CAPSTONE_HAS_SPARC

#include "../../utils.h"
#include "../../MCRegisterInfo.h"
#include "SparcDisassembler.h"
#include "SparcInstPrinter.h"
#include "SparcMapping.h"

static cs_err init(cs_struct *ud)
{
	MCRegisterInfo *mri;

	// verify if requested mode is valid
	if (ud->mode & ~(CS_MODE_BIG_ENDIAN | CS_MODE_V9))
		return CS_ERR_MODE;

	mri = cs_mem_malloc(sizeof(*mri));

	Sparc_init(mri);
	ud->printer = Sparc_printInst;
	ud->printer_info = mri;
	ud->getinsn_info = mri;
	ud->disasm = Sparc_getInstruction;
	ud->post_printer = Sparc_post_printer;

	ud->reg_name = Sparc_reg_name;
	ud->insn_id = Sparc_get_insn_id;
	ud->insn_name = Sparc_insn_name;
	ud->group_name = Sparc_group_name;

	return CS_ERR_OK;
}

static cs_err option(cs_struct *handle, cs_opt_type type, size_t value)
{
	if (type == CS_OPT_SYNTAX)
		handle->syntax = (int) value;

	return CS_ERR_OK;
}

void Sparc_enable(void)
{
	arch_init[CS_ARCH_SPARC] = init;
	arch_option[CS_ARCH_SPARC] = option;

	// support this arch
	all_arch |= (1 << CS_ARCH_SPARC);
}

#endif
v3n/ProDBG
src/native/external/capstone/arch/Sparc/SparcModule.c
C
mit
1,133
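Sparc_enable shows Capstone's registration pattern: each architecture installs function pointers into global dispatch tables indexed by its arch id and sets one bit in a capability mask. A self-contained sketch of that pattern follows, with made-up names standing in for Capstone's real tables.

#include <stdio.h>

typedef enum { ARCH_SPARC = 0, ARCH_MAX } arch_t;
typedef int (*arch_init_fn)(void);

static arch_init_fn arch_init[ARCH_MAX];  /* dispatch table, one slot per arch */
static unsigned all_arch;                 /* capability bitmask */

static int sparc_init(void)
{
    puts("sparc backend ready");
    return 0;
}

static void sparc_enable(void)
{
    arch_init[ARCH_SPARC] = sparc_init;
    all_arch |= 1u << ARCH_SPARC;         /* advertise support as one bit */
}

int main(void)
{
    sparc_enable();
    if ((all_arch & (1u << ARCH_SPARC)) && arch_init[ARCH_SPARC])
        arch_init[ARCH_SPARC]();          /* the core dispatches through the table */
    return 0;
}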
/*
 */

#include <linux/init.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/interrupt.h>

#include <asm/io.h>

#include "proto.h"
#include "irq_impl.h"

/*
 */
static unsigned int cached_irq_mask = 0xffff;
static DEFINE_SPINLOCK(i8259_irq_lock);

static inline void
i8259_update_irq_hw(unsigned int irq, unsigned long mask)
{
	int port = 0x21;
	if (irq & 8)
		mask >>= 8;
	if (irq & 8)
		port = 0xA1;
	outb(mask, port);
}

inline void
i8259a_enable_irq(struct irq_data *d)
{
	spin_lock(&i8259_irq_lock);
	i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
	spin_unlock(&i8259_irq_lock);
}

static inline void
__i8259a_disable_irq(unsigned int irq)
{
	i8259_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
}

void
i8259a_disable_irq(struct irq_data *d)
{
	spin_lock(&i8259_irq_lock);
	__i8259a_disable_irq(d->irq);
	spin_unlock(&i8259_irq_lock);
}

void
i8259a_mask_and_ack_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	spin_lock(&i8259_irq_lock);
	__i8259a_disable_irq(irq);

	/* */
	if (irq >= 8) {
		outb(0xE0 | (irq - 8), 0xa0);   /* */
		irq = 2;
	}
	outb(0xE0 | irq, 0x20);		/* */
	spin_unlock(&i8259_irq_lock);
}

struct irq_chip i8259a_irq_type = {
	.name		= "XT-PIC",
	.irq_unmask	= i8259a_enable_irq,
	.irq_mask	= i8259a_disable_irq,
	.irq_mask_ack	= i8259a_mask_and_ack_irq,
};

void __init
init_i8259a_irqs(void)
{
	static struct irqaction cascade = {
		.handler	= no_action,
		.name		= "cascade",
	};

	long i;

	outb(0xff, 0x21);	/* */
	outb(0xff, 0xA1);	/* */

	for (i = 0; i < 16; i++) {
		irq_set_chip_and_handler(i, &i8259a_irq_type,
					 handle_level_irq);
	}

	setup_irq(2, &cascade);
}

#if defined(CONFIG_ALPHA_GENERIC)
# define IACK_SC	alpha_mv.iack_sc
#elif defined(CONFIG_ALPHA_APECS)
# define IACK_SC	APECS_IACK_SC
#elif defined(CONFIG_ALPHA_LCA)
# define IACK_SC	LCA_IACK_SC
#elif defined(CONFIG_ALPHA_CIA)
# define IACK_SC	CIA_IACK_SC
#elif defined(CONFIG_ALPHA_PYXIS)
# define IACK_SC	PYXIS_IACK_SC
#elif defined(CONFIG_ALPHA_TITAN)
# define IACK_SC	TITAN_IACK_SC
#elif defined(CONFIG_ALPHA_TSUNAMI)
# define IACK_SC	TSUNAMI_IACK_SC
#elif defined(CONFIG_ALPHA_IRONGATE)
# define IACK_SC	IRONGATE_IACK_SC
#endif

/* */

#if defined(IACK_SC)
void
isa_device_interrupt(unsigned long vector)
{
	/* */
	int j = *(vuip) IACK_SC;
	j &= 0xff;
	handle_irq(j);
}
#endif

#if defined(CONFIG_ALPHA_GENERIC) || !defined(IACK_SC)
void
isa_no_iack_sc_device_interrupt(unsigned long vector)
{
	unsigned long pic;

	/* */
	/* */

	pic = inb(0x20) | (inb(0xA0) << 8);	/* */
	pic &= 0xFFFB;				/* */

	while (pic) {
		int j = ffz(~pic);
		pic &= pic - 1;
		handle_irq(j);
	}
}
#endif
holyangel/LGE_G3
arch/alpha/kernel/irq_i8259.c
C
gpl-2.0
3,962
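The 8259 driver keeps a 16-bit software copy of the interrupt mask and writes only the relevant byte to the master (port 0x21) or slave (port 0xA1) controller, with bit 3 of the IRQ number selecting the slave. Below is a user-space model of that cached-mask bookkeeping; printf stands in for outb(), and the spinlock the real driver takes around every update is omitted.

#include <stdio.h>

static unsigned int cached_mask = 0xffff;   /* software copy: all 16 lines masked */

/* Stand-in for outb(); a real driver writes the I/O port instead. */
static void write_port(unsigned char val, int port)
{
    printf("outb(0x%02x, 0x%02x)\n", val, port);
}

static void update_hw(unsigned int irq, unsigned int mask)
{
    int port = (irq & 8) ? 0xA1 : 0x21;     /* bit 3 selects the slave PIC */
    if (irq & 8)
        mask >>= 8;                         /* slave sees the high byte */
    write_port((unsigned char)mask, port);
}

static void enable_irq(unsigned int irq)
{
    update_hw(irq, cached_mask &= ~(1u << irq));
}

static void disable_irq(unsigned int irq)
{
    update_hw(irq, cached_mask |= (1u << irq));
}

int main(void)
{
    enable_irq(4);     /* clears bit 4, pushes the low byte to port 0x21  */
    disable_irq(12);   /* sets bit 12, pushes the high byte to port 0xa1 */
    return 0;
}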
/* * (C) Copyright IBM Deutschland Entwicklung GmbH 2006 * * Author: Maxim Shchetynin <maxim@de.ibm.com> * * Axon DDR2 device driver. * It registers one block device per Axon's DDR2 memory bank found on a system. * Block devices are called axonram?, their major and minor numbers are * available in /proc/devices, /proc/partitions or in /sys/block/axonram?/dev. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/genhd.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/irqreturn.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/types.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <asm/page.h> #include <asm/prom.h> #define AXON_RAM_MODULE_NAME "axonram" #define AXON_RAM_DEVICE_NAME "axonram" #define AXON_RAM_MINORS_PER_DISK 16 #define AXON_RAM_BLOCK_SHIFT PAGE_SHIFT #define AXON_RAM_BLOCK_SIZE 1 << AXON_RAM_BLOCK_SHIFT #define AXON_RAM_SECTOR_SHIFT 9 #define AXON_RAM_SECTOR_SIZE 1 << AXON_RAM_SECTOR_SHIFT #define AXON_RAM_IRQ_FLAGS IRQF_SHARED | IRQF_TRIGGER_RISING static int azfs_major, azfs_minor; struct axon_ram_bank { struct platform_device *device; struct gendisk *disk; unsigned int irq_id; unsigned long ph_addr; unsigned long io_addr; unsigned long size; unsigned long ecc_counter; }; static ssize_t axon_ram_sysfs_ecc(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *device = to_platform_device(dev); struct axon_ram_bank *bank = device->dev.platform_data; BUG_ON(!bank); return sprintf(buf, "%ld\n", bank->ecc_counter); } static DEVICE_ATTR(ecc, S_IRUGO, axon_ram_sysfs_ecc, NULL); /* */ static irqreturn_t axon_ram_irq_handler(int irq, void *dev) { struct platform_device *device = dev; struct axon_ram_bank *bank = device->dev.platform_data; BUG_ON(!bank); dev_err(&device->dev, "Correctable memory error occurred\n"); bank->ecc_counter++; return IRQ_HANDLED; } /* */ static void axon_ram_make_request(struct request_queue *queue, struct bio *bio) { struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; unsigned long phys_mem, phys_end; void *user_mem; struct bio_vec *vec; unsigned int transfered; unsigned short idx; phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT); phys_end = bank->io_addr + bank->size; transfered = 0; bio_for_each_segment(vec, bio, idx) { if (unlikely(phys_mem + vec->bv_len > phys_end)) { bio_io_error(bio); return; } user_mem = page_address(vec->bv_page) + vec->bv_offset; if (bio_data_dir(bio) == READ) memcpy(user_mem, (void *) phys_mem, vec->bv_len); else memcpy((void *) phys_mem, user_mem, vec->bv_len); phys_mem += vec->bv_len; transfered += 
vec->bv_len; } bio_endio(bio, 0); } /* */ static int axon_ram_direct_access(struct block_device *device, sector_t sector, void **kaddr, unsigned long *pfn) { struct axon_ram_bank *bank = device->bd_disk->private_data; loff_t offset; offset = sector; if (device->bd_part != NULL) offset += device->bd_part->start_sect; offset <<= AXON_RAM_SECTOR_SHIFT; if (offset >= bank->size) { dev_err(&bank->device->dev, "Access outside of address space\n"); return -ERANGE; } *kaddr = (void *)(bank->ph_addr + offset); *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT; return 0; } static const struct block_device_operations axon_ram_devops = { .owner = THIS_MODULE, .direct_access = axon_ram_direct_access }; /* */ static int axon_ram_probe(struct platform_device *device) { static int axon_ram_bank_id = -1; struct axon_ram_bank *bank; struct resource resource; int rc = 0; axon_ram_bank_id++; dev_info(&device->dev, "Found memory controller on %s\n", device->dev.of_node->full_name); bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL); if (bank == NULL) { dev_err(&device->dev, "Out of memory\n"); rc = -ENOMEM; goto failed; } device->dev.platform_data = bank; bank->device = device; if (of_address_to_resource(device->dev.of_node, 0, &resource) != 0) { dev_err(&device->dev, "Cannot access device tree\n"); rc = -EFAULT; goto failed; } bank->size = resource_size(&resource); if (bank->size == 0) { dev_err(&device->dev, "No DDR2 memory found for %s%d\n", AXON_RAM_DEVICE_NAME, axon_ram_bank_id); rc = -ENODEV; goto failed; } dev_info(&device->dev, "Register DDR2 memory device %s%d with %luMB\n", AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20); bank->ph_addr = resource.start; bank->io_addr = (unsigned long) ioremap_prot( bank->ph_addr, bank->size, _PAGE_NO_CACHE); if (bank->io_addr == 0) { dev_err(&device->dev, "ioremap() failed\n"); rc = -EFAULT; goto failed; } bank->disk = alloc_disk(AXON_RAM_MINORS_PER_DISK); if (bank->disk == NULL) { dev_err(&device->dev, "Cannot register disk\n"); rc = -EFAULT; goto failed; } bank->disk->major = azfs_major; bank->disk->first_minor = azfs_minor; bank->disk->fops = &axon_ram_devops; bank->disk->private_data = bank; bank->disk->driverfs_dev = &device->dev; sprintf(bank->disk->disk_name, "%s%d", AXON_RAM_DEVICE_NAME, axon_ram_bank_id); bank->disk->queue = blk_alloc_queue(GFP_KERNEL); if (bank->disk->queue == NULL) { dev_err(&device->dev, "Cannot register disk queue\n"); rc = -EFAULT; goto failed; } set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT); blk_queue_make_request(bank->disk->queue, axon_ram_make_request); blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE); add_disk(bank->disk); bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0); if (bank->irq_id == NO_IRQ) { dev_err(&device->dev, "Cannot access ECC interrupt ID\n"); rc = -EFAULT; goto failed; } rc = request_irq(bank->irq_id, axon_ram_irq_handler, AXON_RAM_IRQ_FLAGS, bank->disk->disk_name, device); if (rc != 0) { dev_err(&device->dev, "Cannot register ECC interrupt handler\n"); bank->irq_id = NO_IRQ; rc = -EFAULT; goto failed; } rc = device_create_file(&device->dev, &dev_attr_ecc); if (rc != 0) { dev_err(&device->dev, "Cannot create sysfs file\n"); rc = -EFAULT; goto failed; } azfs_minor += bank->disk->minors; return 0; failed: if (bank != NULL) { if (bank->irq_id != NO_IRQ) free_irq(bank->irq_id, device); if (bank->disk != NULL) { if (bank->disk->major > 0) unregister_blkdev(bank->disk->major, bank->disk->disk_name); del_gendisk(bank->disk); } device->dev.platform_data = NULL; if 
(bank->io_addr != 0) iounmap((void __iomem *) bank->io_addr); kfree(bank); } return rc; } /* */ static int axon_ram_remove(struct platform_device *device) { struct axon_ram_bank *bank = device->dev.platform_data; BUG_ON(!bank || !bank->disk); device_remove_file(&device->dev, &dev_attr_ecc); free_irq(bank->irq_id, device); del_gendisk(bank->disk); iounmap((void __iomem *) bank->io_addr); kfree(bank); return 0; } static struct of_device_id axon_ram_device_id[] = { { .type = "dma-memory" }, {} }; static struct platform_driver axon_ram_driver = { .probe = axon_ram_probe, .remove = axon_ram_remove, .driver = { .name = AXON_RAM_MODULE_NAME, .owner = THIS_MODULE, .of_match_table = axon_ram_device_id, }, }; /* */ static int __init axon_ram_init(void) { azfs_major = register_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); if (azfs_major < 0) { printk(KERN_ERR "%s cannot become block device major number\n", AXON_RAM_MODULE_NAME); return -EFAULT; } azfs_minor = 0; return platform_driver_register(&axon_ram_driver); } /* */ static void __exit axon_ram_exit(void) { platform_driver_unregister(&axon_ram_driver); unregister_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); } module_init(axon_ram_init); module_exit(axon_ram_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Maxim Shchetynin <maxim@de.ibm.com>"); MODULE_DESCRIPTION("Axon DDR2 RAM device driver for IBM Cell BE");
curbthepain/revkernel_us990
arch/powerpc/sysdev/axonram.c
C
gpl-2.0
9,433
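axon_ram_direct_access reduces to fixed-size-sector arithmetic: add the partition's start sector, shift by the 512-byte sector shift, and refuse anything at or past the end of the bank before handing out an address. A minimal sketch of that translation; sector_to_offset is an illustrative helper, not a kernel interface.

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9   /* 512-byte sectors */

/* Translate a sector into a byte offset inside the bank and bounds-check
 * it, as a direct-access implementation must before returning an address.
 * Returns 0 on success, -1 if the sector lies past the bank. */
static int sector_to_offset(uint64_t sector, uint64_t part_start,
                            uint64_t bank_size, uint64_t *off)
{
    uint64_t o = (sector + part_start) << SECTOR_SHIFT;
    if (o >= bank_size)
        return -1;
    *off = o;
    return 0;
}

int main(void)
{
    uint64_t off;
    /* In a 1 MiB bank, sector 2047 is the last valid 512-byte sector. */
    printf("%d\n", sector_to_offset(2047, 0, 1u << 20, &off)); /* 0  */
    printf("%d\n", sector_to_offset(2048, 0, 1u << 20, &off)); /* -1 */
    return 0;
}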
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ext4/namei.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 * Directory entry file type support and forward compatibility hooks * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998 * Hash Tree Directory indexing (c) * Daniel Phillips, 2001 * Hash Tree Directory indexing porting * Christopher Li, 2002 * Hash Tree Directory indexing cleanup * Theodore Ts'o, 2002 */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/time.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/quotaops.h> #include <linux/buffer_head.h> #include <linux/bio.h> #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include <trace/events/ext4.h> /* * define how far ahead to read directories while searching them. */ #define NAMEI_RA_CHUNKS 2 #define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) static struct buffer_head *ext4_append(handle_t *handle, struct inode *inode, ext4_lblk_t *block) { struct buffer_head *bh; int err; if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb && ((inode->i_size >> 10) >= EXT4_SB(inode->i_sb)->s_max_dir_size_kb))) return ERR_PTR(-ENOSPC); *block = inode->i_size >> inode->i_sb->s_blocksize_bits; bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE); if (IS_ERR(bh)) return bh; inode->i_size += inode->i_sb->s_blocksize; EXT4_I(inode)->i_disksize = inode->i_size; BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (err) { brelse(bh); ext4_std_error(inode->i_sb, err); return ERR_PTR(err); } return bh; } static int ext4_dx_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent); typedef enum { EITHER, INDEX, DIRENT } dirblock_type_t; #define ext4_read_dirblock(inode, block, type) \ __ext4_read_dirblock((inode), (block), (type), __func__, __LINE__) static struct buffer_head *__ext4_read_dirblock(struct inode *inode, ext4_lblk_t block, dirblock_type_t type, const char *func, unsigned int line) { struct buffer_head *bh; struct ext4_dir_entry *dirent; int is_dx_block = 0; bh = ext4_bread(NULL, inode, block, 0); if (IS_ERR(bh)) { __ext4_warning(inode->i_sb, func, line, "inode #%lu: lblock %lu: comm %s: " "error %ld reading directory block", inode->i_ino, (unsigned long)block, current->comm, PTR_ERR(bh)); return bh; } if (!bh) { ext4_error_inode(inode, func, line, block, "Directory hole found"); return ERR_PTR(-EFSCORRUPTED); } dirent = (struct ext4_dir_entry *) bh->b_data; /* Determine whether or not we have an index block */ if (is_dx(inode)) { if (block == 0) is_dx_block = 1; else if (ext4_rec_len_from_disk(dirent->rec_len, inode->i_sb->s_blocksize) == inode->i_sb->s_blocksize) is_dx_block = 1; } if (!is_dx_block && type == INDEX) { ext4_error_inode(inode, func, line, block, "directory leaf block found instead of index block"); return ERR_PTR(-EFSCORRUPTED); } if (!ext4_has_metadata_csum(inode->i_sb) || buffer_verified(bh)) return bh; /* * An empty leaf block can get mistaken for a index block; for * this reason, we can only check the index checksum when the * caller is sure it should be an index block. 
*/ if (is_dx_block && type == INDEX) { if (ext4_dx_csum_verify(inode, dirent)) set_buffer_verified(bh); else { ext4_error_inode(inode, func, line, block, "Directory index failed checksum"); brelse(bh); return ERR_PTR(-EFSBADCRC); } } if (!is_dx_block) { if (ext4_dirent_csum_verify(inode, dirent)) set_buffer_verified(bh); else { ext4_error_inode(inode, func, line, block, "Directory block failed checksum"); brelse(bh); return ERR_PTR(-EFSBADCRC); } } return bh; } #ifndef assert #define assert(test) J_ASSERT(test) #endif #ifdef DX_DEBUG #define dxtrace(command) command #else #define dxtrace(command) #endif struct fake_dirent { __le32 inode; __le16 rec_len; u8 name_len; u8 file_type; }; struct dx_countlimit { __le16 limit; __le16 count; }; struct dx_entry { __le32 hash; __le32 block; }; /* * dx_root_info is laid out so that if it should somehow get overlaid by a * dirent the two low bits of the hash version will be zero. Therefore, the * hash version mod 4 should never be 0. Sincerely, the paranoia department. */ struct dx_root { struct fake_dirent dot; char dot_name[4]; struct fake_dirent dotdot; char dotdot_name[4]; struct dx_root_info { __le32 reserved_zero; u8 hash_version; u8 info_length; /* 8 */ u8 indirect_levels; u8 unused_flags; } info; struct dx_entry entries[0]; }; struct dx_node { struct fake_dirent fake; struct dx_entry entries[0]; }; struct dx_frame { struct buffer_head *bh; struct dx_entry *entries; struct dx_entry *at; }; struct dx_map_entry { u32 hash; u16 offs; u16 size; }; /* * This goes at the end of each htree block. */ struct dx_tail { u32 dt_reserved; __le32 dt_checksum; /* crc32c(uuid+inum+dirblock) */ }; static inline ext4_lblk_t dx_get_block(struct dx_entry *entry); static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value); static inline unsigned dx_get_hash(struct dx_entry *entry); static void dx_set_hash(struct dx_entry *entry, unsigned value); static unsigned dx_get_count(struct dx_entry *entries); static unsigned dx_get_limit(struct dx_entry *entries); static void dx_set_count(struct dx_entry *entries, unsigned value); static void dx_set_limit(struct dx_entry *entries, unsigned value); static unsigned dx_root_limit(struct inode *dir, unsigned infosize); static unsigned dx_node_limit(struct inode *dir); static struct dx_frame *dx_probe(struct ext4_filename *fname, struct inode *dir, struct dx_hash_info *hinfo, struct dx_frame *frame); static void dx_release(struct dx_frame *frames); static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de, unsigned blocksize, struct dx_hash_info *hinfo, struct dx_map_entry map[]); static void dx_sort_map(struct dx_map_entry *map, unsigned count); static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to, struct dx_map_entry *offsets, int count, unsigned blocksize); static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize); static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block); static int ext4_htree_next_block(struct inode *dir, __u32 hash, struct dx_frame *frame, struct dx_frame *frames, __u32 *start_hash); static struct buffer_head * ext4_dx_find_entry(struct inode *dir, struct ext4_filename *fname, struct ext4_dir_entry_2 **res_dir); static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, struct inode *dir, struct inode *inode); /* checksumming functions */ void initialize_dirent_tail(struct ext4_dir_entry_tail *t, unsigned int blocksize) { memset(t, 0, sizeof(struct ext4_dir_entry_tail)); t->det_rec_len = ext4_rec_len_to_disk( 
sizeof(struct ext4_dir_entry_tail), blocksize); t->det_reserved_ft = EXT4_FT_DIR_CSUM; } /* Walk through a dirent block to find a checksum "dirent" at the tail */ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode, struct ext4_dir_entry *de) { struct ext4_dir_entry_tail *t; #ifdef PARANOID struct ext4_dir_entry *d, *top; d = de; top = (struct ext4_dir_entry *)(((void *)de) + (EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct ext4_dir_entry_tail))); while (d < top && d->rec_len) d = (struct ext4_dir_entry *)(((void *)d) + le16_to_cpu(d->rec_len)); if (d != top) return NULL; t = (struct ext4_dir_entry_tail *)d; #else t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb)); #endif if (t->det_reserved_zero1 || le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) || t->det_reserved_zero2 || t->det_reserved_ft != EXT4_FT_DIR_CSUM) return NULL; return t; } static __le32 ext4_dirent_csum(struct inode *inode, struct ext4_dir_entry *dirent, int size) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); __u32 csum; csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); return cpu_to_le32(csum); } #define warn_no_space_for_csum(inode) \ __warn_no_space_for_csum((inode), __func__, __LINE__) static void __warn_no_space_for_csum(struct inode *inode, const char *func, unsigned int line) { __ext4_warning_inode(inode, func, line, "No space for directory leaf checksum. Please run e2fsck -D."); } int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent) { struct ext4_dir_entry_tail *t; if (!ext4_has_metadata_csum(inode->i_sb)) return 1; t = get_dirent_tail(inode, dirent); if (!t) { warn_no_space_for_csum(inode); return 0; } if (t->det_checksum != ext4_dirent_csum(inode, dirent, (void *)t - (void *)dirent)) return 0; return 1; } static void ext4_dirent_csum_set(struct inode *inode, struct ext4_dir_entry *dirent) { struct ext4_dir_entry_tail *t; if (!ext4_has_metadata_csum(inode->i_sb)) return; t = get_dirent_tail(inode, dirent); if (!t) { warn_no_space_for_csum(inode); return; } t->det_checksum = ext4_dirent_csum(inode, dirent, (void *)t - (void *)dirent); } int ext4_handle_dirty_dirent_node(handle_t *handle, struct inode *inode, struct buffer_head *bh) { ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data); return ext4_handle_dirty_metadata(handle, inode, bh); } static struct dx_countlimit *get_dx_countlimit(struct inode *inode, struct ext4_dir_entry *dirent, int *offset) { struct ext4_dir_entry *dp; struct dx_root_info *root; int count_offset; if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb)) count_offset = 8; else if (le16_to_cpu(dirent->rec_len) == 12) { dp = (struct ext4_dir_entry *)(((void *)dirent) + 12); if (le16_to_cpu(dp->rec_len) != EXT4_BLOCK_SIZE(inode->i_sb) - 12) return NULL; root = (struct dx_root_info *)(((void *)dp + 12)); if (root->reserved_zero || root->info_length != sizeof(struct dx_root_info)) return NULL; count_offset = 32; } else return NULL; if (offset) *offset = count_offset; return (struct dx_countlimit *)(((void *)dirent) + count_offset); } static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent, int count_offset, int count, struct dx_tail *t) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); __u32 csum; int size; __u32 dummy_csum = 0; int offset = offsetof(struct dx_tail, dt_checksum); size = count_offset + (count * sizeof(struct dx_entry)); csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 
*)dirent, size); csum = ext4_chksum(sbi, csum, (__u8 *)t, offset); csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum)); return cpu_to_le32(csum); } static int ext4_dx_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent) { struct dx_countlimit *c; struct dx_tail *t; int count_offset, limit, count; if (!ext4_has_metadata_csum(inode->i_sb)) return 1; c = get_dx_countlimit(inode, dirent, &count_offset); if (!c) { EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D."); return 0; } limit = le16_to_cpu(c->limit); count = le16_to_cpu(c->count); if (count_offset + (limit * sizeof(struct dx_entry)) > EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) { warn_no_space_for_csum(inode); return 0; } t = (struct dx_tail *)(((struct dx_entry *)c) + limit); if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset, count, t)) return 0; return 1; } static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent) { struct dx_countlimit *c; struct dx_tail *t; int count_offset, limit, count; if (!ext4_has_metadata_csum(inode->i_sb)) return; c = get_dx_countlimit(inode, dirent, &count_offset); if (!c) { EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D."); return; } limit = le16_to_cpu(c->limit); count = le16_to_cpu(c->count); if (count_offset + (limit * sizeof(struct dx_entry)) > EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) { warn_no_space_for_csum(inode); return; } t = (struct dx_tail *)(((struct dx_entry *)c) + limit); t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t); } static inline int ext4_handle_dirty_dx_node(handle_t *handle, struct inode *inode, struct buffer_head *bh) { ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data); return ext4_handle_dirty_metadata(handle, inode, bh); } /* * p is at least 6 bytes before the end of page */ static inline struct ext4_dir_entry_2 * ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize) { return (struct ext4_dir_entry_2 *)((char *)p + ext4_rec_len_from_disk(p->rec_len, blocksize)); } /* * Future: use high four bits of block for coalesce-on-delete flags * Mask them off for now. 
*/ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry) { return le32_to_cpu(entry->block) & 0x0fffffff; } static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value) { entry->block = cpu_to_le32(value); } static inline unsigned dx_get_hash(struct dx_entry *entry) { return le32_to_cpu(entry->hash); } static inline void dx_set_hash(struct dx_entry *entry, unsigned value) { entry->hash = cpu_to_le32(value); } static inline unsigned dx_get_count(struct dx_entry *entries) { return le16_to_cpu(((struct dx_countlimit *) entries)->count); } static inline unsigned dx_get_limit(struct dx_entry *entries) { return le16_to_cpu(((struct dx_countlimit *) entries)->limit); } static inline void dx_set_count(struct dx_entry *entries, unsigned value) { ((struct dx_countlimit *) entries)->count = cpu_to_le16(value); } static inline void dx_set_limit(struct dx_entry *entries, unsigned value) { ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value); } static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize) { unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) - EXT4_DIR_REC_LEN(2) - infosize; if (ext4_has_metadata_csum(dir->i_sb)) entry_space -= sizeof(struct dx_tail); return entry_space / sizeof(struct dx_entry); } static inline unsigned dx_node_limit(struct inode *dir) { unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0); if (ext4_has_metadata_csum(dir->i_sb)) entry_space -= sizeof(struct dx_tail); return entry_space / sizeof(struct dx_entry); } /* * Debug */ #ifdef DX_DEBUG static void dx_show_index(char * label, struct dx_entry *entries) { int i, n = dx_get_count (entries); printk(KERN_DEBUG "%s index", label); for (i = 0; i < n; i++) { printk(KERN_CONT " %x->%lu", i ? dx_get_hash(entries + i) : 0, (unsigned long)dx_get_block(entries + i)); } printk(KERN_CONT "\n"); } struct stats { unsigned names; unsigned space; unsigned bcount; }; static struct stats dx_show_leaf(struct inode *dir, struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de, int size, int show_names) { unsigned names = 0, space = 0; char *base = (char *) de; struct dx_hash_info h = *hinfo; printk("names: "); while ((char *) de < base + size) { if (de->inode) { if (show_names) { #ifdef CONFIG_EXT4_FS_ENCRYPTION int len; char *name; struct fscrypt_str fname_crypto_str = FSTR_INIT(NULL, 0); int res = 0; name = de->name; len = de->name_len; if (ext4_encrypted_inode(dir)) res = fscrypt_get_encryption_info(dir); if (res) { printk(KERN_WARNING "Error setting up" " fname crypto: %d\n", res); } if (!fscrypt_has_encryption_key(dir)) { /* Directory is not encrypted */ ext4fs_dirhash(de->name, de->name_len, &h); printk("%*.s:(U)%x.%u ", len, name, h.hash, (unsigned) ((char *) de - base)); } else { struct fscrypt_str de_name = FSTR_INIT(name, len); /* Directory is encrypted */ res = fscrypt_fname_alloc_buffer( dir, len, &fname_crypto_str); if (res) printk(KERN_WARNING "Error " "allocating crypto " "buffer--skipping " "crypto\n"); res = fscrypt_fname_disk_to_usr(dir, 0, 0, &de_name, &fname_crypto_str); if (res) { printk(KERN_WARNING "Error " "converting filename " "from disk to usr" "\n"); name = "??"; len = 2; } else { name = fname_crypto_str.name; len = fname_crypto_str.len; } ext4fs_dirhash(de->name, de->name_len, &h); printk("%*.s:(E)%x.%u ", len, name, h.hash, (unsigned) ((char *) de - base)); fscrypt_fname_free_buffer( &fname_crypto_str); } #else int len = de->name_len; char *name = de->name; ext4fs_dirhash(de->name, de->name_len, &h); printk("%*.s:%x.%u ", len, 
name, h.hash, (unsigned) ((char *) de - base)); #endif } space += EXT4_DIR_REC_LEN(de->name_len); names++; } de = ext4_next_entry(de, size); } printk(KERN_CONT "(%i)\n", names); return (struct stats) { names, space, 1 }; } struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, struct dx_entry *entries, int levels) { unsigned blocksize = dir->i_sb->s_blocksize; unsigned count = dx_get_count(entries), names = 0, space = 0, i; unsigned bcount = 0; struct buffer_head *bh; printk("%i indexed blocks...\n", count); for (i = 0; i < count; i++, entries++) { ext4_lblk_t block = dx_get_block(entries); ext4_lblk_t hash = i ? dx_get_hash(entries): 0; u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash; struct stats stats; printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range); bh = ext4_bread(NULL,dir, block, 0); if (!bh || IS_ERR(bh)) continue; stats = levels? dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1): dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0); names += stats.names; space += stats.space; bcount += stats.bcount; brelse(bh); } if (bcount) printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n", levels ? "" : " ", names, space/bcount, (space/bcount)*100/blocksize); return (struct stats) { names, space, bcount}; } #endif /* DX_DEBUG */ /* * Probe for a directory leaf block to search. * * dx_probe can return ERR_BAD_DX_DIR, which means there was a format * error in the directory index, and the caller should fall back to * searching the directory normally. The callers of dx_probe **MUST** * check for this error code, and make sure it never gets reflected * back to userspace. */ static struct dx_frame * dx_probe(struct ext4_filename *fname, struct inode *dir, struct dx_hash_info *hinfo, struct dx_frame *frame_in) { unsigned count, indirect; struct dx_entry *at, *entries, *p, *q, *m; struct dx_root *root; struct dx_frame *frame = frame_in; struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR); u32 hash; memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0])); frame->bh = ext4_read_dirblock(dir, 0, INDEX); if (IS_ERR(frame->bh)) return (struct dx_frame *) frame->bh; root = (struct dx_root *) frame->bh->b_data; if (root->info.hash_version != DX_HASH_TEA && root->info.hash_version != DX_HASH_HALF_MD4 && root->info.hash_version != DX_HASH_LEGACY) { ext4_warning_inode(dir, "Unrecognised inode hash code %u", root->info.hash_version); goto fail; } if (fname) hinfo = &fname->hinfo; hinfo->hash_version = root->info.hash_version; if (hinfo->hash_version <= DX_HASH_TEA) hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed; if (fname && fname_name(fname)) ext4fs_dirhash(fname_name(fname), fname_len(fname), hinfo); hash = hinfo->hash; if (root->info.unused_flags & 1) { ext4_warning_inode(dir, "Unimplemented hash flags: %#06x", root->info.unused_flags); goto fail; } indirect = root->info.indirect_levels; if (indirect >= ext4_dir_htree_level(dir->i_sb)) { ext4_warning(dir->i_sb, "Directory (ino: %lu) htree depth %#06x exceed" "supported value", dir->i_ino, ext4_dir_htree_level(dir->i_sb)); if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) { ext4_warning(dir->i_sb, "Enable large directory " "feature to access it"); } goto fail; } entries = (struct dx_entry *)(((char *)&root->info) + root->info.info_length); if (dx_get_limit(entries) != dx_root_limit(dir, root->info.info_length)) { ext4_warning_inode(dir, "dx entry: limit %u != root limit 
%u", dx_get_limit(entries), dx_root_limit(dir, root->info.info_length)); goto fail; } dxtrace(printk("Look up %x", hash)); while (1) { count = dx_get_count(entries); if (!count || count > dx_get_limit(entries)) { ext4_warning_inode(dir, "dx entry: count %u beyond limit %u", count, dx_get_limit(entries)); goto fail; } p = entries + 1; q = entries + count - 1; while (p <= q) { m = p + (q - p) / 2; dxtrace(printk(KERN_CONT ".")); if (dx_get_hash(m) > hash) q = m - 1; else p = m + 1; } if (0) { // linear search cross check unsigned n = count - 1; at = entries; while (n--) { dxtrace(printk(KERN_CONT ",")); if (dx_get_hash(++at) > hash) { at--; break; } } assert (at == p - 1); } at = p - 1; dxtrace(printk(KERN_CONT " %x->%u\n", at == entries ? 0 : dx_get_hash(at), dx_get_block(at))); frame->entries = entries; frame->at = at; if (!indirect--) return frame; frame++; frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX); if (IS_ERR(frame->bh)) { ret_err = (struct dx_frame *) frame->bh; frame->bh = NULL; goto fail; } entries = ((struct dx_node *) frame->bh->b_data)->entries; if (dx_get_limit(entries) != dx_node_limit(dir)) { ext4_warning_inode(dir, "dx entry: limit %u != node limit %u", dx_get_limit(entries), dx_node_limit(dir)); goto fail; } } fail: while (frame >= frame_in) { brelse(frame->bh); frame--; } if (ret_err == ERR_PTR(ERR_BAD_DX_DIR)) ext4_warning_inode(dir, "Corrupt directory, running e2fsck is recommended"); return ret_err; } static void dx_release(struct dx_frame *frames) { struct dx_root_info *info; int i; if (frames[0].bh == NULL) return; info = &((struct dx_root *)frames[0].bh->b_data)->info; for (i = 0; i <= info->indirect_levels; i++) { if (frames[i].bh == NULL) break; brelse(frames[i].bh); frames[i].bh = NULL; } } /* * This function increments the frame pointer to search the next leaf * block, and reads in the necessary intervening nodes if the search * should be necessary. Whether or not the search is necessary is * controlled by the hash parameter. If the hash value is even, then * the search is only continued if the next block starts with that * hash value. This is used if we are searching for a specific file. * * If the hash value is HASH_NB_ALWAYS, then always go to the next block. * * This function returns 1 if the caller should continue to search, * or 0 if it should not. If there is an error reading one of the * index blocks, it will a negative error code. * * If start_hash is non-null, it will be filled in with the starting * hash of the next page. */ static int ext4_htree_next_block(struct inode *dir, __u32 hash, struct dx_frame *frame, struct dx_frame *frames, __u32 *start_hash) { struct dx_frame *p; struct buffer_head *bh; int num_frames = 0; __u32 bhash; p = frame; /* * Find the next leaf page by incrementing the frame pointer. * If we run out of entries in the interior node, loop around and * increment pointer in the parent node. When we break out of * this loop, num_frames indicates the number of interior * nodes need to be read. */ while (1) { if (++(p->at) < p->entries + dx_get_count(p->entries)) break; if (p == frames) return 0; num_frames++; p--; } /* * If the hash is 1, then continue only if the next page has a * continuation hash of any value. This is used for readdir * handling. Otherwise, check to see if the hash matches the * desired contiuation hash. If it doesn't, return since * there's no point to read in the successive index pages. 
*/ bhash = dx_get_hash(p->at); if (start_hash) *start_hash = bhash; if ((hash & 1) == 0) { if ((bhash & ~1) != hash) return 0; } /* * If the hash is HASH_NB_ALWAYS, we always go to the next * block so no check is necessary */ while (num_frames--) { bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX); if (IS_ERR(bh)) return PTR_ERR(bh); p++; brelse(p->bh); p->bh = bh; p->at = p->entries = ((struct dx_node *) bh->b_data)->entries; } return 1; } /* * This function fills a red-black tree with information from a * directory block. It returns the number directory entries loaded * into the tree. If there is an error it is returned in err. */ static int htree_dirblock_to_tree(struct file *dir_file, struct inode *dir, ext4_lblk_t block, struct dx_hash_info *hinfo, __u32 start_hash, __u32 start_minor_hash) { struct buffer_head *bh; struct ext4_dir_entry_2 *de, *top; int err = 0, count = 0; struct fscrypt_str fname_crypto_str = FSTR_INIT(NULL, 0), tmp_str; dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n", (unsigned long)block)); bh = ext4_read_dirblock(dir, block, DIRENT); if (IS_ERR(bh)) return PTR_ERR(bh); de = (struct ext4_dir_entry_2 *) bh->b_data; top = (struct ext4_dir_entry_2 *) ((char *) de + dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0)); #ifdef CONFIG_EXT4_FS_ENCRYPTION /* Check if the directory is encrypted */ if (ext4_encrypted_inode(dir)) { err = fscrypt_get_encryption_info(dir); if (err < 0) { brelse(bh); return err; } err = fscrypt_fname_alloc_buffer(dir, EXT4_NAME_LEN, &fname_crypto_str); if (err < 0) { brelse(bh); return err; } } #endif for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) { if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, bh->b_size, (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb)) + ((char *)de - bh->b_data))) { /* silently ignore the rest of the block */ break; } ext4fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && (hinfo->minor_hash < start_minor_hash))) continue; if (de->inode == 0) continue; if (!ext4_encrypted_inode(dir)) { tmp_str.name = de->name; tmp_str.len = de->name_len; err = ext4_htree_store_dirent(dir_file, hinfo->hash, hinfo->minor_hash, de, &tmp_str); } else { int save_len = fname_crypto_str.len; struct fscrypt_str de_name = FSTR_INIT(de->name, de->name_len); /* Directory is encrypted */ err = fscrypt_fname_disk_to_usr(dir, hinfo->hash, hinfo->minor_hash, &de_name, &fname_crypto_str); if (err) { count = err; goto errout; } err = ext4_htree_store_dirent(dir_file, hinfo->hash, hinfo->minor_hash, de, &fname_crypto_str); fname_crypto_str.len = save_len; } if (err != 0) { count = err; goto errout; } count++; } errout: brelse(bh); #ifdef CONFIG_EXT4_FS_ENCRYPTION fscrypt_fname_free_buffer(&fname_crypto_str); #endif return count; } /* * This function fills a red-black tree with information from a * directory. We start scanning the directory in hash order, starting * at start_hash and start_minor_hash. * * This function returns the number of entries inserted into the tree, * or a negative error code. 
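 * Entries are stored in the red-black tree keyed by (hash, minor_hash),
 * so that readdir can hand them back in hash order across successive
 * calls.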
*/ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, __u32 start_minor_hash, __u32 *next_hash) { struct dx_hash_info hinfo; struct ext4_dir_entry_2 *de; struct dx_frame frames[EXT4_HTREE_LEVEL], *frame; struct inode *dir; ext4_lblk_t block; int count = 0; int ret, err; __u32 hashval; struct fscrypt_str tmp_str; dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", start_hash, start_minor_hash)); dir = file_inode(dir_file); if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) { hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; if (hinfo.hash_version <= DX_HASH_TEA) hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; if (ext4_has_inline_data(dir)) { int has_inline_data = 1; count = htree_inlinedir_to_tree(dir_file, dir, 0, &hinfo, start_hash, start_minor_hash, &has_inline_data); if (has_inline_data) { *next_hash = ~0; return count; } } count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo, start_hash, start_minor_hash); *next_hash = ~0; return count; } hinfo.hash = start_hash; hinfo.minor_hash = 0; frame = dx_probe(NULL, dir, &hinfo, frames); if (IS_ERR(frame)) return PTR_ERR(frame); /* Add '.' and '..' from the htree header */ if (!start_hash && !start_minor_hash) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; tmp_str.name = de->name; tmp_str.len = de->name_len; err = ext4_htree_store_dirent(dir_file, 0, 0, de, &tmp_str); if (err != 0) goto errout; count++; } if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; de = ext4_next_entry(de, dir->i_sb->s_blocksize); tmp_str.name = de->name; tmp_str.len = de->name_len; err = ext4_htree_store_dirent(dir_file, 2, 0, de, &tmp_str); if (err != 0) goto errout; count++; } while (1) { if (fatal_signal_pending(current)) { err = -ERESTARTSYS; goto errout; } cond_resched(); block = dx_get_block(frame->at); ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo, start_hash, start_minor_hash); if (ret < 0) { err = ret; goto errout; } count += ret; hashval = ~0; ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS, frame, frames, &hashval); *next_hash = hashval; if (ret < 0) { err = ret; goto errout; } /* * Stop if: (a) there are no more entries, or * (b) we have inserted at least one entry and the * next hash value is not a continuation */ if ((ret == 0) || (count && ((hashval & 1) == 0))) break; } dx_release(frames); dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, " "next hash: %x\n", count, *next_hash)); return count; errout: dx_release(frames); return (err); } static inline int search_dirblock(struct buffer_head *bh, struct inode *dir, struct ext4_filename *fname, unsigned int offset, struct ext4_dir_entry_2 **res_dir) { return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir, fname, offset, res_dir); } /* * Directory block splitting, compacting */ /* * Create map of hash values, offsets, and sizes, stored at end of block. * Returns number of entries mapped. 
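 * Map entries are written downward from map_tail, one per live dirent;
 * offsets are stored divided by 4, since directory entries are 4-byte
 * aligned.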
*/ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de, unsigned blocksize, struct dx_hash_info *hinfo, struct dx_map_entry *map_tail) { int count = 0; char *base = (char *) de; struct dx_hash_info h = *hinfo; while ((char *) de < base + blocksize) { if (de->name_len && de->inode) { ext4fs_dirhash(de->name, de->name_len, &h); map_tail--; map_tail->hash = h.hash; map_tail->offs = ((char *) de - base)>>2; map_tail->size = le16_to_cpu(de->rec_len); count++; cond_resched(); } /* XXX: do we need to check rec_len == 0 case? -Chris */ de = ext4_next_entry(de, blocksize); } return count; } /* Sort map by hash value */ static void dx_sort_map (struct dx_map_entry *map, unsigned count) { struct dx_map_entry *p, *q, *top = map + count - 1; int more; /* Combsort until bubble sort doesn't suck */ while (count > 2) { count = count*10/13; if (count - 9 < 2) /* 9, 10 -> 11 */ count = 11; for (p = top, q = p - count; q >= map; p--, q--) if (p->hash < q->hash) swap(*p, *q); } /* Garden variety bubble sort */ do { more = 0; q = top; while (q-- > map) { if (q[1].hash >= q[0].hash) continue; swap(*(q+1), *q); more = 1; } } while(more); } static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block) { struct dx_entry *entries = frame->entries; struct dx_entry *old = frame->at, *new = old + 1; int count = dx_get_count(entries); assert(count < dx_get_limit(entries)); assert(old < entries + count); memmove(new + 1, new, (char *)(entries + count) - (char *)(new)); dx_set_hash(new, hash); dx_set_block(new, block); dx_set_count(entries, count + 1); } /* * Test whether a directory entry matches the filename being searched for. * * Return: %true if the directory entry matches, otherwise %false. */ static inline bool ext4_match(const struct ext4_filename *fname, const struct ext4_dir_entry_2 *de) { struct fscrypt_name f; if (!de->inode) return false; f.usr_fname = fname->usr_fname; f.disk_name = fname->disk_name; #ifdef CONFIG_EXT4_FS_ENCRYPTION f.crypto_buf = fname->crypto_buf; #endif return fscrypt_match_name(&f, de->name, de->name_len); } /* * Returns 0 if not found, -1 on failure, and 1 on success */ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, struct inode *dir, struct ext4_filename *fname, unsigned int offset, struct ext4_dir_entry_2 **res_dir) { struct ext4_dir_entry_2 * de; char * dlimit; int de_len; de = (struct ext4_dir_entry_2 *)search_buf; dlimit = search_buf + buf_size; while ((char *) de < dlimit) { /* this code is executed quadratically often */ /* do minimal checking `by hand' */ if ((char *) de + de->name_len <= dlimit && ext4_match(fname, de)) { /* found a match - just to be sure, do * a full check */ if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, bh->b_size, offset)) return -1; *res_dir = de; return 1; } /* prevent looping on a bad block */ de_len = ext4_rec_len_from_disk(de->rec_len, dir->i_sb->s_blocksize); if (de_len <= 0) return -1; offset += de_len; de = (struct ext4_dir_entry_2 *) ((char *) de + de_len); } return 0; } static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block, struct ext4_dir_entry *de) { struct super_block *sb = dir->i_sb; if (!is_dx(dir)) return 0; if (block == 0) return 1; if (de->inode == 0 && ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) == sb->s_blocksize) return 1; return 0; } /* * ext4_find_entry() * * finds an entry in the specified directory with the wanted name. It * returns the cache buffer in which the entry was found, and the entry * itself (as a parameter - res_dir). 
It does NOT read the inode of the * entry - you'll have to do that yourself if you want to. * * The returned buffer_head has ->b_count elevated. The caller is expected * to brelse() it when appropriate. */ static struct buffer_head * ext4_find_entry (struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, int *inlined) { struct super_block *sb; struct buffer_head *bh_use[NAMEI_RA_SIZE]; struct buffer_head *bh, *ret = NULL; ext4_lblk_t start, block; const u8 *name = d_name->name; size_t ra_max = 0; /* Number of bh's in the readahead buffer, bh_use[] */ size_t ra_ptr = 0; /* Current index into readahead buffer */ ext4_lblk_t nblocks; int i, namelen, retval; struct ext4_filename fname; *res_dir = NULL; sb = dir->i_sb; namelen = d_name->len; if (namelen > EXT4_NAME_LEN) return NULL; retval = ext4_fname_setup_filename(dir, d_name, 1, &fname); if (retval == -ENOENT) return NULL; if (retval) return ERR_PTR(retval); if (ext4_has_inline_data(dir)) { int has_inline_data = 1; ret = ext4_find_inline_entry(dir, &fname, res_dir, &has_inline_data); if (has_inline_data) { if (inlined) *inlined = 1; goto cleanup_and_exit; } } if ((namelen <= 2) && (name[0] == '.') && (name[1] == '.' || name[1] == '\0')) { /* * "." or ".." will only be in the first block * NFS may look up ".."; "." should be handled by the VFS */ block = start = 0; nblocks = 1; goto restart; } if (is_dx(dir)) { ret = ext4_dx_find_entry(dir, &fname, res_dir); /* * On success, or if the error was file not found, * return. Otherwise, fall back to doing a search the * old fashioned way. */ if (!IS_ERR(ret) || PTR_ERR(ret) != ERR_BAD_DX_DIR) goto cleanup_and_exit; dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " "falling back\n")); } nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); if (!nblocks) { ret = NULL; goto cleanup_and_exit; } start = EXT4_I(dir)->i_dir_start_lookup; if (start >= nblocks) start = 0; block = start; restart: do { /* * We deal with the read-ahead logic here. */ if (ra_ptr >= ra_max) { /* Refill the readahead buffer */ ra_ptr = 0; if (block < start) ra_max = start - block; else ra_max = nblocks - block; ra_max = min(ra_max, ARRAY_SIZE(bh_use)); retval = ext4_bread_batch(dir, block, ra_max, false /* wait */, bh_use); if (retval) { ret = ERR_PTR(retval); ra_max = 0; goto cleanup_and_exit; } } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; wait_on_buffer(bh); if (!buffer_uptodate(bh)) { EXT4_ERROR_INODE(dir, "reading directory lblock %lu", (unsigned long) block); brelse(bh); ret = ERR_PTR(-EIO); goto cleanup_and_exit; } if (!buffer_verified(bh) && !is_dx_internal_node(dir, block, (struct ext4_dir_entry *)bh->b_data) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { EXT4_ERROR_INODE(dir, "checksumming directory " "block %lu", (unsigned long)block); brelse(bh); ret = ERR_PTR(-EFSBADCRC); goto cleanup_and_exit; } set_buffer_verified(bh); i = search_dirblock(bh, dir, &fname, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (i == 1) { EXT4_I(dir)->i_dir_start_lookup = block; ret = bh; goto cleanup_and_exit; } else { brelse(bh); if (i < 0) goto cleanup_and_exit; } next: if (++block >= nblocks) block = 0; } while (block != start); /* * If the directory has grown while we were searching, then * search the last part of the directory before giving up. 
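 * Only the blocks appended since the scan started need to be examined.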
*/ block = nblocks; nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); if (block < nblocks) { start = 0; goto restart; } cleanup_and_exit: /* Clean up the read-ahead blocks */ for (; ra_ptr < ra_max; ra_ptr++) brelse(bh_use[ra_ptr]); ext4_fname_free_filename(&fname); return ret; } static struct buffer_head * ext4_dx_find_entry(struct inode *dir, struct ext4_filename *fname, struct ext4_dir_entry_2 **res_dir) { struct super_block * sb = dir->i_sb; struct dx_frame frames[EXT4_HTREE_LEVEL], *frame; struct buffer_head *bh; ext4_lblk_t block; int retval; #ifdef CONFIG_EXT4_FS_ENCRYPTION *res_dir = NULL; #endif frame = dx_probe(fname, dir, NULL, frames); if (IS_ERR(frame)) return (struct buffer_head *) frame; do { block = dx_get_block(frame->at); bh = ext4_read_dirblock(dir, block, DIRENT); if (IS_ERR(bh)) goto errout; retval = search_dirblock(bh, dir, fname, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (retval == 1) goto success; brelse(bh); if (retval == -1) { bh = ERR_PTR(ERR_BAD_DX_DIR); goto errout; } /* Check to see if we should continue to search */ retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame, frames, NULL); if (retval < 0) { ext4_warning_inode(dir, "error %d reading directory index block", retval); bh = ERR_PTR(retval); goto errout; } } while (retval == 1); bh = NULL; errout: dxtrace(printk(KERN_DEBUG "%s not found\n", fname->usr_fname->name)); success: dx_release(frames); return bh; } static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode; struct ext4_dir_entry_2 *de; struct buffer_head *bh; int err; err = fscrypt_prepare_lookup(dir, dentry, flags); if (err) return ERR_PTR(err); if (dentry->d_name.len > EXT4_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); if (IS_ERR(bh)) return (struct dentry *) bh; inode = NULL; if (bh) { __u32 ino = le32_to_cpu(de->inode); brelse(bh); if (!ext4_valid_inum(dir->i_sb, ino)) { EXT4_ERROR_INODE(dir, "bad inode number: %u", ino); return ERR_PTR(-EFSCORRUPTED); } if (unlikely(ino == dir->i_ino)) { EXT4_ERROR_INODE(dir, "'%pd' linked to parent dir", dentry); return ERR_PTR(-EFSCORRUPTED); } inode = ext4_iget_normal(dir->i_sb, ino); if (inode == ERR_PTR(-ESTALE)) { EXT4_ERROR_INODE(dir, "deleted inode referenced: %u", ino); return ERR_PTR(-EFSCORRUPTED); } if (!IS_ERR(inode) && ext4_encrypted_inode(dir) && (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && !fscrypt_has_permitted_context(dir, inode)) { ext4_warning(inode->i_sb, "Inconsistent encryption contexts: %lu/%lu", dir->i_ino, inode->i_ino); iput(inode); return ERR_PTR(-EPERM); } } return d_splice_alias(inode, dentry); } struct dentry *ext4_get_parent(struct dentry *child) { __u32 ino; static const struct qstr dotdot = QSTR_INIT("..", 2); struct ext4_dir_entry_2 * de; struct buffer_head *bh; bh = ext4_find_entry(d_inode(child), &dotdot, &de, NULL); if (IS_ERR(bh)) return (struct dentry *) bh; if (!bh) return ERR_PTR(-ENOENT); ino = le32_to_cpu(de->inode); brelse(bh); if (!ext4_valid_inum(child->d_sb, ino)) { EXT4_ERROR_INODE(d_inode(child), "bad parent inode number: %u", ino); return ERR_PTR(-EFSCORRUPTED); } return d_obtain_alias(ext4_iget_normal(child->d_sb, ino)); } /* * Move count entries from end of map between two memory locations. * Returns pointer to last entry moved. 
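 * The moved entries are marked unused (inode = 0) in the source block so
 * that dx_pack_dirents() can reclaim their space.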
*/ static struct ext4_dir_entry_2 * dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count, unsigned blocksize) { unsigned rec_len = 0; while (count--) { struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) (from + (map->offs<<2)); rec_len = EXT4_DIR_REC_LEN(de->name_len); memcpy (to, de, rec_len); ((struct ext4_dir_entry_2 *) to)->rec_len = ext4_rec_len_to_disk(rec_len, blocksize); de->inode = 0; map++; to += rec_len; } return (struct ext4_dir_entry_2 *) (to - rec_len); } /* * Compact each dir entry in the range to the minimal rec_len. * Returns pointer to last entry in range. */ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize) { struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base; unsigned rec_len = 0; prev = to = de; while ((char*)de < base + blocksize) { next = ext4_next_entry(de, blocksize); if (de->inode && de->name_len) { rec_len = EXT4_DIR_REC_LEN(de->name_len); if (de > to) memmove(to, de, rec_len); to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize); prev = to; to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len); } de = next; } return prev; } /* * Split a full leaf block to make room for a new dir entry. * Allocate a new block, and move entries so that they are approx. equally full. * Returns pointer to de in block into which the new entry will be inserted. */ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, struct buffer_head **bh,struct dx_frame *frame, struct dx_hash_info *hinfo) { unsigned blocksize = dir->i_sb->s_blocksize; unsigned count, continued; struct buffer_head *bh2; ext4_lblk_t newblock; u32 hash2; struct dx_map_entry *map; char *data1 = (*bh)->b_data, *data2; unsigned split, move, size; struct ext4_dir_entry_2 *de = NULL, *de2; struct ext4_dir_entry_tail *t; int csum_size = 0; int err = 0, i; if (ext4_has_metadata_csum(dir->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); bh2 = ext4_append(handle, dir, &newblock); if (IS_ERR(bh2)) { brelse(*bh); *bh = NULL; return (struct ext4_dir_entry_2 *) bh2; } BUFFER_TRACE(*bh, "get_write_access"); err = ext4_journal_get_write_access(handle, *bh); if (err) goto journal_error; BUFFER_TRACE(frame->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, frame->bh); if (err) goto journal_error; data2 = bh2->b_data; /* create map in the end of data2 block */ map = (struct dx_map_entry *) (data2 + blocksize); count = dx_make_map(dir, (struct ext4_dir_entry_2 *) data1, blocksize, hinfo, map); map -= count; dx_sort_map(map, count); /* Split the existing block in the middle, size-wise */ size = 0; move = 0; for (i = count-1; i >= 0; i--) { /* is more than half of this entry in 2nd half of the block? 
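 * (walk the sorted map from the highest hash downward until roughly half
 * of the block's bytes have been claimed for the new block)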
*/ if (size + map[i].size/2 > blocksize/2) break; size += map[i].size; move++; } /* map index at which we will split */ split = count - move; hash2 = map[split].hash; continued = hash2 == map[split - 1].hash; dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n", (unsigned long)dx_get_block(frame->at), hash2, split, count-split)); /* Fancy dance to stay within two buffers */ de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize); de = dx_pack_dirents(data1, blocksize); de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - (char *) de, blocksize); de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) - (char *) de2, blocksize); if (csum_size) { t = EXT4_DIRENT_TAIL(data2, blocksize); initialize_dirent_tail(t, blocksize); t = EXT4_DIRENT_TAIL(data1, blocksize); initialize_dirent_tail(t, blocksize); } dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1)); dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1)); /* Which block gets the new entry? */ if (hinfo->hash >= hash2) { swap(*bh, bh2); de = de2; } dx_insert_block(frame, hash2 + continued, newblock); err = ext4_handle_dirty_dirent_node(handle, dir, bh2); if (err) goto journal_error; err = ext4_handle_dirty_dx_node(handle, dir, frame->bh); if (err) goto journal_error; brelse(bh2); dxtrace(dx_show_index("frame", frame->entries)); return de; journal_error: brelse(*bh); brelse(bh2); *bh = NULL; ext4_std_error(dir->i_sb, err); return ERR_PTR(err); } int ext4_find_dest_de(struct inode *dir, struct inode *inode, struct buffer_head *bh, void *buf, int buf_size, struct ext4_filename *fname, struct ext4_dir_entry_2 **dest_de) { struct ext4_dir_entry_2 *de; unsigned short reclen = EXT4_DIR_REC_LEN(fname_len(fname)); int nlen, rlen; unsigned int offset = 0; char *top; de = (struct ext4_dir_entry_2 *)buf; top = buf + buf_size - reclen; while ((char *) de <= top) { if (ext4_check_dir_entry(dir, NULL, de, bh, buf, buf_size, offset)) return -EFSCORRUPTED; if (ext4_match(fname, de)) return -EEXIST; nlen = EXT4_DIR_REC_LEN(de->name_len); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if ((de->inode ? rlen - nlen : rlen) >= reclen) break; de = (struct ext4_dir_entry_2 *)((char *)de + rlen); offset += rlen; } if ((char *) de > top) return -ENOSPC; *dest_de = de; return 0; } void ext4_insert_dentry(struct inode *inode, struct ext4_dir_entry_2 *de, int buf_size, struct ext4_filename *fname) { int nlen, rlen; nlen = EXT4_DIR_REC_LEN(de->name_len); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if (de->inode) { struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen); de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size); de->rec_len = ext4_rec_len_to_disk(nlen, buf_size); de = de1; } de->file_type = EXT4_FT_UNKNOWN; de->inode = cpu_to_le32(inode->i_ino); ext4_set_de_type(inode->i_sb, de, inode->i_mode); de->name_len = fname_len(fname); memcpy(de->name, fname_name(fname), fname_len(fname)); } /* * Add a new entry into a directory (leaf) block. If de is non-NULL, * it points to a directory entry which is guaranteed to be large * enough for new directory entry. If de is NULL, then * add_dirent_to_buf will attempt search the directory block for * space. It will return -ENOSPC if no space is available, and -EIO * and -EEXIST if directory entry already exists. 
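 * On success the buffer is journalled as dirty and the directory's
 * mtime, ctime and i_version are updated.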
*/ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname, struct inode *dir, struct inode *inode, struct ext4_dir_entry_2 *de, struct buffer_head *bh) { unsigned int blocksize = dir->i_sb->s_blocksize; int csum_size = 0; int err; if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); if (!de) { err = ext4_find_dest_de(dir, inode, bh, bh->b_data, blocksize - csum_size, fname, &de); if (err) return err; } BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (err) { ext4_std_error(dir->i_sb, err); return err; } /* By now the buffer is marked for journaling */ ext4_insert_dentry(inode, de, blocksize, fname); /* * XXX shouldn't update any times until successful * completion of syscall, but too many callers depend * on this. * * XXX similarly, too many callers depend on * ext4_new_inode() setting the times, but error * recovery deletes the inode, so the worst that can * happen is that the times are slightly out of date * and/or different from the directory change time. */ dir->i_mtime = dir->i_ctime = current_time(dir); ext4_update_dx_flag(dir); inode_inc_iversion(dir); ext4_mark_inode_dirty(handle, dir); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirent_node(handle, dir, bh); if (err) ext4_std_error(dir->i_sb, err); return 0; } /* * This converts a one block unindexed directory to a 3 block indexed * directory, and adds the dentry to the indexed directory. */ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname, struct inode *dir, struct inode *inode, struct buffer_head *bh) { struct buffer_head *bh2; struct dx_root *root; struct dx_frame frames[EXT4_HTREE_LEVEL], *frame; struct dx_entry *entries; struct ext4_dir_entry_2 *de, *de2; struct ext4_dir_entry_tail *t; char *data1, *top; unsigned len; int retval; unsigned blocksize; ext4_lblk_t block; struct fake_dirent *fde; int csum_size = 0; if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); blocksize = dir->i_sb->s_blocksize; dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino)); BUFFER_TRACE(bh, "get_write_access"); retval = ext4_journal_get_write_access(handle, bh); if (retval) { ext4_std_error(dir->i_sb, retval); brelse(bh); return retval; } root = (struct dx_root *) bh->b_data; /* The 0th block becomes the root, move the dirents out */ fde = &root->dotdot; de = (struct ext4_dir_entry_2 *)((char *)fde + ext4_rec_len_from_disk(fde->rec_len, blocksize)); if ((char *) de >= (((char *) root) + blocksize)) { EXT4_ERROR_INODE(dir, "invalid rec_len for '..'"); brelse(bh); return -EFSCORRUPTED; } len = ((char *) root) + (blocksize - csum_size) - (char *) de; /* Allocate new block for the 0th block's dirents */ bh2 = ext4_append(handle, dir, &block); if (IS_ERR(bh2)) { brelse(bh); return PTR_ERR(bh2); } ext4_set_inode_flag(dir, EXT4_INODE_INDEX); data1 = bh2->b_data; memcpy (data1, de, len); de = (struct ext4_dir_entry_2 *) data1; top = data1 + len; while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) de = de2; de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - (char *) de, blocksize); if (csum_size) { t = EXT4_DIRENT_TAIL(data1, blocksize); initialize_dirent_tail(t, blocksize); } /* Initialize the root; the dot dirents already exist */ de = (struct ext4_dir_entry_2 *) (&root->dotdot); de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2), blocksize); memset (&root->info, 0, sizeof(root->info)); root->info.info_length 
= sizeof(root->info); root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; entries = root->entries; dx_set_block(entries, 1); dx_set_count(entries, 1); dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info))); /* Initialize as for dx_probe */ fname->hinfo.hash_version = root->info.hash_version; if (fname->hinfo.hash_version <= DX_HASH_TEA) fname->hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; ext4fs_dirhash(fname_name(fname), fname_len(fname), &fname->hinfo); memset(frames, 0, sizeof(frames)); frame = frames; frame->entries = entries; frame->at = entries; frame->bh = bh; retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh); if (retval) goto out_frames; retval = ext4_handle_dirty_dirent_node(handle, dir, bh2); if (retval) goto out_frames; de = do_split(handle,dir, &bh2, frame, &fname->hinfo); if (IS_ERR(de)) { retval = PTR_ERR(de); goto out_frames; } retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2); out_frames: /* * Even if the block split failed, we have to properly write * out all the changes we did so far. Otherwise we can end up * with corrupted filesystem. */ if (retval) ext4_mark_inode_dirty(handle, dir); dx_release(frames); brelse(bh2); return retval; } /* * ext4_add_entry() * * adds a file entry to the specified directory, using the same * semantics as ext4_find_entry(). It returns NULL if it failed. * * NOTE!! The inode part of 'de' is left at 0 - which means you * may not sleep between calling this and putting something into * the entry, as someone else might have used it while you slept. */ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode) { struct inode *dir = d_inode(dentry->d_parent); struct buffer_head *bh = NULL; struct ext4_dir_entry_2 *de; struct ext4_dir_entry_tail *t; struct super_block *sb; struct ext4_filename fname; int retval; int dx_fallback=0; unsigned blocksize; ext4_lblk_t block, blocks; int csum_size = 0; if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); sb = dir->i_sb; blocksize = sb->s_blocksize; if (!dentry->d_name.len) return -EINVAL; retval = ext4_fname_setup_filename(dir, &dentry->d_name, 0, &fname); if (retval) return retval; if (ext4_has_inline_data(dir)) { retval = ext4_try_add_inline_entry(handle, &fname, dir, inode); if (retval < 0) goto out; if (retval == 1) { retval = 0; goto out; } } if (is_dx(dir)) { retval = ext4_dx_add_entry(handle, &fname, dir, inode); if (!retval || (retval != ERR_BAD_DX_DIR)) goto out; ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); dx_fallback++; ext4_mark_inode_dirty(handle, dir); } blocks = dir->i_size >> sb->s_blocksize_bits; for (block = 0; block < blocks; block++) { bh = ext4_read_dirblock(dir, block, DIRENT); if (IS_ERR(bh)) { retval = PTR_ERR(bh); bh = NULL; goto out; } retval = add_dirent_to_buf(handle, &fname, dir, inode, NULL, bh); if (retval != -ENOSPC) goto out; if (blocks == 1 && !dx_fallback && ext4_has_feature_dir_index(sb)) { retval = make_indexed_dir(handle, &fname, dir, inode, bh); bh = NULL; /* make_indexed_dir releases bh */ goto out; } brelse(bh); } bh = ext4_append(handle, dir, &block); if (IS_ERR(bh)) { retval = PTR_ERR(bh); bh = NULL; goto out; } de = (struct ext4_dir_entry_2 *) bh->b_data; de->inode = 0; de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize); if (csum_size) { t = EXT4_DIRENT_TAIL(bh->b_data, blocksize); initialize_dirent_tail(t, blocksize); } retval = add_dirent_to_buf(handle, &fname, dir, 
inode, de, bh); out: ext4_fname_free_filename(&fname); brelse(bh); if (retval == 0) ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY); return retval; } /* * Returns 0 for success, or a negative error value */ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, struct inode *dir, struct inode *inode) { struct dx_frame frames[EXT4_HTREE_LEVEL], *frame; struct dx_entry *entries, *at; struct buffer_head *bh; struct super_block *sb = dir->i_sb; struct ext4_dir_entry_2 *de; int restart; int err; again: restart = 0; frame = dx_probe(fname, dir, NULL, frames); if (IS_ERR(frame)) return PTR_ERR(frame); entries = frame->entries; at = frame->at; bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT); if (IS_ERR(bh)) { err = PTR_ERR(bh); bh = NULL; goto cleanup; } BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (err) goto journal_error; err = add_dirent_to_buf(handle, fname, dir, inode, NULL, bh); if (err != -ENOSPC) goto cleanup; err = 0; /* Block full, should compress but for now just split */ dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n", dx_get_count(entries), dx_get_limit(entries))); /* Need to split index? */ if (dx_get_count(entries) == dx_get_limit(entries)) { ext4_lblk_t newblock; int levels = frame - frames + 1; unsigned int icount; int add_level = 1; struct dx_entry *entries2; struct dx_node *node2; struct buffer_head *bh2; while (frame > frames) { if (dx_get_count((frame - 1)->entries) < dx_get_limit((frame - 1)->entries)) { add_level = 0; break; } frame--; /* split higher index block */ at = frame->at; entries = frame->entries; restart = 1; } if (add_level && levels == ext4_dir_htree_level(sb)) { ext4_warning(sb, "Directory (ino: %lu) index full, " "reach max htree level :%d", dir->i_ino, levels); if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) { ext4_warning(sb, "Large directory feature is " "not enabled on this " "filesystem"); } err = -ENOSPC; goto cleanup; } icount = dx_get_count(entries); bh2 = ext4_append(handle, dir, &newblock); if (IS_ERR(bh2)) { err = PTR_ERR(bh2); goto cleanup; } node2 = (struct dx_node *)(bh2->b_data); entries2 = node2->entries; memset(&node2->fake, 0, sizeof(struct fake_dirent)); node2->fake.rec_len = ext4_rec_len_to_disk(sb->s_blocksize, sb->s_blocksize); BUFFER_TRACE(frame->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, frame->bh); if (err) goto journal_error; if (!add_level) { unsigned icount1 = icount/2, icount2 = icount - icount1; unsigned hash2 = dx_get_hash(entries + icount1); dxtrace(printk(KERN_DEBUG "Split index %i/%i\n", icount1, icount2)); BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */ err = ext4_journal_get_write_access(handle, (frame - 1)->bh); if (err) goto journal_error; memcpy((char *) entries2, (char *) (entries + icount1), icount2 * sizeof(struct dx_entry)); dx_set_count(entries, icount1); dx_set_count(entries2, icount2); dx_set_limit(entries2, dx_node_limit(dir)); /* Which index block gets the new entry? 
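 * (if the insertion point landed in the upper half that was just copied
 * out, switch frame->at, frame->entries and frame->bh over to the new
 * index block)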
*/ if (at - entries >= icount1) { frame->at = at = at - entries - icount1 + entries2; frame->entries = entries = entries2; swap(frame->bh, bh2); } dx_insert_block((frame - 1), hash2, newblock); dxtrace(dx_show_index("node", frame->entries)); dxtrace(dx_show_index("node", ((struct dx_node *) bh2->b_data)->entries)); err = ext4_handle_dirty_dx_node(handle, dir, bh2); if (err) goto journal_error; brelse(bh2); err = ext4_handle_dirty_dx_node(handle, dir, (frame - 1)->bh); if (err) goto journal_error; if (restart) { err = ext4_handle_dirty_dx_node(handle, dir, frame->bh); goto journal_error; } } else { struct dx_root *dxroot; memcpy((char *) entries2, (char *) entries, icount * sizeof(struct dx_entry)); dx_set_limit(entries2, dx_node_limit(dir)); /* Set up root */ dx_set_count(entries, 1); dx_set_block(entries + 0, newblock); dxroot = (struct dx_root *)frames[0].bh->b_data; dxroot->info.indirect_levels += 1; dxtrace(printk(KERN_DEBUG "Creating %d level index...\n", dxroot->info.indirect_levels)); err = ext4_handle_dirty_dx_node(handle, dir, frame->bh); if (err) goto journal_error; err = ext4_handle_dirty_dx_node(handle, dir, bh2); brelse(bh2); restart = 1; goto journal_error; } } de = do_split(handle, dir, &bh, frame, &fname->hinfo); if (IS_ERR(de)) { err = PTR_ERR(de); goto cleanup; } err = add_dirent_to_buf(handle, fname, dir, inode, de, bh); goto cleanup; journal_error: ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */ cleanup: brelse(bh); dx_release(frames); /* @restart being true means the htree path has changed; we need to * repeat dx_probe() to find a valid htree path. */ if (restart && err == 0) goto again; return err; } /* * ext4_generic_delete_entry deletes a directory entry by merging it * with the previous entry */ int ext4_generic_delete_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh, void *entry_buf, int buf_size, int csum_size) { struct ext4_dir_entry_2 *de, *pde; unsigned int blocksize = dir->i_sb->s_blocksize; int i; i = 0; pde = NULL; de = (struct ext4_dir_entry_2 *)entry_buf; while (i < buf_size - csum_size) { if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, bh->b_size, i)) return -EFSCORRUPTED; if (de == de_del) { if (pde) pde->rec_len = ext4_rec_len_to_disk( ext4_rec_len_from_disk(pde->rec_len, blocksize) + ext4_rec_len_from_disk(de->rec_len, blocksize), blocksize); else de->inode = 0; inode_inc_iversion(dir); return 0; } i += ext4_rec_len_from_disk(de->rec_len, blocksize); pde = de; de = ext4_next_entry(de, blocksize); } return -ENOENT; } static int ext4_delete_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh) { int err, csum_size = 0; if (ext4_has_inline_data(dir)) { int has_inline_data = 1; err = ext4_delete_inline_entry(handle, dir, de_del, bh, &has_inline_data); if (has_inline_data) return err; } if (ext4_has_metadata_csum(dir->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (unlikely(err)) goto out; err = ext4_generic_delete_entry(handle, dir, de_del, bh, bh->b_data, dir->i_sb->s_blocksize, csum_size); if (err) goto out; BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirent_node(handle, dir, bh); if (unlikely(err)) goto out; return 0; out: if (err != -ENOENT) ext4_std_error(dir->i_sb, err); return err; } /* * Set directory link count to 1 if nlinks > EXT4_LINK_MAX, or if nlinks == 2 * since this indicates that nlinks count was 
previously 1 to avoid overflowing * the 16-bit i_links_count field on disk. Directories with i_nlink == 1 mean * that subdirectory link counts are not being maintained accurately. * * The caller has already checked for i_nlink overflow in case the DIR_LINK * feature is not enabled and returned -EMLINK. The is_dx() check is a proxy * for checking S_ISDIR(inode) (since the INODE_INDEX feature will not be set * on regular files) and to avoid creating huge/slow non-HTREE directories. */ static void ext4_inc_count(handle_t *handle, struct inode *inode) { inc_nlink(inode); if (is_dx(inode) && (inode->i_nlink > EXT4_LINK_MAX || inode->i_nlink == 2)) set_nlink(inode, 1); } /* * If a directory had nlink == 1, then we should let it be 1. This indicates * directory has >EXT4_LINK_MAX subdirs. */ static void ext4_dec_count(handle_t *handle, struct inode *inode) { if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) drop_nlink(inode); } static int ext4_add_nondir(handle_t *handle, struct dentry *dentry, struct inode *inode) { int err = ext4_add_entry(handle, dentry, inode); if (!err) { ext4_mark_inode_dirty(handle, inode); unlock_new_inode(inode); d_instantiate(dentry, inode); return 0; } drop_nlink(inode); unlock_new_inode(inode); iput(inode); return err; } /* * By the time this is called, we already have created * the directory cache entry for the new file, but it * is so far negative - it has no inode. * * If the create succeeds, we fill in the inode information * with d_instantiate(). */ static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { handle_t *handle; struct inode *inode; int err, credits, retries = 0; err = dquot_initialize(dir); if (err) return err; credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3); retry: inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0, NULL, EXT4_HT_DIR, credits); handle = ext4_journal_current_handle(); err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); err = ext4_add_nondir(handle, dentry, inode); if (!err && IS_DIRSYNC(dir)) ext4_handle_sync(handle); } if (handle) ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } static int ext4_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { handle_t *handle; struct inode *inode; int err, credits, retries = 0; err = dquot_initialize(dir); if (err) return err; credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3); retry: inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0, NULL, EXT4_HT_DIR, credits); handle = ext4_journal_current_handle(); err = PTR_ERR(inode); if (!IS_ERR(inode)) { init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &ext4_special_inode_operations; err = ext4_add_nondir(handle, dentry, inode); if (!err && IS_DIRSYNC(dir)) ext4_handle_sync(handle); } if (handle) ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } static int ext4_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) { handle_t *handle; struct inode *inode; int err, retries = 0; err = dquot_initialize(dir); if (err) return err; retry: inode = ext4_new_inode_start_handle(dir, mode, NULL, 0, NULL, EXT4_HT_DIR, EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) + 4 + EXT4_XATTR_TRANS_BLOCKS); handle = ext4_journal_current_handle(); err = PTR_ERR(inode); if 
(!IS_ERR(inode)) { inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); d_tmpfile(dentry, inode); err = ext4_orphan_add(handle, inode); if (err) goto err_unlock_inode; mark_inode_dirty(inode); unlock_new_inode(inode); } if (handle) ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; err_unlock_inode: ext4_journal_stop(handle); unlock_new_inode(inode); return err; } struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode, struct ext4_dir_entry_2 *de, int blocksize, int csum_size, unsigned int parent_ino, int dotdot_real_len) { de->inode = cpu_to_le32(inode->i_ino); de->name_len = 1; de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len), blocksize); strcpy(de->name, "."); ext4_set_de_type(inode->i_sb, de, S_IFDIR); de = ext4_next_entry(de, blocksize); de->inode = cpu_to_le32(parent_ino); de->name_len = 2; if (!dotdot_real_len) de->rec_len = ext4_rec_len_to_disk(blocksize - (csum_size + EXT4_DIR_REC_LEN(1)), blocksize); else de->rec_len = ext4_rec_len_to_disk( EXT4_DIR_REC_LEN(de->name_len), blocksize); strcpy(de->name, ".."); ext4_set_de_type(inode->i_sb, de, S_IFDIR); return ext4_next_entry(de, blocksize); } static int ext4_init_new_dir(handle_t *handle, struct inode *dir, struct inode *inode) { struct buffer_head *dir_block = NULL; struct ext4_dir_entry_2 *de; struct ext4_dir_entry_tail *t; ext4_lblk_t block = 0; unsigned int blocksize = dir->i_sb->s_blocksize; int csum_size = 0; int err; if (ext4_has_metadata_csum(dir->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { err = ext4_try_create_inline_dir(handle, dir, inode); if (err < 0 && err != -ENOSPC) goto out; if (!err) goto out; } inode->i_size = 0; dir_block = ext4_append(handle, inode, &block); if (IS_ERR(dir_block)) return PTR_ERR(dir_block); de = (struct ext4_dir_entry_2 *)dir_block->b_data; ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0); set_nlink(inode, 2); if (csum_size) { t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize); initialize_dirent_tail(t, blocksize); } BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); if (err) goto out; set_buffer_verified(dir_block); out: brelse(dir_block); return err; } static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { handle_t *handle; struct inode *inode; int err, credits, retries = 0; if (EXT4_DIR_LINK_MAX(dir)) return -EMLINK; err = dquot_initialize(dir); if (err) return err; credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3); retry: inode = ext4_new_inode_start_handle(dir, S_IFDIR | mode, &dentry->d_name, 0, NULL, EXT4_HT_DIR, credits); handle = ext4_journal_current_handle(); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_stop; inode->i_op = &ext4_dir_inode_operations; inode->i_fop = &ext4_dir_operations; err = ext4_init_new_dir(handle, dir, inode); if (err) goto out_clear_inode; err = ext4_mark_inode_dirty(handle, inode); if (!err) err = ext4_add_entry(handle, dentry, inode); if (err) { out_clear_inode: clear_nlink(inode); unlock_new_inode(inode); ext4_mark_inode_dirty(handle, inode); iput(inode); goto out_stop; } ext4_inc_count(handle, dir); ext4_update_dx_flag(dir); err = ext4_mark_inode_dirty(handle, dir); if (err) goto out_clear_inode; unlock_new_inode(inode); d_instantiate(dentry, inode); if (IS_DIRSYNC(dir)) 
ext4_handle_sync(handle); out_stop: if (handle) ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } /* * routine to check that the specified directory is empty (for rmdir) */ bool ext4_empty_dir(struct inode *inode) { unsigned int offset; struct buffer_head *bh; struct ext4_dir_entry_2 *de, *de1; struct super_block *sb; if (ext4_has_inline_data(inode)) { int has_inline_data = 1; int ret; ret = empty_inline_dir(inode, &has_inline_data); if (has_inline_data) return ret; } sb = inode->i_sb; if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) { EXT4_ERROR_INODE(inode, "invalid size"); return true; } bh = ext4_read_dirblock(inode, 0, EITHER); if (IS_ERR(bh)) return true; de = (struct ext4_dir_entry_2 *) bh->b_data; de1 = ext4_next_entry(de, sb->s_blocksize); if (le32_to_cpu(de->inode) != inode->i_ino || le32_to_cpu(de1->inode) == 0 || strcmp(".", de->name) || strcmp("..", de1->name)) { ext4_warning_inode(inode, "directory missing '.' and/or '..'"); brelse(bh); return true; } offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) + ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize); de = ext4_next_entry(de1, sb->s_blocksize); while (offset < inode->i_size) { if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) { unsigned int lblock; brelse(bh); lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb); bh = ext4_read_dirblock(inode, lblock, EITHER); if (IS_ERR(bh)) return true; de = (struct ext4_dir_entry_2 *) bh->b_data; } if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, offset)) { de = (struct ext4_dir_entry_2 *)(bh->b_data + sb->s_blocksize); offset = (offset | (sb->s_blocksize - 1)) + 1; continue; } if (le32_to_cpu(de->inode)) { brelse(bh); return false; } offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); de = ext4_next_entry(de, sb->s_blocksize); } brelse(bh); return true; } /* * ext4_orphan_add() links an unlinked or truncated inode into a list of * such inodes, starting at the superblock, in case we crash before the * file is closed/deleted, or in case the inode truncate spans multiple * transactions and the last transaction is not recovered after a crash. * * At filesystem recovery time, we walk this list deleting unlinked * inodes and truncating linked inodes in ext4_orphan_cleanup(). * * Orphan list manipulation functions must be called under i_mutex unless * we are just creating the inode or deleting it. */ int ext4_orphan_add(handle_t *handle, struct inode *inode) { struct super_block *sb = inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_iloc iloc; int err = 0, rc; bool dirty = false; if (!sbi->s_journal || is_bad_inode(inode)) return 0; WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) && !inode_is_locked(inode)); /* * Exit early if inode already is on orphan list. This is a big speedup * since we don't have to contend on the global s_orphan_lock. */ if (!list_empty(&EXT4_I(inode)->i_orphan)) return 0; /* * Orphan handling is only valid for files with data blocks * being truncated, or files being unlinked. 
Note that we either * hold i_mutex, or the inode can not be referenced from outside, * so i_nlink should not be bumped due to race */ J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) || inode->i_nlink == 0); BUFFER_TRACE(sbi->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, sbi->s_sbh); if (err) goto out; err = ext4_reserve_inode_write(handle, inode, &iloc); if (err) goto out; mutex_lock(&sbi->s_orphan_lock); /* * Due to previous errors inode may be already a part of on-disk * orphan list. If so skip on-disk list modification. */ if (!NEXT_ORPHAN(inode) || NEXT_ORPHAN(inode) > (le32_to_cpu(sbi->s_es->s_inodes_count))) { /* Insert this inode at the head of the on-disk orphan list */ NEXT_ORPHAN(inode) = le32_to_cpu(sbi->s_es->s_last_orphan); sbi->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); dirty = true; } list_add(&EXT4_I(inode)->i_orphan, &sbi->s_orphan); mutex_unlock(&sbi->s_orphan_lock); if (dirty) { err = ext4_handle_dirty_super(handle, sb); rc = ext4_mark_iloc_dirty(handle, inode, &iloc); if (!err) err = rc; if (err) { /* * We have to remove inode from in-memory list if * addition to on disk orphan list failed. Stray orphan * list entries can cause panics at unmount time. */ mutex_lock(&sbi->s_orphan_lock); list_del_init(&EXT4_I(inode)->i_orphan); mutex_unlock(&sbi->s_orphan_lock); } } jbd_debug(4, "superblock will point to %lu\n", inode->i_ino); jbd_debug(4, "orphan inode %lu will point to %d\n", inode->i_ino, NEXT_ORPHAN(inode)); out: ext4_std_error(sb, err); return err; } /* * ext4_orphan_del() removes an unlinked or truncated inode from the list * of such inodes stored on disk, because it is finally being cleaned up. */ int ext4_orphan_del(handle_t *handle, struct inode *inode) { struct list_head *prev; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); __u32 ino_next; struct ext4_iloc iloc; int err = 0; if (!sbi->s_journal && !(sbi->s_mount_state & EXT4_ORPHAN_FS)) return 0; WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) && !inode_is_locked(inode)); /* Do this quick check before taking global s_orphan_lock. */ if (list_empty(&ei->i_orphan)) return 0; if (handle) { /* Grab inode buffer early before taking global s_orphan_lock */ err = ext4_reserve_inode_write(handle, inode, &iloc); } mutex_lock(&sbi->s_orphan_lock); jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino); prev = ei->i_orphan.prev; list_del_init(&ei->i_orphan); /* If we're on an error path, we may not have a valid * transaction handle with which to update the orphan list on * disk, but we still need to remove the inode from the linked * list in memory. 
*/ if (!handle || err) { mutex_unlock(&sbi->s_orphan_lock); goto out_err; } ino_next = NEXT_ORPHAN(inode); if (prev == &sbi->s_orphan) { jbd_debug(4, "superblock will point to %u\n", ino_next); BUFFER_TRACE(sbi->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, sbi->s_sbh); if (err) { mutex_unlock(&sbi->s_orphan_lock); goto out_brelse; } sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); mutex_unlock(&sbi->s_orphan_lock); err = ext4_handle_dirty_super(handle, inode->i_sb); } else { struct ext4_iloc iloc2; struct inode *i_prev = &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode; jbd_debug(4, "orphan inode %lu will point to %u\n", i_prev->i_ino, ino_next); err = ext4_reserve_inode_write(handle, i_prev, &iloc2); if (err) { mutex_unlock(&sbi->s_orphan_lock); goto out_brelse; } NEXT_ORPHAN(i_prev) = ino_next; err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2); mutex_unlock(&sbi->s_orphan_lock); } if (err) goto out_brelse; NEXT_ORPHAN(inode) = 0; err = ext4_mark_iloc_dirty(handle, inode, &iloc); out_err: ext4_std_error(inode->i_sb, err); return err; out_brelse: brelse(iloc.bh); goto out_err; } static int ext4_rmdir(struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode; struct buffer_head *bh; struct ext4_dir_entry_2 *de; handle_t *handle = NULL; if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) return -EIO; /* Initialize quotas before so that eventual writes go in * separate transaction */ retval = dquot_initialize(dir); if (retval) return retval; retval = dquot_initialize(d_inode(dentry)); if (retval) return retval; retval = -ENOENT; bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); if (IS_ERR(bh)) return PTR_ERR(bh); if (!bh) goto end_rmdir; inode = d_inode(dentry); retval = -EFSCORRUPTED; if (le32_to_cpu(de->inode) != inode->i_ino) goto end_rmdir; retval = -ENOTEMPTY; if (!ext4_empty_dir(inode)) goto end_rmdir; handle = ext4_journal_start(dir, EXT4_HT_DIR, EXT4_DATA_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) { retval = PTR_ERR(handle); handle = NULL; goto end_rmdir; } if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); retval = ext4_delete_entry(handle, dir, de, bh); if (retval) goto end_rmdir; if (!EXT4_DIR_LINK_EMPTY(inode)) ext4_warning_inode(inode, "empty directory '%.*s' has too many links (%u)", dentry->d_name.len, dentry->d_name.name, inode->i_nlink); inode->i_version++; clear_nlink(inode); /* There's no need to set i_disksize: the fact that i_nlink is * zero will ensure that the right thing happens during any * recovery. 
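 * (if we crash before the final iput, the orphan processing done at the
 * next mount will finish releasing the inode)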
*/ inode->i_size = 0; ext4_orphan_add(handle, inode); inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); ext4_mark_inode_dirty(handle, inode); ext4_dec_count(handle, dir); ext4_update_dx_flag(dir); ext4_mark_inode_dirty(handle, dir); end_rmdir: brelse(bh); if (handle) ext4_journal_stop(handle); return retval; } static int ext4_unlink(struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode; struct buffer_head *bh; struct ext4_dir_entry_2 *de; handle_t *handle = NULL; if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) return -EIO; trace_ext4_unlink_enter(dir, dentry); /* Initialize quotas before so that eventual writes go * in separate transaction */ retval = dquot_initialize(dir); if (retval) return retval; retval = dquot_initialize(d_inode(dentry)); if (retval) return retval; retval = -ENOENT; bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); if (IS_ERR(bh)) return PTR_ERR(bh); if (!bh) goto end_unlink; inode = d_inode(dentry); retval = -EFSCORRUPTED; if (le32_to_cpu(de->inode) != inode->i_ino) goto end_unlink; handle = ext4_journal_start(dir, EXT4_HT_DIR, EXT4_DATA_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) { retval = PTR_ERR(handle); handle = NULL; goto end_unlink; } if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); if (inode->i_nlink == 0) { ext4_warning_inode(inode, "Deleting file '%.*s' with no links", dentry->d_name.len, dentry->d_name.name); set_nlink(inode, 1); } retval = ext4_delete_entry(handle, dir, de, bh); if (retval) goto end_unlink; dir->i_ctime = dir->i_mtime = current_time(dir); ext4_update_dx_flag(dir); ext4_mark_inode_dirty(handle, dir); drop_nlink(inode); if (!inode->i_nlink) ext4_orphan_add(handle, inode); inode->i_ctime = current_time(inode); ext4_mark_inode_dirty(handle, inode); end_unlink: brelse(bh); if (handle) ext4_journal_stop(handle); trace_ext4_unlink_exit(dentry, retval); return retval; } static int ext4_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { handle_t *handle; struct inode *inode; int err, len = strlen(symname); int credits; bool encryption_required; struct fscrypt_str disk_link; struct fscrypt_symlink_data *sd = NULL; if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) return -EIO; disk_link.len = len + 1; disk_link.name = (char *) symname; encryption_required = (ext4_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))); if (encryption_required) { err = fscrypt_get_encryption_info(dir); if (err) return err; if (!fscrypt_has_encryption_key(dir)) return -ENOKEY; disk_link.len = (fscrypt_fname_encrypted_size(dir, len) + sizeof(struct fscrypt_symlink_data)); sd = kzalloc(disk_link.len, GFP_KERNEL); if (!sd) return -ENOMEM; } if (disk_link.len > dir->i_sb->s_blocksize) { err = -ENAMETOOLONG; goto err_free_sd; } err = dquot_initialize(dir); if (err) goto err_free_sd; if ((disk_link.len > EXT4_N_BLOCKS * 4)) { /* * For non-fast symlinks, we just allocate inode and put it on * orphan list in the first transaction => we need bitmap, * group descriptor, sb, inode block, quota blocks, and * possibly selinux xattr blocks. */ credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) + EXT4_XATTR_TRANS_BLOCKS; } else { /* * Fast symlink. We have to add entry to directory * (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS), * allocate new inode (bitmap, group descriptor, inode block, * quota blocks, sb is already counted in previous macros). 
*/ credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3; } inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO, &dentry->d_name, 0, NULL, EXT4_HT_DIR, credits); handle = ext4_journal_current_handle(); if (IS_ERR(inode)) { if (handle) ext4_journal_stop(handle); err = PTR_ERR(inode); goto err_free_sd; } if (encryption_required) { struct qstr istr; struct fscrypt_str ostr = FSTR_INIT(sd->encrypted_path, disk_link.len); istr.name = (const unsigned char *) symname; istr.len = len; err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr); if (err) goto err_drop_inode; sd->len = cpu_to_le16(ostr.len); disk_link.name = (char *) sd; inode->i_op = &ext4_encrypted_symlink_inode_operations; } if ((disk_link.len > EXT4_N_BLOCKS * 4)) { if (!encryption_required) inode->i_op = &ext4_symlink_inode_operations; inode_nohighmem(inode); ext4_set_aops(inode); /* * We cannot call page_symlink() with transaction started * because it calls into ext4_write_begin() which can wait * for transaction commit if we are running out of space * and thus we deadlock. So we have to stop transaction now * and restart it when symlink contents is written. * * To keep fs consistent in case of crash, we have to put inode * to orphan list in the mean time. */ drop_nlink(inode); err = ext4_orphan_add(handle, inode); ext4_journal_stop(handle); handle = NULL; if (err) goto err_drop_inode; err = __page_symlink(inode, disk_link.name, disk_link.len, 1); if (err) goto err_drop_inode; /* * Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS * + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified */ handle = ext4_journal_start(dir, EXT4_HT_DIR, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1); if (IS_ERR(handle)) { err = PTR_ERR(handle); handle = NULL; goto err_drop_inode; } set_nlink(inode, 1); err = ext4_orphan_del(handle, inode); if (err) goto err_drop_inode; } else { /* clear the extent format for fast symlink */ ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); if (!encryption_required) { inode->i_op = &ext4_fast_symlink_inode_operations; inode->i_link = (char *)&EXT4_I(inode)->i_data; } memcpy((char *)&EXT4_I(inode)->i_data, disk_link.name, disk_link.len); inode->i_size = disk_link.len - 1; } EXT4_I(inode)->i_disksize = inode->i_size; err = ext4_add_nondir(handle, dentry, inode); if (!err && IS_DIRSYNC(dir)) ext4_handle_sync(handle); if (handle) ext4_journal_stop(handle); kfree(sd); return err; err_drop_inode: if (handle) ext4_journal_stop(handle); clear_nlink(inode); unlock_new_inode(inode); iput(inode); err_free_sd: kfree(sd); return err; } static int ext4_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { handle_t *handle; struct inode *inode = d_inode(old_dentry); int err, retries = 0; if (inode->i_nlink >= EXT4_LINK_MAX) return -EMLINK; err = fscrypt_prepare_link(old_dentry, dir, dentry); if (err) return err; if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) && (!projid_eq(EXT4_I(dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid))) return -EXDEV; err = dquot_initialize(dir); if (err) return err; retry: handle = ext4_journal_start(dir, EXT4_HT_DIR, (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS) + 1); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode->i_ctime = current_time(inode); ext4_inc_count(handle, inode); ihold(inode); err = ext4_add_entry(handle, dentry, inode); if (!err) { ext4_mark_inode_dirty(handle, inode); /* this can happen only for tmpfile being * linked 
the first time */ if (inode->i_nlink == 1) ext4_orphan_del(handle, inode); d_instantiate(dentry, inode); } else { drop_nlink(inode); iput(inode); } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } /* * Try to find buffer head where contains the parent block. * It should be the inode block if it is inlined or the 1st block * if it is a normal dir. */ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle, struct inode *inode, int *retval, struct ext4_dir_entry_2 **parent_de, int *inlined) { struct buffer_head *bh; if (!ext4_has_inline_data(inode)) { bh = ext4_read_dirblock(inode, 0, EITHER); if (IS_ERR(bh)) { *retval = PTR_ERR(bh); return NULL; } *parent_de = ext4_next_entry( (struct ext4_dir_entry_2 *)bh->b_data, inode->i_sb->s_blocksize); return bh; } *inlined = 1; return ext4_get_first_inline_block(inode, parent_de, retval); } struct ext4_renament { struct inode *dir; struct dentry *dentry; struct inode *inode; bool is_dir; int dir_nlink_delta; /* entry for "dentry" */ struct buffer_head *bh; struct ext4_dir_entry_2 *de; int inlined; /* entry for ".." in inode if it's a directory */ struct buffer_head *dir_bh; struct ext4_dir_entry_2 *parent_de; int dir_inlined; }; static int ext4_rename_dir_prepare(handle_t *handle, struct ext4_renament *ent) { int retval; ent->dir_bh = ext4_get_first_dir_block(handle, ent->inode, &retval, &ent->parent_de, &ent->dir_inlined); if (!ent->dir_bh) return retval; if (le32_to_cpu(ent->parent_de->inode) != ent->dir->i_ino) return -EFSCORRUPTED; BUFFER_TRACE(ent->dir_bh, "get_write_access"); return ext4_journal_get_write_access(handle, ent->dir_bh); } static int ext4_rename_dir_finish(handle_t *handle, struct ext4_renament *ent, unsigned dir_ino) { int retval; ent->parent_de->inode = cpu_to_le32(dir_ino); BUFFER_TRACE(ent->dir_bh, "call ext4_handle_dirty_metadata"); if (!ent->dir_inlined) { if (is_dx(ent->inode)) { retval = ext4_handle_dirty_dx_node(handle, ent->inode, ent->dir_bh); } else { retval = ext4_handle_dirty_dirent_node(handle, ent->inode, ent->dir_bh); } } else { retval = ext4_mark_inode_dirty(handle, ent->inode); } if (retval) { ext4_std_error(ent->dir->i_sb, retval); return retval; } return 0; } static int ext4_setent(handle_t *handle, struct ext4_renament *ent, unsigned ino, unsigned file_type) { int retval; BUFFER_TRACE(ent->bh, "get write access"); retval = ext4_journal_get_write_access(handle, ent->bh); if (retval) return retval; ent->de->inode = cpu_to_le32(ino); if (ext4_has_feature_filetype(ent->dir->i_sb)) ent->de->file_type = file_type; ent->dir->i_version++; ent->dir->i_ctime = ent->dir->i_mtime = current_time(ent->dir); ext4_mark_inode_dirty(handle, ent->dir); BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata"); if (!ent->inlined) { retval = ext4_handle_dirty_dirent_node(handle, ent->dir, ent->bh); if (unlikely(retval)) { ext4_std_error(ent->dir->i_sb, retval); return retval; } } brelse(ent->bh); ent->bh = NULL; return 0; } static int ext4_find_delete_entry(handle_t *handle, struct inode *dir, const struct qstr *d_name) { int retval = -ENOENT; struct buffer_head *bh; struct ext4_dir_entry_2 *de; bh = ext4_find_entry(dir, d_name, &de, NULL); if (IS_ERR(bh)) return PTR_ERR(bh); if (bh) { retval = ext4_delete_entry(handle, dir, de, bh); brelse(bh); } return retval; } static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent, int force_reread) { int retval; /* * ent->de could have moved from under us during htree split, so make * sure 
that we are deleting the right entry. We might also be pointing * to a stale entry in the unused part of ent->bh so just checking inum * and the name isn't enough. */ if (le32_to_cpu(ent->de->inode) != ent->inode->i_ino || ent->de->name_len != ent->dentry->d_name.len || strncmp(ent->de->name, ent->dentry->d_name.name, ent->de->name_len) || force_reread) { retval = ext4_find_delete_entry(handle, ent->dir, &ent->dentry->d_name); } else { retval = ext4_delete_entry(handle, ent->dir, ent->de, ent->bh); if (retval == -ENOENT) { retval = ext4_find_delete_entry(handle, ent->dir, &ent->dentry->d_name); } } if (retval) { ext4_warning_inode(ent->dir, "Deleting old file: nlink %d, error=%d", ent->dir->i_nlink, retval); } } static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent) { if (ent->dir_nlink_delta) { if (ent->dir_nlink_delta == -1) ext4_dec_count(handle, ent->dir); else ext4_inc_count(handle, ent->dir); ext4_mark_inode_dirty(handle, ent->dir); } } static struct inode *ext4_whiteout_for_rename(struct ext4_renament *ent, int credits, handle_t **h) { struct inode *wh; handle_t *handle; int retries = 0; /* * for inode block, sb block, group summaries, * and inode bitmap */ credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) + EXT4_XATTR_TRANS_BLOCKS + 4); retry: wh = ext4_new_inode_start_handle(ent->dir, S_IFCHR | WHITEOUT_MODE, &ent->dentry->d_name, 0, NULL, EXT4_HT_DIR, credits); handle = ext4_journal_current_handle(); if (IS_ERR(wh)) { if (handle) ext4_journal_stop(handle); if (PTR_ERR(wh) == -ENOSPC && ext4_should_retry_alloc(ent->dir->i_sb, &retries)) goto retry; } else { *h = handle; init_special_inode(wh, wh->i_mode, WHITEOUT_DEV); wh->i_op = &ext4_special_inode_operations; } return wh; } /* * Anybody can rename anything with this: the permission checks are left to the * higher-level routines. * * n.b. old_{dentry,inode) refers to the source dentry/inode * while new_{dentry,inode) refers to the destination dentry/inode * This comes from rename(const char *oldpath, const char *newpath) */ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { handle_t *handle = NULL; struct ext4_renament old = { .dir = old_dir, .dentry = old_dentry, .inode = d_inode(old_dentry), }; struct ext4_renament new = { .dir = new_dir, .dentry = new_dentry, .inode = d_inode(new_dentry), }; int force_reread; int retval; struct inode *whiteout = NULL; int credits; u8 old_file_type; if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) && (!projid_eq(EXT4_I(new_dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid))) return -EXDEV; retval = dquot_initialize(old.dir); if (retval) return retval; retval = dquot_initialize(new.dir); if (retval) return retval; /* Initialize quotas before so that eventual writes go * in separate transaction */ if (new.inode) { retval = dquot_initialize(new.inode); if (retval) return retval; } old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); if (IS_ERR(old.bh)) return PTR_ERR(old.bh); /* * Check for inode number is _not_ due to possible IO errors. * We might rmdir the source, keep it as pwd of some process * and merrily kill the link to whatever was created under the * same name. 
Goodbye sticky bit ;-< */ retval = -ENOENT; if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino) goto end_rename; new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, &new.de, &new.inlined); if (IS_ERR(new.bh)) { retval = PTR_ERR(new.bh); new.bh = NULL; goto end_rename; } if (new.bh) { if (!new.inode) { brelse(new.bh); new.bh = NULL; } } if (new.inode && !test_opt(new.dir->i_sb, NO_AUTO_DA_ALLOC)) ext4_alloc_da_blocks(old.inode); credits = (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); if (!(flags & RENAME_WHITEOUT)) { handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits); if (IS_ERR(handle)) { retval = PTR_ERR(handle); handle = NULL; goto end_rename; } } else { whiteout = ext4_whiteout_for_rename(&old, credits, &handle); if (IS_ERR(whiteout)) { retval = PTR_ERR(whiteout); whiteout = NULL; goto end_rename; } } if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) ext4_handle_sync(handle); if (S_ISDIR(old.inode->i_mode)) { if (new.inode) { retval = -ENOTEMPTY; if (!ext4_empty_dir(new.inode)) goto end_rename; } else { retval = -EMLINK; if (new.dir != old.dir && EXT4_DIR_LINK_MAX(new.dir)) goto end_rename; } retval = ext4_rename_dir_prepare(handle, &old); if (retval) goto end_rename; } /* * If we're renaming a file within an inline_data dir and adding or * setting the new dirent causes a conversion from inline_data to * extents/blockmap, we need to force the dirent delete code to * re-read the directory, or else we end up trying to delete a dirent * from what is now the extent tree root (or a block map). */ force_reread = (new.dir->i_ino == old.dir->i_ino && ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA)); old_file_type = old.de->file_type; if (whiteout) { /* * Do this before adding a new entry, so the old entry is sure * to be still pointing to the valid old entry. */ retval = ext4_setent(handle, &old, whiteout->i_ino, EXT4_FT_CHRDEV); if (retval) goto end_rename; ext4_mark_inode_dirty(handle, whiteout); } if (!new.bh) { retval = ext4_add_entry(handle, new.dentry, old.inode); if (retval) goto end_rename; } else { retval = ext4_setent(handle, &new, old.inode->i_ino, old_file_type); if (retval) goto end_rename; } if (force_reread) force_reread = !ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA); /* * Like most other Unix systems, set the ctime for inodes on a * rename. 
*/ old.inode->i_ctime = current_time(old.inode); ext4_mark_inode_dirty(handle, old.inode); if (!whiteout) { /* * ok, that's it */ ext4_rename_delete(handle, &old, force_reread); } if (new.inode) { ext4_dec_count(handle, new.inode); new.inode->i_ctime = current_time(new.inode); } old.dir->i_ctime = old.dir->i_mtime = current_time(old.dir); ext4_update_dx_flag(old.dir); if (old.dir_bh) { retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino); if (retval) goto end_rename; ext4_dec_count(handle, old.dir); if (new.inode) { /* checked ext4_empty_dir above, can't have another * parent, ext4_dec_count() won't work for many-linked * dirs */ clear_nlink(new.inode); } else { ext4_inc_count(handle, new.dir); ext4_update_dx_flag(new.dir); ext4_mark_inode_dirty(handle, new.dir); } } ext4_mark_inode_dirty(handle, old.dir); if (new.inode) { ext4_mark_inode_dirty(handle, new.inode); if (!new.inode->i_nlink) ext4_orphan_add(handle, new.inode); } retval = 0; end_rename: brelse(old.dir_bh); brelse(old.bh); brelse(new.bh); if (whiteout) { if (retval) drop_nlink(whiteout); unlock_new_inode(whiteout); iput(whiteout); } if (handle) ext4_journal_stop(handle); return retval; } static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { handle_t *handle = NULL; struct ext4_renament old = { .dir = old_dir, .dentry = old_dentry, .inode = d_inode(old_dentry), }; struct ext4_renament new = { .dir = new_dir, .dentry = new_dentry, .inode = d_inode(new_dentry), }; u8 new_file_type; int retval; struct timespec ctime; if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT) && !projid_eq(EXT4_I(new_dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid)) || (ext4_test_inode_flag(old_dir, EXT4_INODE_PROJINHERIT) && !projid_eq(EXT4_I(old_dir)->i_projid, EXT4_I(new_dentry->d_inode)->i_projid))) return -EXDEV; retval = dquot_initialize(old.dir); if (retval) return retval; retval = dquot_initialize(new.dir); if (retval) return retval; old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, &old.inlined); if (IS_ERR(old.bh)) return PTR_ERR(old.bh); /* * Check for inode number is _not_ due to possible IO errors. * We might rmdir the source, keep it as pwd of some process * and merrily kill the link to whatever was created under the * same name. Goodbye sticky bit ;-< */ retval = -ENOENT; if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino) goto end_rename; new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, &new.de, &new.inlined); if (IS_ERR(new.bh)) { retval = PTR_ERR(new.bh); new.bh = NULL; goto end_rename; } /* RENAME_EXCHANGE case: old *and* new must both exist */ if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino) goto end_rename; handle = ext4_journal_start(old.dir, EXT4_HT_DIR, (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + 2 * EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2)); if (IS_ERR(handle)) { retval = PTR_ERR(handle); handle = NULL; goto end_rename; } if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) ext4_handle_sync(handle); if (S_ISDIR(old.inode->i_mode)) { old.is_dir = true; retval = ext4_rename_dir_prepare(handle, &old); if (retval) goto end_rename; } if (S_ISDIR(new.inode->i_mode)) { new.is_dir = true; retval = ext4_rename_dir_prepare(handle, &new); if (retval) goto end_rename; } /* * Other than the special case of overwriting a directory, parents' * nlink only needs to be modified if this is a cross directory rename. */ if (old.dir != new.dir && old.is_dir != new.is_dir) { old.dir_nlink_delta = old.is_dir ? 
-1 : 1; new.dir_nlink_delta = -old.dir_nlink_delta; retval = -EMLINK; if ((old.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(old.dir)) || (new.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(new.dir))) goto end_rename; } new_file_type = new.de->file_type; retval = ext4_setent(handle, &new, old.inode->i_ino, old.de->file_type); if (retval) goto end_rename; retval = ext4_setent(handle, &old, new.inode->i_ino, new_file_type); if (retval) goto end_rename; /* * Like most other Unix systems, set the ctime for inodes on a * rename. */ ctime = current_time(old.inode); old.inode->i_ctime = ctime; new.inode->i_ctime = ctime; ext4_mark_inode_dirty(handle, old.inode); ext4_mark_inode_dirty(handle, new.inode); if (old.dir_bh) { retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino); if (retval) goto end_rename; } if (new.dir_bh) { retval = ext4_rename_dir_finish(handle, &new, old.dir->i_ino); if (retval) goto end_rename; } ext4_update_dir_count(handle, &old); ext4_update_dir_count(handle, &new); retval = 0; end_rename: brelse(old.dir_bh); brelse(new.dir_bh); brelse(old.bh); brelse(new.bh); if (handle) ext4_journal_stop(handle); return retval; } static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { int err; if (unlikely(ext4_forced_shutdown(EXT4_SB(old_dir->i_sb)))) return -EIO; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry, flags); if (err) return err; if (flags & RENAME_EXCHANGE) { return ext4_cross_rename(old_dir, old_dentry, new_dir, new_dentry); } return ext4_rename(old_dir, old_dentry, new_dir, new_dentry, flags); } /* * directories can handle most operations... */ const struct inode_operations ext4_dir_inode_operations = { .create = ext4_create, .lookup = ext4_lookup, .link = ext4_link, .unlink = ext4_unlink, .symlink = ext4_symlink, .mkdir = ext4_mkdir, .rmdir = ext4_rmdir, .mknod = ext4_mknod, .tmpfile = ext4_tmpfile, .rename = ext4_rename2, .setattr = ext4_setattr, .getattr = ext4_getattr, .listxattr = ext4_listxattr, .get_acl = ext4_get_acl, .set_acl = ext4_set_acl, .fiemap = ext4_fiemap, }; const struct inode_operations ext4_special_inode_operations = { .setattr = ext4_setattr, .getattr = ext4_getattr, .listxattr = ext4_listxattr, .get_acl = ext4_get_acl, .set_acl = ext4_set_acl, };
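The orphan-list helpers earlier in this file (ext4_orphan_add()/ext4_orphan_del()) maintain a singly linked chain on disk: the superblock's s_last_orphan field names the most recently orphaned inode, and each orphan's NEXT_ORPHAN() field names the next one, with 0 terminating the chain. Below is a minimal user-space sketch of just that bookkeeping, with hypothetical arrays standing in for the superblock field and the per-inode field; the real code additionally journals every update, serializes on s_orphan_lock, and uses the in-memory s_orphan list to hand the predecessor to deletion directly instead of walking the chain as this toy does.

#include <stdio.h>

#define NINODES 16

/* hypothetical stand-ins: s_last_orphan from the superblock and the
 * per-inode NEXT_ORPHAN field; inode number 0 means "end of chain" */
static unsigned int last_orphan;
static unsigned int next_orphan[NINODES];

/* mirrors ext4_orphan_add(): new orphans are pushed at the head */
static void orphan_add(unsigned int ino)
{
	next_orphan[ino] = last_orphan;
	last_orphan = ino;
}

/* mirrors ext4_orphan_del(): rewrite whichever pointer named us --
 * either the superblock head or the previous orphan's next field */
static void orphan_del(unsigned int ino)
{
	if (last_orphan == ino) {
		last_orphan = next_orphan[ino];
	} else {
		unsigned int prev = last_orphan;

		while (prev && next_orphan[prev] != ino)
			prev = next_orphan[prev];
		if (prev)
			next_orphan[prev] = next_orphan[ino];
	}
	next_orphan[ino] = 0;
}

int main(void)
{
	orphan_add(3);
	orphan_add(5);		/* chain: sb -> 5 -> 3 */
	orphan_del(3);		/* chain: sb -> 5 */
	printf("head=%u, next[5]=%u\n", last_orphan, next_orphan[5]);
	return 0;
}

Head insertion is what keeps ext4_orphan_add() O(1); it is also why the function only touches the superblock and the new inode, which is exactly the pair of buffers it reserves write access to before taking the lock.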
val2k/linux
fs/ext4/namei.c
C
gpl-2.0
104545
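ext4_symlink() in the file above keys everything on a single size test: if the on-disk target (disk_link.len, trailing NUL included; for encrypted symlinks, the encrypted payload size) exceeds EXT4_N_BLOCKS * 4 bytes, the inode needs a real data block plus the stop-transaction/orphan-list sequence described in its comments; otherwise the target is a "fast" symlink copied straight into the inode's i_data array. A self-contained sketch of that threshold, assuming only that EXT4_N_BLOCKS is 15 four-byte block-pointer slots, i.e. 60 inline bytes:

#include <stdio.h>
#include <string.h>

#define EXT4_N_BLOCKS 15	/* 12 direct + indirect + double + triple */

int main(void)
{
	const char *target = "/usr/share/zoneinfo/UTC";
	size_t disk_len = strlen(target) + 1;	/* the trailing NUL is stored too */

	if (disk_len > EXT4_N_BLOCKS * 4)
		puts("slow symlink: allocate a block, park inode on the orphan list");
	else
		puts("fast symlink: target fits in the inode's i_data[]");
	return 0;
}

The orphan-list detour in the slow path exists because the transaction must be stopped before __page_symlink() runs; if the machine crashes in that window, recovery finds the half-built inode on the orphan list and reclaims it.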
/* comedi/drivers/ni_labpc.c Driver for National Instruments Lab-PC series boards and compatibles Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* */ /* */ #undef LABPC_DEBUG /* */ #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/io.h> #include "../comedidev.h" #include <linux/delay.h> #include <asm/dma.h> #include "8253.h" #include "8255.h" #include "mite.h" #include "comedi_fc.h" #include "ni_labpc.h" #define DRV_NAME "ni_labpc" /* */ #define LABPC_SIZE 32 /* */ #define LABPC_TIMER_BASE 500 /* */ /* */ #define COMMAND1_REG 0x0 #define ADC_GAIN_MASK (0x7 << 4) #define ADC_CHAN_BITS(x) ((x) & 0x7) /* */ #define ADC_SCAN_EN_BIT 0x80 #define COMMAND2_REG 0x1 /* */ #define PRETRIG_BIT 0x1 /* */ #define HWTRIG_BIT 0x2 /* */ #define SWTRIG_BIT 0x4 /* */ #define CASCADE_BIT 0x8 #define DAC_PACED_BIT(channel) (0x40 << ((channel) & 0x1)) #define COMMAND3_REG 0x2 /* */ #define DMA_EN_BIT 0x1 /* */ #define DIO_INTR_EN_BIT 0x2 /* */ #define DMATC_INTR_EN_BIT 0x4 /* */ #define TIMER_INTR_EN_BIT 0x8 /* */ #define ERR_INTR_EN_BIT 0x10 /* */ #define ADC_FNE_INTR_EN_BIT 0x20 #define ADC_CONVERT_REG 0x3 #define DAC_LSB_REG(channel) (0x4 + 2 * ((channel) & 0x1)) #define DAC_MSB_REG(channel) (0x5 + 2 * ((channel) & 0x1)) #define ADC_CLEAR_REG 0x8 #define DMATC_CLEAR_REG 0xa #define TIMER_CLEAR_REG 0xc /* */ #define COMMAND6_REG 0xe /* */ #define ADC_COMMON_BIT 0x1 /* */ #define ADC_UNIP_BIT 0x2 /* */ #define DAC_UNIP_BIT(channel) (0x4 << ((channel) & 0x1)) /* */ #define ADC_FHF_INTR_EN_BIT 0x20 /* */ #define A1_INTR_EN_BIT 0x40 /* */ #define ADC_SCAN_UP_BIT 0x80 #define COMMAND4_REG 0xf /* */ #define INTERVAL_SCAN_EN_BIT 0x1 /* */ #define EXT_SCAN_EN_BIT 0x2 /* */ #define EXT_CONVERT_OUT_BIT 0x4 /* */ #define ADC_DIFF_BIT 0x8 #define EXT_CONVERT_DISABLE_BIT 0x10 /* */ #define COMMAND5_REG 0x1c /* */ #define EEPROM_WRITE_UNPROTECT_BIT 0x4 /* */ #define DITHER_EN_BIT 0x8 /* */ #define CALDAC_LOAD_BIT 0x10 /* */ #define SCLOCK_BIT 0x20 /* */ #define SDATA_BIT 0x40 /* */ #define EEPROM_EN_BIT 0x80 #define INTERVAL_COUNT_REG 0x1e #define INTERVAL_LOAD_REG 0x1f #define INTERVAL_LOAD_BITS 0x1 /* */ #define STATUS1_REG 0x0 /* */ #define DATA_AVAIL_BIT 0x1 /* */ #define OVERRUN_BIT 0x2 /* */ #define OVERFLOW_BIT 0x4 /* */ #define TIMER_BIT 0x8 /* */ #define DMATC_BIT 0x10 /* */ #define EXT_TRIG_BIT 0x40 /* */ #define STATUS2_REG 0x1d /* */ #define EEPROM_OUT_BIT 0x1 /* */ #define A1_TC_BIT 0x2 /* */ #define FNHF_BIT 0x4 #define ADC_FIFO_REG 0xa #define DIO_BASE_REG 0x10 #define COUNTER_A_BASE_REG 0x14 #define COUNTER_A_CONTROL_REG (COUNTER_A_BASE_REG + 0x3) /* */ #define INIT_A0_BITS 0x14 /* */ #define INIT_A1_BITS 0x70 #define COUNTER_B_BASE_REG 0x18 static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int 
labpc_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static irqreturn_t labpc_interrupt(int irq, void *d); static int labpc_drain_fifo(struct comedi_device *dev); #ifdef CONFIG_ISA_DMA_API static void labpc_drain_dma(struct comedi_device *dev); static void handle_isa_dma(struct comedi_device *dev); #endif static void labpc_drain_dregs(struct comedi_device *dev); static int labpc_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int labpc_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int labpc_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int labpc_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int labpc_calib_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int labpc_calib_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int labpc_eeprom_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int labpc_eeprom_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); #ifdef CONFIG_ISA_DMA_API static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd); #endif #ifdef CONFIG_COMEDI_PCI_DRIVERS static int labpc_find_device(struct comedi_device *dev, int bus, int slot); #endif static int labpc_dio_mem_callback(int dir, int port, int data, unsigned long arg); static void labpc_serial_out(struct comedi_device *dev, unsigned int value, unsigned int num_bits); static unsigned int labpc_serial_in(struct comedi_device *dev); static unsigned int labpc_eeprom_read(struct comedi_device *dev, unsigned int address); static unsigned int labpc_eeprom_read_status(struct comedi_device *dev); static int labpc_eeprom_write(struct comedi_device *dev, unsigned int address, unsigned int value); static void write_caldac(struct comedi_device *dev, unsigned int channel, unsigned int value); enum scan_mode { MODE_SINGLE_CHAN, MODE_SINGLE_CHAN_INTERVAL, MODE_MULT_CHAN_UP, MODE_MULT_CHAN_DOWN, }; /* */ #define NUM_LABPC_PLUS_AI_RANGES 16 /* */ static const int labpc_plus_is_unipolar[NUM_LABPC_PLUS_AI_RANGES] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, }; /* */ static const int labpc_plus_ai_gain_bits[NUM_LABPC_PLUS_AI_RANGES] = { 0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, }; static const struct comedi_lrange range_labpc_plus_ai = { NUM_LABPC_PLUS_AI_RANGES, { BIP_RANGE(5), BIP_RANGE(4), BIP_RANGE(2.5), BIP_RANGE(1), BIP_RANGE(0.5), BIP_RANGE(0.25), BIP_RANGE(0.1), BIP_RANGE(0.05), UNI_RANGE(10), UNI_RANGE(8), UNI_RANGE(5), UNI_RANGE(2), UNI_RANGE(1), UNI_RANGE(0.5), UNI_RANGE(0.2), UNI_RANGE(0.1), } }; #define NUM_LABPC_1200_AI_RANGES 14 /* */ const int labpc_1200_is_unipolar[NUM_LABPC_1200_AI_RANGES] = { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, }; EXPORT_SYMBOL_GPL(labpc_1200_is_unipolar); /* */ const int labpc_1200_ai_gain_bits[NUM_LABPC_1200_AI_RANGES] = { 0x00, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x00, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, }; 
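/*
 * Illustrative sketch only, not part of the original driver: each
 * analog-input range index selects both a PGA gain code for the
 * ADC_GAIN_MASK bits of COMMAND1_REG and a polarity flag that drives
 * ADC_UNIP_BIT in COMMAND6_REG.  This hypothetical helper merely pairs
 * the two table lookups that labpc_ai_rinsn() and labpc_ai_cmd()
 * perform inline further down.
 */
static inline void labpc_1200_range_bits(unsigned int range,
					 unsigned int *command1_gain,
					 int *is_unipolar)
{
	*command1_gain = labpc_1200_ai_gain_bits[range];
	*is_unipolar = labpc_1200_is_unipolar[range];
}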
EXPORT_SYMBOL_GPL(labpc_1200_ai_gain_bits); const struct comedi_lrange range_labpc_1200_ai = { NUM_LABPC_1200_AI_RANGES, { BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1), BIP_RANGE(0.5), BIP_RANGE(0.25), BIP_RANGE(0.1), BIP_RANGE(0.05), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2), UNI_RANGE(1), UNI_RANGE(0.5), UNI_RANGE(0.2), UNI_RANGE(0.1), } }; EXPORT_SYMBOL_GPL(range_labpc_1200_ai); /* */ #define AO_RANGE_IS_UNIPOLAR 0x1 static const struct comedi_lrange range_labpc_ao = { 2, { BIP_RANGE(5), UNI_RANGE(10), } }; /* */ static inline unsigned int labpc_inb(unsigned long address) { return inb(address); } static inline void labpc_outb(unsigned int byte, unsigned long address) { outb(byte, address); } static inline unsigned int labpc_readb(unsigned long address) { return readb((void *)address); } static inline void labpc_writeb(unsigned int byte, unsigned long address) { writeb(byte, (void *)address); } static const struct labpc_board_struct labpc_boards[] = { { .name = "lab-pc-1200", .ai_speed = 10000, .bustype = isa_bustype, .register_layout = labpc_1200_layout, .has_ao = 1, .ai_range_table = &range_labpc_1200_ai, .ai_range_code = labpc_1200_ai_gain_bits, .ai_range_is_unipolar = labpc_1200_is_unipolar, .ai_scan_up = 1, .memory_mapped_io = 0, }, { .name = "lab-pc-1200ai", .ai_speed = 10000, .bustype = isa_bustype, .register_layout = labpc_1200_layout, .has_ao = 0, .ai_range_table = &range_labpc_1200_ai, .ai_range_code = labpc_1200_ai_gain_bits, .ai_range_is_unipolar = labpc_1200_is_unipolar, .ai_scan_up = 1, .memory_mapped_io = 0, }, { .name = "lab-pc+", .ai_speed = 12000, .bustype = isa_bustype, .register_layout = labpc_plus_layout, .has_ao = 1, .ai_range_table = &range_labpc_plus_ai, .ai_range_code = labpc_plus_ai_gain_bits, .ai_range_is_unipolar = labpc_plus_is_unipolar, .ai_scan_up = 0, .memory_mapped_io = 0, }, #ifdef CONFIG_COMEDI_PCI_DRIVERS { .name = "pci-1200", .device_id = 0x161, .ai_speed = 10000, .bustype = pci_bustype, .register_layout = labpc_1200_layout, .has_ao = 1, .ai_range_table = &range_labpc_1200_ai, .ai_range_code = labpc_1200_ai_gain_bits, .ai_range_is_unipolar = labpc_1200_is_unipolar, .ai_scan_up = 1, .memory_mapped_io = 1, }, /* */ { .name = DRV_NAME, .bustype = pci_bustype, }, #endif }; /* */ #define thisboard ((struct labpc_board_struct *)dev->board_ptr) /* */ static const int dma_buffer_size = 0xff00; /* */ static const int sample_size = 2; #define devpriv ((struct labpc_private *)dev->private) static struct comedi_driver driver_labpc = { .driver_name = DRV_NAME, .module = THIS_MODULE, .attach = labpc_attach, .detach = labpc_common_detach, .num_names = ARRAY_SIZE(labpc_boards), .board_name = &labpc_boards[0].name, .offset = sizeof(struct labpc_board_struct), }; #ifdef CONFIG_COMEDI_PCI_DRIVERS static DEFINE_PCI_DEVICE_TABLE(labpc_pci_table) = { {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x161)}, {0} }; MODULE_DEVICE_TABLE(pci, labpc_pci_table); #endif /* */ static inline int labpc_counter_load(struct comedi_device *dev, unsigned long base_address, unsigned int counter_number, unsigned int count, unsigned int mode) { if (thisboard->memory_mapped_io) return i8254_mm_load((void *)base_address, 0, counter_number, count, mode); else return i8254_load(base_address, 0, counter_number, count, mode); } int labpc_common_attach(struct comedi_device *dev, unsigned long iobase, unsigned int irq, unsigned int dma_chan) { struct comedi_subdevice *s; int i; unsigned long isr_flags; #ifdef CONFIG_ISA_DMA_API unsigned long dma_flags; #endif short lsb, msb; printk(KERN_ERR "comedi%d: ni_labpc: %s, io 
0x%lx", dev->minor, thisboard->name, iobase); if (irq) printk(", irq %u", irq); if (dma_chan) printk(", dma %u", dma_chan); printk("\n"); if (iobase == 0) { printk(KERN_ERR "io base address is zero!\n"); return -EINVAL; } /* */ if (thisboard->bustype == isa_bustype) { /* */ if (!request_region(iobase, LABPC_SIZE, driver_labpc.driver_name)) { printk(KERN_ERR "I/O port conflict\n"); return -EIO; } } dev->iobase = iobase; if (thisboard->memory_mapped_io) { devpriv->read_byte = labpc_readb; devpriv->write_byte = labpc_writeb; } else { devpriv->read_byte = labpc_inb; devpriv->write_byte = labpc_outb; } /* */ devpriv->write_byte(devpriv->command1_bits, dev->iobase + COMMAND1_REG); devpriv->write_byte(devpriv->command2_bits, dev->iobase + COMMAND2_REG); devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG); devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG); if (thisboard->register_layout == labpc_1200_layout) { devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); devpriv->write_byte(devpriv->command6_bits, dev->iobase + COMMAND6_REG); } /* */ if (irq) { isr_flags = 0; if (thisboard->bustype == pci_bustype || thisboard->bustype == pcmcia_bustype) isr_flags |= IRQF_SHARED; if (request_irq(irq, labpc_interrupt, isr_flags, driver_labpc.driver_name, dev)) { printk(KERN_ERR "unable to allocate irq %u\n", irq); return -EINVAL; } } dev->irq = irq; #ifdef CONFIG_ISA_DMA_API /* */ if (dma_chan > 3) { printk(KERN_ERR " invalid dma channel %u\n", dma_chan); return -EINVAL; } else if (dma_chan) { /* */ devpriv->dma_buffer = kmalloc(dma_buffer_size, GFP_KERNEL | GFP_DMA); if (devpriv->dma_buffer == NULL) { printk(KERN_ERR " failed to allocate dma buffer\n"); return -ENOMEM; } if (request_dma(dma_chan, driver_labpc.driver_name)) { printk(KERN_ERR " failed to allocate dma channel %u\n", dma_chan); return -EINVAL; } devpriv->dma_chan = dma_chan; dma_flags = claim_dma_lock(); disable_dma(devpriv->dma_chan); set_dma_mode(devpriv->dma_chan, DMA_MODE_READ); release_dma_lock(dma_flags); } #endif dev->board_name = thisboard->name; if (alloc_subdevices(dev, 5) < 0) return -ENOMEM; /* */ s = dev->subdevices + 0; dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON | SDF_DIFF | SDF_CMD_READ; s->n_chan = 8; s->len_chanlist = 8; s->maxdata = (1 << 12) - 1; /* */ s->range_table = thisboard->ai_range_table; s->do_cmd = labpc_ai_cmd; s->do_cmdtest = labpc_ai_cmdtest; s->insn_read = labpc_ai_rinsn; s->cancel = labpc_cancel; /* */ s = dev->subdevices + 1; if (thisboard->has_ao) { /* */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND; s->n_chan = NUM_AO_CHAN; s->maxdata = (1 << 12) - 1; /* */ s->range_table = &range_labpc_ao; s->insn_read = labpc_ao_rinsn; s->insn_write = labpc_ao_winsn; /* */ for (i = 0; i < s->n_chan; i++) { devpriv->ao_value[i] = s->maxdata / 2; lsb = devpriv->ao_value[i] & 0xff; msb = (devpriv->ao_value[i] >> 8) & 0xff; devpriv->write_byte(lsb, dev->iobase + DAC_LSB_REG(i)); devpriv->write_byte(msb, dev->iobase + DAC_MSB_REG(i)); } } else { s->type = COMEDI_SUBD_UNUSED; } /* */ s = dev->subdevices + 2; /* */ if (thisboard->memory_mapped_io) subdev_8255_init(dev, s, labpc_dio_mem_callback, (unsigned long)(dev->iobase + DIO_BASE_REG)); else subdev_8255_init(dev, s, NULL, dev->iobase + DIO_BASE_REG); /* */ s = dev->subdevices + 3; if (thisboard->register_layout == labpc_1200_layout) { s->type = COMEDI_SUBD_CALIB; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | 
SDF_INTERNAL; s->n_chan = 16; s->maxdata = 0xff; s->insn_read = labpc_calib_read_insn; s->insn_write = labpc_calib_write_insn; for (i = 0; i < s->n_chan; i++) write_caldac(dev, i, s->maxdata / 2); } else s->type = COMEDI_SUBD_UNUSED; /* */ s = dev->subdevices + 4; if (thisboard->register_layout == labpc_1200_layout) { s->type = COMEDI_SUBD_MEMORY; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; s->n_chan = EEPROM_SIZE; s->maxdata = 0xff; s->insn_read = labpc_eeprom_read_insn; s->insn_write = labpc_eeprom_write_insn; for (i = 0; i < EEPROM_SIZE; i++) devpriv->eeprom_data[i] = labpc_eeprom_read(dev, i); #ifdef LABPC_DEBUG printk(KERN_ERR " eeprom:"); for (i = 0; i < EEPROM_SIZE; i++) printk(" %i:0x%x ", i, devpriv->eeprom_data[i]); printk("\n"); #endif } else s->type = COMEDI_SUBD_UNUSED; return 0; } EXPORT_SYMBOL_GPL(labpc_common_attach); static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it) { unsigned long iobase = 0; unsigned int irq = 0; unsigned int dma_chan = 0; #ifdef CONFIG_COMEDI_PCI_DRIVERS int retval; #endif /* */ if (alloc_private(dev, sizeof(struct labpc_private)) < 0) return -ENOMEM; /* */ switch (thisboard->bustype) { case isa_bustype: #ifdef CONFIG_ISA_DMA_API iobase = it->options[0]; irq = it->options[1]; dma_chan = it->options[2]; #else printk(KERN_ERR " this driver has not been built with ISA DMA " "support.\n"); return -EINVAL; #endif break; case pci_bustype: #ifdef CONFIG_COMEDI_PCI_DRIVERS retval = labpc_find_device(dev, it->options[0], it->options[1]); if (retval < 0) return retval; retval = mite_setup(devpriv->mite); if (retval < 0) return retval; iobase = (unsigned long)devpriv->mite->daq_io_addr; irq = mite_irq(devpriv->mite); #else printk(KERN_ERR " this driver has not been built with PCI " "support.\n"); return -EINVAL; #endif break; case pcmcia_bustype: printk (" this driver does not support pcmcia cards, use ni_labpc_cs.o\n"); return -EINVAL; break; default: printk(KERN_ERR "bug! 
couldn't determine board type\n"); return -EINVAL; break; } return labpc_common_attach(dev, iobase, irq, dma_chan); } /* */ #ifdef CONFIG_COMEDI_PCI_DRIVERS static int labpc_find_device(struct comedi_device *dev, int bus, int slot) { struct mite_struct *mite; int i; for (mite = mite_devices; mite; mite = mite->next) { if (mite->used) continue; /* */ if (bus || slot) { if (bus != mite->pcidev->bus->number || slot != PCI_SLOT(mite->pcidev->devfn)) continue; } for (i = 0; i < driver_labpc.num_names; i++) { if (labpc_boards[i].bustype != pci_bustype) continue; if (mite_device_id(mite) == labpc_boards[i].device_id) { devpriv->mite = mite; /* */ dev->board_ptr = &labpc_boards[i]; return 0; } } } printk(KERN_ERR "no device found\n"); mite_list_devices(); return -EIO; } #endif int labpc_common_detach(struct comedi_device *dev) { printk(KERN_ERR "comedi%d: ni_labpc: detach\n", dev->minor); if (dev->subdevices) subdev_8255_cleanup(dev, dev->subdevices + 2); #ifdef CONFIG_ISA_DMA_API /* */ kfree(devpriv->dma_buffer); if (devpriv->dma_chan) free_dma(devpriv->dma_chan); #endif if (dev->irq) free_irq(dev->irq, dev); if (thisboard->bustype == isa_bustype && dev->iobase) release_region(dev->iobase, LABPC_SIZE); #ifdef CONFIG_COMEDI_PCI_DRIVERS if (devpriv->mite) mite_unsetup(devpriv->mite); #endif return 0; }; EXPORT_SYMBOL_GPL(labpc_common_detach); static void labpc_clear_adc_fifo(const struct comedi_device *dev) { devpriv->write_byte(0x1, dev->iobase + ADC_CLEAR_REG); devpriv->read_byte(dev->iobase + ADC_FIFO_REG); devpriv->read_byte(dev->iobase + ADC_FIFO_REG); } static int labpc_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; spin_lock_irqsave(&dev->spinlock, flags); devpriv->command2_bits &= ~SWTRIG_BIT & ~HWTRIG_BIT & ~PRETRIG_BIT; devpriv->write_byte(devpriv->command2_bits, dev->iobase + COMMAND2_REG); spin_unlock_irqrestore(&dev->spinlock, flags); devpriv->command3_bits = 0; devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG); return 0; } static enum scan_mode labpc_ai_scan_mode(const struct comedi_cmd *cmd) { if (cmd->chanlist_len == 1) return MODE_SINGLE_CHAN; /* */ if (cmd->chanlist == NULL) return MODE_MULT_CHAN_UP; if (CR_CHAN(cmd->chanlist[0]) == CR_CHAN(cmd->chanlist[1])) return MODE_SINGLE_CHAN_INTERVAL; if (CR_CHAN(cmd->chanlist[0]) < CR_CHAN(cmd->chanlist[1])) return MODE_MULT_CHAN_UP; if (CR_CHAN(cmd->chanlist[0]) > CR_CHAN(cmd->chanlist[1])) return MODE_MULT_CHAN_DOWN; printk(KERN_ERR "ni_labpc: bug! 
this should never happen\n"); return 0; } static int labpc_ai_chanlist_invalid(const struct comedi_device *dev, const struct comedi_cmd *cmd) { int mode, channel, range, aref, i; if (cmd->chanlist == NULL) return 0; mode = labpc_ai_scan_mode(cmd); if (mode == MODE_SINGLE_CHAN) return 0; if (mode == MODE_SINGLE_CHAN_INTERVAL) { if (cmd->chanlist_len > 0xff) { comedi_error(dev, "ni_labpc: chanlist too long for single channel interval mode\n"); return 1; } } channel = CR_CHAN(cmd->chanlist[0]); range = CR_RANGE(cmd->chanlist[0]); aref = CR_AREF(cmd->chanlist[0]); for (i = 0; i < cmd->chanlist_len; i++) { switch (mode) { case MODE_SINGLE_CHAN_INTERVAL: if (CR_CHAN(cmd->chanlist[i]) != channel) { comedi_error(dev, "channel scanning order specified in chanlist is not supported by hardware.\n"); return 1; } break; case MODE_MULT_CHAN_UP: if (CR_CHAN(cmd->chanlist[i]) != i) { comedi_error(dev, "channel scanning order specified in chanlist is not supported by hardware.\n"); return 1; } break; case MODE_MULT_CHAN_DOWN: if (CR_CHAN(cmd->chanlist[i]) != cmd->chanlist_len - i - 1) { comedi_error(dev, "channel scanning order specified in chanlist is not supported by hardware.\n"); return 1; } break; default: printk(KERN_ERR "ni_labpc: bug! in chanlist check\n"); return 1; break; } if (CR_RANGE(cmd->chanlist[i]) != range) { comedi_error(dev, "entries in chanlist must all have the same range\n"); return 1; } if (CR_AREF(cmd->chanlist[i]) != aref) { comedi_error(dev, "entries in chanlist must all have the same reference\n"); return 1; } } return 0; } static int labpc_use_continuous_mode(const struct comedi_cmd *cmd) { if (labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN) return 1; if (cmd->scan_begin_src == TRIG_FOLLOW) return 1; return 0; } static unsigned int labpc_ai_convert_period(const struct comedi_cmd *cmd) { if (cmd->convert_src != TRIG_TIMER) return 0; if (labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN && cmd->scan_begin_src == TRIG_TIMER) return cmd->scan_begin_arg; return cmd->convert_arg; } static void labpc_set_ai_convert_period(struct comedi_cmd *cmd, unsigned int ns) { if (cmd->convert_src != TRIG_TIMER) return; if (labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN && cmd->scan_begin_src == TRIG_TIMER) { cmd->scan_begin_arg = ns; if (cmd->convert_arg > cmd->scan_begin_arg) cmd->convert_arg = cmd->scan_begin_arg; } else cmd->convert_arg = ns; } static unsigned int labpc_ai_scan_period(const struct comedi_cmd *cmd) { if (cmd->scan_begin_src != TRIG_TIMER) return 0; if (labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN && cmd->convert_src == TRIG_TIMER) return 0; return cmd->scan_begin_arg; } static void labpc_set_ai_scan_period(struct comedi_cmd *cmd, unsigned int ns) { if (cmd->scan_begin_src != TRIG_TIMER) return; if (labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN && cmd->convert_src == TRIG_TIMER) return; cmd->scan_begin_arg = ns; } static int labpc_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp, tmp2; int stop_mask; /* */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW | TRIG_EXT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_TIMER | TRIG_FOLLOW | TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; stop_mask 
= TRIG_COUNT | TRIG_NONE; if (thisboard->register_layout == labpc_1200_layout) stop_mask |= TRIG_EXT; cmd->stop_src &= stop_mask; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* */ if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT) err++; if (cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_FOLLOW && cmd->scan_begin_src != TRIG_EXT) err++; if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_EXT && cmd->stop_src != TRIG_NONE) err++; /* */ if (cmd->start_src == TRIG_EXT && cmd->stop_src == TRIG_EXT) err++; if (err) return 2; /* */ if (cmd->start_arg == TRIG_NOW && cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (!cmd->chanlist_len) err++; if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg < thisboard->ai_speed) { cmd->convert_arg = thisboard->ai_speed; err++; } } /* */ if (cmd->scan_begin_src == TRIG_TIMER) { if (cmd->convert_src == TRIG_TIMER && cmd->scan_begin_arg < cmd->convert_arg * cmd->chanlist_len) { cmd->scan_begin_arg = cmd->convert_arg * cmd->chanlist_len; err++; } if (cmd->scan_begin_arg < thisboard->ai_speed * cmd->chanlist_len) { cmd->scan_begin_arg = thisboard->ai_speed * cmd->chanlist_len; err++; } } /* */ switch (cmd->stop_src) { case TRIG_COUNT: if (!cmd->stop_arg) { cmd->stop_arg = 1; err++; } break; case TRIG_NONE: if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } break; /* */ default: break; } if (err) return 3; /* */ tmp = cmd->convert_arg; tmp2 = cmd->scan_begin_arg; labpc_adc_timing(dev, cmd); if (tmp != cmd->convert_arg || tmp2 != cmd->scan_begin_arg) err++; if (err) return 4; if (labpc_ai_chanlist_invalid(dev, cmd)) return 5; return 0; } static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { int channel, range, aref; #ifdef CONFIG_ISA_DMA_API unsigned long irq_flags; #endif int ret; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; enum transfer_type xfer; unsigned long flags; if (!dev->irq) { comedi_error(dev, "no irq assigned, cannot perform command"); return -1; } range = CR_RANGE(cmd->chanlist[0]); aref = CR_AREF(cmd->chanlist[0]); /* */ spin_lock_irqsave(&dev->spinlock, flags); devpriv->command2_bits &= ~SWTRIG_BIT & ~HWTRIG_BIT & ~PRETRIG_BIT; devpriv->write_byte(devpriv->command2_bits, dev->iobase + COMMAND2_REG); spin_unlock_irqrestore(&dev->spinlock, flags); devpriv->command3_bits = 0; devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG); /* */ if (cmd->stop_src == TRIG_COUNT) devpriv->count = cmd->stop_arg * cmd->chanlist_len; /* */ if (cmd->stop_src == TRIG_EXT) { /* */ ret = labpc_counter_load(dev, dev->iobase + COUNTER_A_BASE_REG, 1, 3, 0); if (ret < 0) { comedi_error(dev, "error loading counter a1"); return -1; } } else /* */ devpriv->write_byte(INIT_A1_BITS, dev->iobase + COUNTER_A_CONTROL_REG); #ifdef CONFIG_ISA_DMA_API /* */ if (devpriv->dma_chan && /* */ /* */ (cmd->flags & (TRIG_WAKE_EOS | TRIG_RT)) == 0 && /* */ thisboard->bustype == isa_bustype) { xfer = isa_dma_transfer; /* */ } else #endif if (thisboard->register_layout == labpc_1200_layout && /* */ (cmd->flags & TRIG_WAKE_EOS) == 0 && /* */ (cmd->stop_src != TRIG_COUNT || devpriv->count > 256)) { xfer = fifo_half_full_transfer; } else xfer = fifo_not_empty_transfer; devpriv->current_transfer = xfer; /* */ if (thisboard->register_layout == labpc_1200_layout) { /* */ if (aref != 
AREF_GROUND) devpriv->command6_bits |= ADC_COMMON_BIT; else devpriv->command6_bits &= ~ADC_COMMON_BIT; /* */ if (thisboard->ai_range_is_unipolar[range]) devpriv->command6_bits |= ADC_UNIP_BIT; else devpriv->command6_bits &= ~ADC_UNIP_BIT; /* */ if (xfer == fifo_half_full_transfer) devpriv->command6_bits |= ADC_FHF_INTR_EN_BIT; else devpriv->command6_bits &= ~ADC_FHF_INTR_EN_BIT; /* */ if (cmd->stop_src == TRIG_EXT) devpriv->command6_bits |= A1_INTR_EN_BIT; else devpriv->command6_bits &= ~A1_INTR_EN_BIT; /* */ if (labpc_ai_scan_mode(cmd) == MODE_MULT_CHAN_UP) devpriv->command6_bits |= ADC_SCAN_UP_BIT; else devpriv->command6_bits &= ~ADC_SCAN_UP_BIT; /* */ devpriv->write_byte(devpriv->command6_bits, dev->iobase + COMMAND6_REG); } /* */ devpriv->command1_bits = 0; if (labpc_ai_scan_mode(cmd) == MODE_MULT_CHAN_UP) channel = CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]); else channel = CR_CHAN(cmd->chanlist[0]); /* */ if (labpc_ai_scan_mode(cmd) != MODE_SINGLE_CHAN && aref == AREF_DIFF) channel *= 2; devpriv->command1_bits |= ADC_CHAN_BITS(channel); devpriv->command1_bits |= thisboard->ai_range_code[range]; devpriv->write_byte(devpriv->command1_bits, dev->iobase + COMMAND1_REG); /* */ if (labpc_ai_scan_mode(cmd) == MODE_MULT_CHAN_UP || labpc_ai_scan_mode(cmd) == MODE_MULT_CHAN_DOWN) { devpriv->command1_bits |= ADC_SCAN_EN_BIT; /* */ udelay(1); devpriv->write_byte(devpriv->command1_bits, dev->iobase + COMMAND1_REG); } /* */ devpriv->command4_bits = 0; if (cmd->convert_src != TRIG_EXT) devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT; /* */ if (labpc_use_continuous_mode(cmd) == 0) { devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT; if (cmd->scan_begin_src == TRIG_EXT) devpriv->command4_bits |= EXT_SCAN_EN_BIT; } /* */ if (aref == AREF_DIFF) devpriv->command4_bits |= ADC_DIFF_BIT; devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG); devpriv->write_byte(cmd->chanlist_len, dev->iobase + INTERVAL_COUNT_REG); /* */ devpriv->write_byte(INTERVAL_LOAD_BITS, dev->iobase + INTERVAL_LOAD_REG); if (cmd->convert_src == TRIG_TIMER || cmd->scan_begin_src == TRIG_TIMER) { /* */ labpc_adc_timing(dev, cmd); /* */ ret = labpc_counter_load(dev, dev->iobase + COUNTER_B_BASE_REG, 0, devpriv->divisor_b0, 3); if (ret < 0) { comedi_error(dev, "error loading counter b0"); return -1; } } /* */ if (labpc_ai_convert_period(cmd)) { /* */ ret = labpc_counter_load(dev, dev->iobase + COUNTER_A_BASE_REG, 0, devpriv->divisor_a0, 2); if (ret < 0) { comedi_error(dev, "error loading counter a0"); return -1; } } else devpriv->write_byte(INIT_A0_BITS, dev->iobase + COUNTER_A_CONTROL_REG); /* */ if (labpc_ai_scan_period(cmd)) { /* */ ret = labpc_counter_load(dev, dev->iobase + COUNTER_B_BASE_REG, 1, devpriv->divisor_b1, 2); if (ret < 0) { comedi_error(dev, "error loading counter b1"); return -1; } } labpc_clear_adc_fifo(dev); #ifdef CONFIG_ISA_DMA_API /* */ if (xfer == isa_dma_transfer) { irq_flags = claim_dma_lock(); disable_dma(devpriv->dma_chan); /* */ clear_dma_ff(devpriv->dma_chan); set_dma_addr(devpriv->dma_chan, virt_to_bus(devpriv->dma_buffer)); /* */ devpriv->dma_transfer_size = labpc_suggest_transfer_size(*cmd); if (cmd->stop_src == TRIG_COUNT && devpriv->count * sample_size < devpriv->dma_transfer_size) { devpriv->dma_transfer_size = devpriv->count * sample_size; } set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size); enable_dma(devpriv->dma_chan); release_dma_lock(irq_flags); /* */ devpriv->command3_bits |= DMA_EN_BIT | DMATC_INTR_EN_BIT; } else devpriv->command3_bits &= ~DMA_EN_BIT & 
~DMATC_INTR_EN_BIT; #endif /* */ devpriv->command3_bits |= ERR_INTR_EN_BIT; /* */ if (xfer == fifo_not_empty_transfer) devpriv->command3_bits |= ADC_FNE_INTR_EN_BIT; else devpriv->command3_bits &= ~ADC_FNE_INTR_EN_BIT; devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG); /* */ /* */ /* */ spin_lock_irqsave(&dev->spinlock, flags); devpriv->command2_bits |= CASCADE_BIT; switch (cmd->start_src) { case TRIG_EXT: devpriv->command2_bits |= HWTRIG_BIT; devpriv->command2_bits &= ~PRETRIG_BIT & ~SWTRIG_BIT; break; case TRIG_NOW: devpriv->command2_bits |= SWTRIG_BIT; devpriv->command2_bits &= ~PRETRIG_BIT & ~HWTRIG_BIT; break; default: comedi_error(dev, "bug with start_src"); return -1; break; } switch (cmd->stop_src) { case TRIG_EXT: devpriv->command2_bits |= HWTRIG_BIT | PRETRIG_BIT; break; case TRIG_COUNT: case TRIG_NONE: break; default: comedi_error(dev, "bug with stop_src"); return -1; } devpriv->write_byte(devpriv->command2_bits, dev->iobase + COMMAND2_REG); spin_unlock_irqrestore(&dev->spinlock, flags); return 0; } /* */ static irqreturn_t labpc_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async; struct comedi_cmd *cmd; if (dev->attached == 0) { comedi_error(dev, "premature interrupt"); return IRQ_HANDLED; } async = s->async; cmd = &async->cmd; async->events = 0; /* */ devpriv->status1_bits = devpriv->read_byte(dev->iobase + STATUS1_REG); if (thisboard->register_layout == labpc_1200_layout) devpriv->status2_bits = devpriv->read_byte(dev->iobase + STATUS2_REG); if ((devpriv->status1_bits & (DMATC_BIT | TIMER_BIT | OVERFLOW_BIT | OVERRUN_BIT | DATA_AVAIL_BIT)) == 0 && (devpriv->status2_bits & A1_TC_BIT) == 0 && (devpriv->status2_bits & FNHF_BIT)) { return IRQ_NONE; } if (devpriv->status1_bits & OVERRUN_BIT) { /* */ devpriv->write_byte(0x1, dev->iobase + ADC_CLEAR_REG); async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; comedi_event(dev, s); comedi_error(dev, "overrun"); return IRQ_HANDLED; } #ifdef CONFIG_ISA_DMA_API if (devpriv->current_transfer == isa_dma_transfer) { /* */ if (devpriv->status1_bits & DMATC_BIT || (thisboard->register_layout == labpc_1200_layout && devpriv->status2_bits & A1_TC_BIT)) { handle_isa_dma(dev); } } else #endif labpc_drain_fifo(dev); if (devpriv->status1_bits & TIMER_BIT) { comedi_error(dev, "handled timer interrupt?"); /* */ devpriv->write_byte(0x1, dev->iobase + TIMER_CLEAR_REG); } if (devpriv->status1_bits & OVERFLOW_BIT) { /* */ devpriv->write_byte(0x1, dev->iobase + ADC_CLEAR_REG); async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; comedi_event(dev, s); comedi_error(dev, "overflow"); return IRQ_HANDLED; } /* */ if (cmd->stop_src == TRIG_EXT) { if (devpriv->status2_bits & A1_TC_BIT) { labpc_drain_dregs(dev); labpc_cancel(dev, s); async->events |= COMEDI_CB_EOA; } } /* */ if (cmd->stop_src == TRIG_COUNT) { if (devpriv->count == 0) { labpc_cancel(dev, s); async->events |= COMEDI_CB_EOA; } } comedi_event(dev, s); return IRQ_HANDLED; } /* */ static int labpc_drain_fifo(struct comedi_device *dev) { unsigned int lsb, msb; short data; struct comedi_async *async = dev->read_subdev->async; const int timeout = 10000; unsigned int i; devpriv->status1_bits = devpriv->read_byte(dev->iobase + STATUS1_REG); for (i = 0; (devpriv->status1_bits & DATA_AVAIL_BIT) && i < timeout; i++) { /* */ if (async->cmd.stop_src == TRIG_COUNT) { if (devpriv->count == 0) break; devpriv->count--; } lsb = devpriv->read_byte(dev->iobase + ADC_FIFO_REG); msb = devpriv->read_byte(dev->iobase + 
ADC_FIFO_REG); data = (msb << 8) | lsb; cfc_write_to_buffer(dev->read_subdev, data); devpriv->status1_bits = devpriv->read_byte(dev->iobase + STATUS1_REG); } if (i == timeout) { comedi_error(dev, "ai timeout, fifo never empties"); async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; return -1; } return 0; } #ifdef CONFIG_ISA_DMA_API static void labpc_drain_dma(struct comedi_device *dev) { struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async = s->async; int status; unsigned long flags; unsigned int max_points, num_points, residue, leftover; int i; status = devpriv->status1_bits; flags = claim_dma_lock(); disable_dma(devpriv->dma_chan); /* */ clear_dma_ff(devpriv->dma_chan); /* */ max_points = devpriv->dma_transfer_size / sample_size; /* */ residue = get_dma_residue(devpriv->dma_chan) / sample_size; num_points = max_points - residue; if (devpriv->count < num_points && async->cmd.stop_src == TRIG_COUNT) num_points = devpriv->count; /* */ leftover = 0; if (async->cmd.stop_src != TRIG_COUNT) { leftover = devpriv->dma_transfer_size / sample_size; } else if (devpriv->count > num_points) { leftover = devpriv->count - num_points; if (leftover > max_points) leftover = max_points; } /* */ for (i = 0; i < num_points; i++) cfc_write_to_buffer(s, devpriv->dma_buffer[i]); if (async->cmd.stop_src == TRIG_COUNT) devpriv->count -= num_points; /* */ set_dma_addr(devpriv->dma_chan, virt_to_bus(devpriv->dma_buffer)); set_dma_count(devpriv->dma_chan, leftover * sample_size); release_dma_lock(flags); async->events |= COMEDI_CB_BLOCK; } static void handle_isa_dma(struct comedi_device *dev) { labpc_drain_dma(dev); enable_dma(devpriv->dma_chan); /* */ devpriv->write_byte(0x1, dev->iobase + DMATC_CLEAR_REG); } #endif /* */ static void labpc_drain_dregs(struct comedi_device *dev) { #ifdef CONFIG_ISA_DMA_API if (devpriv->current_transfer == isa_dma_transfer) labpc_drain_dma(dev); #endif labpc_drain_fifo(dev); } static int labpc_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i, n; int chan, range; int lsb, msb; int timeout = 1000; unsigned long flags; /* */ spin_lock_irqsave(&dev->spinlock, flags); devpriv->command2_bits &= ~SWTRIG_BIT & ~HWTRIG_BIT & ~PRETRIG_BIT; devpriv->write_byte(devpriv->command2_bits, dev->iobase + COMMAND2_REG); spin_unlock_irqrestore(&dev->spinlock, flags); /* */ devpriv->command3_bits = 0; devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG); /* */ devpriv->command1_bits = 0; chan = CR_CHAN(insn->chanspec); range = CR_RANGE(insn->chanspec); devpriv->command1_bits |= thisboard->ai_range_code[range]; /* */ if (CR_AREF(insn->chanspec) == AREF_DIFF) chan *= 2; devpriv->command1_bits |= ADC_CHAN_BITS(chan); devpriv->write_byte(devpriv->command1_bits, dev->iobase + COMMAND1_REG); /* */ if (thisboard->register_layout == labpc_1200_layout) { /* */ if (CR_AREF(insn->chanspec) != AREF_GROUND) devpriv->command6_bits |= ADC_COMMON_BIT; else devpriv->command6_bits &= ~ADC_COMMON_BIT; /* */ if (thisboard->ai_range_is_unipolar[range]) devpriv->command6_bits |= ADC_UNIP_BIT; else devpriv->command6_bits &= ~ADC_UNIP_BIT; /* */ devpriv->command6_bits &= ~ADC_FHF_INTR_EN_BIT; /* */ devpriv->command6_bits &= ~A1_INTR_EN_BIT; /* */ devpriv->write_byte(devpriv->command6_bits, dev->iobase + COMMAND6_REG); } /* */ devpriv->command4_bits = 0; devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT; /* */ if (CR_AREF(insn->chanspec) == AREF_DIFF) devpriv->command4_bits |= ADC_DIFF_BIT; 
devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG); /* */ devpriv->write_byte(INIT_A0_BITS, dev->iobase + COUNTER_A_CONTROL_REG); labpc_clear_adc_fifo(dev); for (n = 0; n < insn->n; n++) { /* */ devpriv->write_byte(0x1, dev->iobase + ADC_CONVERT_REG); for (i = 0; i < timeout; i++) { if (devpriv->read_byte(dev->iobase + STATUS1_REG) & DATA_AVAIL_BIT) break; udelay(1); } if (i == timeout) { comedi_error(dev, "timeout"); return -ETIME; } lsb = devpriv->read_byte(dev->iobase + ADC_FIFO_REG); msb = devpriv->read_byte(dev->iobase + ADC_FIFO_REG); data[n] = (msb << 8) | lsb; } return n; } /* */ static int labpc_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int channel, range; unsigned long flags; int lsb, msb; channel = CR_CHAN(insn->chanspec); /* */ /* */ spin_lock_irqsave(&dev->spinlock, flags); devpriv->command2_bits &= ~DAC_PACED_BIT(channel); devpriv->write_byte(devpriv->command2_bits, dev->iobase + COMMAND2_REG); spin_unlock_irqrestore(&dev->spinlock, flags); /* */ if (thisboard->register_layout == labpc_1200_layout) { range = CR_RANGE(insn->chanspec); if (range & AO_RANGE_IS_UNIPOLAR) devpriv->command6_bits |= DAC_UNIP_BIT(channel); else devpriv->command6_bits &= ~DAC_UNIP_BIT(channel); /* */ devpriv->write_byte(devpriv->command6_bits, dev->iobase + COMMAND6_REG); } /* */ lsb = data[0] & 0xff; msb = (data[0] >> 8) & 0xff; devpriv->write_byte(lsb, dev->iobase + DAC_LSB_REG(channel)); devpriv->write_byte(msb, dev->iobase + DAC_MSB_REG(channel)); /* */ devpriv->ao_value[channel] = data[0]; return 1; } /* */ static int labpc_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = devpriv->ao_value[CR_CHAN(insn->chanspec)]; return 1; } static int labpc_calib_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = devpriv->caldac[CR_CHAN(insn->chanspec)]; return 1; } static int labpc_calib_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int channel = CR_CHAN(insn->chanspec); write_caldac(dev, channel, data[0]); return 1; } static int labpc_eeprom_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = devpriv->eeprom_data[CR_CHAN(insn->chanspec)]; return 1; } static int labpc_eeprom_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int channel = CR_CHAN(insn->chanspec); int ret; /* */ if (channel < 16 || channel > 127) { printk ("eeprom writes are only allowed to channels 16 through 127 (the pointer and user areas)"); return -EINVAL; } ret = labpc_eeprom_write(dev, channel, data[0]); if (ret < 0) return ret; return 1; } #ifdef CONFIG_ISA_DMA_API /* */ static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd) { unsigned int size; unsigned int freq; if (cmd.convert_src == TRIG_TIMER) freq = 1000000000 / cmd.convert_arg; /* */ else freq = 0xffffffff; /* */ size = (freq / 3) * sample_size; /* */ if (size > dma_buffer_size) size = dma_buffer_size - dma_buffer_size % sample_size; else if (size < sample_size) size = sample_size; return size; } #endif /* */ static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd) { /* */ const int max_counter_value = 0x10000; /* */ const int min_counter_value = 2; unsigned int base_period; /* */ if 
(labpc_ai_convert_period(cmd) && labpc_ai_scan_period(cmd)) { /* */ devpriv->divisor_b0 = (labpc_ai_scan_period(cmd) - 1) / (LABPC_TIMER_BASE * max_counter_value) + 1; if (devpriv->divisor_b0 < min_counter_value) devpriv->divisor_b0 = min_counter_value; if (devpriv->divisor_b0 > max_counter_value) devpriv->divisor_b0 = max_counter_value; base_period = LABPC_TIMER_BASE * devpriv->divisor_b0; /* */ switch (cmd->flags & TRIG_ROUND_MASK) { default: case TRIG_ROUND_NEAREST: devpriv->divisor_a0 = (labpc_ai_convert_period(cmd) + (base_period / 2)) / base_period; devpriv->divisor_b1 = (labpc_ai_scan_period(cmd) + (base_period / 2)) / base_period; break; case TRIG_ROUND_UP: devpriv->divisor_a0 = (labpc_ai_convert_period(cmd) + (base_period - 1)) / base_period; devpriv->divisor_b1 = (labpc_ai_scan_period(cmd) + (base_period - 1)) / base_period; break; case TRIG_ROUND_DOWN: devpriv->divisor_a0 = labpc_ai_convert_period(cmd) / base_period; devpriv->divisor_b1 = labpc_ai_scan_period(cmd) / base_period; break; } /* */ if (devpriv->divisor_a0 < min_counter_value) devpriv->divisor_a0 = min_counter_value; if (devpriv->divisor_a0 > max_counter_value) devpriv->divisor_a0 = max_counter_value; if (devpriv->divisor_b1 < min_counter_value) devpriv->divisor_b1 = min_counter_value; if (devpriv->divisor_b1 > max_counter_value) devpriv->divisor_b1 = max_counter_value; /* */ labpc_set_ai_convert_period(cmd, base_period * devpriv->divisor_a0); labpc_set_ai_scan_period(cmd, base_period * devpriv->divisor_b1); /* */ } else if (labpc_ai_scan_period(cmd)) { unsigned int scan_period; scan_period = labpc_ai_scan_period(cmd); /* */ i8253_cascade_ns_to_timer_2div(LABPC_TIMER_BASE, &(devpriv->divisor_b1), &(devpriv->divisor_b0), &scan_period, cmd->flags & TRIG_ROUND_MASK); labpc_set_ai_scan_period(cmd, scan_period); } else if (labpc_ai_convert_period(cmd)) { unsigned int convert_period; convert_period = labpc_ai_convert_period(cmd); /* */ i8253_cascade_ns_to_timer_2div(LABPC_TIMER_BASE, &(devpriv->divisor_a0), &(devpriv->divisor_b0), &convert_period, cmd->flags & TRIG_ROUND_MASK); labpc_set_ai_convert_period(cmd, convert_period); } } static int labpc_dio_mem_callback(int dir, int port, int data, unsigned long iobase) { if (dir) { writeb(data, (void *)(iobase + port)); return 0; } else { return readb((void *)(iobase + port)); } } /* */ static void labpc_serial_out(struct comedi_device *dev, unsigned int value, unsigned int value_width) { int i; for (i = 1; i <= value_width; i++) { /* */ devpriv->command5_bits &= ~SCLOCK_BIT; /* */ if (value & (1 << (value_width - i))) devpriv->command5_bits |= SDATA_BIT; else devpriv->command5_bits &= ~SDATA_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); /* */ devpriv->command5_bits |= SCLOCK_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); } } /* */ static unsigned int labpc_serial_in(struct comedi_device *dev) { unsigned int value = 0; int i; const int value_width = 8; /* */ for (i = 1; i <= value_width; i++) { /* */ devpriv->command5_bits |= SCLOCK_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); /* */ devpriv->command5_bits &= ~SCLOCK_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); /* */ udelay(1); devpriv->status2_bits = devpriv->read_byte(dev->iobase + STATUS2_REG); if (devpriv->status2_bits & EEPROM_OUT_BIT) value |= 1 << (value_width - i); } return value; } static unsigned int labpc_eeprom_read(struct comedi_device *dev, 
unsigned int address) { unsigned int value; /* */ const int read_instruction = 0x3; /* */ const int write_length = 8; /* */ devpriv->command5_bits &= ~EEPROM_EN_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); devpriv->command5_bits |= EEPROM_EN_BIT | EEPROM_WRITE_UNPROTECT_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); /* */ labpc_serial_out(dev, read_instruction, write_length); /* */ labpc_serial_out(dev, address, write_length); /* */ value = labpc_serial_in(dev); /* */ devpriv->command5_bits &= ~EEPROM_EN_BIT & ~EEPROM_WRITE_UNPROTECT_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); return value; } static int labpc_eeprom_write(struct comedi_device *dev, unsigned int address, unsigned int value) { const int write_enable_instruction = 0x6; const int write_instruction = 0x2; const int write_length = 8; /* */ const int write_in_progress_bit = 0x1; const int timeout = 10000; int i; /* */ for (i = 0; i < timeout; i++) { if ((labpc_eeprom_read_status(dev) & write_in_progress_bit) == 0) break; } if (i == timeout) { comedi_error(dev, "eeprom write timed out"); return -ETIME; } /* */ devpriv->eeprom_data[address] = value; /* */ devpriv->command5_bits &= ~EEPROM_EN_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); devpriv->command5_bits |= EEPROM_EN_BIT | EEPROM_WRITE_UNPROTECT_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); /* */ labpc_serial_out(dev, write_enable_instruction, write_length); devpriv->command5_bits &= ~EEPROM_EN_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); /* */ devpriv->command5_bits |= EEPROM_EN_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); labpc_serial_out(dev, write_instruction, write_length); /* */ labpc_serial_out(dev, address, write_length); /* */ labpc_serial_out(dev, value, write_length); devpriv->command5_bits &= ~EEPROM_EN_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); /* */ devpriv->command5_bits &= ~EEPROM_EN_BIT & ~EEPROM_WRITE_UNPROTECT_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); return 0; } static unsigned int labpc_eeprom_read_status(struct comedi_device *dev) { unsigned int value; const int read_status_instruction = 0x5; const int write_length = 8; /* */ /* */ devpriv->command5_bits &= ~EEPROM_EN_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); devpriv->command5_bits |= EEPROM_EN_BIT | EEPROM_WRITE_UNPROTECT_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); /* */ labpc_serial_out(dev, read_status_instruction, write_length); /* */ value = labpc_serial_in(dev); /* */ devpriv->command5_bits &= ~EEPROM_EN_BIT & ~EEPROM_WRITE_UNPROTECT_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); return value; } /* */ static void write_caldac(struct comedi_device *dev, unsigned int channel, unsigned int value) { if (value == devpriv->caldac[channel]) return; devpriv->caldac[channel] = value; /* */ devpriv->command5_bits &= ~CALDAC_LOAD_BIT & ~EEPROM_EN_BIT & ~EEPROM_WRITE_UNPROTECT_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); /* */ labpc_serial_out(dev, channel, 4); /* */ labpc_serial_out(dev, value, 8); /* */ devpriv->command5_bits |= 
CALDAC_LOAD_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); devpriv->command5_bits &= ~CALDAC_LOAD_BIT; udelay(1); devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG); } #ifdef CONFIG_COMEDI_PCI_DRIVERS static int __devinit driver_labpc_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_labpc.driver_name); } static void __devexit driver_labpc_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_labpc_pci_driver = { .id_table = labpc_pci_table, .probe = &driver_labpc_pci_probe, .remove = __devexit_p(&driver_labpc_pci_remove) }; static int __init driver_labpc_init_module(void) { int retval; retval = comedi_driver_register(&driver_labpc); if (retval < 0) return retval; driver_labpc_pci_driver.name = (char *)driver_labpc.driver_name; return pci_register_driver(&driver_labpc_pci_driver); } static void __exit driver_labpc_cleanup_module(void) { pci_unregister_driver(&driver_labpc_pci_driver); comedi_driver_unregister(&driver_labpc); } module_init(driver_labpc_init_module); module_exit(driver_labpc_cleanup_module); #else static int __init driver_labpc_init_module(void) { return comedi_driver_register(&driver_labpc); } static void __exit driver_labpc_cleanup_module(void) { comedi_driver_unregister(&driver_labpc); } module_init(driver_labpc_init_module); module_exit(driver_labpc_cleanup_module); #endif MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
holyangel/LGE_G3
drivers/staging/comedi/drivers/ni_labpc.c
C
gpl-2.0
62,708
/* Basic IPA utilities for type inheritance graph construction and devirtualization.
   Copyright (C) 2013-2015 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */

/* Brief vocabulary:

   ODR = One Definition Rule
     In short, the ODR states that:
     1 In any translation unit, a template, type, function, or object can have no more than one definition. Some of these can have any number of declarations. A definition provides an instance.
     2 In the entire program, an object or non-inline function cannot have more than one definition; if an object or function is used, it must have exactly one definition. You can declare an object or function that is never used, in which case you don't have to provide a definition. In no event can there be more than one definition.
     3 Some things, like types, templates, and extern inline functions, can be defined in more than one translation unit. For a given entity, each definition must be the same. Non-extern objects and functions in different translation units are different entities, even if their names and types are the same.

   OTR = OBJ_TYPE_REF
     This is the Gimple representation of type information of a polymorphic call. It contains two parameters:
       otr_type is the type of the class whose method is called.
       otr_token is the index into the virtual table where the address is taken.

   BINFO
     This is the type inheritance information attached to each tree RECORD_TYPE by the C++ frontend. It provides information about base types and virtual tables.

     BINFO is linked to the RECORD_TYPE by TYPE_BINFO. BINFO also links to its type by BINFO_TYPE and to the virtual table by BINFO_VTABLE.

     Base types of a given type are enumerated by the BINFO_BASE_BINFO vector. Members of this vector are not the BINFOs associated with a base type. Rather they are new copies of BINFOs (base BINFOs). Their virtual tables may differ from the virtual table of the base type. Also BINFO_OFFSET specifies the offset of the base within the type.

     In the case of single inheritance, the virtual table is shared and the BINFO_VTABLE of a base BINFO is NULL. In the case of multiple inheritance the individual virtual tables are pointed to by the BINFO_VTABLE of the base binfos (which differs from the BINFO_VTABLE of the binfo associated with the base type).

     The BINFO lookup for a given base type and offset can be done by get_binfo_at_offset. It returns the proper BINFO whose virtual table can be used for lookup of virtual methods associated with the base type.

   token
     This is the index of a virtual method in the virtual table associated with the type defining it. The token can be looked up from the OBJ_TYPE_REF or from the DECL_VINDEX of a given virtual method.

   polymorphic (indirect) call
     This is the callgraph representation of a virtual method call. Every polymorphic call contains otr_type and otr_token taken from the original OBJ_TYPE_REF at callgraph construction time.

   What we do here:

   build_type_inheritance_graph triggers a construction of the type inheritance graph.
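   For illustration, consider a hypothetical C++ fragment (an editor's sketch, not part of this file or of the GCC testsuite):

       struct A { virtual int f (); };
       struct B : A { virtual int f (); };

       int call (A *a) { return a->f (); }

   The call a->f () is a polymorphic call represented by an OBJ_TYPE_REF with otr_type A and otr_token 0 (f occupies the first slot of A's virtual table), and the type inheritance graph records B as a derived type of A, so both A::f and B::f are possible targets of the call.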
   We reconstruct it based on types of methods we see in the unit. This means that the graph is not complete. Types with no methods are not inserted into the graph. Also types without virtual methods are not represented at all, though it may be easy to add this.

   The inheritance graph is represented as follows:

     Vertices are structures odr_type. Every odr_type may correspond to one or more tree type nodes that are equivalent by ODR rule. (The multiple type nodes appear only with link-time optimization.)

     Edges are represented by odr_type->base and odr_type->derived_types. At the moment we do not track offsets of types for multiple inheritance. Adding this is easy.

   possible_polymorphic_call_targets returns, given the parameters found in an indirect polymorphic edge, all possible polymorphic call targets of the call.

   pass_ipa_devirt performs simple speculative devirtualization. */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "rtl.h"
#include "alias.h"
#include "fold-const.h"
#include "print-tree.h"
#include "calls.h"
#include "cgraph.h"
#include "flags.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "tree-pass.h"
#include "target.h"
#include "ipa-utils.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-inline.h"
#include "diagnostic.h"
#include "tree-dfa.h"
#include "demangle.h"
#include "dbgcnt.h"
#include "gimple-pretty-print.h"
#include "stor-layout.h"
#include "intl.h"
#include "lto-streamer.h"

/* Hash based set of pairs of types. */
struct type_pair { tree first; tree second; };

template <> struct default_hash_traits <type_pair> : typed_noop_remove <type_pair>
{
  typedef type_pair value_type;
  typedef type_pair compare_type;
  static hashval_t hash (type_pair p) { return TYPE_UID (p.first) ^ TYPE_UID (p.second); }
  static bool is_empty (type_pair p) { return p.first == NULL; }
  static bool is_deleted (type_pair p ATTRIBUTE_UNUSED) { return false; }
  static bool equal (const type_pair &a, const type_pair &b) { return a.first == b.first && a.second == b.second; }
  static void mark_empty (type_pair &e) { e.first = NULL; }
};

static bool odr_types_equivalent_p (tree, tree, bool, bool *, hash_set<type_pair> *, location_t, location_t);

static bool odr_violation_reported = false;

/* Pointer set of all call targets appearing in the cache. */
static hash_set<cgraph_node *> *cached_polymorphic_call_targets;

/* The node of the type inheritance graph. For each type unique in the One Definition Rule (ODR) sense, we produce one node linking all main variants of types equivalent to it, bases and derived types. */
struct GTY(()) odr_type_d
{
  /* Leader type. */
  tree type;
  /* All bases; built only for main variants of types. */
  vec<odr_type> GTY((skip)) bases;
  /* All derived types with virtual methods seen in the unit; built only for main variants of types. */
  vec<odr_type> GTY((skip)) derived_types;
  /* All equivalent types, if more than one. */
  vec<tree, va_gc> *types;
  /* Set of all equivalent types, if NON-NULL. */
  hash_set<tree> * GTY((skip)) types_set;
  /* Unique ID indexing the type in the odr_types array. */
  int id;
  /* Is it in anonymous namespace? */
  bool anonymous_namespace;
  /* Do we know about all derivations of a given type? */
  bool all_derivations_known;
  /* Did we report ODR violation here?
*/
  bool odr_violated;
  /* Set when a virtual table without RTTI prevailed over a table with it. */
  bool rtti_broken;
};

/* Return true if T is a type with linkage defined. */
bool
type_with_linkage_p (const_tree t)
{
  /* Builtin types do not define linkage, their TYPE_CONTEXT is NULL. */
  if (!TYPE_CONTEXT (t) || !TYPE_NAME (t) || TREE_CODE (TYPE_NAME (t)) != TYPE_DECL || !TYPE_STUB_DECL (t))
    return false;
  /* In LTO do not get confused by non-C++ produced types or types built with -fno-lto-odr-type-merging. */
  if (in_lto_p)
    {
      /* To support -fno-lto-odr-type-merging, recognize types with vtables as having linkage. */
      if (RECORD_OR_UNION_TYPE_P (t) && TYPE_BINFO (t) && BINFO_VTABLE (TYPE_BINFO (t)))
	return true;
      /* Do not accept any other types - we do not know if they were produced by the C++ FE. */
      if (!DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t)))
	return false;
    }
  return (RECORD_OR_UNION_TYPE_P (t) || TREE_CODE (t) == ENUMERAL_TYPE);
}

/* Return true if T is in an anonymous namespace. This works only on those C++ types with linkage defined. */
bool
type_in_anonymous_namespace_p (const_tree t)
{
  gcc_assert (type_with_linkage_p (t));
  /* Keep -fno-lto-odr-type-merging working by properly recognizing classes with vtables in anonymous namespaces. */
  if (RECORD_OR_UNION_TYPE_P (t) && TYPE_BINFO (t) && BINFO_VTABLE (TYPE_BINFO (t)))
    return (TYPE_STUB_DECL (t) && !TREE_PUBLIC (TYPE_STUB_DECL (t)));
  if (TYPE_STUB_DECL (t) && !TREE_PUBLIC (TYPE_STUB_DECL (t)))
    {
      /* The C++ FE uses the magic <anon> as the assembler name of anonymous types. Verify that this matches type_in_anonymous_namespace_p. */
#ifdef ENABLE_CHECKING
      if (in_lto_p)
	gcc_assert (!strcmp ("<anon>", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (TYPE_NAME (t)))));
#endif
      return true;
    }
  return false;
}

/* Return true if T is a type with One Definition Rule info attached. It means that either it is an anonymous type or it has its assembler name set. */
bool
odr_type_p (const_tree t)
{
  /* We do not have this information when not in LTO, but we do not need to care, since it is used only for type merging. */
  gcc_checking_assert (in_lto_p || flag_lto);
  /* To support -fno-lto-odr-type-merging consider types with vtables ODR. */
  if (type_with_linkage_p (t) && type_in_anonymous_namespace_p (t))
    return true;
  if (TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL && (DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t))))
    {
#ifdef ENABLE_CHECKING
      /* The C++ FE uses the magic <anon> as the assembler name of anonymous types. Verify that this matches type_in_anonymous_namespace_p. */
      gcc_assert (!type_with_linkage_p (t) || strcmp ("<anon>", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (TYPE_NAME (t)))) || type_in_anonymous_namespace_p (t));
#endif
      return true;
    }
  return false;
}

/* Return TRUE if all derived types of T are known and thus we may consider the walk of derived types complete. This is typically true only for final anonymous namespace types and types defined within functions (that may be COMDAT and thus shared across units, but with the same set of derived types). */
bool
type_all_derivations_known_p (const_tree t)
{
  if (TYPE_FINAL_P (t))
    return true;
  if (flag_ltrans)
    return false;
  /* Non-C++ types may have IDENTIFIER_NODE here, do not crash. */
  if (!TYPE_NAME (t) || TREE_CODE (TYPE_NAME (t)) != TYPE_DECL)
    return true;
  if (type_in_anonymous_namespace_p (t))
    return true;
  return (decl_function_context (TYPE_NAME (t)) != NULL);
}

/* Return TRUE if type's constructors are all visible.
*/
static bool
type_all_ctors_visible_p (tree t)
{
  return !flag_ltrans
	 && symtab->state >= CONSTRUCTION
	 /* We cannot always use type_all_derivations_known_p. For function local types we must assume the case where the function is COMDAT and shared between units. TODO: These cases are quite easy to get, but we need to keep track of C++ privatizing via -Wno-weak as well as the IPA privatizing. */
	 && type_in_anonymous_namespace_p (t);
}

/* Return TRUE if the type may have an instance. */
static bool
type_possibly_instantiated_p (tree t)
{
  tree vtable;
  varpool_node *vnode;
  /* TODO: Add abstract types here. */
  if (!type_all_ctors_visible_p (t))
    return true;
  vtable = BINFO_VTABLE (TYPE_BINFO (t));
  if (TREE_CODE (vtable) == POINTER_PLUS_EXPR)
    vtable = TREE_OPERAND (TREE_OPERAND (vtable, 0), 0);
  vnode = varpool_node::get (vtable);
  return vnode && vnode->definition;
}

/* Hash used to unify ODR types based on their mangled name and for anonymous namespace types. */
struct odr_name_hasher : pointer_hash <odr_type_d>
{
  typedef union tree_node *compare_type;
  static inline hashval_t hash (const odr_type_d *);
  static inline bool equal (const odr_type_d *, const tree_node *);
  static inline void remove (odr_type_d *);
};

/* Hash used to unify ODR types based on their associated virtual table. This hash is needed to keep -fno-lto-odr-type-merging working and contains only polymorphic types. Types with mangled names are inserted into both. */
struct odr_vtable_hasher : odr_name_hasher
{
  static inline hashval_t hash (const odr_type_d *);
  static inline bool equal (const odr_type_d *, const tree_node *);
};

/* Return the type that was declared with T's name, so that T is a qualified variant of it. */
static inline tree
main_odr_variant (const_tree t)
{
  if (TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL)
    return TREE_TYPE (TYPE_NAME (t));
  /* Unnamed types and non-C++ produced types can be compared by variants. */
  else
    return TYPE_MAIN_VARIANT (t);
}

static bool
can_be_name_hashed_p (tree t)
{
  return (!in_lto_p || odr_type_p (t));
}

/* Hash type by its ODR name. */
static hashval_t
hash_odr_name (const_tree t)
{
  gcc_checking_assert (main_odr_variant (t) == t);
  /* If not in LTO, all main variants are unique, so we can use a pointer hash. */
  if (!in_lto_p)
    return htab_hash_pointer (t);
  /* Anonymous types are unique. */
  if (type_with_linkage_p (t) && type_in_anonymous_namespace_p (t))
    return htab_hash_pointer (t);
  gcc_checking_assert (TYPE_NAME (t) && DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t)));
  return IDENTIFIER_HASH_VALUE (DECL_ASSEMBLER_NAME (TYPE_NAME (t)));
}

/* Return the computed hashcode for ODR_TYPE. */
inline hashval_t
odr_name_hasher::hash (const odr_type_d *odr_type)
{
  return hash_odr_name (odr_type->type);
}

static bool
can_be_vtable_hashed_p (tree t)
{
  /* Vtable hashing can distinguish only main variants. */
  if (TYPE_MAIN_VARIANT (t) != t)
    return false;
  /* Anonymous namespace types are always handled by name hash. */
  if (type_with_linkage_p (t) && type_in_anonymous_namespace_p (t))
    return false;
  return (TREE_CODE (t) == RECORD_TYPE && TYPE_BINFO (t) && BINFO_VTABLE (TYPE_BINFO (t)));
}

/* Hash type by assembler name of its vtable.
*/
static hashval_t
hash_odr_vtable (const_tree t)
{
  tree v = BINFO_VTABLE (TYPE_BINFO (TYPE_MAIN_VARIANT (t)));
  inchash::hash hstate;
  gcc_checking_assert (in_lto_p);
  gcc_checking_assert (!type_in_anonymous_namespace_p (t));
  gcc_checking_assert (TREE_CODE (t) == RECORD_TYPE && TYPE_BINFO (t) && BINFO_VTABLE (TYPE_BINFO (t)));
  gcc_checking_assert (main_odr_variant (t) == t);
  if (TREE_CODE (v) == POINTER_PLUS_EXPR)
    {
      add_expr (TREE_OPERAND (v, 1), hstate);
      v = TREE_OPERAND (TREE_OPERAND (v, 0), 0);
    }
  hstate.add_wide_int (IDENTIFIER_HASH_VALUE (DECL_ASSEMBLER_NAME (v)));
  return hstate.end ();
}

/* Return the computed hashcode for ODR_TYPE. */
inline hashval_t
odr_vtable_hasher::hash (const odr_type_d *odr_type)
{
  return hash_odr_vtable (odr_type->type);
}

/* For languages with One Definition Rule, work out if types are the same based on their name. This is non-trivial for LTO where minor differences in the type representation may have prevented type merging from merging two copies of an otherwise equivalent type.

   Until we start streaming mangled type names, this function works only for polymorphic types.

   When STRICT is true, we compare types by their names for purposes of ODR violation warnings. When strict is false, we consider variants equivalent, because that is all that matters for the devirtualization machinery. */
bool
types_same_for_odr (const_tree type1, const_tree type2, bool strict)
{
  gcc_checking_assert (TYPE_P (type1) && TYPE_P (type2));
  type1 = main_odr_variant (type1);
  type2 = main_odr_variant (type2);
  if (!strict)
    {
      type1 = TYPE_MAIN_VARIANT (type1);
      type2 = TYPE_MAIN_VARIANT (type2);
    }
  if (type1 == type2)
    return true;
  if (!in_lto_p)
    return false;
  /* Check for anonymous namespaces. Those have !TREE_PUBLIC on the corresponding TYPE_STUB_DECL. */
  if ((type_with_linkage_p (type1) && type_in_anonymous_namespace_p (type1)) || (type_with_linkage_p (type2) && type_in_anonymous_namespace_p (type2)))
    return false;
  /* The ODR name of the type is set in DECL_ASSEMBLER_NAME of its TYPE_NAME.

     Ideally we should never need types without ODR names here. It can however happen in two cases:

     1) for builtin types that are not streamed but rebuilt in lto/lto-lang.c Here testing for equivalence is safe, since their MAIN_VARIANTs are unique.
     2) for units streamed with -fno-lto-odr-type-merging. Here we can't establish precise ODR equivalency, but for correctness we care only about equivalency on complete polymorphic types. For these we can compare assembler names of their virtual tables. */
  if ((!TYPE_NAME (type1) || !DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (type1))) || (!TYPE_NAME (type2) || !DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (type2))))
    {
      /* See if types are obviously different (i.e. different codes or polymorphic wrt non-polymorphic). This is not strictly correct for ODR violating programs, but we can't do better without streaming ODR names. */
      if (TREE_CODE (type1) != TREE_CODE (type2))
	return false;
      if (TREE_CODE (type1) == RECORD_TYPE && (TYPE_BINFO (type1) == NULL_TREE) != (TYPE_BINFO (type2) == NULL_TREE))
	return false;
      if (TREE_CODE (type1) == RECORD_TYPE && TYPE_BINFO (type1) && (BINFO_VTABLE (TYPE_BINFO (type1)) == NULL_TREE) != (BINFO_VTABLE (TYPE_BINFO (type2)) == NULL_TREE))
	return false;
      /* At the moment we have no way to establish ODR equivalence at LTO other than comparing virtual table pointers of polymorphic types. Eventually we should start saving mangled names in TYPE_NAME. Then this condition will become non-trivial.
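	 As a concrete illustration (hypothetical numbers; the exact offset is target-dependent), after LTO streaming the BINFO_VTABLE of a polymorphic type A typically has the shape

	     &_ZTV1A + 16

	 i.e. a POINTER_PLUS_EXPR adding the offset of the vtable's address point to the ADDR_EXPR of the vtable VAR_DECL. The code below therefore compares the offsets with operand_equal_p and the virtual tables themselves by their DECL_ASSEMBLER_NAME.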
*/
      if (TREE_CODE (type1) == RECORD_TYPE && TYPE_BINFO (type1) && TYPE_BINFO (type2) && BINFO_VTABLE (TYPE_BINFO (type1)) && BINFO_VTABLE (TYPE_BINFO (type2)))
	{
	  tree v1 = BINFO_VTABLE (TYPE_BINFO (type1));
	  tree v2 = BINFO_VTABLE (TYPE_BINFO (type2));
	  gcc_assert (TREE_CODE (v1) == POINTER_PLUS_EXPR && TREE_CODE (v2) == POINTER_PLUS_EXPR);
	  return (operand_equal_p (TREE_OPERAND (v1, 1), TREE_OPERAND (v2, 1), 0)
		  && DECL_ASSEMBLER_NAME (TREE_OPERAND (TREE_OPERAND (v1, 0), 0)) == DECL_ASSEMBLER_NAME (TREE_OPERAND (TREE_OPERAND (v2, 0), 0)));
	}
      gcc_unreachable ();
    }
  return (DECL_ASSEMBLER_NAME (TYPE_NAME (type1)) == DECL_ASSEMBLER_NAME (TYPE_NAME (type2)));
}

/* Return true if we can decide on ODR equivalency. In non-LTO it is always decidable; in LTO, however, it depends on whether the type has ODR info attached. When STRICT is false, compare main variants. */
bool
types_odr_comparable (tree t1, tree t2, bool strict)
{
  return (!in_lto_p
	  || (strict ? (main_odr_variant (t1) == main_odr_variant (t2) && main_odr_variant (t1)) : TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
	  || (odr_type_p (t1) && odr_type_p (t2))
	  || (TREE_CODE (t1) == RECORD_TYPE && TREE_CODE (t2) == RECORD_TYPE && TYPE_BINFO (t1) && TYPE_BINFO (t2) && polymorphic_type_binfo_p (TYPE_BINFO (t1)) && polymorphic_type_binfo_p (TYPE_BINFO (t2))));
}

/* Return true if T1 and T2 are ODR equivalent. If ODR equivalency is not known, be conservative and return false. */
bool
types_must_be_same_for_odr (tree t1, tree t2)
{
  if (types_odr_comparable (t1, t2))
    return types_same_for_odr (t1, t2);
  else
    return TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2);
}

/* If T is a compound type, return the type it is based on. */
static tree
compound_type_base (const_tree t)
{
  if (TREE_CODE (t) == ARRAY_TYPE || POINTER_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE || VECTOR_TYPE_P (t))
    return TREE_TYPE (t);
  if (TREE_CODE (t) == METHOD_TYPE)
    return TYPE_METHOD_BASETYPE (t);
  if (TREE_CODE (t) == OFFSET_TYPE)
    return TYPE_OFFSET_BASETYPE (t);
  return NULL_TREE;
}

/* Return true if T is either an ODR type or a compound type based on it. If the function returns true, we know that T is a type originating from C++ source even at link-time. */
bool
odr_or_derived_type_p (const_tree t)
{
  do
    {
      if (odr_type_p (t))
	return true;
      /* Function type is a tricky one. Basically we can consider it ODR derived if the return type or any of the parameters is. We need to check all parameters because LTO streaming merges common types (such as void) and they are not considered ODR then. */
      if (TREE_CODE (t) == FUNCTION_TYPE)
	{
	  if (TYPE_METHOD_BASETYPE (t))
	    t = TYPE_METHOD_BASETYPE (t);
	  else
	    {
	      if (TREE_TYPE (t) && odr_or_derived_type_p (TREE_TYPE (t)))
		return true;
	      for (t = TYPE_ARG_TYPES (t); t; t = TREE_CHAIN (t))
		if (odr_or_derived_type_p (TREE_VALUE (t)))
		  return true;
	      return false;
	    }
	}
      else
	t = compound_type_base (t);
    }
  while (t);
  return t;
}

/* Compare types T1 and T2 and return true if they are equivalent. */
inline bool
odr_name_hasher::equal (const odr_type_d *o1, const tree_node *t2)
{
  tree t1 = o1->type;
  gcc_checking_assert (main_odr_variant (t2) == t2);
  gcc_checking_assert (main_odr_variant (t1) == t1);
  if (t1 == t2)
    return true;
  if (!in_lto_p)
    return false;
  /* Check for anonymous namespaces. Those have !TREE_PUBLIC on the corresponding TYPE_STUB_DECL.
*/
  if ((type_with_linkage_p (t1) && type_in_anonymous_namespace_p (t1)) || (type_with_linkage_p (t2) && type_in_anonymous_namespace_p (t2)))
    return false;
  gcc_checking_assert (DECL_ASSEMBLER_NAME (TYPE_NAME (t1)));
  gcc_checking_assert (DECL_ASSEMBLER_NAME (TYPE_NAME (t2)));
  return (DECL_ASSEMBLER_NAME (TYPE_NAME (t1)) == DECL_ASSEMBLER_NAME (TYPE_NAME (t2)));
}

/* Compare types T1 and T2 and return true if they are equivalent. */
inline bool
odr_vtable_hasher::equal (const odr_type_d *o1, const tree_node *t2)
{
  tree t1 = o1->type;
  gcc_checking_assert (main_odr_variant (t2) == t2);
  gcc_checking_assert (main_odr_variant (t1) == t1);
  gcc_checking_assert (in_lto_p);
  t1 = TYPE_MAIN_VARIANT (t1);
  t2 = TYPE_MAIN_VARIANT (t2);
  if (t1 == t2)
    return true;
  tree v1 = BINFO_VTABLE (TYPE_BINFO (t1));
  tree v2 = BINFO_VTABLE (TYPE_BINFO (t2));
  return (operand_equal_p (TREE_OPERAND (v1, 1), TREE_OPERAND (v2, 1), 0)
	  && DECL_ASSEMBLER_NAME (TREE_OPERAND (TREE_OPERAND (v1, 0), 0)) == DECL_ASSEMBLER_NAME (TREE_OPERAND (TREE_OPERAND (v2, 0), 0)));
}

/* Free ODR type V. */
inline void
odr_name_hasher::remove (odr_type_d *v)
{
  v->bases.release ();
  v->derived_types.release ();
  if (v->types_set)
    delete v->types_set;
  ggc_free (v);
}

/* ODR type hash used to look up ODR type based on tree type node. */
typedef hash_table<odr_name_hasher> odr_hash_type;
static odr_hash_type *odr_hash;
typedef hash_table<odr_vtable_hasher> odr_vtable_hash_type;
static odr_vtable_hash_type *odr_vtable_hash;

/* ODR types are also stored into the ODR_TYPE vector to allow consistent walking. Bases appear before derived types. The vector is garbage collected so we won't end up visiting empty types. */
static GTY(()) vec <odr_type, va_gc> *odr_types_ptr;
#define odr_types (*odr_types_ptr)

/* Set TYPE_BINFO of TYPE and its variants to BINFO. */
void
set_type_binfo (tree type, tree binfo)
{
  for (; type; type = TYPE_NEXT_VARIANT (type))
    if (COMPLETE_TYPE_P (type))
      TYPE_BINFO (type) = binfo;
    else
      gcc_assert (!TYPE_BINFO (type));
}

/* Compare T1 and T2 based on name or structure. */
static bool
odr_subtypes_equivalent_p (tree t1, tree t2, hash_set<type_pair> *visited, location_t loc1, location_t loc2)
{
  /* This can happen in incomplete types that should be handled earlier. */
  gcc_assert (t1 && t2);
  t1 = main_odr_variant (t1);
  t2 = main_odr_variant (t2);
  if (t1 == t2)
    return true;
  /* Anonymous namespace types must match exactly. */
  if ((type_with_linkage_p (t1) && type_in_anonymous_namespace_p (t1)) || (type_with_linkage_p (t2) && type_in_anonymous_namespace_p (t2)))
    return false;
  /* For ODR types be sure to compare their names. To support -Wno-odr-type-merging we allow one type to be non-ODR and the other ODR even though it is a violation. */
  if (types_odr_comparable (t1, t2, true))
    {
      if (!types_same_for_odr (t1, t2, true))
	return false;
      /* Limit recursion: if subtypes are ODR types and we know that they are the same, be happy. */
      if (!odr_type_p (t1) || !get_odr_type (t1, true)->odr_violated)
	return true;
    }
  /* Component types, builtins and possibly violating ODR types have to be compared structurally. */
  if (TREE_CODE (t1) != TREE_CODE (t2))
    return false;
  if (AGGREGATE_TYPE_P (t1) && (TYPE_NAME (t1) == NULL_TREE) != (TYPE_NAME (t2) == NULL_TREE))
    return false;
  type_pair pair = {t1, t2};
  if (TYPE_UID (t1) > TYPE_UID (t2))
    {
      pair.first = t2;
      pair.second = t1;
    }
  if (visited->add (pair))
    return true;
  return odr_types_equivalent_p (t1, t2, false, NULL, visited, loc1, loc2);
}

/* Compare two virtual tables, PREVAILING and VTABLE, and output ODR violation warnings.
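   A hypothetical example of what gets diagnosed here (an editor's sketch, not a real test case):

       // tu1.C
       struct S { virtual void f (); virtual void g (); };
       // tu2.C
       struct S { virtual void f (); };

   When the two units are linked with -flto, the prevailing _ZTV1S disagrees with the other definition; the loop below walks both virtual tables entry by entry via iterate_reference and points out the extra virtual method S::g.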
*/
void
compare_virtual_tables (varpool_node *prevailing, varpool_node *vtable)
{
  int n1, n2;
  if (DECL_VIRTUAL_P (prevailing->decl) != DECL_VIRTUAL_P (vtable->decl))
    {
      odr_violation_reported = true;
      if (DECL_VIRTUAL_P (prevailing->decl))
	{
	  varpool_node *tmp = prevailing;
	  prevailing = vtable;
	  vtable = tmp;
	}
      if (warning_at (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (vtable->decl))), OPT_Wodr, "virtual table of type %qD violates one definition rule", DECL_CONTEXT (vtable->decl)))
	inform (DECL_SOURCE_LOCATION (prevailing->decl), "variable of same assembler name as the virtual table is " "defined in another translation unit");
      return;
    }
  if (!prevailing->definition || !vtable->definition)
    return;
  /* If we do not stream ODR type info, do not bother to do a useful compare. */
  if (!TYPE_BINFO (DECL_CONTEXT (vtable->decl)) || !polymorphic_type_binfo_p (TYPE_BINFO (DECL_CONTEXT (vtable->decl))))
    return;
  odr_type class_type = get_odr_type (DECL_CONTEXT (vtable->decl), true);
  if (class_type->odr_violated)
    return;
  for (n1 = 0, n2 = 0; true; n1++, n2++)
    {
      struct ipa_ref *ref1, *ref2;
      bool end1, end2;
      end1 = !prevailing->iterate_reference (n1, ref1);
      end2 = !vtable->iterate_reference (n2, ref2);
      /* !DECL_VIRTUAL_P means RTTI entry; we warn when RTTI is lost because non-RTTI prevails; we silently accept the other case. */
      while (!end2 && (end1 || (DECL_ASSEMBLER_NAME (ref1->referred->decl) != DECL_ASSEMBLER_NAME (ref2->referred->decl) && TREE_CODE (ref1->referred->decl) == FUNCTION_DECL)) && TREE_CODE (ref2->referred->decl) != FUNCTION_DECL)
	{
	  if (!class_type->rtti_broken && warning_at (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (vtable->decl))), OPT_Wodr, "virtual table of type %qD contains RTTI " "information", DECL_CONTEXT (vtable->decl)))
	    {
	      inform (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), "but is prevailed by one without RTTI from another " "translation unit");
	      inform (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), "RTTI will not work on this type");
	      class_type->rtti_broken = true;
	    }
	  n2++;
	  end2 = !vtable->iterate_reference (n2, ref2);
	}
      while (!end1 && (end2 || (DECL_ASSEMBLER_NAME (ref2->referred->decl) != DECL_ASSEMBLER_NAME (ref1->referred->decl) && TREE_CODE (ref2->referred->decl) == FUNCTION_DECL)) && TREE_CODE (ref1->referred->decl) != FUNCTION_DECL)
	{
	  n1++;
	  end1 = !prevailing->iterate_reference (n1, ref1);
	}
      /* Finished? */
      if (end1 && end2)
	{
	  /* Extra paranoia; compare the sizes. We do not have information about virtual inheritance offsets, so just be sure that these match. Do this as the very last check so the not very informative error is not output too often. */
	  if (DECL_SIZE (prevailing->decl) != DECL_SIZE (vtable->decl))
	    {
	      class_type->odr_violated = true;
	      if (warning_at (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (vtable->decl))), OPT_Wodr, "virtual table of type %qD violates " "one definition rule", DECL_CONTEXT (vtable->decl)))
		{
		  inform (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), "the conflicting type defined in another translation " "unit has virtual table of different size");
		}
	    }
	  return;
	}
      if (!end1 && !end2)
	{
	  if (DECL_ASSEMBLER_NAME (ref1->referred->decl) == DECL_ASSEMBLER_NAME (ref2->referred->decl))
	    continue;
	  class_type->odr_violated = true;
	  /* If the loops above stopped on a non-virtual pointer, we have a mismatch in RTTI information mangling.
*/
	  if (TREE_CODE (ref1->referred->decl) != FUNCTION_DECL && TREE_CODE (ref2->referred->decl) != FUNCTION_DECL)
	    {
	      if (warning_at (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (vtable->decl))), OPT_Wodr, "virtual table of type %qD violates " "one definition rule", DECL_CONTEXT (vtable->decl)))
		{
		  inform (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), "the conflicting type defined in another translation " "unit with different RTTI information");
		}
	      return;
	    }
	  /* At this point both REF1 and REF2 point either to a virtual table or a virtual method. If one points to a virtual table and the other to a method we can complain the same way as if one table was shorter than the other, pointing out the extra method. */
	  if (TREE_CODE (ref1->referred->decl) != TREE_CODE (ref2->referred->decl))
	    {
	      if (TREE_CODE (ref1->referred->decl) == VAR_DECL)
		end1 = true;
	      else if (TREE_CODE (ref2->referred->decl) == VAR_DECL)
		end2 = true;
	    }
	}
      class_type->odr_violated = true;
      /* Complain about size mismatch. Either we have too many virtual functions or too many virtual table pointers. */
      if (end1 || end2)
	{
	  if (end1)
	    {
	      varpool_node *tmp = prevailing;
	      prevailing = vtable;
	      vtable = tmp;
	      ref1 = ref2;
	    }
	  if (warning_at (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (vtable->decl))), OPT_Wodr, "virtual table of type %qD violates " "one definition rule", DECL_CONTEXT (vtable->decl)))
	    {
	      if (TREE_CODE (ref1->referring->decl) == FUNCTION_DECL)
		{
		  inform (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), "the conflicting type defined in another translation " "unit");
		  inform (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (ref1->referring->decl))), "contains additional virtual method %qD", ref1->referred->decl);
		}
	      else
		{
		  inform (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), "the conflicting type defined in another translation " "unit has virtual table with more entries");
		}
	    }
	  return;
	}
      /* And in the last case we have either a mismatch between two virtual methods or two virtual table pointers. */
      if (warning_at (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (vtable->decl))), OPT_Wodr, "virtual table of type %qD violates " "one definition rule", DECL_CONTEXT (vtable->decl)))
	{
	  if (TREE_CODE (ref1->referred->decl) == FUNCTION_DECL)
	    {
	      inform (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), "the conflicting type defined in another translation " "unit");
	      gcc_assert (TREE_CODE (ref2->referred->decl) == FUNCTION_DECL);
	      inform (DECL_SOURCE_LOCATION (ref1->referred->decl), "virtual method %qD", ref1->referred->decl);
	      inform (DECL_SOURCE_LOCATION (ref2->referred->decl), "ought to match virtual method %qD but does not", ref2->referred->decl);
	    }
	  else
	    inform (DECL_SOURCE_LOCATION (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), "the conflicting type defined in another translation " "unit has virtual table with different contents");
	  return;
	}
    }
}

/* Output ODR violation warning about T1 and T2 with REASON. Display the location of ST1 and ST2 if REASON speaks about a field or method of the type. If WARN is false, do nothing. Set WARNED if a warning was indeed output. */
void
warn_odr (tree t1, tree t2, tree st1, tree st2, bool warn, bool *warned, const char *reason)
{
  tree decl2 = TYPE_NAME (t2);
  if (warned)
    *warned = false;
  if (!warn || !TYPE_NAME (t1))
    return;
  /* ODR warnings are output during LTO streaming; we must apply the location cache for potential warnings to be output correctly.
*/
  if (lto_location_cache::current_cache)
    lto_location_cache::current_cache->apply_location_cache ();
  if (!warning_at (DECL_SOURCE_LOCATION (TYPE_NAME (t1)), OPT_Wodr, "type %qT violates the C++ One Definition Rule", t1))
    return;
  if (!st1 && !st2)
    ;
  /* For FIELD_DECL support also the case where one of the fields is NULL - this is used when the structures have a mismatching number of elements. */
  else if (!st1 || TREE_CODE (st1) == FIELD_DECL)
    {
      inform (DECL_SOURCE_LOCATION (decl2), "a different type is defined in another translation unit");
      if (!st1)
	{
	  st1 = st2;
	  st2 = NULL;
	}
      inform (DECL_SOURCE_LOCATION (st1), "the first difference of corresponding definitions is field %qD", st1);
      if (st2)
	decl2 = st2;
    }
  else if (TREE_CODE (st1) == FUNCTION_DECL)
    {
      inform (DECL_SOURCE_LOCATION (decl2), "a different type is defined in another translation unit");
      inform (DECL_SOURCE_LOCATION (st1), "the first difference of corresponding definitions is method %qD", st1);
      decl2 = st2;
    }
  else
    return;
  inform (DECL_SOURCE_LOCATION (decl2), reason);
  if (warned)
    *warned = true;
}

/* Return true if T1 and T2 are incompatible and we want to recursively dive into them from warn_type_mismatch to give a sensible answer. */
static bool
type_mismatch_p (tree t1, tree t2)
{
  if (odr_or_derived_type_p (t1) && odr_or_derived_type_p (t2) && !odr_types_equivalent_p (t1, t2))
    return true;
  return !types_compatible_p (t1, t2);
}

/* Types T1 and T2 were found to be incompatible in a context where they can't be (either used to declare a symbol of the same assembler name or unified by the ODR rule). We have already output a warning about this, but if possible, output extra information on how the types mismatch.

   This is hard to do in general. We basically handle the common cases.

   If LOC1 and LOC2 are meaningful locations, use them in case the types themselves do not have one. */
void
warn_types_mismatch (tree t1, tree t2, location_t loc1, location_t loc2)
{
  /* The location of a type is known only if it has a TYPE_NAME and the name is a TYPE_DECL. */
  location_t loc_t1 = TYPE_NAME (t1) && TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL ? DECL_SOURCE_LOCATION (TYPE_NAME (t1)) : UNKNOWN_LOCATION;
  location_t loc_t2 = TYPE_NAME (t2) && TREE_CODE (TYPE_NAME (t2)) == TYPE_DECL ? DECL_SOURCE_LOCATION (TYPE_NAME (t2)) : UNKNOWN_LOCATION;
  bool loc_t2_useful = false;
  /* With LTO it is a common case that the locations of both types match. See if T2 has a location that is different from T1. If so, we will inform the user about the location. Do not consider the location passed to us in LOC1/LOC2 as those are already output. */
  if (loc_t2 > BUILTINS_LOCATION && loc_t2 != loc_t1)
    {
      if (loc_t1 <= BUILTINS_LOCATION)
	loc_t2_useful = true;
      else
	{
	  expanded_location xloc1 = expand_location (loc_t1);
	  expanded_location xloc2 = expand_location (loc_t2);
	  if (strcmp (xloc1.file, xloc2.file) || xloc1.line != xloc2.line || xloc1.column != xloc2.column)
	    loc_t2_useful = true;
	}
    }
  if (loc_t1 <= BUILTINS_LOCATION)
    loc_t1 = loc1;
  if (loc_t2 <= BUILTINS_LOCATION)
    loc_t2 = loc2;
  location_t loc = loc_t1 <= BUILTINS_LOCATION ? loc_t2 : loc_t1;
  /* It is a quite common bug to reference an anonymous namespace type in a non-anonymous namespace class.
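     For instance (a hypothetical sketch, not a real test case):

	 // header included by two translation units
	 namespace { struct Impl { int x; }; }   // a distinct type in each unit
	 struct Holder { Impl *impl; };           // named type referencing it

     Each unit gets its own copy of Impl, so the two Holder definitions can never be ODR equivalent; the branch below emits the dedicated diagnostic for this case.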
*/
  if ((type_with_linkage_p (t1) && type_in_anonymous_namespace_p (t1)) || (type_with_linkage_p (t2) && type_in_anonymous_namespace_p (t2)))
    {
      if (type_with_linkage_p (t1) && !type_in_anonymous_namespace_p (t1))
	{
	  std::swap (t1, t2);
	  std::swap (loc_t1, loc_t2);
	}
      gcc_assert (TYPE_NAME (t1) && TYPE_NAME (t2) && TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL && TREE_CODE (TYPE_NAME (t2)) == TYPE_DECL);
      /* Most of the time, the type names will match, so do not be unnecessarily verbose. */
      if (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t1))) != IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t2))))
	inform (loc_t1, "type %qT defined in anonymous namespace cannot match " "type %qT across the translation unit boundary", t1, t2);
      else
	inform (loc_t1, "type %qT defined in anonymous namespace cannot match " "across the translation unit boundary", t1);
      if (loc_t2_useful)
	inform (loc_t2, "the incompatible type defined in another translation unit");
      return;
    }
  /* If types have mangled ODR names and they are different, it is most informative to output those. This also covers types defined in different namespaces. */
  if (TYPE_NAME (t1) && TYPE_NAME (t2) && TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL && TREE_CODE (TYPE_NAME (t2)) == TYPE_DECL && DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t1)) && DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t2)) && DECL_ASSEMBLER_NAME (TYPE_NAME (t1)) != DECL_ASSEMBLER_NAME (TYPE_NAME (t2)))
    {
      char *name1 = xstrdup (cplus_demangle (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (TYPE_NAME (t1))), DMGL_PARAMS | DMGL_ANSI | DMGL_TYPES));
      char *name2 = cplus_demangle (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (TYPE_NAME (t2))), DMGL_PARAMS | DMGL_ANSI | DMGL_TYPES);
      if (name1 && name2 && strcmp (name1, name2))
	{
	  inform (loc_t1, "type name %<%s%> should match type name %<%s%>", name1, name2);
	  if (loc_t2_useful)
	    inform (loc_t2, "the incompatible type is defined here");
	  free (name1);
	  return;
	}
      free (name1);
    }
  /* A tricky case is compound types. Often they appear the same in source code and the mismatch is dragged in by the type they are built from. Look for those differences in subtypes and try to be informative. In other cases just output nothing because the source code is probably different and in this case we have already output all the necessary info.
*/
  if (!TYPE_NAME (t1) || !TYPE_NAME (t2))
    {
      if (TREE_CODE (t1) == TREE_CODE (t2))
	{
	  if (TREE_CODE (t1) == ARRAY_TYPE && COMPLETE_TYPE_P (t1) && COMPLETE_TYPE_P (t2))
	    {
	      tree i1 = TYPE_DOMAIN (t1);
	      tree i2 = TYPE_DOMAIN (t2);
	      if (i1 && i2 && TYPE_MAX_VALUE (i1) && TYPE_MAX_VALUE (i2) && !operand_equal_p (TYPE_MAX_VALUE (i1), TYPE_MAX_VALUE (i2), 0))
		{
		  inform (loc, "array types have different bounds");
		  return;
		}
	    }
	  if ((POINTER_TYPE_P (t1) || TREE_CODE (t1) == ARRAY_TYPE) && type_mismatch_p (TREE_TYPE (t1), TREE_TYPE (t2)))
	    warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), loc_t1, loc_t2);
	  else if (TREE_CODE (t1) == METHOD_TYPE || TREE_CODE (t1) == FUNCTION_TYPE)
	    {
	      tree parms1 = NULL, parms2 = NULL;
	      int count = 1;
	      if (type_mismatch_p (TREE_TYPE (t1), TREE_TYPE (t2)))
		{
		  inform (loc, "return value type mismatch");
		  warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), loc_t1, loc_t2);
		  return;
		}
	      if (prototype_p (t1) && prototype_p (t2))
		for (parms1 = TYPE_ARG_TYPES (t1), parms2 = TYPE_ARG_TYPES (t2); parms1 && parms2; parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2), count++)
		  {
		    if (type_mismatch_p (TREE_VALUE (parms1), TREE_VALUE (parms2)))
		      {
			if (count == 1 && TREE_CODE (t1) == METHOD_TYPE)
			  inform (loc, "implicit this pointer type mismatch");
			else
			  inform (loc, "type mismatch in parameter %i", count - (TREE_CODE (t1) == METHOD_TYPE));
			warn_types_mismatch (TREE_VALUE (parms1), TREE_VALUE (parms2), loc_t1, loc_t2);
			return;
		      }
		  }
	      if (parms1 || parms2)
		{
		  inform (loc, "types have different parameter counts");
		  return;
		}
	    }
	}
      return;
    }
  if (types_odr_comparable (t1, t2, true) && types_same_for_odr (t1, t2, true))
    inform (loc_t1, "type %qT itself violates the C++ One Definition Rule", t1);
  /* Prevent pointless warnings like "struct aa" should match "struct aa". */
  else if (TYPE_NAME (t1) == TYPE_NAME (t2) && TREE_CODE (t1) == TREE_CODE (t2) && !loc_t2_useful)
    return;
  else
    inform (loc_t1, "type %qT should match type %qT", t1, t2);
  if (loc_t2_useful)
    inform (loc_t2, "the incompatible type is defined here");
}

/* Compare T1 and T2, report ODR violations if WARN is true and set WARNED to true if anything is reported. Return true if types match. If true is returned, the types are also compatible in the sense of gimple_canonical_types_compatible_p. If LOC1 and LOC2 are not UNKNOWN_LOCATION they may be used to output a warning about the type if the type itself does not have a location. */
static bool
odr_types_equivalent_p (tree t1, tree t2, bool warn, bool *warned, hash_set<type_pair> *visited, location_t loc1, location_t loc2)
{
  /* Check first for the obvious case of pointer identity. */
  if (t1 == t2)
    return true;
  gcc_assert (!type_with_linkage_p (t1) || !type_in_anonymous_namespace_p (t1));
  gcc_assert (!type_with_linkage_p (t2) || !type_in_anonymous_namespace_p (t2));
  /* Can't be the same type if the types don't have the same code. */
  if (TREE_CODE (t1) != TREE_CODE (t2))
    {
      warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a different type is defined in another translation unit"));
      return false;
    }
  if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
    {
      warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type with different qualifiers is defined in another " "translation unit"));
      return false;
    }
  if ((type_with_linkage_p (t1) && type_in_anonymous_namespace_p (t1)) || (type_with_linkage_p (t2) && type_in_anonymous_namespace_p (t2)))
    {
      /* We cannot trip this when comparing ODR types, only when trying to match different ODR derivations from different declarations. So WARN should always be false.
*/
      gcc_assert (!warn);
      return false;
    }
  if (comp_type_attributes (t1, t2) != 1)
    {
      warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type with different attributes " "is defined in another translation unit"));
      return false;
    }
  if (TREE_CODE (t1) == ENUMERAL_TYPE && TYPE_VALUES (t1) && TYPE_VALUES (t2))
    {
      tree v1, v2;
      for (v1 = TYPE_VALUES (t1), v2 = TYPE_VALUES (t2); v1 && v2; v1 = TREE_CHAIN (v1), v2 = TREE_CHAIN (v2))
	{
	  if (TREE_PURPOSE (v1) != TREE_PURPOSE (v2))
	    {
	      warn_odr (t1, t2, NULL, NULL, warn, warned, G_("an enum with a different value name" " is defined in another translation unit"));
	      return false;
	    }
	  if (TREE_VALUE (v1) != TREE_VALUE (v2) && !operand_equal_p (DECL_INITIAL (TREE_VALUE (v1)), DECL_INITIAL (TREE_VALUE (v2)), 0))
	    {
	      warn_odr (t1, t2, NULL, NULL, warn, warned, G_("an enum with different values is defined" " in another translation unit"));
	      return false;
	    }
	}
      if (v1 || v2)
	{
	  warn_odr (t1, t2, NULL, NULL, warn, warned, G_("an enum with a mismatching number of values " "is defined in another translation unit"));
	  return false;
	}
    }
  /* Non-aggregate types can be handled cheaply. */
  if (INTEGRAL_TYPE_P (t1) || SCALAR_FLOAT_TYPE_P (t1) || FIXED_POINT_TYPE_P (t1) || TREE_CODE (t1) == VECTOR_TYPE || TREE_CODE (t1) == COMPLEX_TYPE || TREE_CODE (t1) == OFFSET_TYPE || POINTER_TYPE_P (t1))
    {
      if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2))
	{
	  warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type with different precision is defined " "in another translation unit"));
	  return false;
	}
      if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
	{
	  warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type with different signedness is defined " "in another translation unit"));
	  return false;
	}
      if (TREE_CODE (t1) == INTEGER_TYPE && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))
	{
	  /* char WRT uint_8? */
	  warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a different type is defined in another " "translation unit"));
	  return false;
	}
      /* For canonical type comparisons we do not want to build SCCs so we cannot compare pointed-to types. But we can, for now, require the same pointed-to type kind and match what useless_type_conversion_p would do. */
      if (POINTER_TYPE_P (t1))
	{
	  if (TYPE_ADDR_SPACE (TREE_TYPE (t1)) != TYPE_ADDR_SPACE (TREE_TYPE (t2)))
	    {
	      warn_odr (t1, t2, NULL, NULL, warn, warned, G_("it is defined as a pointer in a different address " "space in another translation unit"));
	      return false;
	    }
	  if (!odr_subtypes_equivalent_p (TREE_TYPE (t1), TREE_TYPE (t2), visited, loc1, loc2))
	    {
	      warn_odr (t1, t2, NULL, NULL, warn, warned, G_("it is defined as a pointer to a different type " "in another translation unit"));
	      if (warn && warned)
		warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), loc1, loc2);
	      return false;
	    }
	}
      if ((TREE_CODE (t1) == VECTOR_TYPE || TREE_CODE (t1) == COMPLEX_TYPE) && !odr_subtypes_equivalent_p (TREE_TYPE (t1), TREE_TYPE (t2), visited, loc1, loc2))
	{
	  /* Probably specific enough. */
	  warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a different type is defined " "in another translation unit"));
	  if (warn && warned)
	    warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), loc1, loc2);
	  return false;
	}
    }
  /* Do type-specific comparisons. */
  else switch (TREE_CODE (t1))
    {
    case ARRAY_TYPE:
      {
	/* Array types are the same if the element types are the same and the number of elements is the same.
*/
	if (!odr_subtypes_equivalent_p (TREE_TYPE (t1), TREE_TYPE (t2), visited, loc1, loc2))
	  {
	    warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a different type is defined in another " "translation unit"));
	    if (warn && warned)
	      warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), loc1, loc2);
	  }
	gcc_assert (TYPE_STRING_FLAG (t1) == TYPE_STRING_FLAG (t2));
	gcc_assert (TYPE_NONALIASED_COMPONENT (t1) == TYPE_NONALIASED_COMPONENT (t2));
	tree i1 = TYPE_DOMAIN (t1);
	tree i2 = TYPE_DOMAIN (t2);
	/* For an incomplete external array, the type domain can be NULL_TREE. Check this condition also. */
	if (i1 == NULL_TREE || i2 == NULL_TREE)
	  return true;
	tree min1 = TYPE_MIN_VALUE (i1);
	tree min2 = TYPE_MIN_VALUE (i2);
	tree max1 = TYPE_MAX_VALUE (i1);
	tree max2 = TYPE_MAX_VALUE (i2);
	/* In C++, minimums should always be 0. */
	gcc_assert (min1 == min2);
	if (!operand_equal_p (max1, max2, 0))
	  {
	    warn_odr (t1, t2, NULL, NULL, warn, warned, G_("an array of different size is defined " "in another translation unit"));
	    return false;
	  }
      }
      break;
    case METHOD_TYPE:
    case FUNCTION_TYPE:
      /* Function types are the same if the return type and argument types are the same. */
      if (!odr_subtypes_equivalent_p (TREE_TYPE (t1), TREE_TYPE (t2), visited, loc1, loc2))
	{
	  warn_odr (t1, t2, NULL, NULL, warn, warned, G_("has different return value " "in another translation unit"));
	  if (warn && warned)
	    warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), loc1, loc2);
	  return false;
	}
      if (TYPE_ARG_TYPES (t1) == TYPE_ARG_TYPES (t2) || !prototype_p (t1) || !prototype_p (t2))
	return true;
      else
	{
	  tree parms1, parms2;
	  for (parms1 = TYPE_ARG_TYPES (t1), parms2 = TYPE_ARG_TYPES (t2); parms1 && parms2; parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2))
	    {
	      if (!odr_subtypes_equivalent_p (TREE_VALUE (parms1), TREE_VALUE (parms2), visited, loc1, loc2))
		{
		  warn_odr (t1, t2, NULL, NULL, warn, warned, G_("has different parameters in another " "translation unit"));
		  if (warn && warned)
		    warn_types_mismatch (TREE_VALUE (parms1), TREE_VALUE (parms2), loc1, loc2);
		  return false;
		}
	    }
	  if (parms1 || parms2)
	    {
	      warn_odr (t1, t2, NULL, NULL, warn, warned, G_("has different parameters " "in another translation unit"));
	      return false;
	    }
	  return true;
	}
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	tree f1, f2;
	/* For aggregate types, all the fields must be the same. */
	if (COMPLETE_TYPE_P (t1) && COMPLETE_TYPE_P (t2))
	  {
	    if (TYPE_BINFO (t1) && TYPE_BINFO (t2) && polymorphic_type_binfo_p (TYPE_BINFO (t1)) != polymorphic_type_binfo_p (TYPE_BINFO (t2)))
	      {
		if (polymorphic_type_binfo_p (TYPE_BINFO (t1)))
		  warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type defined in another translation unit " "is not polymorphic"));
		else
		  warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type defined in another translation unit " "is polymorphic"));
		return false;
	      }
	    for (f1 = TYPE_FIELDS (t1), f2 = TYPE_FIELDS (t2); f1 || f2; f1 = TREE_CHAIN (f1), f2 = TREE_CHAIN (f2))
	      {
		/* Skip non-fields.
*/ while (f1 && TREE_CODE (f1) != FIELD_DECL) f1 = TREE_CHAIN (f1); while (f2 && TREE_CODE (f2) != FIELD_DECL) f2 = TREE_CHAIN (f2); if (!f1 || !f2) break; if (DECL_VIRTUAL_P (f1) != DECL_VIRTUAL_P (f2)) { warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type with different virtual table pointers" " is defined in another translation unit")); return false; } if (DECL_ARTIFICIAL (f1) != DECL_ARTIFICIAL (f2)) { warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type with different bases is defined " "in another translation unit")); return false; } if (DECL_NAME (f1) != DECL_NAME (f2) && !DECL_ARTIFICIAL (f1)) { warn_odr (t1, t2, f1, f2, warn, warned, G_("a field with different name is defined " "in another translation unit")); return false; } if (!odr_subtypes_equivalent_p (TREE_TYPE (f1), TREE_TYPE (f2), visited, loc1, loc2)) { /* Do not warn about artificial fields and just go into generic field mismatch warning. */ if (DECL_ARTIFICIAL (f1)) break; warn_odr (t1, t2, f1, f2, warn, warned, G_("a field of same name but different type " "is defined in another translation unit")); if (warn && warned) warn_types_mismatch (TREE_TYPE (f1), TREE_TYPE (f2), loc1, loc2); return false; } if (!gimple_compare_field_offset (f1, f2)) { /* Do not warn about artificial fields and just go into generic field mismatch warning. */ if (DECL_ARTIFICIAL (f1)) break; warn_odr (t1, t2, f1, f2, warn, warned, G_("fields have different layout " "in another translation unit")); return false; } gcc_assert (DECL_NONADDRESSABLE_P (f1) == DECL_NONADDRESSABLE_P (f2)); } /* If one aggregate has more fields than the other, they are not the same. */ if (f1 || f2) { if ((f1 && DECL_VIRTUAL_P (f1)) || (f2 && DECL_VIRTUAL_P (f2))) warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type with different virtual table pointers" " is defined in another translation unit")); else if ((f1 && DECL_ARTIFICIAL (f1)) || (f2 && DECL_ARTIFICIAL (f2))) warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type with different bases is defined " "in another translation unit")); else warn_odr (t1, t2, f1, f2, warn, warned, G_("a type with different number of fields " "is defined in another translation unit")); return false; } if ((TYPE_MAIN_VARIANT (t1) == t1 || TYPE_MAIN_VARIANT (t2) == t2) && COMPLETE_TYPE_P (TYPE_MAIN_VARIANT (t1)) && COMPLETE_TYPE_P (TYPE_MAIN_VARIANT (t2)) && odr_type_p (TYPE_MAIN_VARIANT (t1)) && odr_type_p (TYPE_MAIN_VARIANT (t2)) && (TYPE_METHODS (TYPE_MAIN_VARIANT (t1)) != TYPE_METHODS (TYPE_MAIN_VARIANT (t2)))) { /* Currently free_lang_data sets TYPE_METHODS to error_mark_node if it is non-NULL so this loop will never really execute. 
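   For illustration, the kind of mismatch this (currently dead) method loop is meant to flag -- a hypothetical pair:

     struct S { void f (); };           // translation unit 1
     struct S { virtual void f (); };   // translation unit 2: differs
                                        // by the virtual keyword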
*/ if (TYPE_METHODS (TYPE_MAIN_VARIANT (t1)) != error_mark_node && TYPE_METHODS (TYPE_MAIN_VARIANT (t2)) != error_mark_node) for (f1 = TYPE_METHODS (TYPE_MAIN_VARIANT (t1)), f2 = TYPE_METHODS (TYPE_MAIN_VARIANT (t2)); f1 && f2 ; f1 = DECL_CHAIN (f1), f2 = DECL_CHAIN (f2)) { if (DECL_ASSEMBLER_NAME (f1) != DECL_ASSEMBLER_NAME (f2)) { warn_odr (t1, t2, f1, f2, warn, warned, G_("a different method of same type " "is defined in another " "translation unit")); return false; } if (DECL_VIRTUAL_P (f1) != DECL_VIRTUAL_P (f2)) { warn_odr (t1, t2, f1, f2, warn, warned, G_("a definition that differs by virtual " "keyword in another translation unit")); return false; } if (DECL_VINDEX (f1) != DECL_VINDEX (f2)) { warn_odr (t1, t2, f1, f2, warn, warned, G_("virtual table layout differs " "in another translation unit")); return false; } if (!odr_subtypes_equivalent_p (TREE_TYPE (f1), TREE_TYPE (f2), visited, loc1, loc2)) { warn_odr (t1, t2, f1, f2, warn, warned, G_("method with incompatible type is " "defined in another translation unit")); return false; } } if ((f1 == NULL) != (f2 == NULL)) { warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type with different number of methods " "is defined in another translation unit")); return false; } } } break; } case VOID_TYPE: case NULLPTR_TYPE: break; default: debug_tree (t1); gcc_unreachable (); } /* Those are better to come last as they are utterly uninformative. */ if (TYPE_SIZE (t1) && TYPE_SIZE (t2) && !operand_equal_p (TYPE_SIZE (t1), TYPE_SIZE (t2), 0)) { warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type with different size " "is defined in another translation unit")); return false; } if (COMPLETE_TYPE_P (t1) && COMPLETE_TYPE_P (t2) && TYPE_ALIGN (t1) != TYPE_ALIGN (t2)) { warn_odr (t1, t2, NULL, NULL, warn, warned, G_("a type with different alignment " "is defined in another translation unit")); return false; } gcc_assert (!TYPE_SIZE_UNIT (t1) || !TYPE_SIZE_UNIT (t2) || operand_equal_p (TYPE_SIZE_UNIT (t1), TYPE_SIZE_UNIT (t2), 0)); return true; } /* Return true if TYPE1 and TYPE2 are equivalent for One Definition Rule. */ bool odr_types_equivalent_p (tree type1, tree type2) { hash_set<type_pair> visited; #ifdef ENABLE_CHECKING gcc_assert (odr_or_derived_type_p (type1) && odr_or_derived_type_p (type2)); #endif return odr_types_equivalent_p (type1, type2, false, NULL, &visited, UNKNOWN_LOCATION, UNKNOWN_LOCATION); } /* TYPE is equivalent to VAL by ODR, but its tree representation differs from VAL->type. This may happen in LTO where tree merging did not merge all variants of the same type or due to ODR violation. Analyze and report ODR violations and add type to duplicate list. If TYPE is more specified than VAL->type, make TYPE prevail as VAL->type. Also, if this is the first time we see a definition of the class, return true so the base types are analyzed. */ static bool add_type_duplicate (odr_type val, tree type) { bool build_bases = false; bool prevail = false; bool odr_must_violate = false; if (!val->types_set) val->types_set = new hash_set<tree>; /* Choose the polymorphic type as leader (this happens only in case of ODR violations). */ if ((TREE_CODE (type) == RECORD_TYPE && TYPE_BINFO (type) && polymorphic_type_binfo_p (TYPE_BINFO (type))) && (TREE_CODE (val->type) != RECORD_TYPE || !TYPE_BINFO (val->type) || !polymorphic_type_binfo_p (TYPE_BINFO (val->type)))) { prevail = true; build_bases = true; } /* Always prefer complete type to be the leader. 
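   E.g. if one unit saw only a forward declaration 'struct S;' while another saw the full definition, the complete definition is preferred as leader so later duplicates can be checked against its fields and bases.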
*/ else if (!COMPLETE_TYPE_P (val->type) && COMPLETE_TYPE_P (type)) { prevail = true; build_bases = TYPE_BINFO (type); } else if (COMPLETE_TYPE_P (val->type) && !COMPLETE_TYPE_P (type)) ; else if (TREE_CODE (val->type) == ENUMERAL_TYPE && TREE_CODE (type) == ENUMERAL_TYPE && !TYPE_VALUES (val->type) && TYPE_VALUES (type)) prevail = true; else if (TREE_CODE (val->type) == RECORD_TYPE && TREE_CODE (type) == RECORD_TYPE && TYPE_BINFO (type) && !TYPE_BINFO (val->type)) { gcc_assert (!val->bases.length ()); build_bases = true; prevail = true; } if (prevail) std::swap (val->type, type); val->types_set->add (type); /* If we now have a mangled name, be sure to record it to val->type so ODR hash can work. */ if (can_be_name_hashed_p (type) && !can_be_name_hashed_p (val->type)) SET_DECL_ASSEMBLER_NAME (TYPE_NAME (val->type), DECL_ASSEMBLER_NAME (TYPE_NAME (type))); bool merge = true; bool base_mismatch = false; unsigned int i; bool warned = false; hash_set<type_pair> visited; gcc_assert (in_lto_p); vec_safe_push (val->types, type); /* If both are class types, compare the bases. */ if (COMPLETE_TYPE_P (type) && COMPLETE_TYPE_P (val->type) && TREE_CODE (val->type) == RECORD_TYPE && TREE_CODE (type) == RECORD_TYPE && TYPE_BINFO (val->type) && TYPE_BINFO (type)) { if (BINFO_N_BASE_BINFOS (TYPE_BINFO (type)) != BINFO_N_BASE_BINFOS (TYPE_BINFO (val->type))) { if (!flag_ltrans && !warned && !val->odr_violated) { tree extra_base; warn_odr (type, val->type, NULL, NULL, !warned, &warned, "a type with the same name but different " "number of polymorphic bases is " "defined in another translation unit"); if (warned) { if (BINFO_N_BASE_BINFOS (TYPE_BINFO (type)) > BINFO_N_BASE_BINFOS (TYPE_BINFO (val->type))) extra_base = BINFO_BASE_BINFO (TYPE_BINFO (type), BINFO_N_BASE_BINFOS (TYPE_BINFO (val->type))); else extra_base = BINFO_BASE_BINFO (TYPE_BINFO (val->type), BINFO_N_BASE_BINFOS (TYPE_BINFO (type))); tree extra_base_type = BINFO_TYPE (extra_base); inform (DECL_SOURCE_LOCATION (TYPE_NAME (extra_base_type)), "the extra base is defined here"); } } base_mismatch = true; } else for (i = 0; i < BINFO_N_BASE_BINFOS (TYPE_BINFO (type)); i++) { tree base1 = BINFO_BASE_BINFO (TYPE_BINFO (type), i); tree base2 = BINFO_BASE_BINFO (TYPE_BINFO (val->type), i); tree type1 = BINFO_TYPE (base1); tree type2 = BINFO_TYPE (base2); if (types_odr_comparable (type1, type2)) { if (!types_same_for_odr (type1, type2)) base_mismatch = true; } else if (!odr_types_equivalent_p (type1, type2)) base_mismatch = true; if (base_mismatch) { if (!warned && !val->odr_violated) { warn_odr (type, val->type, NULL, NULL, !warned, &warned, "a type with the same name but different base " "type is defined in another translation unit"); if (warned) warn_types_mismatch (type1, type2, UNKNOWN_LOCATION, UNKNOWN_LOCATION); } break; } if (BINFO_OFFSET (base1) != BINFO_OFFSET (base2)) { base_mismatch = true; if (!warned && !val->odr_violated) warn_odr (type, val->type, NULL, NULL, !warned, &warned, "a type with the same name but different base " "layout is defined in another translation unit"); break; } /* One of bases is not of complete type. */ if (!TYPE_BINFO (type1) != !TYPE_BINFO (type2)) { /* If we have a polymorphic type info specified for TYPE1 but not for TYPE2 we possibly missed a base when recording VAL->type earlier. Be sure this does not happen. */ if (TYPE_BINFO (type1) && polymorphic_type_binfo_p (TYPE_BINFO (type1)) && !build_bases) odr_must_violate = true; break; } /* One base is polymorphic and the other not. 
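   (For example, hypothetically, one unit may give the shared base a virtual destructor while the other unit's version of the same base has no virtual members at all.)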
This ought to be diagnosed earlier, but do not ICE in the checking below. */ else if (TYPE_BINFO (type1) && polymorphic_type_binfo_p (TYPE_BINFO (type1)) != polymorphic_type_binfo_p (TYPE_BINFO (type2))) { if (!warned && !val->odr_violated) warn_odr (type, val->type, NULL, NULL, !warned, &warned, "a base of the type is polymorphic only in one " "translation unit"); base_mismatch = true; break; } } if (base_mismatch) { merge = false; odr_violation_reported = true; val->odr_violated = true; if (symtab->dump_file) { fprintf (symtab->dump_file, "ODR base violation\n"); print_node (symtab->dump_file, "", val->type, 0); putc ('\n',symtab->dump_file); print_node (symtab->dump_file, "", type, 0); putc ('\n',symtab->dump_file); } } } /* Next compare memory layout. */ if (!odr_types_equivalent_p (val->type, type, !flag_ltrans && !val->odr_violated && !warned, &warned, &visited, DECL_SOURCE_LOCATION (TYPE_NAME (val->type)), DECL_SOURCE_LOCATION (TYPE_NAME (type)))) { merge = false; odr_violation_reported = true; val->odr_violated = true; if (symtab->dump_file) { fprintf (symtab->dump_file, "ODR violation\n"); print_node (symtab->dump_file, "", val->type, 0); putc ('\n',symtab->dump_file); print_node (symtab->dump_file, "", type, 0); putc ('\n',symtab->dump_file); } } gcc_assert (val->odr_violated || !odr_must_violate); /* Sanity check that all bases will be built the same way again. */ #ifdef ENABLE_CHECKING if (COMPLETE_TYPE_P (type) && COMPLETE_TYPE_P (val->type) && TREE_CODE (val->type) == RECORD_TYPE && TREE_CODE (type) == RECORD_TYPE && TYPE_BINFO (val->type) && TYPE_BINFO (type) && !val->odr_violated && !base_mismatch && val->bases.length ()) { unsigned int num_poly_bases = 0; unsigned int j; for (i = 0; i < BINFO_N_BASE_BINFOS (TYPE_BINFO (type)); i++) if (polymorphic_type_binfo_p (BINFO_BASE_BINFO (TYPE_BINFO (type), i))) num_poly_bases++; gcc_assert (num_poly_bases == val->bases.length ()); for (j = 0, i = 0; i < BINFO_N_BASE_BINFOS (TYPE_BINFO (type)); i++) if (polymorphic_type_binfo_p (BINFO_BASE_BINFO (TYPE_BINFO (type), i))) { odr_type base = get_odr_type (BINFO_TYPE (BINFO_BASE_BINFO (TYPE_BINFO (type), i)), true); gcc_assert (val->bases[j] == base); j++; } } #endif /* Regularize things a little. During LTO the same types may come with different BINFOs. Either because their virtual table was not merged by tree merging and only later at decl merging or because one type comes with an external vtable while the other comes with an internal one. We want to merge equivalent binfos to conserve memory and streaming overhead. The external vtables are more harmful: they contain references to external declarations of methods that may be defined in the merged LTO unit. For this reason we absolutely need to remove them and replace them by internal variants. Not doing so will lead to incomplete answers from possible_polymorphic_call_targets. FIXME: disable for now; because ODR types are now built during streaming in, the variants do not need to be linked to the type, yet. We need to do the merging in a cleanup pass to be implemented soon. 
*/ if (!flag_ltrans && merge && 0 && TREE_CODE (val->type) == RECORD_TYPE && TREE_CODE (type) == RECORD_TYPE && TYPE_BINFO (val->type) && TYPE_BINFO (type) && TYPE_MAIN_VARIANT (type) == type && TYPE_MAIN_VARIANT (val->type) == val->type && BINFO_VTABLE (TYPE_BINFO (val->type)) && BINFO_VTABLE (TYPE_BINFO (type))) { tree master_binfo = TYPE_BINFO (val->type); tree v1 = BINFO_VTABLE (master_binfo); tree v2 = BINFO_VTABLE (TYPE_BINFO (type)); if (TREE_CODE (v1) == POINTER_PLUS_EXPR) { gcc_assert (TREE_CODE (v2) == POINTER_PLUS_EXPR && operand_equal_p (TREE_OPERAND (v1, 1), TREE_OPERAND (v2, 1), 0)); v1 = TREE_OPERAND (TREE_OPERAND (v1, 0), 0); v2 = TREE_OPERAND (TREE_OPERAND (v2, 0), 0); } gcc_assert (DECL_ASSEMBLER_NAME (v1) == DECL_ASSEMBLER_NAME (v2)); if (DECL_EXTERNAL (v1) && !DECL_EXTERNAL (v2)) { unsigned int i; set_type_binfo (val->type, TYPE_BINFO (type)); for (i = 0; i < val->types->length (); i++) { if (TYPE_BINFO ((*val->types)[i]) == master_binfo) set_type_binfo ((*val->types)[i], TYPE_BINFO (type)); } BINFO_TYPE (TYPE_BINFO (type)) = val->type; } else set_type_binfo (type, master_binfo); } return build_bases; } /* Get ODR type hash entry for TYPE. If INSERT is true, possibly create a new entry. */ odr_type get_odr_type (tree type, bool insert) { odr_type_d **slot = NULL; odr_type_d **vtable_slot = NULL; odr_type val = NULL; hashval_t hash; bool build_bases = false; bool insert_to_odr_array = false; int base_id = -1; type = main_odr_variant (type); gcc_checking_assert (can_be_name_hashed_p (type) || can_be_vtable_hashed_p (type)); /* Look up the entry; first try the name hash, then fall back to the vtable hash. */ if (can_be_name_hashed_p (type)) { hash = hash_odr_name (type); slot = odr_hash->find_slot_with_hash (type, hash, insert ? INSERT : NO_INSERT); } if ((!slot || !*slot) && in_lto_p && can_be_vtable_hashed_p (type)) { hash = hash_odr_vtable (type); vtable_slot = odr_vtable_hash->find_slot_with_hash (type, hash, insert ? INSERT : NO_INSERT); } if (!slot && !vtable_slot) return NULL; /* See if we already have an entry for the type. */ if ((slot && *slot) || (vtable_slot && *vtable_slot)) { if (slot && *slot) { val = *slot; #ifdef ENABLE_CHECKING if (in_lto_p && can_be_vtable_hashed_p (type)) { hash = hash_odr_vtable (type); vtable_slot = odr_vtable_hash->find_slot_with_hash (type, hash, NO_INSERT); gcc_assert (!vtable_slot || *vtable_slot == *slot); vtable_slot = NULL; } #endif } else if (*vtable_slot) val = *vtable_slot; if (val->type != type && (!val->types_set || !val->types_set->add (type))) { gcc_assert (insert); /* We have a type duplicate, but it may introduce a vtable name or mangled name; be sure to keep the hashes in sync. 
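   The code below therefore mirrors VAL into whichever of the two hashes (name or vtable) did not yet have an entry for TYPE, so that later lookups through either hash reach the same odr_type.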
*/ if (in_lto_p && can_be_vtable_hashed_p (type) && (!vtable_slot || !*vtable_slot)) { if (!vtable_slot) { hash = hash_odr_vtable (type); vtable_slot = odr_vtable_hash->find_slot_with_hash (type, hash, INSERT); gcc_checking_assert (!*vtable_slot || *vtable_slot == val); } *vtable_slot = val; } if (slot && !*slot) *slot = val; build_bases = add_type_duplicate (val, type); } } else { val = ggc_cleared_alloc<odr_type_d> (); val->type = type; val->bases = vNULL; val->derived_types = vNULL; if (type_with_linkage_p (type)) val->anonymous_namespace = type_in_anonymous_namespace_p (type); else val->anonymous_namespace = 0; build_bases = COMPLETE_TYPE_P (val->type); insert_to_odr_array = true; if (slot) *slot = val; if (vtable_slot) *vtable_slot = val; } if (build_bases && TREE_CODE (type) == RECORD_TYPE && TYPE_BINFO (type) && type_with_linkage_p (type) && type == TYPE_MAIN_VARIANT (type)) { tree binfo = TYPE_BINFO (type); unsigned int i; gcc_assert (BINFO_TYPE (TYPE_BINFO (val->type)) == type); val->all_derivations_known = type_all_derivations_known_p (type); for (i = 0; i < BINFO_N_BASE_BINFOS (binfo); i++) /* For now record only polymorphic types. Others are pointless for devirtualization and we cannot precisely determine ODR equivalency of these during LTO. */ if (polymorphic_type_binfo_p (BINFO_BASE_BINFO (binfo, i))) { tree base_type = BINFO_TYPE (BINFO_BASE_BINFO (binfo, i)); odr_type base = get_odr_type (base_type, true); gcc_assert (TYPE_MAIN_VARIANT (base_type) == base_type); base->derived_types.safe_push (val); val->bases.safe_push (base); if (base->id > base_id) base_id = base->id; } } /* Ensure that type always appears after bases. */ if (insert_to_odr_array) { if (odr_types_ptr) val->id = odr_types.length (); vec_safe_push (odr_types_ptr, val); } else if (base_id > val->id) { odr_types[val->id] = 0; /* Be sure we did not record any derived types; these may need renumbering too. */ gcc_assert (val->derived_types.length() == 0); if (odr_types_ptr) val->id = odr_types.length (); vec_safe_push (odr_types_ptr, val); } return val; } /* Add TYPE to the ODR type hash. */ void register_odr_type (tree type) { if (!odr_hash) { odr_hash = new odr_hash_type (23); if (in_lto_p) odr_vtable_hash = new odr_vtable_hash_type (23); } /* Arrange things to be nicer and insert main variants first. ??? Fundamental prerecorded types do not have mangled names; this makes it possible that a non-ODR type is the main_odr_variant of an ODR type. Things may get smoother if the LTO FE set the mangled name of those types the same way the C++ FE does. */ if (odr_type_p (main_odr_variant (TYPE_MAIN_VARIANT (type))) && odr_type_p (TYPE_MAIN_VARIANT (type))) get_odr_type (TYPE_MAIN_VARIANT (type), true); if (TYPE_MAIN_VARIANT (type) != type && odr_type_p (main_odr_variant (type))) get_odr_type (type, true); } /* Return true if type is known to have no derivations. */ bool type_known_to_have_no_derivations_p (tree t) { return (type_all_derivations_known_p (t) && (TYPE_FINAL_P (t) || (odr_hash && !get_odr_type (t, true)->derived_types.length()))); } /* Dump ODR type T and all its derived types. INDENT specifies indentation for recursive printing. */ static void dump_odr_type (FILE *f, odr_type t, int indent=0) { unsigned int i; fprintf (f, "%*s type %i: ", indent * 2, "", t->id); print_generic_expr (f, t->type, TDF_SLIM); fprintf (f, "%s", t->anonymous_namespace ? " (anonymous namespace)":""); fprintf (f, "%s\n", t->all_derivations_known ? 
" (derivations known)":""); if (TYPE_NAME (t->type)) { /*fprintf (f, "%*s defined at: %s:%i\n", indent * 2, "", DECL_SOURCE_FILE (TYPE_NAME (t->type)), DECL_SOURCE_LINE (TYPE_NAME (t->type)));*/ if (DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t->type))) fprintf (f, "%*s mangled name: %s\n", indent * 2, "", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (TYPE_NAME (t->type)))); } if (t->bases.length ()) { fprintf (f, "%*s base odr type ids: ", indent * 2, ""); for (i = 0; i < t->bases.length (); i++) fprintf (f, " %i", t->bases[i]->id); fprintf (f, "\n"); } if (t->derived_types.length ()) { fprintf (f, "%*s derived types:\n", indent * 2, ""); for (i = 0; i < t->derived_types.length (); i++) dump_odr_type (f, t->derived_types[i], indent + 1); } fprintf (f, "\n"); } /* Dump the type inheritance graph. */ static void dump_type_inheritance_graph (FILE *f) { unsigned int i; if (!odr_types_ptr) return; fprintf (f, "\n\nType inheritance graph:\n"); for (i = 0; i < odr_types.length (); i++) { if (odr_types[i] && odr_types[i]->bases.length () == 0) dump_odr_type (f, odr_types[i]); } for (i = 0; i < odr_types.length (); i++) { if (odr_types[i] && odr_types[i]->types && odr_types[i]->types->length ()) { unsigned int j; fprintf (f, "Duplicate tree types for odr type %i\n", i); print_node (f, "", odr_types[i]->type, 0); for (j = 0; j < odr_types[i]->types->length (); j++) { tree t; fprintf (f, "duplicate #%i\n", j); print_node (f, "", (*odr_types[i]->types)[j], 0); t = (*odr_types[i]->types)[j]; while (TYPE_P (t) && TYPE_CONTEXT (t)) { t = TYPE_CONTEXT (t); print_node (f, "", t, 0); } putc ('\n',f); } } } } /* Initialize IPA devirt and build inheritance tree graph. */ void build_type_inheritance_graph (void) { struct symtab_node *n; FILE *inheritance_dump_file; int flags; if (odr_hash) return; timevar_push (TV_IPA_INHERITANCE); inheritance_dump_file = dump_begin (TDI_inheritance, &flags); odr_hash = new odr_hash_type (23); if (in_lto_p) odr_vtable_hash = new odr_vtable_hash_type (23); /* We reconstruct the graph starting of types of all methods seen in the the unit. */ FOR_EACH_SYMBOL (n) if (is_a <cgraph_node *> (n) && DECL_VIRTUAL_P (n->decl) && n->real_symbol_p ()) get_odr_type (TYPE_METHOD_BASETYPE (TREE_TYPE (n->decl)), true); /* Look also for virtual tables of types that do not define any methods. We need it in a case where class B has virtual base of class A re-defining its virtual method and there is class C with no virtual methods with B as virtual base. Here we output B's virtual method in two variant - for non-virtual and virtual inheritance. B's virtual table has non-virtual version, while C's has virtual. For this reason we need to know about C in order to include both variants of B. More correctly, record_target_from_binfo should add both variants of the method when walking B, but we have no link in between them. We rely on fact that either the method is exported and thus we assume it is called externally or C is in anonymous namespace and thus we will see the vtable. 
*/ else if (is_a <varpool_node *> (n) && DECL_VIRTUAL_P (n->decl) && TREE_CODE (DECL_CONTEXT (n->decl)) == RECORD_TYPE && TYPE_BINFO (DECL_CONTEXT (n->decl)) && polymorphic_type_binfo_p (TYPE_BINFO (DECL_CONTEXT (n->decl)))) get_odr_type (TYPE_MAIN_VARIANT (DECL_CONTEXT (n->decl)), true); if (inheritance_dump_file) { dump_type_inheritance_graph (inheritance_dump_file); dump_end (TDI_inheritance, inheritance_dump_file); } timevar_pop (TV_IPA_INHERITANCE); } /* Return true if N has reference from live virtual table (and thus can be a destination of polymorphic call). Be conservatively correct when callgraph is not built or if the method may be referred externally. */ static bool referenced_from_vtable_p (struct cgraph_node *node) { int i; struct ipa_ref *ref; bool found = false; if (node->externally_visible || DECL_EXTERNAL (node->decl) || node->used_from_other_partition) return true; /* Keep this test constant time. It is unlikely this can happen except for the case where speculative devirtualization introduced many speculative edges to this node. In this case the target is very likely alive anyway. */ if (node->ref_list.referring.length () > 100) return true; /* We need references built. */ if (symtab->state <= CONSTRUCTION) return true; for (i = 0; node->iterate_referring (i, ref); i++) if ((ref->use == IPA_REF_ALIAS && referenced_from_vtable_p (dyn_cast<cgraph_node *> (ref->referring))) || (ref->use == IPA_REF_ADDR && TREE_CODE (ref->referring->decl) == VAR_DECL && DECL_VIRTUAL_P (ref->referring->decl))) { found = true; break; } return found; } /* If TARGET has associated node, record it in the NODES array. CAN_REFER specify if program can refer to the target directly. if TARGET is unknown (NULL) or it can not be inserted (for example because its body was already removed and there is no way to refer to it), clear COMPLETEP. */ static void maybe_record_node (vec <cgraph_node *> &nodes, tree target, hash_set<tree> *inserted, bool can_refer, bool *completep) { struct cgraph_node *target_node, *alias_target; enum availability avail; /* cxa_pure_virtual and __builtin_unreachable do not need to be added into list of targets; the runtime effect of calling them is undefined. Only "real" virtual methods should be accounted. */ if (target && TREE_CODE (TREE_TYPE (target)) != METHOD_TYPE) return; if (!can_refer) { /* The only case when method of anonymous namespace becomes unreferable is when we completely optimized it out. */ if (flag_ltrans || !target || !type_in_anonymous_namespace_p (DECL_CONTEXT (target))) *completep = false; return; } if (!target) return; target_node = cgraph_node::get (target); /* Prefer alias target over aliases, so we do not get confused by fake duplicates. */ if (target_node) { alias_target = target_node->ultimate_alias_target (&avail); if (target_node != alias_target && avail >= AVAIL_AVAILABLE && target_node->get_availability ()) target_node = alias_target; } /* Method can only be called by polymorphic call if any of vtables referring to it are alive. While this holds for non-anonymous functions, too, there are cases where we want to keep them in the list; for example inline functions with -fno-weak are static, but we still may devirtualize them when instance comes from other unit. The same holds for LTO. Currently we ignore these functions in speculative devirtualization. ??? Maybe it would make sense to be more aggressive for LTO even elsewhere. 
*/ if (!flag_ltrans && type_in_anonymous_namespace_p (DECL_CONTEXT (target)) && (!target_node || !referenced_from_vtable_p (target_node))) ; /* See if TARGET is a useful function we can deal with. */ else if (target_node != NULL && (TREE_PUBLIC (target) || DECL_EXTERNAL (target) || target_node->definition) && target_node->real_symbol_p ()) { gcc_assert (!target_node->global.inlined_to); gcc_assert (target_node->real_symbol_p ()); if (!inserted->add (target)) { cached_polymorphic_call_targets->add (target_node); nodes.safe_push (target_node); } } else if (completep && (!type_in_anonymous_namespace_p (DECL_CONTEXT (target)) || flag_ltrans)) *completep = false; } /* See if BINFO's type matches OUTER_TYPE. If so, look up BINFO of subtype of OTR_TYPE at OFFSET and in that BINFO find method in vtable and insert method to NODES array or BASES_TO_CONSIDER if this array is non-NULL. Otherwise recurse to base BINFOs. This matches what get_binfo_at_offset does, but with offset being unknown. TYPE_BINFOS is a stack of BINFOS of types with defined virtual table seen on the way from the class type to BINFO. MATCHED_VTABLES tracks virtual tables in which we already looked up the virtual function. INSERTED tracks nodes we already inserted. ANONYMOUS is true if BINFO is part of an anonymous namespace. Clear COMPLETEP when we hit an unreferable target. */ static void record_target_from_binfo (vec <cgraph_node *> &nodes, vec <tree> *bases_to_consider, tree binfo, tree otr_type, vec <tree> &type_binfos, HOST_WIDE_INT otr_token, tree outer_type, HOST_WIDE_INT offset, hash_set<tree> *inserted, hash_set<tree> *matched_vtables, bool anonymous, bool *completep) { tree type = BINFO_TYPE (binfo); int i; tree base_binfo; if (BINFO_VTABLE (binfo)) type_binfos.safe_push (binfo); if (types_same_for_odr (type, outer_type)) { int i; tree type_binfo = NULL; /* Look up BINFO with virtual table. For normal types it is always the last binfo on the stack. */ for (i = type_binfos.length () - 1; i >= 0; i--) if (BINFO_OFFSET (type_binfos[i]) == BINFO_OFFSET (binfo)) { type_binfo = type_binfos[i]; break; } if (BINFO_VTABLE (binfo)) type_binfos.pop (); /* If this is a duplicated BINFO for a base shared by virtual inheritance, we may not have its associated vtable. This is not a problem, since we will walk it on the other path. */ if (!type_binfo) return; tree inner_binfo = get_binfo_at_offset (type_binfo, offset, otr_type); if (!inner_binfo) { gcc_assert (odr_violation_reported); return; } /* For types in an anonymous namespace first check if the respective vtable is alive. If not, we know the type can't be called. */ if (!flag_ltrans && anonymous) { tree vtable = BINFO_VTABLE (inner_binfo); varpool_node *vnode; if (TREE_CODE (vtable) == POINTER_PLUS_EXPR) vtable = TREE_OPERAND (TREE_OPERAND (vtable, 0), 0); vnode = varpool_node::get (vtable); if (!vnode || !vnode->definition) return; } gcc_assert (inner_binfo); if (bases_to_consider ? !matched_vtables->contains (BINFO_VTABLE (inner_binfo)) : !matched_vtables->add (BINFO_VTABLE (inner_binfo))) { bool can_refer; tree target = gimple_get_virt_method_for_binfo (otr_token, inner_binfo, &can_refer); if (!bases_to_consider) maybe_record_node (nodes, target, inserted, can_refer, completep); /* Destructors are never called via construction vtables. */ else if (!target || !DECL_CXX_DESTRUCTOR_P (target)) bases_to_consider->safe_push (target); } return; } /* Walk bases. */ for (i = 0; BINFO_BASE_ITERATE (binfo, i, base_binfo); i++) /* Walking bases that have no virtual method is a pointless exercise. 
*/ if (polymorphic_type_binfo_p (base_binfo)) record_target_from_binfo (nodes, bases_to_consider, base_binfo, otr_type, type_binfos, otr_token, outer_type, offset, inserted, matched_vtables, anonymous, completep); if (BINFO_VTABLE (binfo)) type_binfos.pop (); } /* Look up virtual methods matching OTR_TYPE (with OFFSET and OTR_TOKEN) of TYPE, insert them to NODES, recurse into derived nodes. INSERTED is used to avoid duplicate insertions of methods into NODES. MATCHED_VTABLES are used to avoid duplicate walking vtables. Clear COMPLETEP if unreferable target is found. If CONSIDER_CONSTRUCTION is true, record to BASES_TO_CONSIDER all cases where BASE_SKIPPED is true (because the base is abstract class). */ static void possible_polymorphic_call_targets_1 (vec <cgraph_node *> &nodes, hash_set<tree> *inserted, hash_set<tree> *matched_vtables, tree otr_type, odr_type type, HOST_WIDE_INT otr_token, tree outer_type, HOST_WIDE_INT offset, bool *completep, vec <tree> &bases_to_consider, bool consider_construction) { tree binfo = TYPE_BINFO (type->type); unsigned int i; auto_vec <tree, 8> type_binfos; bool possibly_instantiated = type_possibly_instantiated_p (type->type); /* We may need to consider types w/o instances because of possible derived types using their methods either directly or via construction vtables. We are safe to skip them when all derivations are known, since we will handle them later. This is done by recording them to BASES_TO_CONSIDER array. */ if (possibly_instantiated || consider_construction) { record_target_from_binfo (nodes, (!possibly_instantiated && type_all_derivations_known_p (type->type)) ? &bases_to_consider : NULL, binfo, otr_type, type_binfos, otr_token, outer_type, offset, inserted, matched_vtables, type->anonymous_namespace, completep); } for (i = 0; i < type->derived_types.length (); i++) possible_polymorphic_call_targets_1 (nodes, inserted, matched_vtables, otr_type, type->derived_types[i], otr_token, outer_type, offset, completep, bases_to_consider, consider_construction); } /* Cache of queries for polymorphic call targets. Enumerating all call targets may get expensive when there are many polymorphic calls in the program, so we memoize all the previous queries and avoid duplicated work. */ struct polymorphic_call_target_d { HOST_WIDE_INT otr_token; ipa_polymorphic_call_context context; odr_type type; vec <cgraph_node *> targets; tree decl_warning; int type_warning; bool complete; bool speculative; }; /* Polymorphic call target cache helpers. */ struct polymorphic_call_target_hasher : pointer_hash <polymorphic_call_target_d> { static inline hashval_t hash (const polymorphic_call_target_d *); static inline bool equal (const polymorphic_call_target_d *, const polymorphic_call_target_d *); static inline void remove (polymorphic_call_target_d *); }; /* Return the computed hashcode for ODR_QUERY. 
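   Note that the fields mixed in here must stay in sync with polymorphic_call_target_hasher::equal below: otr_token, the ODR type id, the outer type and offset, the speculative outer type and offset when present, and the speculative/maybe_* flags.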
*/ inline hashval_t polymorphic_call_target_hasher::hash (const polymorphic_call_target_d *odr_query) { inchash::hash hstate (odr_query->otr_token); hstate.add_wide_int (odr_query->type->id); hstate.merge_hash (TYPE_UID (odr_query->context.outer_type)); hstate.add_wide_int (odr_query->context.offset); if (odr_query->context.speculative_outer_type) { hstate.merge_hash (TYPE_UID (odr_query->context.speculative_outer_type)); hstate.add_wide_int (odr_query->context.speculative_offset); } hstate.add_flag (odr_query->speculative); hstate.add_flag (odr_query->context.maybe_in_construction); hstate.add_flag (odr_query->context.maybe_derived_type); hstate.add_flag (odr_query->context.speculative_maybe_derived_type); hstate.commit_flag (); return hstate.end (); } /* Compare cache entries T1 and T2. */ inline bool polymorphic_call_target_hasher::equal (const polymorphic_call_target_d *t1, const polymorphic_call_target_d *t2) { return (t1->type == t2->type && t1->otr_token == t2->otr_token && t1->speculative == t2->speculative && t1->context.offset == t2->context.offset && t1->context.speculative_offset == t2->context.speculative_offset && t1->context.outer_type == t2->context.outer_type && t1->context.speculative_outer_type == t2->context.speculative_outer_type && t1->context.maybe_in_construction == t2->context.maybe_in_construction && t1->context.maybe_derived_type == t2->context.maybe_derived_type && (t1->context.speculative_maybe_derived_type == t2->context.speculative_maybe_derived_type)); } /* Remove entry in polymorphic call target cache hash. */ inline void polymorphic_call_target_hasher::remove (polymorphic_call_target_d *v) { v->targets.release (); free (v); } /* Polymorphic call target query cache. */ typedef hash_table<polymorphic_call_target_hasher> polymorphic_call_target_hash_type; static polymorphic_call_target_hash_type *polymorphic_call_target_hash; /* Destroy polymorphic call target query cache. */ static void free_polymorphic_call_targets_hash () { if (cached_polymorphic_call_targets) { delete polymorphic_call_target_hash; polymorphic_call_target_hash = NULL; delete cached_polymorphic_call_targets; cached_polymorphic_call_targets = NULL; } } /* When virtual function is removed, we may need to flush the cache. */ static void devirt_node_removal_hook (struct cgraph_node *n, void *d ATTRIBUTE_UNUSED) { if (cached_polymorphic_call_targets && cached_polymorphic_call_targets->contains (n)) free_polymorphic_call_targets_hash (); } /* Look up base of BINFO that has virtual table VTABLE with OFFSET. */ tree subbinfo_with_vtable_at_offset (tree binfo, unsigned HOST_WIDE_INT offset, tree vtable) { tree v = BINFO_VTABLE (binfo); int i; tree base_binfo; unsigned HOST_WIDE_INT this_offset; if (v) { if (!vtable_pointer_value_to_vtable (v, &v, &this_offset)) gcc_unreachable (); if (offset == this_offset && DECL_ASSEMBLER_NAME (v) == DECL_ASSEMBLER_NAME (vtable)) return binfo; } for (i = 0; BINFO_BASE_ITERATE (binfo, i, base_binfo); i++) if (polymorphic_type_binfo_p (base_binfo)) { base_binfo = subbinfo_with_vtable_at_offset (base_binfo, offset, vtable); if (base_binfo) return base_binfo; } return NULL; } /* T is known constant value of virtual table pointer. Store virtual table to V and its offset to OFFSET. Return false if T does not look like virtual table reference. */ bool vtable_pointer_value_to_vtable (const_tree t, tree *v, unsigned HOST_WIDE_INT *offset) { /* We expect &MEM[(void *)&virtual_table + 16B]. We obtain object's BINFO from the context of the virtual table. 
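   (As a concrete example, for a hypothetical 'struct A { virtual void f (); }' the Itanium C++ ABI names the vtable _ZTV1A, and on a 64-bit target the folded vptr value looks like &MEM[(void *)&_ZTV1A + 16B], the 16 bytes skipping the offset-to-top and RTTI slots.)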
This one contains pointer to virtual table represented via POINTER_PLUS_EXPR. Verify that this pointer matches what we propagated through. In the case of virtual inheritance, the virtual tables may be nested, i.e. the offset may be different from 16 and we may need to dive into the type representation. */ if (TREE_CODE (t) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == MEM_REF && TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 1)) == INTEGER_CST && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 0)) == VAR_DECL) && DECL_VIRTUAL_P (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 0))) { *v = TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 0); *offset = tree_to_uhwi (TREE_OPERAND (TREE_OPERAND (t, 0), 1)); return true; } /* Alternative representation, used by C++ frontend is POINTER_PLUS_EXPR. We need to handle it when T comes from static variable initializer or BINFO. */ if (TREE_CODE (t) == POINTER_PLUS_EXPR) { *offset = tree_to_uhwi (TREE_OPERAND (t, 1)); t = TREE_OPERAND (t, 0); } else *offset = 0; if (TREE_CODE (t) != ADDR_EXPR) return false; *v = TREE_OPERAND (t, 0); return true; } /* T is known constant value of virtual table pointer. Return BINFO of the instance type. */ tree vtable_pointer_value_to_binfo (const_tree t) { tree vtable; unsigned HOST_WIDE_INT offset; if (!vtable_pointer_value_to_vtable (t, &vtable, &offset)) return NULL_TREE; /* FIXME: for stores of construction vtables we return NULL, because we do not have BINFO for those. Eventually we should fix our representation to allow this case to be handled, too. In the case we see store of BINFO we however may assume that standard folding will be able to cope with it. */ return subbinfo_with_vtable_at_offset (TYPE_BINFO (DECL_CONTEXT (vtable)), offset, vtable); } /* Walk bases of OUTER_TYPE that contain OTR_TYPE at OFFSET. Look up their respective virtual methods for OTR_TOKEN and OTR_TYPE and insert them in NODES. MATCHED_VTABLES and INSERTED is used to avoid duplicated work. */ static void record_targets_from_bases (tree otr_type, HOST_WIDE_INT otr_token, tree outer_type, HOST_WIDE_INT offset, vec <cgraph_node *> &nodes, hash_set<tree> *inserted, hash_set<tree> *matched_vtables, bool *completep) { while (true) { HOST_WIDE_INT pos, size; tree base_binfo; tree fld; if (types_same_for_odr (outer_type, otr_type)) return; for (fld = TYPE_FIELDS (outer_type); fld; fld = DECL_CHAIN (fld)) { if (TREE_CODE (fld) != FIELD_DECL) continue; pos = int_bit_position (fld); size = tree_to_shwi (DECL_SIZE (fld)); if (pos <= offset && (pos + size) > offset /* Do not get confused by zero sized bases. */ && polymorphic_type_binfo_p (TYPE_BINFO (TREE_TYPE (fld)))) break; } /* Within a class type we should always find corresponding fields. */ gcc_assert (fld && TREE_CODE (TREE_TYPE (fld)) == RECORD_TYPE); /* Nonbase types should have been stripped by outer_class_type. */ gcc_assert (DECL_ARTIFICIAL (fld)); outer_type = TREE_TYPE (fld); offset -= pos; base_binfo = get_binfo_at_offset (TYPE_BINFO (outer_type), offset, otr_type); if (!base_binfo) { gcc_assert (odr_violation_reported); return; } gcc_assert (base_binfo); if (!matched_vtables->add (BINFO_VTABLE (base_binfo))) { bool can_refer; tree target = gimple_get_virt_method_for_binfo (otr_token, base_binfo, &can_refer); if (!target || ! 
DECL_CXX_DESTRUCTOR_P (target)) maybe_record_node (nodes, target, inserted, can_refer, completep); matched_vtables->add (BINFO_VTABLE (base_binfo)); } } } /* When a virtual table is removed, we may need to flush the cache. */ static void devirt_variable_node_removal_hook (varpool_node *n, void *d ATTRIBUTE_UNUSED) { if (cached_polymorphic_call_targets && DECL_VIRTUAL_P (n->decl) && type_in_anonymous_namespace_p (DECL_CONTEXT (n->decl))) free_polymorphic_call_targets_hash (); } /* Record of how many calls would benefit from a given type being final. */ struct odr_type_warn_count { tree type; int count; gcov_type dyn_count; }; /* Record of how many calls would benefit from a given method being final. */ struct decl_warn_count { tree decl; int count; gcov_type dyn_count; }; /* Information about type and decl warnings. */ struct final_warning_record { gcov_type dyn_count; vec<odr_type_warn_count> type_warnings; hash_map<tree, decl_warn_count> decl_warnings; }; struct final_warning_record *final_warning_records; /* Return vector containing possible targets of polymorphic call of type OTR_TYPE calling method OTR_TOKEN within type of OTR_OUTER_TYPE and OFFSET. If INCLUDE_BASES is true, walk also base types of OUTER_TYPE containing OTR_TYPE and include their virtual methods. This is useful for types possibly in construction or destruction where the virtual table may temporarily change to one of the base types. INCLUDE_DERIVER_TYPES makes us walk the inheritance graph for all derivations. If COMPLETEP is non-NULL, store true if the list is complete. CACHE_TOKEN (if non-NULL) will get stored to a unique ID of the entry in the target cache. If the user needs to visit every target list just once, it can memoize them. If SPECULATIVE is set, the list will not contain targets that are not speculatively taken. The returned vector is placed into the cache. It is NOT the caller's responsibility to free it. The vector can be freed on a cgraph_remove_node call if the particular node is a virtual function present in the cache. */ vec <cgraph_node *> possible_polymorphic_call_targets (tree otr_type, HOST_WIDE_INT otr_token, ipa_polymorphic_call_context context, bool *completep, void **cache_token, bool speculative) { static struct cgraph_node_hook_list *node_removal_hook_holder; vec <cgraph_node *> nodes = vNULL; auto_vec <tree, 8> bases_to_consider; odr_type type, outer_type; polymorphic_call_target_d key; polymorphic_call_target_d **slot; unsigned int i; tree binfo, target; bool complete; bool can_refer = false; bool skipped = false; otr_type = TYPE_MAIN_VARIANT (otr_type); /* If ODR is not initialized or the context is invalid, return an empty incomplete list. */ if (!odr_hash || context.invalid || !TYPE_BINFO (otr_type)) { if (completep) *completep = context.invalid; if (cache_token) *cache_token = NULL; return nodes; } /* Do not bother to compute speculative info when the user does not ask for it. */ if (!speculative || !context.speculative_outer_type) context.clear_speculation (); type = get_odr_type (otr_type, true); /* Recording type variants would waste the results cache. */ gcc_assert (!context.outer_type || TYPE_MAIN_VARIANT (context.outer_type) == context.outer_type); /* Look up the outer class type we want to walk. If we fail to do so, the context is invalid. 
*/ if ((context.outer_type || context.speculative_outer_type) && !context.restrict_to_inner_class (otr_type)) { if (completep) *completep = true; if (cache_token) *cache_token = NULL; return nodes; } gcc_assert (!context.invalid); /* Check that restrict_to_inner_class kept the main variant. */ gcc_assert (!context.outer_type || TYPE_MAIN_VARIANT (context.outer_type) == context.outer_type); /* We canonicalize our query, so we do not need extra hashtable entries. */ /* Without an outer type, we have no use for the offset. Just do the basic search from the inner type. */ if (!context.outer_type) context.clear_outer_type (otr_type); /* We need to update our hierarchy if the type does not exist. */ outer_type = get_odr_type (context.outer_type, true); /* If the type is final, there are no derivations. */ if (TYPE_FINAL_P (outer_type->type)) context.maybe_derived_type = false; /* Initialize the query cache. */ if (!cached_polymorphic_call_targets) { cached_polymorphic_call_targets = new hash_set<cgraph_node *>; polymorphic_call_target_hash = new polymorphic_call_target_hash_type (23); if (!node_removal_hook_holder) { node_removal_hook_holder = symtab->add_cgraph_removal_hook (&devirt_node_removal_hook, NULL); symtab->add_varpool_removal_hook (&devirt_variable_node_removal_hook, NULL); } } if (in_lto_p) { if (context.outer_type != otr_type) context.outer_type = get_odr_type (context.outer_type, true)->type; if (context.speculative_outer_type) context.speculative_outer_type = get_odr_type (context.speculative_outer_type, true)->type; } /* Look up the cached answer. */ key.type = type; key.otr_token = otr_token; key.speculative = speculative; key.context = context; slot = polymorphic_call_target_hash->find_slot (&key, INSERT); if (cache_token) *cache_token = (void *)*slot; if (*slot) { if (completep) *completep = (*slot)->complete; if ((*slot)->type_warning && final_warning_records) { final_warning_records->type_warnings[(*slot)->type_warning - 1].count++; final_warning_records->type_warnings[(*slot)->type_warning - 1].dyn_count += final_warning_records->dyn_count; } if (!speculative && (*slot)->decl_warning && final_warning_records) { struct decl_warn_count *c = final_warning_records->decl_warnings.get ((*slot)->decl_warning); c->count++; c->dyn_count += final_warning_records->dyn_count; } return (*slot)->targets; } complete = true; /* Do the actual search. */ timevar_push (TV_IPA_VIRTUAL_CALL); *slot = XCNEW (polymorphic_call_target_d); if (cache_token) *cache_token = (void *)*slot; (*slot)->type = type; (*slot)->otr_token = otr_token; (*slot)->context = context; (*slot)->speculative = speculative; hash_set<tree> inserted; hash_set<tree> matched_vtables; /* First insert targets we speculatively identified as likely. */ if (context.speculative_outer_type) { odr_type speculative_outer_type; bool speculation_complete = true; /* First insert the target from the type itself and check if it may have derived types. */ speculative_outer_type = get_odr_type (context.speculative_outer_type, true); if (TYPE_FINAL_P (speculative_outer_type->type)) context.speculative_maybe_derived_type = false; binfo = get_binfo_at_offset (TYPE_BINFO (speculative_outer_type->type), context.speculative_offset, otr_type); if (binfo) target = gimple_get_virt_method_for_binfo (otr_token, binfo, &can_refer); else target = NULL; /* In case we get a final method, we don't need to walk derivations. 
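   (E.g. a C++ method declared final, as in 'struct S { virtual void f () final; };' -- no override of f can exist in any derived type, so the method found here is the only possible target.)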
*/ if (target && DECL_FINAL_P (target)) context.speculative_maybe_derived_type = false; if (type_possibly_instantiated_p (speculative_outer_type->type)) maybe_record_node (nodes, target, &inserted, can_refer, &speculation_complete); if (binfo) matched_vtables.add (BINFO_VTABLE (binfo)); /* Next walk recursively all derived types. */ if (context.speculative_maybe_derived_type) for (i = 0; i < speculative_outer_type->derived_types.length(); i++) possible_polymorphic_call_targets_1 (nodes, &inserted, &matched_vtables, otr_type, speculative_outer_type->derived_types[i], otr_token, speculative_outer_type->type, context.speculative_offset, &speculation_complete, bases_to_consider, false); } if (!speculative || !nodes.length ()) { /* First see the virtual method of the type itself. */ binfo = get_binfo_at_offset (TYPE_BINFO (outer_type->type), context.offset, otr_type); if (binfo) target = gimple_get_virt_method_for_binfo (otr_token, binfo, &can_refer); else { gcc_assert (odr_violation_reported); target = NULL; } /* Destructors are never called through construction virtual tables, because the type is always known. */ if (target && DECL_CXX_DESTRUCTOR_P (target)) context.maybe_in_construction = false; if (target) { /* In case we get a final method, we don't need to walk derivations. */ if (DECL_FINAL_P (target)) context.maybe_derived_type = false; } /* If OUTER_TYPE is abstract, we know we are not seeing its instance. */ if (type_possibly_instantiated_p (outer_type->type)) maybe_record_node (nodes, target, &inserted, can_refer, &complete); else skipped = true; if (binfo) matched_vtables.add (BINFO_VTABLE (binfo)); /* Next walk recursively all derived types. */ if (context.maybe_derived_type) { for (i = 0; i < outer_type->derived_types.length(); i++) possible_polymorphic_call_targets_1 (nodes, &inserted, &matched_vtables, otr_type, outer_type->derived_types[i], otr_token, outer_type->type, context.offset, &complete, bases_to_consider, context.maybe_in_construction); if (!outer_type->all_derivations_known) { if (!speculative && final_warning_records) { if (complete && nodes.length () == 1 && warn_suggest_final_types && !outer_type->derived_types.length ()) { if (outer_type->id >= (int)final_warning_records->type_warnings.length ()) final_warning_records->type_warnings.safe_grow_cleared (odr_types.length ()); final_warning_records->type_warnings[outer_type->id].count++; final_warning_records->type_warnings[outer_type->id].dyn_count += final_warning_records->dyn_count; final_warning_records->type_warnings[outer_type->id].type = outer_type->type; (*slot)->type_warning = outer_type->id + 1; } if (complete && warn_suggest_final_methods && nodes.length () == 1 && types_same_for_odr (DECL_CONTEXT (nodes[0]->decl), outer_type->type)) { bool existed; struct decl_warn_count &c = final_warning_records->decl_warnings.get_or_insert (nodes[0]->decl, &existed); if (existed) { c.count++; c.dyn_count += final_warning_records->dyn_count; } else { c.count = 1; c.dyn_count = final_warning_records->dyn_count; c.decl = nodes[0]->decl; } (*slot)->decl_warning = nodes[0]->decl; } } complete = false; } } if (!speculative) { /* Destructors are never called through construction virtual tables, because the type is always known. One of the entries may be cxa_pure_virtual, so look at at least two of them. 
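   (If the first vtable entry happens to be the cxa_pure_virtual placeholder, a real destructor can still appear in the second one; hence the loop below inspects up to two entries.)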
*/ if (context.maybe_in_construction) for (i = 0; i < MIN (nodes.length (), 2); i++) if (DECL_CXX_DESTRUCTOR_P (nodes[i]->decl)) context.maybe_in_construction = false; if (context.maybe_in_construction) { if (type != outer_type && (!skipped || (context.maybe_derived_type && !type_all_derivations_known_p (outer_type->type)))) record_targets_from_bases (otr_type, otr_token, outer_type->type, context.offset, nodes, &inserted, &matched_vtables, &complete); if (skipped) maybe_record_node (nodes, target, &inserted, can_refer, &complete); for (i = 0; i < bases_to_consider.length(); i++) maybe_record_node (nodes, bases_to_consider[i], &inserted, can_refer, &complete); } } } (*slot)->targets = nodes; (*slot)->complete = complete; if (completep) *completep = complete; timevar_pop (TV_IPA_VIRTUAL_CALL); return nodes; } bool add_decl_warning (const tree &key ATTRIBUTE_UNUSED, const decl_warn_count &value, vec<const decl_warn_count*> *vec) { vec->safe_push (&value); return true; } /* Dump target list TARGETS into FILE. */ static void dump_targets (FILE *f, vec <cgraph_node *> targets) { unsigned int i; for (i = 0; i < targets.length (); i++) { char *name = NULL; if (in_lto_p) name = cplus_demangle_v3 (targets[i]->asm_name (), 0); fprintf (f, " %s/%i", name ? name : targets[i]->name (), targets[i]->order); if (in_lto_p) free (name); if (!targets[i]->definition) fprintf (f, " (no definition%s)", DECL_DECLARED_INLINE_P (targets[i]->decl) ? " inline" : ""); } fprintf (f, "\n"); } /* Dump all possible targets of a polymorphic call. */ void dump_possible_polymorphic_call_targets (FILE *f, tree otr_type, HOST_WIDE_INT otr_token, const ipa_polymorphic_call_context &ctx) { vec <cgraph_node *> targets; bool final; odr_type type = get_odr_type (TYPE_MAIN_VARIANT (otr_type), false); unsigned int len; if (!type) return; targets = possible_polymorphic_call_targets (otr_type, otr_token, ctx, &final, NULL, false); fprintf (f, " Targets of polymorphic call of type %i:", type->id); print_generic_expr (f, type->type, TDF_SLIM); fprintf (f, " token %i\n", (int)otr_token); ctx.dump (f); fprintf (f, " %s%s%s%s\n ", final ? "This is a complete list." : "This is a partial list; extra targets may be defined in other units.", ctx.maybe_in_construction ? " (base types included)" : "", ctx.maybe_derived_type ? " (derived types included)" : "", ctx.speculative_maybe_derived_type ? " (speculative derived types included)" : ""); len = targets.length (); dump_targets (f, targets); targets = possible_polymorphic_call_targets (otr_type, otr_token, ctx, &final, NULL, true); if (targets.length () != len) { fprintf (f, " Speculative targets:"); dump_targets (f, targets); } gcc_assert (targets.length () <= len); fprintf (f, "\n"); } /* Return true if N can possibly be a target of a polymorphic call of OTR_TYPE/OTR_TOKEN. */ bool possible_polymorphic_call_target_p (tree otr_type, HOST_WIDE_INT otr_token, const ipa_polymorphic_call_context &ctx, struct cgraph_node *n) { vec <cgraph_node *> targets; unsigned int i; enum built_in_function fcode; bool final; if (TREE_CODE (TREE_TYPE (n->decl)) == FUNCTION_TYPE && ((fcode = DECL_FUNCTION_CODE (n->decl)) == BUILT_IN_UNREACHABLE || fcode == BUILT_IN_TRAP)) return true; if (!odr_hash) return true; targets = possible_polymorphic_call_targets (otr_type, otr_token, ctx, &final); for (i = 0; i < targets.length (); i++) if (n->semantically_equivalent_p (targets[i])) return true; /* At the moment we allow the middle end to dig out new external declarations as targets of polymorphic calls. 
*/ if (!final && !n->definition) return true; return false; } /* Return true if N can possibly be a target of a polymorphic call of OBJ_TYPE_REF expression REF in STMT. */ bool possible_polymorphic_call_target_p (tree ref, gimple stmt, struct cgraph_node *n) { ipa_polymorphic_call_context context (current_function_decl, ref, stmt); tree call_fn = gimple_call_fn (stmt); return possible_polymorphic_call_target_p (obj_type_ref_class (call_fn), tree_to_uhwi (OBJ_TYPE_REF_TOKEN (call_fn)), context, n); } /* After callgraph construction new external nodes may appear. Add them into the graph. */ void update_type_inheritance_graph (void) { struct cgraph_node *n; if (!odr_hash) return; free_polymorphic_call_targets_hash (); timevar_push (TV_IPA_INHERITANCE); /* We reconstruct the graph starting from types of all methods seen in the unit. */ FOR_EACH_FUNCTION (n) if (DECL_VIRTUAL_P (n->decl) && !n->definition && n->real_symbol_p ()) get_odr_type (TYPE_METHOD_BASETYPE (TREE_TYPE (n->decl)), true); timevar_pop (TV_IPA_INHERITANCE); } /* Return true if N looks like a likely target of a polymorphic call. Rule out cxa_pure_virtual, noreturns, functions declared cold and other obvious cases. */ bool likely_target_p (struct cgraph_node *n) { int flags; /* cxa_pure_virtual and similar things are not likely. */ if (TREE_CODE (TREE_TYPE (n->decl)) != METHOD_TYPE) return false; flags = flags_from_decl_or_type (n->decl); if (flags & ECF_NORETURN) return false; if (lookup_attribute ("cold", DECL_ATTRIBUTES (n->decl))) return false; if (n->frequency < NODE_FREQUENCY_NORMAL) return false; /* If there are no live virtual tables referring to the target, the only way the target can be called is via an instance coming from another compilation unit; speculative devirtualization is built around the assumption that this won't happen. */ if (!referenced_from_vtable_p (n)) return false; return true; } /* Compare type warning records P1 and P2 and choose the one with the larger count; helper for qsort. */ int type_warning_cmp (const void *p1, const void *p2) { const odr_type_warn_count *t1 = (const odr_type_warn_count *)p1; const odr_type_warn_count *t2 = (const odr_type_warn_count *)p2; if (t1->dyn_count < t2->dyn_count) return 1; if (t1->dyn_count > t2->dyn_count) return -1; return t2->count - t1->count; } /* Compare decl warning records P1 and P2 and choose the one with the larger count; helper for qsort. */ int decl_warning_cmp (const void *p1, const void *p2) { const decl_warn_count *t1 = *(const decl_warn_count * const *)p1; const decl_warn_count *t2 = *(const decl_warn_count * const *)p2; if (t1->dyn_count < t2->dyn_count) return 1; if (t1->dyn_count > t2->dyn_count) return -1; return t2->count - t1->count; } /* Try to speculatively devirtualize a call to OTR_TYPE with OTR_TOKEN with context CTX. */ struct cgraph_node * try_speculative_devirtualization (tree otr_type, HOST_WIDE_INT otr_token, ipa_polymorphic_call_context ctx) { vec <cgraph_node *> targets = possible_polymorphic_call_targets (otr_type, otr_token, ctx, NULL, NULL, true); unsigned int i; struct cgraph_node *likely_target = NULL; for (i = 0; i < targets.length (); i++) if (likely_target_p (targets[i])) { if (likely_target) return NULL; likely_target = targets[i]; } if (!likely_target || !likely_target->definition || DECL_EXTERNAL (likely_target->decl)) return NULL; /* Don't use an implicitly-declared destructor (c++/58678). 
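   (That is, a destructor the front end synthesized because the class declared none; speculatively binding to such a compiler-generated destructor is what went wrong in PR c++/58678, hence the DECL_ARTIFICIAL check below.)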
*/ struct cgraph_node *non_thunk_target = likely_target->function_symbol (); if (DECL_ARTIFICIAL (non_thunk_target->decl)) return NULL; if (likely_target->get_availability () <= AVAIL_INTERPOSABLE && likely_target->can_be_discarded_p ()) return NULL; return likely_target; } /* The ipa-devirt pass. When a polymorphic call has only one likely target in the unit, turn it into a speculative call. */ static unsigned int ipa_devirt (void) { struct cgraph_node *n; hash_set<void *> bad_call_targets; struct cgraph_edge *e; int npolymorphic = 0, nspeculated = 0, nconverted = 0, ncold = 0; int nmultiple = 0, noverwritable = 0, ndevirtualized = 0, nnotdefined = 0; int nwrong = 0, nok = 0, nexternal = 0, nartificial = 0; int ndropped = 0; if (!odr_types_ptr) return 0; if (dump_file) dump_type_inheritance_graph (dump_file); /* We can output -Wsuggest-final-methods and -Wsuggest-final-types warnings. This is implemented by setting up final_warning_records that are updated by get_polymorphic_call_targets. We need to clear the cache in this case to trigger recomputation of all entries. */ if (warn_suggest_final_methods || warn_suggest_final_types) { final_warning_records = new (final_warning_record); final_warning_records->type_warnings = vNULL; final_warning_records->type_warnings.safe_grow_cleared (odr_types.length ()); free_polymorphic_call_targets_hash (); } FOR_EACH_DEFINED_FUNCTION (n) { bool update = false; if (!opt_for_fn (n->decl, flag_devirtualize)) continue; if (dump_file && n->indirect_calls) fprintf (dump_file, "\n\nProcessing function %s/%i\n", n->name (), n->order); for (e = n->indirect_calls; e; e = e->next_callee) if (e->indirect_info->polymorphic) { struct cgraph_node *likely_target = NULL; void *cache_token; bool final; if (final_warning_records) final_warning_records->dyn_count = e->count; vec <cgraph_node *> targets = possible_polymorphic_call_targets (e, &final, &cache_token, true); unsigned int i; /* Trigger warnings by calculating non-speculative targets. */ if (warn_suggest_final_methods || warn_suggest_final_types) possible_polymorphic_call_targets (e); if (dump_file) dump_possible_polymorphic_call_targets (dump_file, e); npolymorphic++; /* See if the call can be devirtualized by means of ipa-prop's polymorphic call context propagation. If not, we can just forget about this call being polymorphic and avoid some heavy lifting in remove_unreachable_nodes that will otherwise try to keep all possible targets alive until inlining and in the inliner itself. This may need to be revisited once we add further ways to use the may edges, but it is a reasonable thing to do right now. */ if ((e->indirect_info->param_index == -1 || (!opt_for_fn (n->decl, flag_devirtualize_speculatively) && e->indirect_info->vptr_changed)) && !flag_ltrans_devirtualize) { e->indirect_info->polymorphic = false; ndropped++; if (dump_file) fprintf (dump_file, "Dropping polymorphic call info;" " it cannot be used by ipa-prop\n"); } if (!opt_for_fn (n->decl, flag_devirtualize_speculatively)) continue; if (!e->maybe_hot_p ()) { if (dump_file) fprintf (dump_file, "Call is cold\n\n"); ncold++; continue; } if (e->speculative) { if (dump_file) fprintf (dump_file, "Call is already speculated\n\n"); nspeculated++; /* When dumping, see if we agree with the speculation. 
*/ if (!dump_file) continue; } if (bad_call_targets.contains (cache_token)) { if (dump_file) fprintf (dump_file, "Target list is known to be useless\n\n"); nmultiple++; continue; } for (i = 0; i < targets.length (); i++) if (likely_target_p (targets[i])) { if (likely_target) { likely_target = NULL; if (dump_file) fprintf (dump_file, "More than one likely target\n\n"); nmultiple++; break; } likely_target = targets[i]; } if (!likely_target) { bad_call_targets.add (cache_token); continue; } /* This is reached only when dumping; check if we agree or disagree with the speculation. */ if (e->speculative) { struct cgraph_edge *e2; struct ipa_ref *ref; e->speculative_call_info (e2, e, ref); if (e2->callee->ultimate_alias_target () == likely_target->ultimate_alias_target ()) { fprintf (dump_file, "We agree with speculation\n\n"); nok++; } else { fprintf (dump_file, "We disagree with speculation\n\n"); nwrong++; } continue; } if (!likely_target->definition) { if (dump_file) fprintf (dump_file, "Target is not a definition\n\n"); nnotdefined++; continue; } /* Do not introduce new references to external symbols. While we can handle these just well, it is common for programs to incorrectly with headers defining methods they are linked with. */ if (DECL_EXTERNAL (likely_target->decl)) { if (dump_file) fprintf (dump_file, "Target is external\n\n"); nexternal++; continue; } /* Don't use an implicitly-declared destructor (c++/58678). */ struct cgraph_node *non_thunk_target = likely_target->function_symbol (); if (DECL_ARTIFICIAL (non_thunk_target->decl)) { if (dump_file) fprintf (dump_file, "Target is artificial\n\n"); nartificial++; continue; } if (likely_target->get_availability () <= AVAIL_INTERPOSABLE && likely_target->can_be_discarded_p ()) { if (dump_file) fprintf (dump_file, "Target is overwritable\n\n"); noverwritable++; continue; } else if (dbg_cnt (devirt)) { if (dump_enabled_p ()) { location_t locus = gimple_location_safe (e->call_stmt); dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, locus, "speculatively devirtualizing call in %s/%i to %s/%i\n", n->name (), n->order, likely_target->name (), likely_target->order); } if (!likely_target->can_be_discarded_p ()) { cgraph_node *alias; alias = dyn_cast<cgraph_node *> (likely_target->noninterposable_alias ()); if (alias) likely_target = alias; } nconverted++; update = true; e->make_speculative (likely_target, e->count * 8 / 10, e->frequency * 8 / 10); } } if (update) inline_update_overall_summary (n); } if (warn_suggest_final_methods || warn_suggest_final_types) { if (warn_suggest_final_types) { final_warning_records->type_warnings.qsort (type_warning_cmp); for (unsigned int i = 0; i < final_warning_records->type_warnings.length (); i++) if (final_warning_records->type_warnings[i].count) { tree type = final_warning_records->type_warnings[i].type; int count = final_warning_records->type_warnings[i].count; long long dyn_count = final_warning_records->type_warnings[i].dyn_count; if (!dyn_count) warning_n (DECL_SOURCE_LOCATION (TYPE_NAME (type)), OPT_Wsuggest_final_types, count, "Declaring type %qD final " "would enable devirtualization of %i call", "Declaring type %qD final " "would enable devirtualization of %i calls", type, count); else warning_n (DECL_SOURCE_LOCATION (TYPE_NAME (type)), OPT_Wsuggest_final_types, count, "Declaring type %qD final " "would enable devirtualization of %i call " "executed %lli times", "Declaring type %qD final " "would enable devirtualization of %i calls " "executed %lli times", type, count, dyn_count); } } if 
(warn_suggest_final_methods) { vec<const decl_warn_count*> decl_warnings_vec = vNULL; final_warning_records->decl_warnings.traverse <vec<const decl_warn_count *> *, add_decl_warning> (&decl_warnings_vec); decl_warnings_vec.qsort (decl_warning_cmp); for (unsigned int i = 0; i < decl_warnings_vec.length (); i++) { tree decl = decl_warnings_vec[i]->decl; int count = decl_warnings_vec[i]->count; long long dyn_count = decl_warnings_vec[i]->dyn_count; if (!dyn_count) if (DECL_CXX_DESTRUCTOR_P (decl)) warning_n (DECL_SOURCE_LOCATION (decl), OPT_Wsuggest_final_methods, count, "Declaring virtual destructor of %qD final " "would enable devirtualization of %i call", "Declaring virtual destructor of %qD final " "would enable devirtualization of %i calls", DECL_CONTEXT (decl), count); else warning_n (DECL_SOURCE_LOCATION (decl), OPT_Wsuggest_final_methods, count, "Declaring method %qD final " "would enable devirtualization of %i call", "Declaring method %qD final " "would enable devirtualization of %i calls", decl, count); else if (DECL_CXX_DESTRUCTOR_P (decl)) warning_n (DECL_SOURCE_LOCATION (decl), OPT_Wsuggest_final_methods, count, "Declaring virtual destructor of %qD final " "would enable devirtualization of %i call " "executed %lli times", "Declaring virtual destructor of %qD final " "would enable devirtualization of %i calls " "executed %lli times", DECL_CONTEXT (decl), count, dyn_count); else warning_n (DECL_SOURCE_LOCATION (decl), OPT_Wsuggest_final_methods, count, "Declaring method %qD final " "would enable devirtualization of %i call " "executed %lli times", "Declaring method %qD final " "would enable devirtualization of %i calls " "executed %lli times", decl, count, dyn_count); } } delete (final_warning_records); final_warning_records = 0; } if (dump_file) fprintf (dump_file, "%i polymorphic calls, %i devirtualized," " %i speculatively devirtualized, %i cold\n" "%i have multiple targets, %i overwritable," " %i already speculated (%i agree, %i disagree)," " %i external, %i not defined, %i artificial, %i infos dropped\n", npolymorphic, ndevirtualized, nconverted, ncold, nmultiple, noverwritable, nspeculated, nok, nwrong, nexternal, nnotdefined, nartificial, ndropped); return ndevirtualized || ndropped ? TODO_remove_functions : 0; } namespace { const pass_data pass_data_ipa_devirt = { IPA_PASS, /* type */ "devirt", /* name */ OPTGROUP_NONE, /* optinfo_flags */ TV_IPA_DEVIRT, /* tv_id */ 0, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ ( TODO_dump_symtab ), /* todo_flags_finish */ }; class pass_ipa_devirt : public ipa_opt_pass_d { public: pass_ipa_devirt (gcc::context *ctxt) : ipa_opt_pass_d (pass_data_ipa_devirt, ctxt, NULL, /* generate_summary */ NULL, /* write_summary */ NULL, /* read_summary */ NULL, /* write_optimization_summary */ NULL, /* read_optimization_summary */ NULL, /* stmt_fixup */ 0, /* function_transform_todo_flags_start */ NULL, /* function_transform */ NULL) /* variable_transform */ {} /* opt_pass methods: */ virtual bool gate (function *) { /* In LTO, always run the IPA passes and decide on function basis if the pass is enabled. 
*/ if (in_lto_p) return true; return (flag_devirtualize && (flag_devirtualize_speculatively || (warn_suggest_final_methods || warn_suggest_final_types)) && optimize); } virtual unsigned int execute (function *) { return ipa_devirt (); } }; // class pass_ipa_devirt } // anon namespace ipa_opt_pass_d * make_pass_ipa_devirt (gcc::context *ctxt) { return new pass_ipa_devirt (ctxt); } #include "gt-ipa-devirt.h"
nguyentu1602/gcc
gcc/ipa-devirt.c
C
gpl-2.0
126,410
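The warning machinery at the end of ipa_devirt() above sorts its records with the type_warning_cmp/decl_warning_cmp comparators: larger dynamic (profile-weighted) counts first, ties broken by the static call count, so the most profitable suggestion is reported first. A minimal stand-alone sketch of that comparator pattern follows; the struct name and fields are illustrative, not the real GCC record types.

/* Sketch of the "descending dyn_count, then descending count" ordering
   used by type_warning_cmp above.  Illustrative types only. */
#include <stdio.h>
#include <stdlib.h>

struct warn_count {
    const char *name;
    int count;           /* number of calls that could be devirtualized */
    long long dyn_count; /* profile-weighted (dynamic) count */
};

static int warn_count_cmp(const void *p1, const void *p2)
{
    const struct warn_count *t1 = p1;
    const struct warn_count *t2 = p2;

    if (t1->dyn_count < t2->dyn_count)
        return 1;                    /* larger dyn_count sorts first */
    if (t1->dyn_count > t2->dyn_count)
        return -1;
    return t2->count - t1->count;    /* tie-break on static count */
}

int main(void)
{
    struct warn_count w[] = { { "A", 3, 10 }, { "B", 7, 10 }, { "C", 1, 500 } };

    qsort(w, 3, sizeof w[0], warn_count_cmp);
    for (int i = 0; i < 3; i++)       /* prints C, B, A */
        printf("%s count=%d dyn=%lld\n", w[i].name, w[i].count, w[i].dyn_count);
    return 0;
}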
#include "conf.h" int *video_quality = &conf.video_quality; int *video_mode = &conf.video_mode; long def_table1[9]={0x2000,0x38D,0x788,0x5800,0x9C5,0x14B8,0x10000,0x1C6A,0x3C45}; long def_table2[9]={0x1CCD,-0x2E1,-0x579,0x4F33,-0x7EB,-0xF0C,0xE666,-0x170A,-0x2BC6}; long table1[9], table2[9]; void change_video_tables(int a, int b){ int i; for (i=0;i<9;i++) {table1[i]=(def_table1[i]*a)/b; table2[i]=(def_table2[i]*a)/b;} } long CompressionRateTable[]={0x60, 0x5D, 0x5A, 0x57, 0x54, 0x51, 0x4D, 0x48, 0x42, 0x3B, 0x32, 0x29, 0x22, 0x1D, 0x17, 0x14, 0x10, 0xE, 0xB, 9, 7, 6, 5, 4, 3, 2, 1}; void __attribute__((naked,noinline)) movie_record_task(){ asm volatile( "STMFD SP!, {R4,LR}\n" "SUB SP, SP, #4\n" "MOV R4, SP\n" "B loc_FFD3B4C8\n" "loc_FFD3B424:\n" "LDR R3, =0x6E460\n" "LDR R2, [R3]\n" "CMP R2, #0\n" "BNE loc_FFD3B4B4\n" "SUB R3, R12, #2\n" "CMP R3, #9\n" "LDRLS PC, [PC,R3,LSL#2]\n" "B loc_FFD3B4B4\n" ".long loc_FFD3B474\n" ".long loc_FFD3B48C\n" ".long loc_FFD3B494\n" ".long loc_FFD3B49C\n" ".long loc_FFD3B47C\n" ".long loc_FFD3B4A4\n" ".long loc_FFD3B484\n" ".long loc_FFD3B4B4\n" ".long loc_FFD3B4AC\n" ".long loc_FFD3B46C\n" "loc_FFD3B46C:\n" "BL sub_FFD3B560\n" "B loc_FFD3B4B0\n" "loc_FFD3B474:\n" "BL unlock_optical_zoom\n" "BL sub_FFD3B714\n" "B loc_FFD3B4B0\n" "loc_FFD3B47C:\n" "BL sub_FFD3BAE8_my\n" //---------------> "B loc_FFD3B4B0\n" "loc_FFD3B484:\n" "BL sub_FFD3BF1C\n" "B loc_FFD3B4B0\n" "loc_FFD3B48C:\n" "BL sub_FFD3BD80\n" "B loc_FFD3B4B0\n" "loc_FFD3B494:\n" "BL sub_FFD3C08C\n" "B loc_FFD3B4B0\n" "loc_FFD3B49C:\n" "BL sub_FFD3C250\n" "B loc_FFD3B4B0\n" "loc_FFD3B4A4:\n" "BL sub_FFD3BFA4\n" "B loc_FFD3B4B0\n" "loc_FFD3B4AC:\n" "BL sub_FFD3BDD0\n" "loc_FFD3B4B0:\n" "LDR R1, [SP]\n" "loc_FFD3B4B4:\n" "LDR R3, =0x6E394\n" "MOV R2, #0\n" "STR R2, [R1]\n" "LDR R0, [R3]\n" "BL sub_FFC104D8\n" "loc_FFD3B4C8:\n" "LDR R3, =0x6E390\n" "MOV R1, R4\n" "LDR R0, [R3]\n" "MOV R2, #0\n" "BL sub_FFC100C0\n" "LDR R0, [SP]\n" "LDR R12, [R0]\n" "CMP R12, #0xC\n" "MOV R1, R0\n" "BNE loc_FFD3B424\n" "LDR R3, =0x6E38C\n" "LDR R0, [R3]\n" "BL sub_FFC10E54\n" "BL sub_FFC1161C\n" "ADD SP, SP, #4\n" "LDMFD SP!, {R4,PC}\n" ); } void __attribute__((naked,noinline)) sub_FFD3BAE8_my(){ asm volatile( "STMFD SP!, {R4-R11,LR}\n" "LDR R5, =0x6E47C\n" "SUB SP, SP, #0x34\n" "LDR R3, [R5]\n" "CMP R3, #3\n" "MOV R4, R0\n" "MOVEQ R3, #4\n" "STREQ R3, [R5]\n" "LDR R3, =0x6E52C\n" "MOV LR, PC\n" "LDR PC, [R3]\n" "LDR R2, [R5]\n" "CMP R2, #4\n" "BNE loc_FFD3BCAC\n" "ADD R0, SP, #0x30\n" "ADD R1, SP, #0x2C\n" "ADD R2, SP, #0x28\n" "ADD R3, SP, #0x24\n" "BL sub_FFD3D1BC_my\n" //---------------------> "CMP R0, #0\n" "BNE loc_FFD3BB64\n" "LDR R3, =0x6E468\n" "LDR R2, [R3]\n" "CMP R2, #1\n" "BNE loc_FFD3BB78\n" "LDR R2, =0x6E4C0\n" "LDR R1, =0x6E494\n" "LDR R12, [R2]\n" "LDR R3, [R1]\n" "CMP R12, R3\n" "BCC loc_FFD3BB78\n" "loc_FFD3BB64:\n" "BL sub_FFD3BCF8\n" "BL sub_FFD3BEF8\n" "MOV R3, #5\n" "STR R3, [R5]\n" "B loc_FFD3BCAC\n" "loc_FFD3BB78:\n" "LDR R12, =0x6E4C8\n" "LDR R11, =0x6E4D4\n" "LDMIB R4, {R0-R2}\n" "LDR R10, [R12]\n" "LDR R7, [R11]\n" "LDR R4, [SP,#0x2C]\n" "LDR R5, [SP,#0x28]\n" "LDR R6, [SP,#0x24]\n" "LDR R8, =0x6E46C\n" "LDR R3, [SP,#0x30]\n" "ADD R12, SP, #0x20\n" "ADD LR, SP, #0x1C\n" "MOV R9, #1\n" "STMEA SP, {R4-R6,R12}\n" "STR R10, [SP,#0x10]\n" "STR R7, [SP,#0x14]\n" "STR LR, [SP,#0x18]\n" "STR R9, [R8]\n" "BL sub_FFC84328\n" "LDR R3, =0x6E384\n" "MOV R1, #0x3E8\n" "LDR R0, [R3]\n" "BL sub_FFC10C6C\n" "CMP R0, #9\n" "BNE loc_FFD3BBEC\n" "BL sub_FFD3D9CC\n" "LDR R3, =0x6E47C\n" "LDR R0, =0xFFD3BAD0\n" "B 
loc_FFD3BC04\n" "loc_FFD3BBEC:\n" "LDR R5, [SP,#0x1C]\n" "CMP R5, #0\n" "BEQ loc_FFD3BC10\n" "BL sub_FFD3D9CC\n" "LDR R3, =0x6E47C\n" "LDR R0, =0xFFD3BADC\n" "loc_FFD3BC04:\n" "STR R9, [R3]\n" "BL sub_FFD50948\n" "B loc_FFD3BCAC\n" "loc_FFD3BC10:\n" "BL sub_FFC84494\n" "LDR R0, [SP,#0x30]\n" "LDR R1, [SP,#0x20]\n" "BL sub_FFD3D6F0\n" "LDR R4, =0x6E4C0\n" "LDR R3, [R4]\n" "ADD R3, R3, #1\n" "LDR R0, [SP,#0x20]\n" "MOV R1, R11\n" "STR R3, [R4]\n" "MOV R2, R5\n" "BL sub_FFD3C5AC_my\n" //----------------------> "LDR R3, =0x6E4E0\n" "LDR R1, [R4]\n" "LDR R2, [R3]\n" "LDR R12, =0x6E4DC\n" "MUL R0, R2, R1\n" "LDR R1, [R12]\n" "BL sub_FFEDC0F0\n" "LDR R7, =0x6E4D8\n" "LDR R3, [R7]\n" "MOV R4, R0\n" "CMP R3, R4\n" "BNE loc_FFD3BC84\n" "LDR R6, =0x6E470\n" "LDR R3, [R6]\n" "CMP R3, #1\n" "BNE loc_FFD3BCA0\n" "B loc_FFD3BC88\n" "loc_FFD3BC84:\n" "LDR R6, =0x6E470\n" "loc_FFD3BC88:\n" "LDR R3, =0x6E510\n" "MOV R0, R4\n" "MOV LR, PC\n" "LDR PC, [R3]\n" "STR R5, [R6]\n" "STR R4, [R7]\n" "loc_FFD3BCA0:\n" "LDR R2, =0x6E46C\n" "MOV R3, #0\n" "STR R3, [R2]\n" "loc_FFD3BCAC:\n" "ADD SP, SP, #0x34\n" "LDMFD SP!, {R4-R11,PC}\n" ); } void __attribute__((naked,noinline)) sub_FFD3D1BC_my(){ asm volatile( "STMFD SP!, {R4-R11,LR}\n" "LDR R5, =0x6E7D4\n" "SUB SP, SP, #0x14\n" "LDR LR, [R5]\n" "LDR R12, =0x6E7EC\n" "ADD LR, LR, #1\n" "LDR R4, [R12]\n" "STR LR, [R5]\n" "LDR R12, =0x6E86C\n" "STR R0, [SP,#0x10]\n" "STR R1, [SP,#0xC]\n" "STR R2, [SP,#8]\n" "STR R3, [SP,#4]\n" "CMP LR, R4\n" "LDR R11, [R12]\n" "MOVHI R0, #0x80000001\n" "BHI loc_FFD3D6A4\n" "LDR R3, =0x6E850\n" "MOV R0, LR\n" "LDR R1, [R3]\n" "BL sub_FFEDC780\n" "CMP R0, #1\n" "BNE loc_FFD3D3DC\n" "LDR R0, =0x6E874\n" "LDR R1, =0x6E7C0\n" "LDR R3, [R0]\n" "LDR R2, [R1]\n" "CMP R3, R2\n" "LDREQ R3, =0x6E870\n" "LDREQ R5, [R3]\n" "MOVNE R5, R2\n" "LDR R3, =0x6E7D4\n" "LDR R2, =0x6E850\n" "LDR R0, [R3]\n" "LDR R1, [R2]\n" "BL sub_FFEDC0F0\n" "LDR R3, =0x6E7C8\n" "ADD R0, R0, #1\n" "AND R0, R0, #1\n" "STR R5, [R3,R0,LSL#2]\n" "LDR R3, =0x6E7BC\n" "LDR R2, [R3]\n" "CMP R5, R2\n" "BHI loc_FFD3D28C\n" "LDR R4, =0x6E80C\n" "LDR R3, [R4]\n" "ADD R3, R5, R3\n" "ADD R3, R3, #8\n" "CMP R2, R3\n" "BCS loc_FFD3D290\n" "loc_FFD3D284:\n" "MOV R0, #0x80000003\n" "B loc_FFD3D6A4\n" "loc_FFD3D28C:\n" "LDR R4, =0x6E80C\n" "loc_FFD3D290:\n" "LDR R3, [R4]\n" "LDR R2, =0x6E874\n" "ADD R1, R5, R3\n" "LDR R3, [R2]\n" "ADD R2, R1, #8\n" "CMP R2, R3\n" "BLS loc_FFD3D2DC\n" "LDR R2, =0x6E870\n" "LDR R0, =0x6E7BC\n" "RSB R3, R3, R1\n" "LDR R1, [R2]\n" "ADD R3, R3, #8\n" "LDR R2, [R0]\n" "ADD R1, R1, R3\n" "CMP R2, R1\n" "BCC loc_FFD3D284\n" "LDR R3, =0x6E7C0\n" "STR R1, [R3]\n" "B loc_FFD3D2E4\n" "loc_FFD3D2DC:\n" "LDR R3, =0x6E7C0\n" "STR R2, [R3]\n" "loc_FFD3D2E4:\n" "LDR R3, [R4]\n" "LDR R12, =0x6E820\n" "ADD R3, R3, #0x18\n" "LDR R2, [R12,#4]\n" "MOV R0, R3\n" "MOV R1, #0\n" "CMP R1, R2\n" "BHI loc_FFD3D528\n" "BNE loc_FFD3D314\n" "LDR R3, [R12]\n" "CMP R0, R3\n" "BHI loc_FFD3D528\n" "loc_FFD3D314:\n" "LDR R4, [R4]\n" "LDR LR, =0x6E828\n" "STR R4, [SP]\n" "LDR R12, =0x6E820\n" "LDR R3, =0x6E7D4\n" "LDMIA LR, {R7,R8}\n" "LDMIA R12, {R5,R6}\n" "LDR R10, [R3]\n" "LDR R2, =0x6E850\n" "MOV R3, R4\n" "MOV R4, #0\n" "ADDS R7, R7, R3\n" "ADC R8, R8, R4\n" "LDR R9, [R2]\n" "SUBS R5, R5, R3\n" "SBC R6, R6, R4\n" "MVN R2, #0\n" "MVN R1, #0x17\n" "ADDS R5, R5, R1\n" "MOV R4, #0\n" "MOV R3, #0x18\n" "ADC R6, R6, R2\n" "ADDS R7, R7, R3\n" "ADC R8, R8, R4\n" "STMIA R12, {R5,R6}\n" "SUB R0, R10, #1\n" "MOV R1, R9\n" "STMIA LR, {R7,R8}\n" "BL sub_FFEDC0F0\n" "CMP R10, #1\n" "MLA R0, R9, R0, 
R0\n" "BEQ loc_FFD3D3DC\n" "SUB R3, R0, #1\n" "MOV R3, R3,LSL#4\n" "ADD R4, R11, #0x10\n" "ADD R5, R11, #0x14\n" "LDR R1, [R5,R3]\n" "LDR R2, [R4,R3]\n" "LDR LR, =0x62773130\n" "ADD R2, R2, R1\n" "MOV R3, R0,LSL#4\n" "ADD R2, R2, #8\n" "MOV R0, #0\n" "ADD R12, R11, #0xC\n" "ADD R1, R11, #8\n" "STR LR, [R1,R3]\n" "STR R0, [R12,R3]\n" "STR R2, [R4,R3]\n" "LDR R0, [SP]\n" "STR R0, [R5,R3]\n" "loc_FFD3D3DC:\n" "LDR R2, =0x6E7C0\n" "LDR R3, =0x6E874\n" "LDR R1, [R2]\n" "LDR R0, [R3]\n" "ADD R3, R1, #9\n" "CMP R3, R0\n" "BLS loc_FFD3D418\n" "LDR R2, =0x6E870\n" "LDR R3, [R2]\n" "ADD R3, R3, R1\n" "RSB R3, R0, R3\n" "LDR R0, [SP,#0x10]\n" "ADD R3, R3, #8\n" "STR R3, [R0]\n" "B loc_FFD3D424\n" "loc_FFD3D418:\n" "ADD R3, R1, #8\n" "LDR R1, [SP,#0x10]\n" "STR R3, [R1]\n" "loc_FFD3D424:\n" "LDR R2, [SP,#0x10]\n" "LDR R1, =0x6E81C\n" "LDR R3, =0x6E874\n" "LDR R12, [R2]\n" "LDR R2, [R1]\n" "LDR R0, [R3]\n" "ADD R3, R12, R2\n" "CMP R3, R0\n" "BLS loc_FFD3D478\n" "LDR R2, [SP,#0xC]\n" "RSB R0, R12, R0\n" "STR R0, [R2]\n" "LDR R2, =0x6E870\n" "LDR R3, [R1]\n" "LDR R1, [R2]\n" "RSB R3, R0, R3\n" "LDR R0, [SP,#8]\n" "STR R1, [R0]\n" "LDR R1, [SP,#4]\n" "STR R3, [R1]\n" "B loc_FFD3D494\n" "loc_FFD3D478:\n" "LDR R0, [SP,#0xC]\n" "STR R2, [R0]\n" "LDR R1, [SP,#4]\n" "MOV R3, #0\n" "STR R3, [R1]\n" "LDR R2, [SP,#8]\n" "STR R3, [R2]\n" "loc_FFD3D494:\n" "LDR R0, =0x6E7C0\n" "LDR R1, =0x6E7BC\n" "LDR R3, [R0]\n" "LDR R2, [R1]\n" "CMP R3, R2\n" "BHI loc_FFD3D4C0\n" "LDR R0, [SP,#0xC]\n" "LDR R3, [R0]\n" "ADD R3, R12, R3\n" "CMP R2, R3\n" "BCC loc_FFD3D284\n" "loc_FFD3D4C0:\n" "LDR R1, [SP,#8]\n" "LDR R2, [R1]\n" "CMP R2, #0\n" "BEQ loc_FFD3D4F4\n" "LDR R3, =0x6E7BC\n" "LDR R1, [R3]\n" "CMP R2, R1\n" "BHI loc_FFD3D4F4\n" "LDR R0, [SP,#4]\n" "LDR R3, [R0]\n" "ADD R3, R2, R3\n" "CMP R1, R3\n" "BCC loc_FFD3D284\n" "loc_FFD3D4F4:\n" "LDR R3, =0x6E81C\n" "LDR R0, =0x6E820\n" "LDR R2, [R3]\n" "LDR R3, [R0,#4]\n" "ADD R2, R2, #0x18\n" "MOV R1, R2\n" "MOV R2, #0\n" "CMP R2, R3\n" "BHI loc_FFD3D528\n" "BNE loc_FFD3D530\n" "LDR R3, [R0]\n" "CMP R1, R3\n" "BLS loc_FFD3D530\n" "loc_FFD3D528:\n" "MOV R0, #0x80000005\n" "B loc_FFD3D6A4\n" "loc_FFD3D530:\n" "LDR R1, =0x6E804\n" "LDR R0, =0x6E850\n" "LDR R3, [R1]\n" "LDR R2, [R0]\n" "ADD R3, R3, R2,LSL#4\n" "ADD R3, R3, R3,LSL#2\n" "LDR R12, =0x6E820\n" "MOV R3, R3,LSL#1\n" "ADD R3, R3, #0xA0\n" "LDR R2, [R12,#4]\n" "MOV R0, R3\n" "MOV R1, #0\n" "CMP R1, R2\n" "BHI loc_FFD3D578\n" "BNE loc_FFD3D59C\n" "LDR R3, [R12]\n" "CMP R0, R3\n" "BLS loc_FFD3D59C\n" "loc_FFD3D578:\n" "LDR R4, =0x6E838\n" "LDR R1, [R4]\n" "CMP R1, #0\n" "BNE loc_FFD3D59C\n" "MOV R0, #0x3140\n" "ADD R0, R0, #8\n" "BL sub_FFD54E78\n" "MOV R3, #1\n" "STR R3, [R4]\n" "loc_FFD3D59C:\n" "LDR R1, =0x6E804\n" "LDR R0, =0x6E850\n" "LDR R2, [R1]\n" "LDR R3, [R0]\n" "LDR R0, =0x6E828\n" "ADD R2, R2, R3,LSL#4\n" "MVN R3, #0x9F\n" "ADD R2, R2, R2,LSL#2\n" "ADD R3, R3, #0x40000000\n" "SUB R3, R3, R2,LSL#1\n" "LDR R1, [R0,#4]\n" "MOV R4, R3\n" "MOV R5, #0\n" "CMP R1, R5\n" "BHI loc_FFD3D5E8\n" "BNE loc_FFD3D60C\n" "LDR R3, [R0]\n" "CMP R3, R4\n" "BLS loc_FFD3D60C\n" "loc_FFD3D5E8:\n" "LDR R4, =0x6E838\n" "LDR R1, [R4]\n" "CMP R1, #0\n" "BNE loc_FFD3D60C\n" "MOV R0, #0x3140\n" "ADD R0, R0, #8\n" "BL sub_FFD54E78\n" "MOV R3, #1\n" "STR R3, [R4]\n" "loc_FFD3D60C:\n" "LDR R3, =0x6E850\n" "LDR R0, =0x6E7EC\n" "LDR R2, [R3]\n" "LDR R12, =0x6E7D4\n" "LDR R1, [R0]\n" "ADD R3, R2, R2,LSL#2\n" "ADD R2, R2, R3,LSL#1\n" "LDR R0, [R12]\n" "RSB R1, R2, R1\n" "CMP R0, R1\n" "BLS loc_FFD3D65C\n" "LDR R4, =0x6E838\n" "LDR R1, [R4]\n" "CMP 
R1, #0\n" "BNE loc_FFD3D65C\n" "MOV R0, #0x3140\n" "ADD R0, R0, #8\n" "BL sub_FFD54E78\n" "MOV R3, #1\n" "STR R3, [R4]\n" "loc_FFD3D65C:\n" "LDR R3, =0x6E828\n" "LDR R12, =0x6E81C\n" "LDMIA R3, {R1,R2}\n" "LDR R0, [R12]\n" "MOV R4, #0\n" "MOV R3, #0x18\n" "ADDS R1, R1, R0\n" "ADC R2, R2, #0\n" "ADDS R1, R1, R3\n" "ADC R2, R2, R4\n" "CMP R2, #0\n" "BHI loc_FFD3D698\n" "BNE loc_FFD3D6A0\n" "CMP R1, #0x40000000\n" // "BLS loc_FFD3D6A0\n" // - "B loc_FFD3D6A0\n" // + "loc_FFD3D698:\n" "MOV R0, #0x80000007\n" "B loc_FFD3D6A4\n" "loc_FFD3D6A0:\n" "MOV R0, #0\n" "loc_FFD3D6A4:\n" "ADD SP, SP, #0x14\n" "LDMFD SP!, {R4-R11,PC}\n" ); } void __attribute__((naked,noinline)) sub_FFD3C5AC_my(){ asm volatile( "CMP R2, #1\n" "STMFD SP!, {R4-R7,LR}\n" "MOV R7, R0\n" "MOV R6, R1\n" "MOVEQ R3, #0x79\n" "STREQ R3, [R6]\n" "LDMEQFD SP!, {R4-R7,PC}\n" "LDR R12, =0x6E538\n" "LDR R0, [R12]\n" "LDR R3, =0x6E540\n" "CMP R0, #0\n" "LDR R1, [R3]\n" "BEQ loc_FFD3C5F4\n" "LDR R2, =0x6E544\n" "LDR R3, [R2]\n" "CMP R3, #1\n" "BNE loc_FFD3C608\n" "B loc_FFD3C5F8\n" "loc_FFD3C5F4:\n" "LDR R2, =0x6E544\n" "loc_FFD3C5F8:\n" "MOV R3, #0\n" "STR R3, [R2]\n" "STR R7, [R12]\n" "B loc_FFD3C6C0\n" "loc_FFD3C608:\n" "LDR R2, =0x6E53C\n" "LDR R3, [R2]\n" "LDR R5, =table1\n" //+ 0xFFD3C41C "ADD R3, R3, R3,LSL#1\n" "MOV LR, R3,LSL#2\n" "LDR R2, [R5,LR]\n" "LDR R4, =table2\n" //+ 0xFFD3C440 "RSB R12, R2, R0\n" "LDR R3, [R4,LR]\n" "CMP R12, #0\n" "RSB R0, R3, R0\n" "BLE loc_FFD3C66C\n" "ADD R3, R5, #4\n" "LDR R2, [R3,LR]\n" "CMP R2, R12\n" "ADDGE R1, R1, #1\n" "BGE loc_FFD3C660\n" "ADD R3, R5, #8\n" "LDR R2, [R3,LR]\n" "CMP R2, R12\n" "ADDGE R1, R1, #2\n" "ADDLT R1, R1, #3\n" "loc_FFD3C660:\n" // "CMP R1, #0xE\n" // - // "MOVGE R1, #0xE\n" // - "CMP R1, #0x1A\n" // + "MOVGE R1, #0x1A\n" // + "B loc_FFD3C6A4\n" "loc_FFD3C66C:\n" "CMP R0, #0\n" "BGE loc_FFD3C6A4\n" "ADD R3, R4, #4\n" "LDR R2, [R3,LR]\n" "CMP R2, R0\n" "SUBLE R1, R1, #1\n" "BLE loc_FFD3C69C\n" "ADD R3, R4, #8\n" "LDR R2, [R3,LR]\n" "CMP R2, R0\n" "SUBLE R1, R1, #2\n" "SUBGT R1, R1, #3\n" "loc_FFD3C69C:\n" "CMP R1, #0\n" "MOVLT R1, #0\n" "loc_FFD3C6A4:\n" "LDR R0, =0x6E540\n" "LDR R3, [R0]\n" "CMP R1, R3\n" "LDRNE R2, =0x6E544\n" "MOVNE R3, #1\n" "STRNE R1, [R0]\n" "STRNE R3, [R2]\n" "loc_FFD3C6C0:\n" "LDR R3, =0x6E540\n" // "LDR R1, =0x6088\n" // - "LDR R1, =video_mode\n" //+ "LDR R0, [R3]\n" "LDR R2, =CompressionRateTable\n" // + 0xFFD3C3E0 "LDR R12, [R1]\n" "LDR R12, [R12]\n" //+ "LDR LR, [R2,R0,LSL#2]\n" "LDR R3, =0x6E538\n" "CMP R12, #1\n" "STR R7, [R3]\n" "STR LR, [R6]\n" // "MOVEQ R3, #0xB\n" // - "LDREQ R3, =video_quality\n" // + "LDREQ R3, [R3]\n" // + "LDREQ R3, [R3]\n" // + "STREQ R3, [R6]\n" "BL mute_on_zoom\n" // + "LDMFD SP!, {R4-R7,PC}\n" ); }
arne182/chdk-eyefi
platform/a700/sub/100b/movie_rec.c
C
gpl-2.0
37,022
#include <lwk/types.h> long sys_getgroups(int n, gid_t gids[]) { return 0; }
jnouyang/kitten
kernel/linux_syscalls/getgroups.c
C
gpl-2.0
79
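The Kitten stub above always reports zero supplementary groups. For context, this is how standard POSIX user space calls getgroups(2); the snippet is not part of the Kitten tree, and against the stub it would simply print a count of 0.

/* User-space caller of getgroups(2); purely illustrative. */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

int main(void)
{
    gid_t gids[64];
    int n = getgroups(64, gids);   /* the stub above always returns 0 */

    if (n < 0) {
        perror("getgroups");
        return 1;
    }
    for (int i = 0; i < n; i++)
        printf("gid %d\n", (int) gids[i]);
    printf("%d supplementary group(s)\n", n);
    return 0;
}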
/* * Copyright (C) 2010-2015 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /** * @file ump_ukk_wrappers.c * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls */ #include <asm/uaccess.h> /* user space access */ #include "ump_osk.h" #include "ump_uk_types.h" #include "ump_ukk.h" #include "ump_kernel_common.h" /* * IOCTL operation; Negotiate version of IOCTL API */ int ump_get_api_version_wrapper(u32 __user *argument, struct ump_session_data *session_data) { _ump_uk_api_version_s version_info; _mali_osk_errcode_t err; /* Sanity check input parameters */ if (NULL == argument || NULL == session_data) { MSG_ERR(("NULL parameter in ump_ioctl_get_api_version()\n")); return -ENOTTY; } /* Copy the user space memory to kernel space (so we safely can read it) */ if (0 != copy_from_user(&version_info, argument, sizeof(version_info))) { MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n")); return -EFAULT; } version_info.ctx = (void *) session_data; err = _ump_uku_get_api_version(&version_info); if (_MALI_OSK_ERR_OK != err) { MSG_ERR(("_ump_uku_get_api_version() failed in ump_ioctl_get_api_version()\n")); return map_errcode(err); } version_info.ctx = NULL; /* Copy ouput data back to user space */ if (0 != copy_to_user(argument, &version_info, sizeof(version_info))) { MSG_ERR(("copy_to_user() failed in ump_ioctl_get_api_version()\n")); return -EFAULT; } return 0; /* success */ } /* * IOCTL operation; Release reference to specified UMP memory. */ int ump_release_wrapper(u32 __user *argument, struct ump_session_data *session_data) { _ump_uk_release_s release_args; _mali_osk_errcode_t err; /* Sanity check input parameters */ if (NULL == session_data) { MSG_ERR(("NULL parameter in ump_ioctl_release()\n")); return -ENOTTY; } /* Copy the user space memory to kernel space (so we safely can read it) */ if (0 != copy_from_user(&release_args, argument, sizeof(release_args))) { MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n")); return -EFAULT; } release_args.ctx = (void *) session_data; err = _ump_ukk_release(&release_args); if (_MALI_OSK_ERR_OK != err) { MSG_ERR(("_ump_ukk_release() failed in ump_ioctl_release()\n")); return map_errcode(err); } return 0; /* success */ } /* * IOCTL operation; Return size for specified UMP memory. 
*/ int ump_size_get_wrapper(u32 __user *argument, struct ump_session_data *session_data) { _ump_uk_size_get_s user_interaction; _mali_osk_errcode_t err; /* Sanity check input parameters */ if (NULL == argument || NULL == session_data) { MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n")); return -ENOTTY; } if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) { MSG_ERR(("copy_from_user() in ump_ioctl_size_get()\n")); return -EFAULT; } user_interaction.ctx = (void *) session_data; err = _ump_ukk_size_get(&user_interaction); if (_MALI_OSK_ERR_OK != err) { MSG_ERR(("_ump_ukk_size_get() failed in ump_ioctl_size_get()\n")); return map_errcode(err); } user_interaction.ctx = NULL; if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction))) { MSG_ERR(("copy_to_user() failed in ump_ioctl_size_get()\n")); return -EFAULT; } return 0; /* success */ } /* * IOCTL operation; Do cache maintenance on specified UMP memory. */ int ump_msync_wrapper(u32 __user *argument, struct ump_session_data *session_data) { _ump_uk_msync_s user_interaction; /* Sanity check input parameters */ if (NULL == argument || NULL == session_data) { MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n")); return -ENOTTY; } if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) { MSG_ERR(("copy_from_user() in ump_ioctl_msync()\n")); return -EFAULT; } user_interaction.ctx = (void *) session_data; _ump_ukk_msync(&user_interaction); user_interaction.ctx = NULL; if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction))) { MSG_ERR(("copy_to_user() failed in ump_ioctl_msync()\n")); return -EFAULT; } return 0; /* success */ } int ump_cache_operations_control_wrapper(u32 __user *argument, struct ump_session_data *session_data) { _ump_uk_cache_operations_control_s user_interaction; /* Sanity check input parameters */ if (NULL == argument || NULL == session_data) { MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n")); return -ENOTTY; } if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) { MSG_ERR(("copy_from_user() in ump_ioctl_cache_operations_control()\n")); return -EFAULT; } user_interaction.ctx = (void *) session_data; _ump_ukk_cache_operations_control((_ump_uk_cache_operations_control_s *) &user_interaction); user_interaction.ctx = NULL; #if 0 /* No data to copy back */ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction))) { MSG_ERR(("copy_to_user() failed in ump_ioctl_cache_operations_control()\n")); return -EFAULT; } #endif return 0; /* success */ } int ump_switch_hw_usage_wrapper(u32 __user *argument, struct ump_session_data *session_data) { _ump_uk_switch_hw_usage_s user_interaction; /* Sanity check input parameters */ if (NULL == argument || NULL == session_data) { MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n")); return -ENOTTY; } if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) { MSG_ERR(("copy_from_user() in ump_ioctl_switch_hw_usage()\n")); return -EFAULT; } user_interaction.ctx = (void *) session_data; _ump_ukk_switch_hw_usage(&user_interaction); user_interaction.ctx = NULL; #if 0 /* No data to copy back */ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction))) { MSG_ERR(("copy_to_user() failed in ump_ioctl_switch_hw_usage()\n")); return -EFAULT; } #endif return 0; /* success */ } int ump_lock_wrapper(u32 __user *argument, struct ump_session_data *session_data) { _ump_uk_lock_s user_interaction; /* Sanity check input parameters */ 
if (NULL == argument || NULL == session_data) { MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n")); return -ENOTTY; } if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) { MSG_ERR(("copy_from_user() in ump_ioctl_switch_hw_usage()\n")); return -EFAULT; } user_interaction.ctx = (void *) session_data; _ump_ukk_lock(&user_interaction); user_interaction.ctx = NULL; #if 0 /* No data to copy back */ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction))) { MSG_ERR(("copy_to_user() failed in ump_ioctl_switch_hw_usage()\n")); return -EFAULT; } #endif return 0; /* success */ } int ump_unlock_wrapper(u32 __user *argument, struct ump_session_data *session_data) { _ump_uk_unlock_s user_interaction; /* Sanity check input parameters */ if (NULL == argument || NULL == session_data) { MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n")); return -ENOTTY; } if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) { MSG_ERR(("copy_from_user() in ump_ioctl_switch_hw_usage()\n")); return -EFAULT; } user_interaction.ctx = (void *) session_data; _ump_ukk_unlock(&user_interaction); user_interaction.ctx = NULL; #if 0 /* No data to copy back */ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction))) { MSG_ERR(("copy_to_user() failed in ump_ioctl_switch_hw_usage()\n")); return -EFAULT; } #endif return 0; /* success */ }
kszaq/linux-amlogic
drivers/amlogic/gpu/ump/linux/ump_ukk_wrappers.c
C
gpl-2.0
7,996
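Every wrapper in the file above repeats the same five steps: sanity-check the pointers, copy_from_user() the argument block, stamp the session context into it, call the _ump_ukk_/_ump_uku_ backend, and copy_to_user() any results back. Below is a runnable user-space model of that pattern; fake_copy_user() stands in for the kernel copy helpers, and every other name (example_args, example_backend, example_wrapper) is illustrative rather than real UMP API.

/* User-space model of the IOCTL wrapper pattern; all names are placeholders. */
#include <stdio.h>
#include <string.h>
#include <errno.h>

struct example_args    { void *ctx; unsigned int size; };
struct example_session { int id; };

static int fake_copy_user(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;                       /* 0 == "all bytes copied", like the kernel helpers */
}

static int example_backend(struct example_args *args)
{
    args->size = 4096;              /* pretend the backend filled in a result */
    return 0;
}

static int example_wrapper(void *user_arg, struct example_session *session)
{
    struct example_args args;

    if (user_arg == NULL || session == NULL)
        return -ENOTTY;                               /* sanity check */
    if (fake_copy_user(&args, user_arg, sizeof(args)))
        return -EFAULT;                               /* copy argument block in */
    args.ctx = session;                               /* stamp session context */
    if (example_backend(&args))
        return -EFAULT;                               /* map backend error */
    args.ctx = NULL;
    if (fake_copy_user(user_arg, &args, sizeof(args)))
        return -EFAULT;                               /* copy results back out */
    return 0;
}

int main(void)
{
    struct example_args    a = { 0, 0 };
    struct example_session s = { 1 };

    printf("rc=%d size=%u\n", example_wrapper(&a, &s), a.size);
    return 0;
}

The real wrappers differ only in which argument struct they copy, which backend entry point they call, and whether any data needs copying back to user space.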
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <mach/gpio.h> #include <mach/camera.h> #include "msm_ispif.h" #include "msm.h" #include "msm_ispif_hwreg.h" #define V4L2_IDENT_ISPIF 50001 #define CSID_VERSION_V2 0x02000011 #define CSID_VERSION_V3 0x30000000 #define MAX_CID 15 static atomic_t ispif_irq_cnt; static spinlock_t ispif_tasklet_lock; static struct list_head ispif_tasklet_q; static int msm_ispif_intf_reset(struct ispif_device *ispif, uint16_t intfmask, uint8_t vfe_intf) { int rc = 0; uint32_t data = (0x1 << STROBED_RST_EN); uint16_t intfnum = 0, mask = intfmask; while (mask != 0) { if (!(intfmask & (0x1 << intfnum))) { mask >>= 1; intfnum++; continue; } switch (intfnum) { case PIX0: data |= (0x1 << PIX_0_VFE_RST_STB) | (0x1 << PIX_0_CSID_RST_STB); ispif->pix_sof_count = 0; break; case RDI0: data |= (0x1 << RDI_0_VFE_RST_STB) | (0x1 << RDI_0_CSID_RST_STB); ispif->rdi0_sof_count = 0; break; case PIX1: data |= (0x1 << PIX_1_VFE_RST_STB) | (0x1 << PIX_1_CSID_RST_STB); break; case RDI1: data |= (0x1 << RDI_1_VFE_RST_STB) | (0x1 << RDI_1_CSID_RST_STB); ispif->rdi1_sof_count = 0; break; case RDI2: data |= (0x1 << RDI_2_VFE_RST_STB) | (0x1 << RDI_2_CSID_RST_STB); ispif->rdi2_sof_count = 0; break; default: rc = -EINVAL; break; } mask >>= 1; intfnum++; } /*end while */ if (data > 0x1) { if (vfe_intf == VFE0) msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR); else msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_1_ADDR); rc = wait_for_completion_interruptible(&ispif->reset_complete); } return rc; } static int msm_ispif_reset(struct ispif_device *ispif) { int rc = 0; ispif->pix_sof_count = 0; msm_camera_io_w(ISPIF_RST_CMD_MASK, ispif->base + ISPIF_RST_CMD_ADDR); if (ispif->csid_version == CSID_VERSION_V3) msm_camera_io_w(ISPIF_RST_CMD_1_MASK, ispif->base + ISPIF_RST_CMD_1_ADDR); rc = wait_for_completion_interruptible(&ispif->reset_complete); return rc; } static int msm_ispif_subdev_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { BUG_ON(!chip); chip->ident = V4L2_IDENT_ISPIF; chip->revision = 0; return 0; } static void msm_ispif_sel_csid_core(struct ispif_device *ispif, uint8_t intftype, uint8_t csid, uint8_t vfe_intf) { int rc = 0; uint32_t data = 0; if (ispif->csid_version <= CSID_VERSION_V2) { if (ispif->ispif_clk[intftype] == NULL) { pr_err("%s: ispif NULL clk\n", __func__); return; } rc = clk_set_rate(ispif->ispif_clk[intftype], csid); if (rc < 0) pr_err("%s: clk_set_rate failed %d\n", __func__, rc); } data = msm_camera_io_r(ispif->base + ISPIF_INPUT_SEL_ADDR + (0x200 * vfe_intf)); switch (intftype) { case PIX0: data &= ~(0x3); data |= csid; break; case RDI0: data &= ~(0x3 << 4); data |= (csid << 4); break; case PIX1: data &= ~(0x3 << 8); data |= (csid << 8); break; case RDI1: data &= ~(0x3 << 12); data |= (csid << 12); break; case RDI2: data &= ~(0x3 << 20); data |= (csid << 20); break; } if (data) { msm_camera_io_w(data, ispif->base + ISPIF_INPUT_SEL_ADDR + (0x200 * 
vfe_intf)); } } static void msm_ispif_enable_intf_cids(struct ispif_device *ispif, uint8_t intftype, uint16_t cid_mask, uint8_t vfe_intf) { uint32_t data = 0; mutex_lock(&ispif->mutex); switch (intftype) { case PIX0: data = msm_camera_io_r(ispif->base + ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); data |= cid_mask; msm_camera_io_w(data, ispif->base + ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); break; case RDI0: data = msm_camera_io_r(ispif->base + ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); data |= cid_mask; msm_camera_io_w(data, ispif->base + ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); break; case PIX1: data = msm_camera_io_r(ispif->base + ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); data |= cid_mask; msm_camera_io_w(data, ispif->base + ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); break; case RDI1: data = msm_camera_io_r(ispif->base + ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); data |= cid_mask; msm_camera_io_w(data, ispif->base + ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); break; case RDI2: data = msm_camera_io_r(ispif->base + ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); data |= cid_mask; msm_camera_io_w(data, ispif->base + ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); break; } mutex_unlock(&ispif->mutex); } static int32_t msm_ispif_validate_intf_status(struct ispif_device *ispif, uint8_t intftype, uint8_t vfe_intf) { int32_t rc = 0; uint32_t data = 0; mutex_lock(&ispif->mutex); switch (intftype) { case PIX0: data = msm_camera_io_r(ispif->base + ISPIF_PIX_0_STATUS_ADDR + (0x200 * vfe_intf)); break; case RDI0: data = msm_camera_io_r(ispif->base + ISPIF_RDI_0_STATUS_ADDR + (0x200 * vfe_intf)); break; case PIX1: data = msm_camera_io_r(ispif->base + ISPIF_PIX_1_STATUS_ADDR + (0x200 * vfe_intf)); break; case RDI1: data = msm_camera_io_r(ispif->base + ISPIF_RDI_1_STATUS_ADDR + (0x200 * vfe_intf)); break; case RDI2: data = msm_camera_io_r(ispif->base + ISPIF_RDI_2_STATUS_ADDR + (0x200 * vfe_intf)); break; } if ((data & 0xf) != 0xf) rc = -EBUSY; mutex_unlock(&ispif->mutex); return rc; } static int msm_ispif_config(struct ispif_device *ispif, struct msm_ispif_params_list *params_list) { uint32_t params_len; struct msm_ispif_params *ispif_params; int rc = 0, i = 0; uint8_t intftype; uint8_t vfe_intf; params_len = params_list->len; ispif_params = params_list->params; CDBG("Enable interface\n"); msm_camera_io_w(0x00000000, ispif->base + ISPIF_IRQ_MASK_ADDR); msm_camera_io_w(0x00000000, ispif->base + ISPIF_IRQ_MASK_1_ADDR); msm_camera_io_w(0x00000000, ispif->base + ISPIF_IRQ_MASK_2_ADDR); for (i = 0; i < params_len; i++) { intftype = ispif_params[i].intftype; vfe_intf = ispif_params[i].vfe_intf; CDBG("%s intftype %x, vfe_intf %d, csid %d\n", __func__, intftype, vfe_intf, ispif_params[i].csid); if ((intftype >= INTF_MAX) || (ispif->csid_version <= CSID_VERSION_V2 && vfe_intf > VFE0) || (ispif->csid_version == CSID_VERSION_V3 && vfe_intf >= VFE_MAX)) { pr_err("%s: intftype / vfe intf not valid\n", __func__); return -EINVAL; } rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf); if (rc < 0) { pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc); return rc; } msm_ispif_sel_csid_core(ispif, intftype, ispif_params[i].csid, vfe_intf); msm_ispif_enable_intf_cids(ispif, intftype, ispif_params[i].cid_mask, vfe_intf); } msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base + ISPIF_IRQ_MASK_ADDR); msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base + ISPIF_IRQ_CLEAR_ADDR); 
msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base + ISPIF_IRQ_MASK_1_ADDR); msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base + ISPIF_IRQ_CLEAR_1_ADDR); msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base + ISPIF_IRQ_MASK_2_ADDR); msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base + ISPIF_IRQ_CLEAR_2_ADDR); msm_camera_io_w(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR); return rc; } static uint32_t msm_ispif_get_cid_mask(struct ispif_device *ispif, uint16_t intftype, uint8_t vfe_intf) { uint32_t mask = 0; switch (intftype) { case PIX0: mask = msm_camera_io_r(ispif->base + ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); break; case RDI0: mask = msm_camera_io_r(ispif->base + ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); break; case PIX1: mask = msm_camera_io_r(ispif->base + ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); break; case RDI1: mask = msm_camera_io_r(ispif->base + ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); break; case RDI2: mask = msm_camera_io_r(ispif->base + ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf)); break; default: break; } return mask; } static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint16_t intfmask, uint8_t intf_cmd_mask, uint8_t vfe_intf) { uint8_t vc = 0, val = 0; uint16_t mask = intfmask, intfnum = 0; uint32_t cid_mask = 0; uint32_t global_intf_cmd_mask1 = 0xFFFFFFFF; while (mask != 0) { if (!(intfmask & (0x1 << intfnum))) { mask >>= 1; intfnum++; continue; } cid_mask = msm_ispif_get_cid_mask(ispif, intfnum, vfe_intf); vc = 0; while (cid_mask != 0) { if ((cid_mask & 0xf) != 0x0) { if (intfnum != RDI2) { val = (intf_cmd_mask>>(vc*2)) & 0x3; ispif->global_intf_cmd_mask |= (0x3 << ((vc * 2) + (intfnum * 8))); ispif->global_intf_cmd_mask &= ~((0x3 & ~val) << ((vc * 2) + (intfnum * 8))); } else global_intf_cmd_mask1 &= ~((0x3 & ~intf_cmd_mask) << ((vc * 2) + 8)); } vc++; cid_mask >>= 4; } mask >>= 1; intfnum++; } msm_camera_io_w(ispif->global_intf_cmd_mask, ispif->base + ISPIF_INTF_CMD_ADDR + (0x200 * vfe_intf)); if (global_intf_cmd_mask1 != 0xFFFFFFFF) msm_camera_io_w(global_intf_cmd_mask1, ispif->base + ISPIF_INTF_CMD_1_ADDR + (0x200 * vfe_intf)); } static int msm_ispif_abort_intf_transfer(struct ispif_device *ispif, uint16_t intfmask, uint8_t vfe_intf) { int rc = 0; uint8_t intf_cmd_mask = 0xAA; uint16_t intfnum = 0, mask = intfmask; mutex_lock(&ispif->mutex); CDBG("%s intfmask %x intf_cmd_mask %x\n", __func__, intfmask, intf_cmd_mask); msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf); while (mask != 0) { if (intfmask & (0x1 << intfnum)) ispif->global_intf_cmd_mask |= (0xFF << (intfnum * 8)); mask >>= 1; intfnum++; if (intfnum == RDI2) break; } mutex_unlock(&ispif->mutex); return rc; } static int msm_ispif_start_intf_transfer(struct ispif_device *ispif, uint16_t intfmask, uint8_t vfe_intf) { uint8_t intf_cmd_mask = 0x55; int rc = 0; mutex_lock(&ispif->mutex); rc = msm_ispif_intf_reset(ispif, intfmask, vfe_intf); CDBG("%s intfmask start after%x intf_cmd_mask %x\n", __func__, intfmask, intf_cmd_mask); msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf); mutex_unlock(&ispif->mutex); return rc; } static int msm_ispif_stop_intf_transfer(struct ispif_device *ispif, uint16_t intfmask, uint8_t vfe_intf) { int rc = 0; uint8_t intf_cmd_mask = 0x00; uint16_t intfnum = 0, mask = intfmask; mutex_lock(&ispif->mutex); CDBG("%s intfmask %x intf_cmd_mask %x\n", __func__, intfmask, intf_cmd_mask); msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf); while (mask != 0) { if 
(intfmask & (0x1 << intfnum)) { switch (intfnum) { case PIX0: while ((msm_camera_io_r(ispif->base + ISPIF_PIX_0_STATUS_ADDR + (0x200 * vfe_intf)) & 0xf) != 0xf) { CDBG("Wait for pix0 Idle\n"); } break; case RDI0: while ((msm_camera_io_r(ispif->base + ISPIF_RDI_0_STATUS_ADDR + (0x200 * vfe_intf)) & 0xf) != 0xf) { CDBG("Wait for rdi0 Idle\n"); } break; case PIX1: while ((msm_camera_io_r(ispif->base + ISPIF_PIX_1_STATUS_ADDR + (0x200 * vfe_intf)) & 0xf) != 0xf) { CDBG("Wait for pix1 Idle\n"); } break; case RDI1: while ((msm_camera_io_r(ispif->base + ISPIF_RDI_1_STATUS_ADDR + (0x200 * vfe_intf)) & 0xf) != 0xf) { CDBG("Wait for rdi1 Idle\n"); } break; case RDI2: while ((msm_camera_io_r(ispif->base + ISPIF_RDI_2_STATUS_ADDR + (0x200 * vfe_intf)) & 0xf) != 0xf) { CDBG("Wait for rdi2 Idle\n"); } break; default: break; } if (intfnum != RDI2) ispif->global_intf_cmd_mask |= (0xFF << (intfnum * 8)); } mask >>= 1; intfnum++; } mutex_unlock(&ispif->mutex); return rc; } static int msm_ispif_subdev_video_s_stream(struct v4l2_subdev *sd, int enable) { struct ispif_device *ispif = (struct ispif_device *)v4l2_get_subdevdata(sd); uint32_t cmd = enable & ((1<<ISPIF_S_STREAM_SHIFT)-1); uint16_t intf = enable >> ISPIF_S_STREAM_SHIFT; uint8_t vfe_intf = enable >> ISPIF_VFE_INTF_SHIFT; int rc = -EINVAL; CDBG("%s enable %x, cmd %x, intf %x\n", __func__, enable, cmd, intf); BUG_ON(!ispif); if ((ispif->csid_version <= CSID_VERSION_V2 && vfe_intf > VFE0) || (ispif->csid_version == CSID_VERSION_V3 && vfe_intf >= VFE_MAX)) { pr_err("%s invalid csid version %x && vfe intf %d\n", __func__, ispif->csid_version, vfe_intf); return rc; } switch (cmd) { case ISPIF_ON_FRAME_BOUNDARY: rc = msm_ispif_start_intf_transfer(ispif, intf, vfe_intf); break; case ISPIF_OFF_FRAME_BOUNDARY: rc = msm_ispif_stop_intf_transfer(ispif, intf, vfe_intf); break; case ISPIF_OFF_IMMEDIATELY: rc = msm_ispif_abort_intf_transfer(ispif, intf, vfe_intf); break; default: break; } return rc; } static void send_rdi_sof(struct ispif_device *ispif, enum msm_ispif_intftype interface, int count) { struct rdi_count_msg sof_msg; sof_msg.rdi_interface = interface; sof_msg.count = count; v4l2_subdev_notify(&ispif->subdev, NOTIFY_AXI_RDI_SOF_COUNT, (void *)&sof_msg); } static void ispif_do_tasklet(unsigned long data) { unsigned long flags; struct ispif_isr_queue_cmd *qcmd = NULL; struct ispif_device *ispif; ispif = (struct ispif_device *)data; while (atomic_read(&ispif_irq_cnt)) { spin_lock_irqsave(&ispif_tasklet_lock, flags); qcmd = list_first_entry(&ispif_tasklet_q, struct ispif_isr_queue_cmd, list); atomic_sub(1, &ispif_irq_cnt); if (!qcmd) { spin_unlock_irqrestore(&ispif_tasklet_lock, flags); return; } list_del(&qcmd->list); spin_unlock_irqrestore(&ispif_tasklet_lock, flags); kfree(qcmd); } } static void ispif_process_irq(struct ispif_device *ispif, struct ispif_irq_status *out) { unsigned long flags; struct ispif_isr_queue_cmd *qcmd; qcmd = kzalloc(sizeof(struct ispif_isr_queue_cmd), GFP_ATOMIC); if (!qcmd) { pr_err("ispif_process_irq: qcmd malloc failed!\n"); return; } qcmd->ispifInterruptStatus0 = out->ispifIrqStatus0; qcmd->ispifInterruptStatus1 = out->ispifIrqStatus1; qcmd->ispifInterruptStatus2 = out->ispifIrqStatus2; if (qcmd->ispifInterruptStatus0 & ISPIF_IRQ_STATUS_PIX_SOF_MASK) { CDBG("%s: ispif PIX irq status\n", __func__); ispif->pix_sof_count++; v4l2_subdev_notify(&ispif->subdev, NOTIFY_VFE_PIX_SOF_COUNT, (void *)&ispif->pix_sof_count); } if (qcmd->ispifInterruptStatus0 & ISPIF_IRQ_STATUS_RDI0_SOF_MASK) { ispif->rdi0_sof_count++; CDBG("%s: ispif 
RDI0 irq status, counter = %d", __func__, ispif->rdi0_sof_count); send_rdi_sof(ispif, RDI_0, ispif->rdi0_sof_count); } if (qcmd->ispifInterruptStatus1 & ISPIF_IRQ_STATUS_RDI1_SOF_MASK) { ispif->rdi1_sof_count++; CDBG("%s: ispif RDI1 irq status, counter = %d", __func__, ispif->rdi1_sof_count); send_rdi_sof(ispif, RDI_1, ispif->rdi1_sof_count); } if (qcmd->ispifInterruptStatus2 & ISPIF_IRQ_STATUS_RDI2_SOF_MASK) { ispif->rdi2_sof_count++; CDBG("%s: ispif RDI2 irq status, counter = %d", __func__, ispif->rdi2_sof_count); send_rdi_sof(ispif, RDI_2, ispif->rdi2_sof_count); } spin_lock_irqsave(&ispif_tasklet_lock, flags); list_add_tail(&qcmd->list, &ispif_tasklet_q); atomic_add(1, &ispif_irq_cnt); spin_unlock_irqrestore(&ispif_tasklet_lock, flags); tasklet_schedule(&ispif->ispif_tasklet); return; } static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out, void *data) { uint32_t status0 = 0, status1 = 0, status2 = 0; struct ispif_device *ispif = (struct ispif_device *)data; out->ispifIrqStatus0 = msm_camera_io_r(ispif->base + ISPIF_IRQ_STATUS_ADDR); out->ispifIrqStatus1 = msm_camera_io_r(ispif->base + ISPIF_IRQ_STATUS_1_ADDR); out->ispifIrqStatus2 = msm_camera_io_r(ispif->base + ISPIF_IRQ_STATUS_2_ADDR); msm_camera_io_w(out->ispifIrqStatus0, ispif->base + ISPIF_IRQ_CLEAR_ADDR); msm_camera_io_w(out->ispifIrqStatus1, ispif->base + ISPIF_IRQ_CLEAR_1_ADDR); msm_camera_io_w(out->ispifIrqStatus2, ispif->base + ISPIF_IRQ_CLEAR_2_ADDR); CDBG("%s: irq vfe0 Irq_status0 = 0x%x, 1 = 0x%x, 2 = 0x%x\n", __func__, out->ispifIrqStatus0, out->ispifIrqStatus1, out->ispifIrqStatus2); if (out->ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK || out->ispifIrqStatus1 & ISPIF_IRQ_STATUS_1_MASK || out->ispifIrqStatus2 & ISPIF_IRQ_STATUS_2_MASK) { if (out->ispifIrqStatus0 & (0x1 << RESET_DONE_IRQ)) complete(&ispif->reset_complete); if (out->ispifIrqStatus0 & (0x1 << PIX_INTF_0_OVERFLOW_IRQ)) pr_err("%s: pix intf 0 overflow.\n", __func__); if (out->ispifIrqStatus0 & (0x1 << RAW_INTF_0_OVERFLOW_IRQ)) pr_err("%s: rdi intf 0 overflow.\n", __func__); if (out->ispifIrqStatus1 & (0x1 << RAW_INTF_1_OVERFLOW_IRQ)) pr_err("%s: rdi intf 1 overflow.\n", __func__); if (out->ispifIrqStatus2 & (0x1 << RAW_INTF_2_OVERFLOW_IRQ)) pr_err("%s: rdi intf 2 overflow.\n", __func__); if ((out->ispifIrqStatus0 & ISPIF_IRQ_STATUS_SOF_MASK) || (out->ispifIrqStatus1 & ISPIF_IRQ_STATUS_SOF_MASK) || (out->ispifIrqStatus2 & ISPIF_IRQ_STATUS_RDI2_SOF_MASK)) ispif_process_irq(ispif, out); } if (ispif->csid_version == CSID_VERSION_V3) { status0 = msm_camera_io_r(ispif->base + ISPIF_IRQ_STATUS_ADDR + 0x200); msm_camera_io_w(status0, ispif->base + ISPIF_IRQ_CLEAR_ADDR + 0x200); status1 = msm_camera_io_r(ispif->base + ISPIF_IRQ_STATUS_1_ADDR + 0x200); msm_camera_io_w(status1, ispif->base + ISPIF_IRQ_CLEAR_1_ADDR + 0x200); status2 = msm_camera_io_r(ispif->base + ISPIF_IRQ_STATUS_2_ADDR + 0x200); msm_camera_io_w(status2, ispif->base + ISPIF_IRQ_CLEAR_2_ADDR + 0x200); CDBG("%s: irq vfe1 Irq_status0 = 0x%x, 1 = 0x%x, 2 = 0x%x\n", __func__, status0, status1, status2); } msm_camera_io_w(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR); } static irqreturn_t msm_io_ispif_irq(int irq_num, void *data) { struct ispif_irq_status irq; msm_ispif_read_irq_status(&irq, data); return IRQ_HANDLED; } static struct msm_cam_clk_info ispif_8960_clk_info[] = { {"csi_pix_clk", 0}, {"csi_rdi_clk", 0}, {"csi_pix1_clk", 0}, {"csi_rdi1_clk", 0}, {"csi_rdi2_clk", 0}, }; static int msm_ispif_init(struct ispif_device *ispif, const uint32_t *csid_version) { 
int rc = 0; CDBG("%s called %d\n", __func__, __LINE__); if (ispif->ispif_state == ISPIF_POWER_UP) { pr_err("%s: ispif invalid state %d\n", __func__, ispif->ispif_state); rc = -EINVAL; return rc; } spin_lock_init(&ispif_tasklet_lock); INIT_LIST_HEAD(&ispif_tasklet_q); rc = request_irq(ispif->irq->start, msm_io_ispif_irq, IRQF_TRIGGER_RISING, "ispif", ispif); ispif->global_intf_cmd_mask = 0xFFFFFFFF; init_completion(&ispif->reset_complete); tasklet_init(&ispif->ispif_tasklet, ispif_do_tasklet, (unsigned long)ispif); ispif->csid_version = *csid_version; if (ispif->csid_version < CSID_VERSION_V2) { rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info, ispif->ispif_clk, 2, 1); if (rc < 0) return rc; } else if (ispif->csid_version == CSID_VERSION_V2) { rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info, ispif->ispif_clk, ARRAY_SIZE(ispif_8960_clk_info), 1); if (rc < 0) return rc; } rc = msm_ispif_reset(ispif); ispif->ispif_state = ISPIF_POWER_UP; return rc; } static void msm_ispif_release(struct ispif_device *ispif) { if (ispif->ispif_state != ISPIF_POWER_UP) { pr_err("%s: ispif invalid state %d\n", __func__, ispif->ispif_state); return; } CDBG("%s, free_irq\n", __func__); free_irq(ispif->irq->start, ispif); tasklet_kill(&ispif->ispif_tasklet); if (ispif->csid_version < CSID_VERSION_V2) { msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info, ispif->ispif_clk, 2, 0); } else if (ispif->csid_version == CSID_VERSION_V2) { msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info, ispif->ispif_clk, ARRAY_SIZE(ispif_8960_clk_info), 0); } ispif->ispif_state = ISPIF_POWER_DOWN; } static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg) { long rc = 0; struct ispif_cfg_data cdata; struct ispif_device *ispif = (struct ispif_device *)v4l2_get_subdevdata(sd); if (copy_from_user(&cdata, (void *)arg, sizeof(struct ispif_cfg_data))) return -EFAULT; CDBG("%s cfgtype = %d\n", __func__, cdata.cfgtype); switch (cdata.cfgtype) { case ISPIF_INIT: CDBG("%s csid_version = %x\n", __func__, cdata.cfg.csid_version); rc = msm_ispif_init(ispif, &cdata.cfg.csid_version); break; case ISPIF_SET_CFG: CDBG("%s len = %d, intftype = %d,.cid_mask = %d, csid = %d\n", __func__, cdata.cfg.ispif_params.len, cdata.cfg.ispif_params.params[0].intftype, cdata.cfg.ispif_params.params[0].cid_mask, cdata.cfg.ispif_params.params[0].csid); rc = msm_ispif_config(ispif, &cdata.cfg.ispif_params); break; case ISPIF_SET_ON_FRAME_BOUNDARY: case ISPIF_SET_OFF_FRAME_BOUNDARY: case ISPIF_SET_OFF_IMMEDIATELY: rc = msm_ispif_subdev_video_s_stream(sd, cdata.cfg.cmd); break; case ISPIF_RELEASE: msm_ispif_release(ispif); break; default: break; } return rc; } static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { switch (cmd) { case VIDIOC_MSM_ISPIF_CFG: return msm_ispif_cmd(sd, arg); default: return -ENOIOCTLCMD; } } static struct v4l2_subdev_core_ops msm_ispif_subdev_core_ops = { .g_chip_ident = &msm_ispif_subdev_g_chip_ident, .ioctl = &msm_ispif_subdev_ioctl, }; static struct v4l2_subdev_video_ops msm_ispif_subdev_video_ops = { .s_stream = &msm_ispif_subdev_video_s_stream, }; static const struct v4l2_subdev_ops msm_ispif_subdev_ops = { .core = &msm_ispif_subdev_core_ops, .video = &msm_ispif_subdev_video_ops, }; static const struct v4l2_subdev_internal_ops msm_ispif_internal_ops; static int __devinit ispif_probe(struct platform_device *pdev) { int rc = 0; struct msm_cam_subdev_info sd_info; struct ispif_device *ispif; CDBG("%s\n", __func__); ispif = kzalloc(sizeof(struct ispif_device), 
GFP_KERNEL); if (!ispif) { pr_err("%s: no enough memory\n", __func__); return -ENOMEM; } v4l2_subdev_init(&ispif->subdev, &msm_ispif_subdev_ops); ispif->subdev.internal_ops = &msm_ispif_internal_ops; ispif->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(ispif->subdev.name, ARRAY_SIZE(ispif->subdev.name), "msm_ispif"); v4l2_set_subdevdata(&ispif->subdev, ispif); platform_set_drvdata(pdev, &ispif->subdev); snprintf(ispif->subdev.name, sizeof(ispif->subdev.name), "ispif"); mutex_init(&ispif->mutex); if (pdev->dev.of_node) of_property_read_u32((&pdev->dev)->of_node, "cell-index", &pdev->id); ispif->mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ispif"); if (!ispif->mem) { pr_err("%s: no mem resource?\n", __func__); rc = -ENODEV; goto ispif_no_resource; } ispif->irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ispif"); if (!ispif->irq) { pr_err("%s: no irq resource?\n", __func__); rc = -ENODEV; goto ispif_no_resource; } ispif->io = request_mem_region(ispif->mem->start, resource_size(ispif->mem), pdev->name); if (!ispif->io) { pr_err("%s: no valid mem region\n", __func__); rc = -EBUSY; goto ispif_no_resource; } ispif->base = ioremap(ispif->mem->start, resource_size(ispif->mem)); if (!ispif->base) { rc = -ENOMEM; goto ispif_no_mem; } ispif->pdev = pdev; sd_info.sdev_type = ISPIF_DEV; sd_info.sd_index = pdev->id; sd_info.irq_num = ispif->irq->start; msm_cam_register_subdev_node(&ispif->subdev, &sd_info); media_entity_init(&ispif->subdev.entity, 0, NULL, 0); ispif->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV; ispif->subdev.entity.group_id = ISPIF_DEV; ispif->subdev.entity.name = pdev->name; ispif->subdev.entity.revision = ispif->subdev.devnode->num; ispif->ispif_state = ISPIF_POWER_DOWN; return 0; ispif_no_mem: release_mem_region(ispif->mem->start, resource_size(ispif->mem)); ispif_no_resource: mutex_destroy(&ispif->mutex); kfree(ispif); return rc; } static const struct of_device_id msm_ispif_dt_match[] = { {.compatible = "qcom,ispif"}, }; MODULE_DEVICE_TABLE(of, msm_ispif_dt_match); static struct platform_driver ispif_driver = { .probe = ispif_probe, .driver = { .name = MSM_ISPIF_DRV_NAME, .owner = THIS_MODULE, .of_match_table = msm_ispif_dt_match, }, }; static int __init msm_ispif_init_module(void) { return platform_driver_register(&ispif_driver); } static void __exit msm_ispif_exit_module(void) { platform_driver_unregister(&ispif_driver); } module_init(msm_ispif_init_module); module_exit(msm_ispif_exit_module); MODULE_DESCRIPTION("MSM ISP Interface driver"); MODULE_LICENSE("GPL v2");
androidrbox/android_kernel_amazon_bueller
drivers/media/video/msm/csi/msm_ispif.c
C
gpl-2.0
25,457
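Several routines above (msm_ispif_intf_reset, msm_ispif_intf_cmd, msm_ispif_stop_intf_transfer) share the same mask-walking idiom: shift a working copy of the interface mask right while counting the interface number, and act only on the bits that are set. A trivial stand-alone illustration, with a made-up mask value:

/* Visit every interface whose bit is set in intfmask, lowest bit first. */
#include <stdio.h>

int main(void)
{
    unsigned short intfmask = 0x15;        /* bits 0, 2 and 4 set */
    unsigned short mask = intfmask;
    int intfnum = 0;

    while (mask != 0) {
        if (intfmask & (0x1 << intfnum))
            printf("interface %d selected\n", intfnum);
        mask >>= 1;
        intfnum++;
    }
    return 0;                              /* prints interfaces 0, 2 and 4 */
}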
/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ /* Check that heap-structure is ok */ #include "heapdef.h" static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records, ulong blength, my_bool print_status); static int check_one_rb_key(HP_INFO *info, uint keynr, ulong records, my_bool print_status); /* Check if keys and rows are ok in a heap table SYNOPSIS heap_check_heap() info Table handler print_status Prints some extra status NOTES Doesn't change the state of the table handler RETURN VALUES 0 ok 1 error */ int heap_check_heap(HP_INFO *info, my_bool print_status) { int error; uint key; ulong records=0, deleted=0, pos, next_block; HP_SHARE *share=info->s; HP_INFO save_info= *info; /* Needed because scan_init */ DBUG_ENTER("heap_check_heap"); for (error=key= 0 ; key < share->keys ; key++) { if (share->keydef[key].algorithm == HA_KEY_ALG_BTREE) error|= check_one_rb_key(info, key, share->records, print_status); else error|= check_one_key(share->keydef + key, key, share->records, share->blength, print_status); } /* This is basicly the same code as in hp_scan, but we repeat it here to get shorter DBUG log file. */ for (pos=next_block= 0 ; ; pos++) { if (pos < next_block) { info->current_ptr+= share->block.recbuffer; } else { next_block+= share->block.records_in_block; if (next_block >= share->records+share->deleted) { next_block= share->records+share->deleted; if (pos >= next_block) break; /* End of file */ } } hp_find_record(info,pos); if (!info->current_ptr[share->reclength]) deleted++; else records++; } if (records != share->records || deleted != share->deleted) { DBUG_PRINT("error",("Found rows: %lu (%lu) deleted %lu (%lu)", records, (ulong) share->records, deleted, (ulong) share->deleted)); error= 1; } *info= save_info; DBUG_RETURN(error); } static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records, ulong blength, my_bool print_status) { int error; ulong i,found,max_links,seek,links; ulong rec_link; /* Only used with debugging */ ulong hash_buckets_found; HASH_INFO *hash_info; error=0; hash_buckets_found= 0; for (i=found=max_links=seek=0 ; i < records ; i++) { hash_info=hp_find_hash(&keydef->block,i); if (hash_info->hash_of_key != hp_rec_hashnr(keydef, hash_info->ptr_to_rec)) { DBUG_PRINT("error", ("Found row with wrong hash_of_key at position %lu", i)); error= 1; } if (hp_mask(hash_info->hash_of_key, blength, records) == i) { found++; seek++; links=1; while ((hash_info=hash_info->next_key) && found < records + 1) { seek+= ++links; if ((rec_link= hp_mask(hash_info->hash_of_key, blength, records)) != i) { DBUG_PRINT("error", ("Record in wrong link: Link %lu Record: 0x%lx Record-link %lu", i, (long) hash_info->ptr_to_rec, rec_link)); error=1; } else found++; } if (links > max_links) max_links=links; hash_buckets_found++; } } if (found != records) { DBUG_PRINT("error",("Found %ld of %ld records", found, records)); error=1; } if (keydef->hash_buckets != 
hash_buckets_found) { DBUG_PRINT("error",("Found %ld buckets, stats shows %ld buckets", hash_buckets_found, (long) keydef->hash_buckets)); error=1; } DBUG_PRINT("info", ("key: %u records: %ld seeks: %lu max links: %lu " "hitrate: %.2f buckets: %lu", keynr, records,seek,max_links, (float) seek / (float) (records ? records : 1), hash_buckets_found)); if (print_status) printf("Key: %u records: %ld seeks: %lu max links: %lu " "hitrate: %.2f buckets: %lu\n", keynr, records, seek, max_links, (float) seek / (float) (records ? records : 1), hash_buckets_found); return error; } static int check_one_rb_key(HP_INFO *info, uint keynr, ulong records, my_bool print_status) { HP_KEYDEF *keydef= info->s->keydef + keynr; int error= 0; ulong found= 0; uchar *key, *recpos; uint key_length; uint not_used[2]; if ((key= tree_search_edge(&keydef->rb_tree, info->parents, &info->last_pos, offsetof(TREE_ELEMENT, left)))) { do { memcpy(&recpos, key + (*keydef->get_key_length)(keydef,key), sizeof(uchar*)); key_length= hp_rb_make_key(keydef, info->recbuf, recpos, 0); if (ha_key_cmp(keydef->seg, (uchar*) info->recbuf, (uchar*) key, key_length, SEARCH_FIND | SEARCH_SAME, not_used)) { error= 1; DBUG_PRINT("error",("Record in wrong link: key: %u Record: 0x%lx\n", keynr, (long) recpos)); } else found++; key= tree_search_next(&keydef->rb_tree, &info->last_pos, offsetof(TREE_ELEMENT, left), offsetof(TREE_ELEMENT, right)); } while (key); } if (found != records) { DBUG_PRINT("error",("Found %lu of %lu records", found, records)); error= 1; } if (print_status) printf("Key: %d records: %ld\n", keynr, records); return error; }
ottok/mariadb-galera-10.0
storage/heap/_check.c
C
gpl-2.0
5,942
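Note: the check_one_key() routine in the heap checker above counts a row as "found" only when hp_mask() maps its stored hash_of_key back onto the bucket currently being scanned, which is the linear-hashing invariant the table must satisfy. The following is a minimal standalone sketch of that masking idea with concrete values; the helper name and main() harness are illustrative only and are not the heap engine's own code.

#include <stdio.h>

/* Linear-hashing style mask (sketch): mask with the full power-of-two
 * table size first; if the result falls outside the range of buckets
 * actually in use, fall back to the half-size mask. */
static unsigned long toy_mask(unsigned long hashnr, unsigned long blength,
                              unsigned long records)
{
  unsigned long pos = hashnr & (blength - 1);
  return (pos < records) ? pos : (hashnr & ((blength >> 1) - 1));
}

int main(void)
{
  /* blength is a power of two covering the used buckets 0..records-1 */
  unsigned long blength = 8, records = 5;
  unsigned long h;
  for (h = 0; h < 16; h++)
    printf("hash %2lu -> bucket %lu\n", h, toy_mask(h, blength, records));
  return 0;
}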
/**************************************************************************** * * Copyright (c) DiBcom SA. All rights reserved. * * THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY * KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR * PURPOSE. * ****************************************************************************/ /************************************************************************************************** * @file "DibBridgeDragonfly.c" * @brief Dragonfly sprecific bridge functionality. * ***************************************************************************************************/ #include "DibBridgeConfig.h" /* Must be first include of all SDK files - Defines compilation options */ #if (USE_DRAGONFLY == 1) #include "DibBridgeTargetDebug.h" #include "DibBridgeCommon.h" #include "DibBridgeTarget.h" #include "DibBridgeMailboxHandler.h" #include "DibBridgeTestIf.h" #include "DibBridge.h" #include "DibBridgeDragonflyRegisters.h" #include "DibBridgeDragonflyTest.h" #include "DibBridgeDragonfly.h" #include "DibBridgeDragonflyData.h" #include "DibBridgeData.h" #if (DRIVER_AND_BRIDGE_MERGED == 0) #include "DibBridgeTargetModule.h" #endif /*DRIVER_AND_BRIDGE_MERGED */ #if (DIB_CHECK_DATA == 1) #include "DibBridgePayloadCheckers.h" static void DibBridgeDragonflyClearCheckStats(struct DibBridgeContext *pContext, uint32_t * RxData); #endif #define MAC_IRQ (1 << 1) #define IRQ_POL_MSK (1 << 4) void IntBridgeGetCpt(struct DibBridgeContext *pContext, uint16_t * Data); /* Bridge 2 Driver message handling function prototype */ void DibB2DFwdMsg(struct DibBridgeContext *pContext, uint32_t Nb, uint16_t * buf); static uint32_t DibBridgeDragonflyReceiveMsg(struct DibBridgeContext *pContext, uint32_t * Data); static DIBDMA DibBridgeDragonflyMsgHandler(struct DibBridgeContext *pContext, struct MsgHeader * pHeader, uint32_t * RxData); static uint32_t DibBridgeDragonflyFormatAddress(struct DibBridgeContext *pContext, uint32_t Addr, uint8_t ByteMode); /** * Retreive the number of free bytes in the HOST mailbox */ static __inline uint32_t IntBridgeDragonflyMailboxSpace(uint32_t rdptr, uint32_t wrptr, uint32_t Size) { uint32_t free; if(rdptr == wrptr) free = Size; else if(rdptr > wrptr) free = (rdptr-wrptr); else free = Size-(wrptr - rdptr); DIB_ASSERT(free >= 4); free -= 4; return free; } /** * Retreive the number of available bytes in the HOST mailbox */ static __inline uint32_t IntBridgeDragonflyMailboxBytes(uint32_t rdptr, uint32_t wrptr, uint32_t Size) { uint32_t nbbytes; if(rdptr == wrptr) nbbytes = 0; else if(wrptr > rdptr) nbbytes = (wrptr-rdptr); else nbbytes = Size-(rdptr - wrptr); return nbbytes; } /**************************************************************************** * Setup chip memory controller ****************************************************************************/ static DIBSTATUS DibBridgeDragonflySetupDma(struct DibBridgeContext *pContext, struct DibBridgeDmaCtx * pDmaCtx) { DIBSTATUS Status = DIBSTATUS_SUCCESS; unsigned char Disable32bitDma = 0; #if (HIGH_SPEED_DMA == 1) /*** Voyager Host issue is supported => Disable 32 bits DMA when the chip used is Voyager ***/ #if (ENG3_COMPATIBILITY == 1) if (pContext->DibChip == DIB_VOYAGER) { Disable32bitDma = 1; } #else /*** Voyager Host issue is not supported => 32 bits DMA can be used ***/ Disable32bitDma = 0; #endif #else /*** HIGH_SPEED_DMA not supported ***/ Disable32bitDma = 1; #endif /*** Use 32 bits 
transfer. Check alignment ***/ if (Disable32bitDma == 0) { uint32_t NbBytes, j; pDmaCtx->Mode = DIBBRIDGE_BIT_MODE_32; pDmaCtx->DmaSize = pDmaCtx->DmaLen; /*-------------- Software management of alignement issues -----------------------*/ /* particular case where only one 32 bit word is involved */ if((pDmaCtx->ChipAddr & 0xFFFFFFFC) == (((pDmaCtx->ChipAddr+pDmaCtx->DmaSize-1) & 0xFFFFFFFC))) { for(j=0; j<pDmaCtx->DmaSize; j++) { if(pDmaCtx->Dir == DIBBRIDGE_DMA_READ) Status = DibBridgeReadReg8(pContext, pDmaCtx->ChipAddr + j, pDmaCtx->pHostAddr + j); else Status = DibBridgeWriteReg8(pContext, pDmaCtx->ChipAddr + j, pDmaCtx->pHostAddr[j]); if(Status != DIBSTATUS_SUCCESS) { DIB_DEBUG(PORT_ERR, (CRB "DibBridgeDragonflySetupDma Failed Idx%d Nb %d " CRA, j, pDmaCtx->DmaSize)); return Status; } } pDmaCtx->ChipAddr += pDmaCtx->DmaSize; pDmaCtx->pHostAddr += pDmaCtx->DmaSize; pDmaCtx->DmaSize = 0; } /* general case: alignement issues are at the beginning and at the end */ else { /* beginning */ if(pDmaCtx->ChipAddr & 3) { NbBytes = 4 - (pDmaCtx->ChipAddr & 3); for(j=0; j<NbBytes; j++) { if(pDmaCtx->Dir == DIBBRIDGE_DMA_READ) Status = DibBridgeReadReg8(pContext, pDmaCtx->ChipAddr + j, pDmaCtx->pHostAddr + j); else Status = DibBridgeWriteReg8(pContext, pDmaCtx->ChipAddr + j, pDmaCtx->pHostAddr[j]); if(Status != DIBSTATUS_SUCCESS) { DIB_DEBUG(PORT_ERR, (CRB "DibBridgeDragonflySetupDma Failed Idx%d Nb %d " CRA, j, NbBytes)); return Status; } } pDmaCtx->DmaSize -= NbBytes; pDmaCtx->ChipAddr += NbBytes; pDmaCtx->pHostAddr += NbBytes; } /* at the end */ if((pDmaCtx->ChipAddr+pDmaCtx->DmaSize) & 3) { NbBytes = ((pDmaCtx->ChipAddr + pDmaCtx->DmaSize) & 3); /* do not transfert these NbBytes in the main transfert */ pDmaCtx->DmaSize -= NbBytes; for(j=0; j<NbBytes; j++) { if(pDmaCtx->Dir == DIBBRIDGE_DMA_READ) Status = DibBridgeReadReg8(pContext, pDmaCtx->ChipAddr + pDmaCtx->DmaSize + j, pDmaCtx->pHostAddr + pDmaCtx->DmaSize + j); else Status = DibBridgeWriteReg8(pContext, pDmaCtx->ChipAddr + pDmaCtx->DmaSize + j, pDmaCtx->pHostAddr[j + pDmaCtx->DmaSize]); if(Status != DIBSTATUS_SUCCESS) { DIB_DEBUG(PORT_ERR, (CRB "DibBridgeDragonflySetupDma Failed Idx%d Nb %d " CRA, j, NbBytes)); return Status; } } } } } else { /*** Force 8 bits transfers ***/ pDmaCtx->Mode = DIBBRIDGE_BIT_MODE_8; pDmaCtx->DmaSize = pDmaCtx->DmaLen; } /** Compute formatted chip address */ pDmaCtx->FmtChipAddr = DibBridgeDragonflyFormatAddress(pContext, pDmaCtx->ChipAddr, pDmaCtx->Mode); return Status; } /**************************************************************************** * Really start target dma. Swap buffer if really need. * ############ WARNING: host buffer will be modified ####################### ****************************************************************************/ DIBDMA DibBridgeDragonflyRequestDma(struct DibBridgeContext *pContext, struct DibBridgeDmaCtx * pDmaCtx) { DIBDMA rc; rc = DibBridgeTargetRequestDma(pContext, pDmaCtx); return rc; } /** * Sends a message to SPARC * Warning!!!!!! the message MUST be a set of uint32_t or int32_t, and the use of * bit Mask is forbidden cause behave differently on little and big endian arch * @param pContext: bridge context * param Data: aligned 32 bits Data pointer. Reference the whole message * param Size: number of bytes of the message * WARNING: the Data buffer can be swapped by this function and should not be used after this call!!! 
*/ static DIBSTATUS DibBridgeDragonflySendMsg(struct DibBridgeContext *pContext, uint32_t * Data, uint32_t Size) { uint8_t Status = DIBSTATUS_ERROR; int32_t MaxRetries = DIB_BRIDGE_MAX_MAILBOX_TRY; uint32_t Rdptr; uint32_t Wrptr; uint32_t Free; DIB_ASSERT((Size & 3) == 0); DIB_ASSERT((Data)); DIB_ASSERT(Size >= 4); DIB_DEBUG(MAILBOX_LOG, (CRB "+SendMsg() Request=> Msg : %x, Size %d" CRA, *Data, Size)); /* Check if there is space in msgbox */ if((Status = DibBridgeReadReg32(pContext, pContext->DragonflyRegisters.MacMbxWrPtrReg, &Wrptr)) != DIBSTATUS_SUCCESS) goto End; DIB_ASSERT((Wrptr & 3) == 0); /* Ensure we have enought place in the mailbow to post this message */ while(MaxRetries > 0) { /* Get MAC read pointer (implemented as follower) */ if((Status = DibBridgeRead32Reg32(pContext, pContext->DragonflyRegisters.MacMbxRdPtrReg, &Rdptr)) != DIBSTATUS_SUCCESS) goto End; DIB_ASSERT((Rdptr & 3) == 0); DIB_ASSERT((Rdptr >= pContext->DragonflyRegisters.MacMbxStart && Rdptr < pContext->DragonflyRegisters.MacMbxEnd)); /* Do not allow to write last byte, this is to avoid overflow when rd==wr msg box is empty */ Free = IntBridgeDragonflyMailboxSpace(Rdptr, Wrptr, pContext->DragonflyRegisters.MacMbxSize); /* get the number of 32 bits words available */ if(Size > Free) { DibMSleep(1); MaxRetries--; } else { /* break successfully the loop */ MaxRetries=-1; } } if(MaxRetries < 0) { struct MsgHeader MsgIn; DIB_DEBUG(MAILBOX_LOG, (CRB "SendMsg() %d bytes available in msg box." CRA, Free)); SerialBufInit(&pContext->RxSerialBuf, Data, 32); MsgHeaderUnpack(&pContext->RxSerialBuf, &MsgIn); if(Wrptr + Size > pContext->DragonflyRegisters.MacMbxEnd) { uint32_t len; /* Transfer must be done in two step */ len = pContext->DragonflyRegisters.MacMbxEnd - Wrptr; if((Status = DibBridgeWriteBuffer32(pContext, Wrptr, Data, len)) != DIBSTATUS_SUCCESS) goto End; if((Status = DibBridgeWriteBuffer32(pContext, pContext->DragonflyRegisters.MacMbxStart, Data + (len >> 2), Size - len)) != DIBSTATUS_SUCCESS) goto End; Wrptr = pContext->DragonflyRegisters.MacMbxStart + Size - len; } else { /* Transfer can be done in a single step */ if((Status = DibBridgeWriteBuffer32(pContext, Wrptr, Data, Size)) != DIBSTATUS_SUCCESS) goto End; Wrptr += Size; } if(Wrptr == pContext->DragonflyRegisters.MacMbxEnd) Wrptr = pContext->DragonflyRegisters.MacMbxStart; /* Update rd pointer (this trigger an irq in the firmware) */ Status = DibBridgeWriteReg32(pContext, pContext->DragonflyRegisters.MacMbxWrPtrReg, Wrptr); if (MsgIn.MsgId == OUT_MSG_UDIBADAPTER_CFG) DibMSleep(50); } else { DIB_DEBUG(MAILBOX_ERR, (CRB "-SendMsg() Failed Msg box full" CRA)); Status = DIBSTATUS_ERROR; } DIB_DEBUG(MAILBOX_LOG, (CRB "-SendMsg()" CRA)); End: return Status; } /**************************************************************************** * Clear HW interrupt at host interface level ****************************************************************************/ static __inline DIBSTATUS IntBridgeDragonflyClearHostIrq(struct DibBridgeContext *pContext) { DIBSTATUS status = DIBSTATUS_SUCCESS; #if CLEAR_HOST_IRQ_MODE == CLEAR_BY_MESSAGE /* Workaround for concurrent access to apb and demod */ struct MsgHeader MsgOut; DIB_DEBUG(MAILBOX_LOG, (CRB "DibBridgeDragonflySendAck" CRA)); /* Message header */ MsgOut.Type = MSG_TYPE_MAC; MsgOut.MsgId = OUT_MSG_CLEAR_HOST_IRQ; MsgOut.MsgSize = GetWords(MsgHeaderBits, 32); MsgOut.ChipId = MASTER_IDENT; MsgHeaderPackInit(&MsgOut, &pContext->TxSerialBuf); status = DibBridgeDragonflySendMsg(pContext, pContext->TxBuffer, MsgOut.MsgSize * 
4); #endif #if CLEAR_HOST_IRQ_MODE == CLEAR_BY_REGISTER uint32_t tmp; status = DibBridgeReadReg32(pContext, REG_HIF_INT_STAT, &tmp); /* Clear HW IRQ */ #endif return status; } /**************************************************************************** * There was an interrupt. Let's check the necessary action ****************************************************************************/ static DIBDMA DibBridgeDragonflyProcessIrq(struct DibBridgeContext *pContext) { DIBDMA DmaStatus = DIB_NO_IRQ; struct MsgHeader MsgIn; uint32_t * RxData; if(pContext->RxCnt == 0) { #if (INTERRUPT_MODE != USE_POLLING) /* clear hardware interrrupt in anycase since we received interrupt */ if(IntBridgeDragonflyClearHostIrq(pContext) != DIBSTATUS_SUCCESS) return DIB_DEV_FAILED; /* Device failed to respond */ #endif DIB_ASSERT(pContext->HostBuffer); pContext->RxOffset = 0; /* Read N messages if possible */ pContext->RxCnt = DibBridgeDragonflyReceiveMsg(pContext, pContext->HostBuffer); } /* Process the N messages */ if(pContext->RxCnt > 0) { RxData = &pContext->HostBuffer[pContext->RxOffset]; SerialBufInit(&pContext->RxSerialBuf, RxData, 32); MsgHeaderUnpack(&pContext->RxSerialBuf, &MsgIn); SerialBufRestart(&pContext->RxSerialBuf); if((pContext->RxCnt < (MsgIn.MsgSize>>2)) || (MsgIn.MsgSize > pContext->DragonflyRegisters.HostMbxSize)) { DIB_DEBUG(MAILBOX_ERR, (CRB "+RecvMsg() => ERROR: MsgSize %d" CRA, MsgIn.MsgSize)); DIB_DEBUG(MAILBOX_ERR,(CRB "NbBytes received %d" CRA,pContext->RxCnt)); pContext->RxCnt = 0; } else { DmaStatus = DibBridgeDragonflyMsgHandler(pContext, &MsgIn, RxData); pContext->RxOffset += MsgIn.MsgSize; pContext->RxCnt -= (MsgIn.MsgSize << 2); } } return DmaStatus; } /** * Reads ONE message from one of the risc * @param pContext: bridge context * @param Data: Buffer owning the message, header included * @param nb_words: number of 32 bit words available in the mailbox * @return number of 32 bit words of the message */ static uint32_t DibBridgeDragonflyReceiveMsg(struct DibBridgeContext *pContext, uint32_t * Data) { uint32_t NbBytes; uint32_t rdptr; uint32_t wrptr; DIBSTATUS Status = DIBSTATUS_SUCCESS; /* Check if there is space in msgbox */ if((Status = DibBridgeReadReg32(pContext, pContext->DragonflyRegisters.HostMbxRdPtrReg, &rdptr) != DIBSTATUS_SUCCESS)) goto End; if((Status = DibBridgeRead32Reg32(pContext, pContext->DragonflyRegisters.HostMbxWrPtrReg, &wrptr) != DIBSTATUS_SUCCESS)) goto End; DIB_ASSERT((wrptr & 3) == 0); NbBytes = IntBridgeDragonflyMailboxBytes(rdptr, wrptr, pContext->DragonflyRegisters.HostMbxSize); DIB_ASSERT((NbBytes & 3) == 0); if(NbBytes > 0) { uint32_t len = 0; DIB_DEBUG(IRQ_LOG, (CRB "IRQ HOST, NbBytes in mailbox = %d" CRA, NbBytes)); if(rdptr + NbBytes > pContext->DragonflyRegisters.HostMbxEnd) { /* The mailbox must be read in two parts */ len = pContext->DragonflyRegisters.HostMbxEnd - rdptr; if((Status = DibBridgeReadBuffer32(pContext, rdptr, Data, len)) != DIBSTATUS_SUCCESS) goto End; if((Status = DibBridgeReadBuffer32(pContext, pContext->DragonflyRegisters.HostMbxStart, Data + (len / 4), NbBytes - len)) != DIBSTATUS_SUCCESS) goto End; } else { /* The mailbox can be read in one pass */ if((Status = DibBridgeReadBuffer32(pContext, rdptr, Data, NbBytes)) != DIBSTATUS_SUCCESS) goto End; } if((Status = DibBridgeWriteReg32(pContext, pContext->DragonflyRegisters.HostMbxRdPtrReg, wrptr) != DIBSTATUS_SUCCESS)) goto End; } End: if(Status == DIBSTATUS_SUCCESS) return NbBytes; else return 0; } /**************************************************************************** * 
Parses and processes the most prioritary messages, and passes the others * to the upper layer. Returns the DMA state: no DMA. done or pending. ****************************************************************************/ #if (mSDK==0) static DIBDMA DibBridgeDragonflyDataMsgHandler(struct DibBridgeContext * pContext) { struct DibBridgeDmaFlags flags; struct MsgData MsgIn; DIBDMA DmaStat; MsgDataUnpack(&pContext->RxSerialBuf, &MsgIn); /* struct timeval Time; gettimeofday(&Time, NULL); */ flags.Type = MSG_DATA_TYPE(MsgIn.Format); flags.ItemHdl = MSG_DATA_ITEM_INDEX(MsgIn.Format); flags.BlockId = MSG_DATA_BLOCK_ID(MsgIn.Format); flags.BlockType= MSG_DATA_BLOCK_TYPE(MsgIn.Format); flags.FirstFrag= MSG_DATA_FIRST_FRAG(MsgIn.Format); flags.LastFrag = MSG_DATA_LAST_FRAG(MsgIn.Format); flags.NbRows = MSG_DATA_NB_ROWS(MsgIn.Format); flags.FrameId = MSG_DATA_FRAME_ID(MsgIn.Format); if(flags.Type == FORMAT_MPE || flags.Type == FORMAT_LAST_FRG || flags.Type == FORMAT_FLUSH_SVC) { flags.Prefetch = MSG_DATA_BLOCK_TYPE(MsgIn.Format); } else { flags.Prefetch = 0; } DIB_DEBUG(RAWTS_LOG, (CRB "Min %d Max %d Addr %08x Len %d Rows %d FLAGS : s %d t %d b %d ff %d lf %d frm %d fw %08x" CRA, MsgIn.Min, MsgIn.Max, MsgIn.Add, MsgIn.Len, flags.NbRows, flags.ItemHdl, flags.Type, flags.BlockId, flags.FirstFrag, flags.LastFrag, flags.FrameId, MsgIn.Min)); /* DIB_DEBUG(RAWTS_LOG, (CRB "%d : %d IN_MSG_DATA Type %d len %u" CRA, (int)Time.tv_sec, (int)Time.tv_usec, flags.Type, MsgIn.Len)); DIB_DEBUG(RAWTS_ERR, (CRB "%d : %f IN_MSG_DATA Type %d len %u" CRA, (int)Time.tv_sec, (float)((float)Time.tv_usec/1000.0f), flags.Type, MsgIn.Len));*/ /* DIB_DEBUG(RAWTS_LOG, (CRB CRB "" CRA CRA)); DIB_DEBUG(RAWTS_LOG, (CRB "---" CRA));*/ DmaStat = DibBridgeHighDataMsgHandlerCommon(pContext, MsgIn.Min, MsgIn.Max, MsgIn.Add, MsgIn.Len, &flags); if(DmaStat == DIB_NO_DMA) { DIB_DEBUG(MAILBOX_LOG, (CRB "Spec: Received unknown Type for Data message: %d" CRA, flags.Type)); } return DmaStat; /* Tells do we have a pending DMA or not */ } #endif static DIBDMA DibBridgeDragonflyMsgHandler(struct DibBridgeContext *pContext, struct MsgHeader * pHeader, uint32_t * RxData) { if(pHeader->MsgSize > 0) { DIB_DEBUG(MAILBOX_LOG, (CRB "+RecvMsg() => Msg : id %d, Size %d" CRA, pHeader->MsgId, pHeader->MsgSize)); /* ------------------------------------------------------------------------------------ */ /* Now we have one message, let's check the Type of it */ DIB_DEBUG(IRQ_LOG, (CRB "IRQ: MSG %d, Size %d" CRA, pHeader->MsgId, pHeader->MsgSize)); /* ------------------------------------------------------------------------------------ */ /* It can be either Data (0), CPT (1) or a message to passed up (>= 2) */ /* ------------------------------------------------------------------------------------ */ #if (mSDK == 0) if(pHeader->MsgId == IN_MSG_DATA && pHeader->Type == MSG_TYPE_MAC) { /* This is Data message */ return DibBridgeDragonflyDataMsgHandler(pContext); } /* ------------------------------------------------------------------------------------ */ /*This is an Info message */ else if(pHeader->MsgId == IN_MSG_FRAME_INFO && pHeader->Type == MSG_TYPE_MAC) { return DibBridgeDragonflyInfoMsgHandler(pContext, RxData, pHeader->MsgSize); } else if(pHeader->MsgId == IN_MSG_CTL_MONIT && pHeader->Type == MSG_TYPE_MAC) { #if (DIB_CHECK_DATA == 1) /* Clear Bridge checker statistics */ DibBridgeDragonflyClearCheckStats(pContext, RxData); #endif } else { /* flush buffers after item removal */ if(pHeader->MsgId == IN_MSG_ACK_FREE_ITEM && pHeader->Type == MSG_TYPE_MAC) 
DibBridgeFreeUnusedMpeBuffer(pContext); /* Other: the whole message will be passed up */ DIB_DEBUG(MAILBOX_LOG, (CRB "MSG IN (%d) forwarded " CRA, pHeader->MsgId)); DibB2DFwdMsg(pContext, (pHeader->MsgSize << 2) /*in bytes*/, (uint16_t*)RxData); } #else DIB_DEBUG(MAILBOX_LOG, (CRB "MSG IN (%d) forwarded " CRA, pHeader->MsgId)); DibB2DFwdMsg(pContext, (pHeader->MsgSize << 2) /*in bytes*/, (uint16_t*)RxData); #endif } /* ------------------------------------------------------------------------------------ */ return DIB_NO_DMA; } /**************************************************************************** * Checks message coming from the RISC and acts appropriately ****************************************************************************/ static DIBSTATUS DibBridgeDragonflySendAck(struct DibBridgeContext *pContext, struct DibBridgeDmaFlags *pFlags, uint8_t failed) { struct MsgAckData MsgOut; DIB_DEBUG(MAILBOX_LOG, (CRB "" CRA)); DIB_DEBUG(MAILBOX_LOG, (CRB "DibBridgeDragonflySendAck" CRA)); /* Message header */ MsgOut.Head.Type = MSG_TYPE_MAC; MsgOut.Head.MsgId = (failed > 1) ? 1 : 0; /* DF1 - RESET ; DF0 - ACK */ MsgOut.Head.MsgSize = GetWords(MsgAckDataBits, 32); MsgOut.Head.ChipId = MASTER_IDENT; MsgOut.Status = failed; /*pContext->FecOffset is not used for dragonfly based chipset */ MsgOut.Format = SET_DATA_FORMAT(pFlags->ItemHdl, pFlags->Type, pFlags->FirstFrag, pFlags->LastFrag, pFlags->NbRows, pFlags->BlockType, pFlags->BlockId, pFlags->FrameId); MsgAckDataPackInit(&MsgOut, &pContext->TxSerialBuf); return DibBridgeDragonflySendMsg(pContext, pContext->TxBuffer, MsgOut.Head.MsgSize * 4); } /****************************************************************************** * Dma if finished, acknowledge the firmware and do the job ******************************************************************************/ #if (mSDK == 0) static DIBSTATUS DibBridgeDragonflyProcessDma(struct DibBridgeContext *pContext, struct DibBridgeDmaCtx * pDmaCtx) { DIBSTATUS ret = DIBSTATUS_ERROR; /* every Data message need to be acknowledged */ ret = DibBridgeDragonflySendAck(pContext, &pDmaCtx->DmaFlags, 0); if(ret == DIBSTATUS_SUCCESS) { /* process dma independantly of the architecture */ ret = DibBridgeProcessDmaCommon(pContext, pDmaCtx); } return ret; } #endif /****************************************************************************** * 32 bit address formating for all dragonfly based chipsets ******************************************************************************/ static uint32_t DibBridgeDragonflyFormatAddress(struct DibBridgeContext *pContext, uint32_t Addr, uint8_t ByteMode) { switch(pContext->HostIfMode) { case eSRAM: return DF_ADDR_TO_SRAM(Addr, ByteMode, 1, 0); case eSDIO: return DF_ADDR_TO_SDIO(Addr, 1); case eSPI: return DF_ADDR_TO_SPI(Addr, ByteMode, 1); default: return Addr; } } /****************************************************************************** * 16 bit access to non demod apb address is not working on voyager chipset ******************************************************************************/ static DIBSTATUS IntBridgeVoyagerWrite16Even(struct DibBridgeContext *pContext, uint32_t Address, uint8_t *b, uint32_t len) { uint8_t wa[4] = { 0 }; uint32_t i, FormattedAddr; DIBSTATUS ret = DIBSTATUS_SUCCESS; for (i = 0; i < len; i += 2) { FormattedAddr = DibBridgeDragonflyFormatAddress(pContext, Address + i, DIBBRIDGE_BIT_MODE_32); wa[0] = b[i]; wa[1] = b[i + 1]; if((ret = DibBridgeTargetWrite(pContext, FormattedAddr, DIBBRIDGE_BIT_MODE_32, 4, wa) != DIBSTATUS_SUCCESS)) break; } 
return ret; } static DIBSTATUS IntBridgeVoyagerRead16Even(struct DibBridgeContext *pContext, uint32_t Address, uint8_t *b, uint32_t len) { uint8_t wa[4] = { 0 }; uint32_t i, FormattedAddress; DIBSTATUS ret = DIBSTATUS_SUCCESS; for (i = 0; i < len; i += 2) { FormattedAddress = DibBridgeDragonflyFormatAddress(pContext, Address + i, DIBBRIDGE_BIT_MODE_32); if((ret = DibBridgeTargetRead(pContext, FormattedAddress, DIBBRIDGE_BIT_MODE_32, 4, wa) != DIBSTATUS_SUCCESS)) break; b[i] = wa[0]; b[i+1] = wa[1]; } return ret; } static DIBSTATUS DibBridgeVoyager1PreFormat(struct DibBridgeContext *pContext, uint8_t ByteMode, uint32_t * Addr, uint8_t IsWriteAccess, uint8_t * Buf, uint32_t Cnt) { if(DIB29000_APB_EVEN_ADDR(*Addr, ByteMode)) { if(IsWriteAccess) return IntBridgeVoyagerWrite16Even(pContext, *Addr, Buf, Cnt); else return IntBridgeVoyagerRead16Even(pContext, *Addr, Buf, Cnt); } /* address formating */ *Addr = DibBridgeDragonflyFormatAddress(pContext, *Addr, ByteMode); return DIBSTATUS_CONTINUE; } static DIBSTATUS DibBridgeVoyager1PostFormat(struct DibBridgeContext *pContext, uint8_t ByteMode, uint32_t * Addr, uint8_t IsWriteAccess, uint8_t * Buf, uint32_t Cnt) { return DIBSTATUS_SUCCESS; } static DIBSTATUS DibBridgeNautilus1PreFormat(struct DibBridgeContext *pContext, uint8_t ByteMode, uint32_t * Addr, uint8_t IsWriteAccess, uint8_t * Buf, uint32_t Cnt) { /* address formating */ *Addr = DibBridgeDragonflyFormatAddress(pContext, *Addr, ByteMode); return DIBSTATUS_CONTINUE; } static DIBSTATUS DibBridgeNautilus1PostFormat(struct DibBridgeContext *pContext, uint8_t ByteMode, uint32_t * Addr, uint8_t IsWriteAccess, uint8_t * Buf, uint32_t Cnt) { return DIBSTATUS_SUCCESS; } static uint32_t DibBridgeDragonflyIncrementFormattedAddress(struct DibBridgeContext *pContext, uint32_t InFmtAddr, int32_t Offset) { uint32_t OutFmtAddr = 0, ByteMode, Addr; switch(pContext->HostIfMode) { case eSRAM: Addr = DF_SRAM_TO_ADDR(InFmtAddr); Addr += Offset; ByteMode = (InFmtAddr & 0x06000000) >> 25; OutFmtAddr = DF_ADDR_TO_SRAM(Addr, ByteMode, 1, 0); break; case eSDIO: Addr = DF_SDIO_TO_ADDR(InFmtAddr); Addr += Offset; OutFmtAddr = DF_ADDR_TO_SDIO(Addr, 1); break; case eI2C: OutFmtAddr = InFmtAddr+Offset; break; case eSPI: Addr = DF_SPI_TO_ADDR(InFmtAddr); Addr += Offset; ByteMode = (InFmtAddr & 0x30000000) >> 28; OutFmtAddr = DF_ADDR_TO_SPI(Addr, ByteMode, 1); break; default: break; } return OutFmtAddr; } /****************************************************************************** * Assemble the slice from MpeBufCor to SliceBuf, and set SkipR and SKipC if not already set ******************************************************************************/ /* void DisplaySliceBuf(uint8_t *pSliceBuf, uint32_t NbRows, uint32_t NbCols) { uint32_t i,j; printf(CRB "" CRA); for(i=0; i<NbRows; i++) { for(j=0; j<NbCols; j++) { printf("%02x ",pSliceBuf[i+j*NbRows]); } printf(CRB "" CRA); } printf(CRB "" CRA); } */ /****************************************************************************** * return the bus architecture (32, 16 or 8 bits) ******************************************************************************/ static uint8_t DibBridgeDragonflyGetArchi(struct DibBridgeContext *pContext) { return DIBBRIDGE_BIT_MODE_32; } /****************************************************************************** * clean checker statistics ******************************************************************************/ #if (DIB_CHECK_DATA == 1) static void DibBridgeDragonflyClearCheckStats(struct DibBridgeContext *pContext, uint32_t * RxData) 
{ enum DibDataType FilterType; struct MsgCtrlMonit Msg; ELEM_HDL ItemHdl; FILTER_HDL FilterHdl; MsgCtrlMonitUnpack(&pContext->RxSerialBuf, &Msg); ItemHdl = Msg.ItemId; /* When ClearMonit message, clear Bridge monitoring info */ if(Msg.Cmd == 1) { FilterHdl = pContext->ItSvc[ItemHdl].FilterParent; FilterType = pContext->FilterInfo[ItemHdl].DataType; /* DVB-H: Clear IP and RTP checker data */ if((FilterType == eMPEFEC) || (FilterType == eMPEIFEC)) { pContext->ItSvc[ItemHdl].CcFailCnt = 0; pContext->ItSvc[ItemHdl].ErrCnt = 0; pContext->ItSvc[ItemHdl].CurCc = 0xffff; } if((FilterType == eDAB)) { pContext->ItSvc[ItemHdl].CcFailCnt = 0; pContext->ItSvc[ItemHdl].ErrCnt = 0; pContext->ItSvc[ItemHdl].CurCc = 0; pContext->ItSvc[ItemHdl].DataLenRx = 0; pContext->ItSvc[ItemHdl].NbMaxFrames = 0; } #if DIB_CHECK_CMMB_DATA == 1 else if(FilterType == eCMMBSVC) { pContext->ItSvc[ItemHdl].CcFailCnt = 0; pContext->ItSvc[ItemHdl].ErrCnt = 0; pContext->ItSvc[ItemHdl].CurCc = 0xffff; } #endif #if DIB_CHECK_RAWTS_DATA == 1 /* DVB-T: Clear RAWTS checker data */ else if (FilterType == eTS) { DibSetMemory(&pContext->FilterInfo[FilterHdl].CheckRawTs, 0, sizeof(struct CheckRawTs)); } #endif DIB_DEBUG(MAILBOX_LOG, (CRB "Clear checker stats for Item %d" CRA, ItemHdl)); } } /** Build a message for driver to summarize ip checking */ /* XXX this should not go in the official release */ static void DibBridgeDragonflyForwardCheckStats(struct DibBridgeContext *pContext, ELEM_HDL Item) { struct MsgChecker Msg; FILTER_HDL Filter = pContext->ItSvc[Item].FilterParent; /* Message header */ Msg.Head.Type = MSG_TYPE_MAC; Msg.Head.MsgId = IN_MSG_CHECKER; Msg.Head.MsgSize = GetWords(MsgCheckerBits, 32); Msg.Head.ChipId = HOST_IDENT; Msg.ItemId = Item; #if DIB_CHECK_RAWTS_DATA == 1 if((pContext->FilterInfo[Filter].DataType == eTS) || (pContext->FilterInfo[Filter].DataType == eTDMB)) { Msg.Total = pContext->FilterInfo[Filter].CheckRawTs.TotalNbPackets; Msg.CcFailCnt = pContext->FilterInfo[Filter].CheckRawTs.DiscontinuitiesCount; Msg.ErrCnt = pContext->FilterInfo[Filter].CheckRawTs.CorruptedPacketsCount; } else #endif #if DIB_CHECK_CMMB_DATA == 1 if(pContext->FilterInfo[Filter].DataType == eCMMBSVC) { Msg.CcFailCnt = pContext->ItSvc[Item].CcFailCnt; Msg.ErrCnt = pContext->ItSvc[Item].ErrCnt; } else #endif #if DIB_CHECK_MSC_DATA == 1 if(pContext->FilterInfo[Filter].DataType == eDAB) { #if DIB_DAB_DATA == 1 Msg.Total = pContext->ItSvc[Item].NbMaxFrames; #endif Msg.CcFailCnt = pContext->ItSvc[Item].CcFailCnt; Msg.ErrCnt = pContext->ItSvc[Item].ErrCnt; } else #endif if ((pContext->FilterInfo[Filter].DataType == eMPEFEC) || (pContext->FilterInfo[Filter].DataType == eMPEIFEC)) { Msg.CcFailCnt = pContext->ItSvc[Item].CcFailCnt; Msg.ErrCnt = pContext->ItSvc[Item].ErrCnt; } else { DIB_DEBUG(MAILBOX_ERR, (CRB "DibBridgeDragonflyForwardCheckStats : unsupported data type" CRA)); return; } MsgCheckerPackInit(&Msg, &pContext->TxSerialBuf); DibB2DFwdMsg(pContext, Msg.Head.MsgSize * 4, (uint16_t *)pContext->TxBuffer); } #endif /** * associate svc to item. 
Nothing to do cause we have no idea of what is a svc * @param pContext pointer to the bridge context * @param svc firefly's service (only useful in firefly's case) * @param item item's number concerned */ static void DibBridgeDragonflySetService(struct DibBridgeContext *pContext, uint8_t Svc, ELEM_HDL ItemHdl, FILTER_HDL FilterHdl, enum DibDataType DataType, enum DibDataMode DataMode) { } /** * Indicate to the firmware that buffer reception is aborted due to buffer overflow or memory consideration * @param pContext: bridge context * @param SvcNb: service number that failed */ static DIBSTATUS DibBridgeDragonflySignalBufFail(struct DibBridgeContext *pContext, struct DibBridgeDmaFlags * pFlags, uint8_t Flush) { return DibBridgeDragonflySendAck(pContext, pFlags, (1+Flush)); } #if (DIB_BRIDGE_HBM_PROFILER == 1) /** * Send profiler info to the SPARC */ static DIBSTATUS DibBridgeDragonflyHbmProfiler(struct DibBridgeContext *pContext, uint8_t idx, uint8_t page, uint8_t LastFrag) { return DIBSTATUS_ERROR; } #endif /****************************************************************************** * Configure or reconfigure SDIO endianess ******************************************************************************/ void DibBridgeDragonflyConfigureSdioEndianness(struct DibBridgeContext *pContext) { uint32_t Jedec32; uint16_t Jedec16; uint32_t InvJedec; InvJedec = pContext->DragonflyRegisters.JedecValue; DibBridgeSwap32((uint8_t*)&InvJedec, 4); /* toggle sdio endianess if default configuration is not good */ DibBridgeReadReg32(pContext, pContext->DragonflyRegisters.JedecAddr, &Jedec32); DibBridgeReadReg16(pContext, pContext->DragonflyRegisters.JedecAddr, &Jedec16); if((Jedec32 == InvJedec) || (Jedec16 == InvJedec >> 16)) { DibBridgeWriteReg32(pContext, REG_HIF_SDIO_IRQ_EN, 0x0A000000); } } /****************************************************************************** * Init nautilus software specific ******************************************************************************/ DIBSTATUS DibBridgeDragonflyChipsetInit(struct DibBridgeContext *pContext) { pContext->RxCnt = 0; pContext->RxOffset = 0; if((pContext->HostBuffer = (uint32_t *)DibBridgeTargetAllocBuf(pContext->DragonflyRegisters.HostMbxSize)) == 0) { return DIBSTATUS_ERROR; } DibBridgeDragonflyConfigureSdioEndianness(pContext); /* printf("DibBridgeDragonflyChipsetInit:\n"); printf(" JedecAddr = %08x\n", pContext->DragonflyRegisters.JedecAddr ); printf(" JedecValue = %08x\n", pContext->DragonflyRegisters.JedecValue ); printf(" MAC_MBOX_SIZE = %d\n" , pContext->DragonflyRegisters.MacMbxSize ); printf(" MAC_MBOX_END = %08x\n", pContext->DragonflyRegisters.MacMbxEnd ); printf(" MAC_MBOX_START = %08x\n", pContext->DragonflyRegisters.MacMbxStart ); printf(" HOST_MBOX_SIZE = %d\n" , pContext->DragonflyRegisters.HostMbxSize ); printf(" HOST_MBOX_END = %08x\n", pContext->DragonflyRegisters.HostMbxEnd ); printf(" HOST_MBOX_START = %08x\n", pContext->DragonflyRegisters.HostMbxStart ); printf(" HOST_MBOX_RD_PTR = %08x\n", pContext->DragonflyRegisters.HostMbxRdPtrReg); printf(" HOST_MBOX_WR_PTR = %08x\n", pContext->DragonflyRegisters.HostMbxWrPtrReg); printf(" MAC_MBOX_RD_PTR = %08x\n", pContext->DragonflyRegisters.MacMbxRdPtrReg ); printf(" MAC_MBOX_WR_PTR = %08x\n", pContext->DragonflyRegisters.MacMbxWrPtrReg ); */ return DIBSTATUS_SUCCESS; } /****************************************************************************** * Deinit dragonfly and voyager software specific ******************************************************************************/ void 
DibBridgeDragonflyChipsetDeinit(struct DibBridgeContext *pContext) { DibBridgeTargetFreeBuf((uint8_t *)pContext->HostBuffer, pContext->DragonflyRegisters.HostMbxSize); } /****************************************************************************** * Specific output message formating for dragonfly ******************************************************************************/ void DibBridgeDragonflyRegisterIf(struct DibBridgeContext *pContext, uint32_t * Config) { /* upack dragonflu based register config */ pContext->DragonflyRegisters.JedecAddr = Config[0]; pContext->DragonflyRegisters.JedecValue = Config[1]; pContext->DragonflyRegisters.MacMbxSize = Config[2]; pContext->DragonflyRegisters.MacMbxStart = Config[3]; pContext->DragonflyRegisters.MacMbxEnd = Config[4]; pContext->DragonflyRegisters.HostMbxSize = Config[5]; pContext->DragonflyRegisters.HostMbxStart = Config[6]; pContext->DragonflyRegisters.HostMbxEnd = Config[7]; pContext->DragonflyRegisters.HostMbxRdPtrReg = Config[8]; pContext->DragonflyRegisters.HostMbxWrPtrReg = Config[9]; pContext->DragonflyRegisters.MacMbxRdPtrReg = Config[10]; pContext->DragonflyRegisters.MacMbxWrPtrReg = Config[11]; /* specific architecture functions */ switch(pContext->DibChip) { case DIB_VOYAGER: pContext->BridgeChipOps.PreFormat = DibBridgeVoyager1PreFormat; pContext->BridgeChipOps.PostFormat = DibBridgeVoyager1PostFormat; #if ((DIB_BRIDGE_TESTIF_PREINIT == 1) || (DIB_BRIDGE_TESTIF_POSTINIT == 1)) pContext->BridgeChipOps.TestRegister = IntBridgeVoyager1TestRegister; pContext->BridgeChipOps.TestExternalRam = IntBridgeVoyager1TestExternalRam; #endif break; case DIB_NAUTILUS: pContext->BridgeChipOps.PreFormat = DibBridgeNautilus1PreFormat; pContext->BridgeChipOps.PostFormat = DibBridgeNautilus1PostFormat; #if ((DIB_BRIDGE_TESTIF_PREINIT == 1) || (DIB_BRIDGE_TESTIF_POSTINIT == 1)) pContext->BridgeChipOps.TestRegister = IntBridgeNautilus1TestRegister; pContext->BridgeChipOps.TestExternalRam = IntBridgeNautilus1TestExternalRam; #endif break; default: break; } pContext->BridgeChipOps.SendMsg = DibBridgeDragonflySendMsg; pContext->BridgeChipOps.AssembleSlice = DibBridgeDragonflyAssembleSlice; pContext->BridgeChipOps.SendAck = DibBridgeDragonflySendAck; pContext->BridgeChipOps.ProcessIrq = DibBridgeDragonflyProcessIrq; #if (mSDK == 0) pContext->BridgeChipOps.ProcessDma = DibBridgeDragonflyProcessDma; #else pContext->BridgeChipOps.ProcessDma = NULL; #endif pContext->BridgeChipOps.SetupDma = DibBridgeDragonflySetupDma; pContext->BridgeChipOps.RequestDma = DibBridgeDragonflyRequestDma; pContext->BridgeChipOps.GetArch = DibBridgeDragonflyGetArchi; pContext->BridgeChipOps.IncrementFormattedAddress = DibBridgeDragonflyIncrementFormattedAddress; pContext->BridgeChipOps.SignalBufFail = DibBridgeDragonflySignalBufFail; pContext->BridgeChipOps.ChipsetInit = DibBridgeDragonflyChipsetInit; pContext->BridgeChipOps.ChipsetDeinit = DibBridgeDragonflyChipsetDeinit; #if (DIB_BRIDGE_HBM_PROFILER == 1) pContext->BridgeChipOps.HbmProfiler = DibBridgeDragonflyHbmProfiler; #endif #if (DIB_CHECK_DATA == 1) pContext->BridgeChipOps.ClearCheckStats = DibBridgeDragonflyClearCheckStats; pContext->BridgeChipOps.ForwardCheckStats = DibBridgeDragonflyForwardCheckStats; #endif #if ((DIB_BRIDGE_TESTIF_PREINIT == 1) || (DIB_BRIDGE_TESTIF_POSTINIT == 1)) pContext->BridgeChipOps.TestBasicRead = IntBridgeDragonflyTestBasicRead; pContext->BridgeChipOps.TestInternalRam = IntBridgeDragonflyTestInternalRam; pContext->BridgeChipOps.GetRamAddr = DibBridgeDragonflyGetRamAddr; #endif 
pContext->BridgeChipOps.SetService = DibBridgeDragonflySetService; } #endif /* USE_DRAGONFLY */
fefifofum/android_kernel_bq_maxwell2plus_3.0.8
drivers/media/dvb/rkdtv/DIBCom1009XH/Bridge/Common/Dragonfly/DibBridgeDragonfly.c
C
gpl-2.0
39,664
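Note: the mailbox helpers in the bridge file above (IntBridgeDragonflyMailboxSpace / IntBridgeDragonflyMailboxBytes) treat the host/MAC mailbox as a circular buffer bounded by a read and a write pointer, with rd == wr meaning "empty" and 4 bytes held back so a completely full box can never be mistaken for an empty one. Below is a small self-contained sketch of that ring arithmetic with concrete pointer values; function names and the main() harness are illustrative, not the bridge code itself.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Free space in a circular mailbox: rd == wr means empty; reserve 4 bytes
 * so the write pointer never wraps onto the read pointer. */
static uint32_t mbx_space(uint32_t rd, uint32_t wr, uint32_t size)
{
  uint32_t free_bytes = (rd == wr) ? size
                                   : (rd > wr ? rd - wr : size - (wr - rd));
  assert(free_bytes >= 4);
  return free_bytes - 4;
}

/* Bytes currently queued in the mailbox, wrapping at 'size'. */
static uint32_t mbx_bytes(uint32_t rd, uint32_t wr, uint32_t size)
{
  if (rd == wr)
    return 0;
  return (wr > rd) ? wr - rd : size - (rd - wr);
}

int main(void)
{
  uint32_t size = 256;
  printf("empty:   space=%u bytes=%u\n",
         mbx_space(0, 0, size), mbx_bytes(0, 0, size));
  /* write pointer has wrapped past the end of the mailbox region */
  printf("wrapped: space=%u bytes=%u\n",
         mbx_space(200, 40, size), mbx_bytes(200, 40, size));
  return 0;
}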
/* mxb - v4l2 driver for the Multimedia eXtension Board Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de> Visit http://www.mihu.de/linux/saa7146/mxb/ for further details about this card. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define DEBUG_VARIABLE debug #include <media/saa7146_vv.h> #include <media/tuner.h> #include <linux/video_decoder.h> #include "mxb.h" #include "tea6415c.h" #include "tea6420.h" #include "tda9840.h" #define I2C_SAA7111 0x24 #define MXB_BOARD_CAN_DO_VBI(dev) (dev->revision != 0) /* global variable */ static int mxb_num = 0; /* initial frequence the tuner will be tuned to. in verden (lower saxony, germany) 4148 is a channel called "phoenix" */ static int freq = 4148; module_param(freq, int, 0644); MODULE_PARM_DESC(freq, "initial frequency the tuner will be tuned to while setup"); static int debug = 0; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off device debugging (default:off)."); #define MXB_INPUTS 4 enum { TUNER, AUX1, AUX3, AUX3_YC }; static struct v4l2_input mxb_inputs[MXB_INPUTS] = { { TUNER, "Tuner", V4L2_INPUT_TYPE_TUNER, 1, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, { AUX1, "AUX1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, { AUX3, "AUX3 Composite", V4L2_INPUT_TYPE_CAMERA, 4, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, { AUX3_YC, "AUX3 S-Video", V4L2_INPUT_TYPE_CAMERA, 4, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, }; /* this array holds the information, which port of the saa7146 each input actually uses. 
the mxb uses port 0 for every input */ static struct { int hps_source; int hps_sync; } input_port_selection[MXB_INPUTS] = { { SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A }, { SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A }, { SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A }, { SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A }, }; /* this array holds the information of the audio source (mxb_audios), which has to be switched corresponding to the video source (mxb_channels) */ static int video_audio_connect[MXB_INPUTS] = { 0, 1, 3, 3 }; /* these are the necessary input-output-pins for bringing one audio source (see above) to the CD-output */ static struct tea6420_multiplex TEA6420_cd[MXB_AUDIOS+1][2] = { {{1,1,0},{1,1,0}}, /* Tuner */ {{5,1,0},{6,1,0}}, /* AUX 1 */ {{4,1,0},{6,1,0}}, /* AUX 2 */ {{3,1,0},{6,1,0}}, /* AUX 3 */ {{1,1,0},{3,1,0}}, /* Radio */ {{1,1,0},{2,1,0}}, /* CD-Rom */ {{6,1,0},{6,1,0}} /* Mute */ }; /* these are the necessary input-output-pins for bringing one audio source (see above) to the line-output */ static struct tea6420_multiplex TEA6420_line[MXB_AUDIOS+1][2] = { {{2,3,0},{1,2,0}}, {{5,3,0},{6,2,0}}, {{4,3,0},{6,2,0}}, {{3,3,0},{6,2,0}}, {{2,3,0},{3,2,0}}, {{2,3,0},{2,2,0}}, {{6,3,0},{6,2,0}} /* Mute */ }; #define MAXCONTROLS 1 static struct v4l2_queryctrl mxb_controls[] = { { V4L2_CID_AUDIO_MUTE, V4L2_CTRL_TYPE_BOOLEAN, "Mute", 0, 1, 1, 0, 0 }, }; static struct saa7146_extension_ioctls ioctls[] = { { VIDIOC_ENUMINPUT, SAA7146_EXCLUSIVE }, { VIDIOC_G_INPUT, SAA7146_EXCLUSIVE }, { VIDIOC_S_INPUT, SAA7146_EXCLUSIVE }, { VIDIOC_QUERYCTRL, SAA7146_BEFORE }, { VIDIOC_G_CTRL, SAA7146_BEFORE }, { VIDIOC_S_CTRL, SAA7146_BEFORE }, { VIDIOC_G_TUNER, SAA7146_EXCLUSIVE }, { VIDIOC_S_TUNER, SAA7146_EXCLUSIVE }, { VIDIOC_G_FREQUENCY, SAA7146_EXCLUSIVE }, { VIDIOC_S_FREQUENCY, SAA7146_EXCLUSIVE }, { VIDIOC_G_AUDIO, SAA7146_EXCLUSIVE }, { VIDIOC_S_AUDIO, SAA7146_EXCLUSIVE }, { MXB_S_AUDIO_CD, SAA7146_EXCLUSIVE }, /* custom control */ { MXB_S_AUDIO_LINE, SAA7146_EXCLUSIVE }, /* custom control */ { 0, 0 } }; struct mxb { struct video_device *video_dev; struct video_device *vbi_dev; struct i2c_adapter i2c_adapter; struct i2c_client* saa7111a; struct i2c_client* tda9840; struct i2c_client* tea6415c; struct i2c_client* tuner; struct i2c_client* tea6420_1; struct i2c_client* tea6420_2; int cur_mode; /* current audio mode (mono, stereo, ...) 
*/ int cur_input; /* current input */ int cur_mute; /* current mute status */ struct v4l2_frequency cur_freq; /* current frequency the tuner is tuned to */ }; static struct saa7146_extension extension; static int mxb_probe(struct saa7146_dev* dev) { struct mxb* mxb = NULL; struct i2c_client *client; struct list_head *item; int result; if ((result = request_module("saa7111")) < 0) { printk("mxb: saa7111 i2c module not available.\n"); return -ENODEV; } if ((result = request_module("tuner")) < 0) { printk("mxb: tuner i2c module not available.\n"); return -ENODEV; } if ((result = request_module("tea6420")) < 0) { printk("mxb: tea6420 i2c module not available.\n"); return -ENODEV; } if ((result = request_module("tea6415c")) < 0) { printk("mxb: tea6415c i2c module not available.\n"); return -ENODEV; } if ((result = request_module("tda9840")) < 0) { printk("mxb: tda9840 i2c module not available.\n"); return -ENODEV; } mxb = (struct mxb*)kmalloc(sizeof(struct mxb), GFP_KERNEL); if( NULL == mxb ) { DEB_D(("not enough kernel memory.\n")); return -ENOMEM; } memset(mxb, 0x0, sizeof(struct mxb)); mxb->i2c_adapter = (struct i2c_adapter) { .class = I2C_CLASS_TV_ANALOG, .name = "mxb", }; saa7146_i2c_adapter_prepare(dev, &mxb->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480); if(i2c_add_adapter(&mxb->i2c_adapter) < 0) { DEB_S(("cannot register i2c-device. skipping.\n")); kfree(mxb); return -EFAULT; } /* loop through all i2c-devices on the bus and look who is there */ list_for_each(item,&mxb->i2c_adapter.clients) { client = list_entry(item, struct i2c_client, list); if( I2C_TEA6420_1 == client->addr ) mxb->tea6420_1 = client; if( I2C_TEA6420_2 == client->addr ) mxb->tea6420_2 = client; if( I2C_TEA6415C_2 == client->addr ) mxb->tea6415c = client; if( I2C_TDA9840 == client->addr ) mxb->tda9840 = client; if( I2C_SAA7111 == client->addr ) mxb->saa7111a = client; if( 0x60 == client->addr ) mxb->tuner = client; } /* check if all devices are present */ if( 0 == mxb->tea6420_1 || 0 == mxb->tea6420_2 || 0 == mxb->tea6415c || 0 == mxb->tda9840 || 0 == mxb->saa7111a || 0 == mxb->tuner ) { printk("mxb: did not find all i2c devices. aborting\n"); i2c_del_adapter(&mxb->i2c_adapter); kfree(mxb); return -ENODEV; } /* all devices are present, probe was successful */ /* we store the pointer in our private data field */ dev->ext_priv = mxb; return 0; } /* some init data for the saa7740, the so-called 'sound arena module'. 
there are no specs available, so we simply use some init values */ static struct { int length; char data[9]; } mxb_saa7740_init[] = { { 3, { 0x80, 0x00, 0x00 } },{ 3, { 0x80, 0x89, 0x00 } }, { 3, { 0x80, 0xb0, 0x0a } },{ 3, { 0x00, 0x00, 0x00 } }, { 3, { 0x49, 0x00, 0x00 } },{ 3, { 0x4a, 0x00, 0x00 } }, { 3, { 0x4b, 0x00, 0x00 } },{ 3, { 0x4c, 0x00, 0x00 } }, { 3, { 0x4d, 0x00, 0x00 } },{ 3, { 0x4e, 0x00, 0x00 } }, { 3, { 0x4f, 0x00, 0x00 } },{ 3, { 0x50, 0x00, 0x00 } }, { 3, { 0x51, 0x00, 0x00 } },{ 3, { 0x52, 0x00, 0x00 } }, { 3, { 0x53, 0x00, 0x00 } },{ 3, { 0x54, 0x00, 0x00 } }, { 3, { 0x55, 0x00, 0x00 } },{ 3, { 0x56, 0x00, 0x00 } }, { 3, { 0x57, 0x00, 0x00 } },{ 3, { 0x58, 0x00, 0x00 } }, { 3, { 0x59, 0x00, 0x00 } },{ 3, { 0x5a, 0x00, 0x00 } }, { 3, { 0x5b, 0x00, 0x00 } },{ 3, { 0x5c, 0x00, 0x00 } }, { 3, { 0x5d, 0x00, 0x00 } },{ 3, { 0x5e, 0x00, 0x00 } }, { 3, { 0x5f, 0x00, 0x00 } },{ 3, { 0x60, 0x00, 0x00 } }, { 3, { 0x61, 0x00, 0x00 } },{ 3, { 0x62, 0x00, 0x00 } }, { 3, { 0x63, 0x00, 0x00 } },{ 3, { 0x64, 0x00, 0x00 } }, { 3, { 0x65, 0x00, 0x00 } },{ 3, { 0x66, 0x00, 0x00 } }, { 3, { 0x67, 0x00, 0x00 } },{ 3, { 0x68, 0x00, 0x00 } }, { 3, { 0x69, 0x00, 0x00 } },{ 3, { 0x6a, 0x00, 0x00 } }, { 3, { 0x6b, 0x00, 0x00 } },{ 3, { 0x6c, 0x00, 0x00 } }, { 3, { 0x6d, 0x00, 0x00 } },{ 3, { 0x6e, 0x00, 0x00 } }, { 3, { 0x6f, 0x00, 0x00 } },{ 3, { 0x70, 0x00, 0x00 } }, { 3, { 0x71, 0x00, 0x00 } },{ 3, { 0x72, 0x00, 0x00 } }, { 3, { 0x73, 0x00, 0x00 } },{ 3, { 0x74, 0x00, 0x00 } }, { 3, { 0x75, 0x00, 0x00 } },{ 3, { 0x76, 0x00, 0x00 } }, { 3, { 0x77, 0x00, 0x00 } },{ 3, { 0x41, 0x00, 0x42 } }, { 3, { 0x42, 0x10, 0x42 } },{ 3, { 0x43, 0x20, 0x42 } }, { 3, { 0x44, 0x30, 0x42 } },{ 3, { 0x45, 0x00, 0x01 } }, { 3, { 0x46, 0x00, 0x01 } },{ 3, { 0x47, 0x00, 0x01 } }, { 3, { 0x48, 0x00, 0x01 } }, { 9, { 0x01, 0x03, 0xc5, 0x5c, 0x7a, 0x85, 0x01, 0x00, 0x54 } }, { 9, { 0x21, 0x03, 0xc5, 0x5c, 0x7a, 0x85, 0x01, 0x00, 0x54 } }, { 9, { 0x09, 0x0b, 0xb4, 0x6b, 0x74, 0x85, 0x95, 0x00, 0x34 } }, { 9, { 0x29, 0x0b, 0xb4, 0x6b, 0x74, 0x85, 0x95, 0x00, 0x34 } }, { 9, { 0x11, 0x17, 0x43, 0x62, 0x68, 0x89, 0xd1, 0xff, 0xb0 } }, { 9, { 0x31, 0x17, 0x43, 0x62, 0x68, 0x89, 0xd1, 0xff, 0xb0 } }, { 9, { 0x19, 0x20, 0x62, 0x51, 0x5a, 0x95, 0x19, 0x01, 0x50 } }, { 9, { 0x39, 0x20, 0x62, 0x51, 0x5a, 0x95, 0x19, 0x01, 0x50 } }, { 9, { 0x05, 0x3e, 0xd2, 0x69, 0x4e, 0x9a, 0x51, 0x00, 0xf0 } }, { 9, { 0x25, 0x3e, 0xd2, 0x69, 0x4e, 0x9a, 0x51, 0x00, 0xf0 } }, { 9, { 0x0d, 0x3d, 0xa1, 0x40, 0x7d, 0x9f, 0x29, 0xfe, 0x14 } }, { 9, { 0x2d, 0x3d, 0xa1, 0x40, 0x7d, 0x9f, 0x29, 0xfe, 0x14 } }, { 9, { 0x15, 0x73, 0xa1, 0x50, 0x5d, 0xa6, 0xf5, 0xfe, 0x38 } }, { 9, { 0x35, 0x73, 0xa1, 0x50, 0x5d, 0xa6, 0xf5, 0xfe, 0x38 } }, { 9, { 0x1d, 0xed, 0xd0, 0x68, 0x29, 0xb4, 0xe1, 0x00, 0xb8 } }, { 9, { 0x3d, 0xed, 0xd0, 0x68, 0x29, 0xb4, 0xe1, 0x00, 0xb8 } }, { 3, { 0x80, 0xb3, 0x0a } }, {-1, { 0} } }; static const unsigned char mxb_saa7111_init[] = { 0x00, 0x00, /* 00 - ID byte */ 0x01, 0x00, /* 01 - reserved */ /*front end */ 0x02, 0xd8, /* 02 - FUSE=x, GUDL=x, MODE=x */ 0x03, 0x23, /* 03 - HLNRS=0, VBSL=1, WPOFF=0, HOLDG=0, GAFIX=0, GAI1=256, GAI2=256 */ 0x04, 0x00, /* 04 - GAI1=256 */ 0x05, 0x00, /* 05 - GAI2=256 */ /* decoder */ 0x06, 0xf0, /* 06 - HSB at xx(50Hz) / xx(60Hz) pixels after end of last line */ 0x07, 0x30, /* 07 - HSS at xx(50Hz) / xx(60Hz) pixels after end of last line */ 0x08, 0xa8, /* 08 - AUFD=x, FSEL=x, EXFIL=x, VTRC=x, HPLL=x, VNOI=x */ 0x09, 0x02, /* 09 - BYPS=x, PREF=x, BPSS=x, VBLB=x, UPTCV=x, APER=x */ 0x0a, 0x80, /* 
0a - BRIG=128 */ 0x0b, 0x47, /* 0b - CONT=1.109 */ 0x0c, 0x40, /* 0c - SATN=1.0 */ 0x0d, 0x00, /* 0d - HUE=0 */ 0x0e, 0x01, /* 0e - CDTO=0, CSTD=0, DCCF=0, FCTC=0, CHBW=1 */ 0x0f, 0x00, /* 0f - reserved */ 0x10, 0xd0, /* 10 - OFTS=x, HDEL=x, VRLN=x, YDEL=x */ 0x11, 0x8c, /* 11 - GPSW=x, CM99=x, FECO=x, COMPO=x, OEYC=1, OEHV=1, VIPB=0, COLO=0 */ 0x12, 0x80, /* 12 - xx output control 2 */ 0x13, 0x30, /* 13 - xx output control 3 */ 0x14, 0x00, /* 14 - reserved */ 0x15, 0x15, /* 15 - VBI */ 0x16, 0x04, /* 16 - VBI */ 0x17, 0x00, /* 17 - VBI */ }; /* bring hardware to a sane state. this has to be done, just in case someone wants to capture from this device before it has been properly initialized. the capture engine would badly fail, because no valid signal arrives on the saa7146, thus leading to timeouts and stuff. */ static int mxb_init_done(struct saa7146_dev* dev) { struct mxb* mxb = (struct mxb*)dev->ext_priv; struct video_decoder_init init; struct i2c_msg msg; struct tuner_setup tun_setup; int i = 0, err = 0; struct tea6415c_multiplex vm; /* select video mode in saa7111a */ i = VIDEO_MODE_PAL; /* fixme: currently pointless: gets overwritten by configuration below */ mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_NORM, &i); /* write configuration to saa7111a */ init.data = mxb_saa7111_init; init.len = sizeof(mxb_saa7111_init); mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_INIT, &init); /* select tuner-output on saa7111a */ i = 0; mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_INPUT, &i); /* enable vbi bypass */ i = 1; mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_VBI_BYPASS, &i); /* select a tuner type */ tun_setup.mode_mask = T_ANALOG_TV; tun_setup.addr = ADDR_UNSET; tun_setup.type = TUNER_PHILIPS_PAL; mxb->tuner->driver->command(mxb->tuner,TUNER_SET_TYPE_ADDR, &tun_setup); /* tune in some frequency on tuner */ mxb->cur_freq.tuner = 0; mxb->cur_freq.type = V4L2_TUNER_ANALOG_TV; mxb->cur_freq.frequency = freq; mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_FREQUENCY, &mxb->cur_freq); /* mute audio on tea6420s */ mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_line[6][0]); mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_line[6][1]); mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_cd[6][0]); mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_cd[6][1]); /* switch to tuner-channel on tea6415c*/ vm.out = 17; vm.in = 3; mxb->tea6415c->driver->command(mxb->tea6415c,TEA6415C_SWITCH, &vm); /* select tuner-output on multicable on tea6415c*/ vm.in = 3; vm.out = 13; mxb->tea6415c->driver->command(mxb->tea6415c,TEA6415C_SWITCH, &vm); /* the rest for mxb */ mxb->cur_input = 0; mxb->cur_mute = 1; mxb->cur_mode = V4L2_TUNER_MODE_STEREO; mxb->tda9840->driver->command(mxb->tda9840, TDA9840_SWITCH, &mxb->cur_mode); /* check if the saa7740 (aka 'sound arena module') is present on the mxb. if so, we must initialize it. due to lack of informations about the saa7740, the values were reverse engineered. */ msg.addr = 0x1b; msg.flags = 0; msg.len = mxb_saa7740_init[0].length; msg.buf = &mxb_saa7740_init[0].data[0]; if( 1 == (err = i2c_transfer(&mxb->i2c_adapter, &msg, 1))) { /* the sound arena module is a pos, that's probably the reason philips refuses to hand out a datasheet for the saa7740... it seems to screw up the i2c bus, so we disable fast irq based i2c transactions here and rely on the slow and safe polling method ... 
*/ extension.flags &= ~SAA7146_USE_I2C_IRQ; for(i = 1;;i++) { if( -1 == mxb_saa7740_init[i].length ) { break; } msg.len = mxb_saa7740_init[i].length; msg.buf = &mxb_saa7740_init[i].data[0]; if( 1 != (err = i2c_transfer(&mxb->i2c_adapter, &msg, 1))) { DEB_D(("failed to initialize 'sound arena module'.\n")); goto err; } } INFO(("'sound arena module' detected.\n")); } err: /* the rest for saa7146: you should definitely set some basic values for the input-port handling of the saa7146. */ /* ext->saa has been filled by the core driver */ /* some stuff is done via variables */ saa7146_set_hps_source_and_sync(dev, input_port_selection[mxb->cur_input].hps_source, input_port_selection[mxb->cur_input].hps_sync); /* some stuff is done via direct write to the registers */ /* this is ugly, but because of the fact that this is completely hardware dependend, it should be done directly... */ saa7146_write(dev, DD1_STREAM_B, 0x00000000); saa7146_write(dev, DD1_INIT, 0x02000200); saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26)); return 0; } /* interrupt-handler. this gets called when irq_mask is != 0. it must clear the interrupt-bits in irq_mask it has handled */ /* void mxb_irq_bh(struct saa7146_dev* dev, u32* irq_mask) { struct mxb* mxb = (struct mxb*)dev->ext_priv; } */ static struct saa7146_ext_vv vv_data; /* this function only gets called when the probing was successful */ static int mxb_attach(struct saa7146_dev* dev, struct saa7146_pci_extension_data *info) { struct mxb* mxb = (struct mxb*)dev->ext_priv; DEB_EE(("dev:%p\n",dev)); /* checking for i2c-devices can be omitted here, because we already did this in "mxb_vl42_probe" */ saa7146_vv_init(dev,&vv_data); if( 0 != saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) { ERR(("cannot register capture v4l2 device. skipping.\n")); return -1; } /* initialization stuff (vbi) (only for revision > 0 and for extensions which want it)*/ if( 0 != MXB_BOARD_CAN_DO_VBI(dev)) { if( 0 != saa7146_register_device(&mxb->vbi_dev, dev, "mxb", VFL_TYPE_VBI)) { ERR(("cannot register vbi v4l2 device. skipping.\n")); } } i2c_use_client(mxb->tea6420_1); i2c_use_client(mxb->tea6420_2); i2c_use_client(mxb->tea6415c); i2c_use_client(mxb->tda9840); i2c_use_client(mxb->saa7111a); i2c_use_client(mxb->tuner); printk("mxb: found 'Multimedia eXtension Board'-%d.\n",mxb_num); mxb_num++; mxb_init_done(dev); return 0; } static int mxb_detach(struct saa7146_dev* dev) { struct mxb* mxb = (struct mxb*)dev->ext_priv; DEB_EE(("dev:%p\n",dev)); i2c_release_client(mxb->tea6420_1); i2c_release_client(mxb->tea6420_2); i2c_release_client(mxb->tea6415c); i2c_release_client(mxb->tda9840); i2c_release_client(mxb->saa7111a); i2c_release_client(mxb->tuner); saa7146_unregister_device(&mxb->video_dev,dev); if( 0 != MXB_BOARD_CAN_DO_VBI(dev)) { saa7146_unregister_device(&mxb->vbi_dev,dev); } saa7146_vv_release(dev); mxb_num--; i2c_del_adapter(&mxb->i2c_adapter); kfree(mxb); return 0; } static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg) { struct saa7146_dev *dev = fh->dev; struct mxb* mxb = (struct mxb*)dev->ext_priv; struct saa7146_vv *vv = dev->vv_data; switch(cmd) { case VIDIOC_ENUMINPUT: { struct v4l2_input *i = arg; DEB_EE(("VIDIOC_ENUMINPUT %d.\n",i->index)); if( i->index < 0 || i->index >= MXB_INPUTS) { return -EINVAL; } memcpy(i, &mxb_inputs[i->index], sizeof(struct v4l2_input)); return 0; } /* the saa7146 provides some controls (brightness, contrast, saturation) which gets registered *after* this function. 
because of this we have to return with a value != 0 even if the function succeded.. */ case VIDIOC_QUERYCTRL: { struct v4l2_queryctrl *qc = arg; int i; for (i = MAXCONTROLS - 1; i >= 0; i--) { if (mxb_controls[i].id == qc->id) { *qc = mxb_controls[i]; DEB_D(("VIDIOC_QUERYCTRL %d.\n",qc->id)); return 0; } } return -EAGAIN; } case VIDIOC_G_CTRL: { struct v4l2_control *vc = arg; int i; for (i = MAXCONTROLS - 1; i >= 0; i--) { if (mxb_controls[i].id == vc->id) { break; } } if( i < 0 ) { return -EAGAIN; } switch (vc->id ) { case V4L2_CID_AUDIO_MUTE: { vc->value = mxb->cur_mute; DEB_D(("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d.\n",vc->value)); return 0; } } DEB_EE(("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d.\n",vc->value)); return 0; } case VIDIOC_S_CTRL: { struct v4l2_control *vc = arg; int i = 0; for (i = MAXCONTROLS - 1; i >= 0; i--) { if (mxb_controls[i].id == vc->id) { break; } } if( i < 0 ) { return -EAGAIN; } switch (vc->id ) { case V4L2_CID_AUDIO_MUTE: { mxb->cur_mute = vc->value; if( 0 == vc->value ) { /* switch the audio-source */ mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_line[video_audio_connect[mxb->cur_input]][0]); mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_line[video_audio_connect[mxb->cur_input]][1]); } else { mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_line[6][0]); mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_line[6][1]); } DEB_EE(("VIDIOC_S_CTRL, V4L2_CID_AUDIO_MUTE: %d.\n",vc->value)); break; } } return 0; } case VIDIOC_G_INPUT: { int *input = (int *)arg; *input = mxb->cur_input; DEB_EE(("VIDIOC_G_INPUT %d.\n",*input)); return 0; } case VIDIOC_S_INPUT: { int input = *(int *)arg; struct tea6415c_multiplex vm; int i = 0; DEB_EE(("VIDIOC_S_INPUT %d.\n",input)); if (input < 0 || input >= MXB_INPUTS) { return -EINVAL; } /* fixme: locke das setzen des inputs mit hilfe des mutexes down(&dev->lock); video_mux(dev,*i); up(&dev->lock); */ /* fixme: check if streaming capture if ( 0 != dev->streaming ) { DEB_D(("VIDIOC_S_INPUT illegal while streaming.\n")); return -EPERM; } */ mxb->cur_input = input; saa7146_set_hps_source_and_sync(dev, input_port_selection[input].hps_source, input_port_selection[input].hps_sync); /* prepare switching of tea6415c and saa7111a; have a look at the 'background'-file for further informations */ switch( input ) { case TUNER: { i = 0; vm.in = 3; vm.out = 17; if ( 0 != mxb->tea6415c->driver->command(mxb->tea6415c,TEA6415C_SWITCH, &vm)) { printk("VIDIOC_S_INPUT: could not address tea6415c #1\n"); return -EFAULT; } /* connect tuner-output always to multicable */ vm.in = 3; vm.out = 13; break; } case AUX3_YC: { /* nothing to be done here. aux3_yc is directly connected to the saa711a */ i = 5; break; } case AUX3: { /* nothing to be done here. 
aux3 is directly connected to the saa711a */ i = 1; break; } case AUX1: { i = 0; vm.in = 1; vm.out = 17; break; } } /* switch video in tea6415c only if necessary */ switch( input ) { case TUNER: case AUX1: { if ( 0 != mxb->tea6415c->driver->command(mxb->tea6415c,TEA6415C_SWITCH, &vm)) { printk("VIDIOC_S_INPUT: could not address tea6415c #3\n"); return -EFAULT; } break; } default: { break; } } /* switch video in saa7111a */ if ( 0 != mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_INPUT, &i)) { printk("VIDIOC_S_INPUT: could not address saa7111a #1.\n"); } /* switch the audio-source only if necessary */ if( 0 == mxb->cur_mute ) { mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_line[video_audio_connect[input]][0]); mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_line[video_audio_connect[input]][1]); } return 0; } case VIDIOC_G_TUNER: { struct v4l2_tuner *t = arg; int byte = 0; if( 0 != t->index ) { DEB_D(("VIDIOC_G_TUNER: channel %d does not have a tuner attached.\n", t->index)); return -EINVAL; } DEB_EE(("VIDIOC_G_TUNER: %d\n", t->index)); memset(t,0,sizeof(*t)); strcpy(t->name, "Television"); t->type = V4L2_TUNER_ANALOG_TV; t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP; t->rangelow = 772; /* 48.25 MHZ / 62.5 kHz = 772, see fi1216mk2-specs, page 2 */ t->rangehigh = 13684; /* 855.25 MHz / 62.5 kHz = 13684 */ /* FIXME: add the real signal strength here */ t->signal = 0xffff; t->afc = 0; mxb->tda9840->driver->command(mxb->tda9840,TDA9840_DETECT, &byte); t->audmode = mxb->cur_mode; if( byte < 0 ) { t->rxsubchans = V4L2_TUNER_SUB_MONO; } else { switch(byte) { case TDA9840_MONO_DETECT: { t->rxsubchans = V4L2_TUNER_SUB_MONO; DEB_D(("VIDIOC_G_TUNER: V4L2_TUNER_MODE_MONO.\n")); break; } case TDA9840_DUAL_DETECT: { t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; DEB_D(("VIDIOC_G_TUNER: V4L2_TUNER_MODE_LANG1.\n")); break; } case TDA9840_STEREO_DETECT: { t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO; DEB_D(("VIDIOC_G_TUNER: V4L2_TUNER_MODE_STEREO.\n")); break; } default: { /* TDA9840_INCORRECT_DETECT */ t->rxsubchans = V4L2_TUNER_MODE_MONO; DEB_D(("VIDIOC_G_TUNER: TDA9840_INCORRECT_DETECT => V4L2_TUNER_MODE_MONO\n")); break; } } } return 0; } case VIDIOC_S_TUNER: { struct v4l2_tuner *t = arg; int result = 0; int byte = 0; if( 0 != t->index ) { DEB_D(("VIDIOC_S_TUNER: channel %d does not have a tuner attached.\n",t->index)); return -EINVAL; } switch(t->audmode) { case V4L2_TUNER_MODE_STEREO: { mxb->cur_mode = V4L2_TUNER_MODE_STEREO; byte = TDA9840_SET_STEREO; DEB_D(("VIDIOC_S_TUNER: V4L2_TUNER_MODE_STEREO\n")); break; } case V4L2_TUNER_MODE_LANG1: { mxb->cur_mode = V4L2_TUNER_MODE_LANG1; byte = TDA9840_SET_LANG1; DEB_D(("VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG1\n")); break; } case V4L2_TUNER_MODE_LANG2: { mxb->cur_mode = V4L2_TUNER_MODE_LANG2; byte = TDA9840_SET_LANG2; DEB_D(("VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG2\n")); break; } default: { /* case V4L2_TUNER_MODE_MONO: {*/ mxb->cur_mode = V4L2_TUNER_MODE_MONO; byte = TDA9840_SET_MONO; DEB_D(("VIDIOC_S_TUNER: TDA9840_SET_MONO\n")); break; } } if( 0 != (result = mxb->tda9840->driver->command(mxb->tda9840, TDA9840_SWITCH, &byte))) { printk("VIDIOC_S_TUNER error. 
result:%d, byte:%d\n",result,byte); } return 0; } case VIDIOC_G_FREQUENCY: { struct v4l2_frequency *f = arg; if(0 != mxb->cur_input) { DEB_D(("VIDIOC_G_FREQ: channel %d does not have a tuner!\n",mxb->cur_input)); return -EINVAL; } *f = mxb->cur_freq; DEB_EE(("VIDIOC_G_FREQ: freq:0x%08x.\n", mxb->cur_freq.frequency)); return 0; } case VIDIOC_S_FREQUENCY: { struct v4l2_frequency *f = arg; if (0 != f->tuner) return -EINVAL; if (V4L2_TUNER_ANALOG_TV != f->type) return -EINVAL; if(0 != mxb->cur_input) { DEB_D(("VIDIOC_S_FREQ: channel %d does not have a tuner!\n",mxb->cur_input)); return -EINVAL; } mxb->cur_freq = *f; DEB_EE(("VIDIOC_S_FREQUENCY: freq:0x%08x.\n", mxb->cur_freq.frequency)); /* tune in desired frequency */ mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_FREQUENCY, &mxb->cur_freq); /* hack: changing the frequency should invalidate the vbi-counter (=> alevt) */ spin_lock(&dev->slock); vv->vbi_fieldcount = 0; spin_unlock(&dev->slock); return 0; } case MXB_S_AUDIO_CD: { int i = *(int*)arg; if( i < 0 || i >= MXB_AUDIOS ) { DEB_D(("illegal argument to MXB_S_AUDIO_CD: i:%d.\n",i)); return -EINVAL; } DEB_EE(("MXB_S_AUDIO_CD: i:%d.\n",i)); mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_cd[i][0]); mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_cd[i][1]); return 0; } case MXB_S_AUDIO_LINE: { int i = *(int*)arg; if( i < 0 || i >= MXB_AUDIOS ) { DEB_D(("illegal argument to MXB_S_AUDIO_LINE: i:%d.\n",i)); return -EINVAL; } DEB_EE(("MXB_S_AUDIO_LINE: i:%d.\n",i)); mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_line[i][0]); mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_line[i][1]); return 0; } case VIDIOC_G_AUDIO: { struct v4l2_audio *a = arg; if( a->index < 0 || a->index > MXB_INPUTS ) { DEB_D(("VIDIOC_G_AUDIO %d out of range.\n",a->index)); return -EINVAL; } DEB_EE(("VIDIOC_G_AUDIO %d.\n",a->index)); memcpy(a, &mxb_audios[video_audio_connect[mxb->cur_input]], sizeof(struct v4l2_audio)); return 0; } case VIDIOC_S_AUDIO: { struct v4l2_audio *a = arg; DEB_D(("VIDIOC_S_AUDIO %d.\n",a->index)); return 0; } default: /* DEB2(printk("does not handle this ioctl.\n")); */ return -ENOIOCTLCMD; } return 0; } static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std) { struct mxb* mxb = (struct mxb*)dev->ext_priv; int zero = 0; int one = 1; if(V4L2_STD_PAL_I == std->id ) { DEB_D(("VIDIOC_S_STD: setting mxb for PAL_I.\n")); /* set the 7146 gpio register -- I don't know what this does exactly */ saa7146_write(dev, GPIO_CTRL, 0x00404050); /* unset the 7111 gpio register -- I don't know what this does exactly */ mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &zero); } else { DEB_D(("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM.\n")); /* set the 7146 gpio register -- I don't know what this does exactly */ saa7146_write(dev, GPIO_CTRL, 0x00404050); /* set the 7111 gpio register -- I don't know what this does exactly */ mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &one); } return 0; } static struct saa7146_standard standard[] = { { .name = "PAL-BG", .id = V4L2_STD_PAL_BG, .v_offset = 0x17, .v_field = 288, .h_offset = 0x14, .h_pixels = 680, .v_max_out = 576, .h_max_out = 768, }, { .name = "PAL-I", .id = V4L2_STD_PAL_I, .v_offset = 0x17, .v_field = 288, .h_offset = 0x14, .h_pixels = 680, .v_max_out = 576, .h_max_out = 768, }, { .name = "NTSC", .id = V4L2_STD_NTSC, .v_offset = 0x16, .v_field = 240, .h_offset = 0x06, .h_pixels = 708, .v_max_out = 480, .h_max_out = 
640, }, { .name = "SECAM", .id = V4L2_STD_SECAM, .v_offset = 0x14, .v_field = 288, .h_offset = 0x14, .h_pixels = 720, .v_max_out = 576, .h_max_out = 768, } }; static struct saa7146_pci_extension_data mxb = { .ext_priv = "Multimedia eXtension Board", .ext = &extension, }; static struct pci_device_id pci_tbl[] = { { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7146, .subvendor = 0x0000, .subdevice = 0x0000, .driver_data = (unsigned long)&mxb, }, { .vendor = 0, } }; MODULE_DEVICE_TABLE(pci, pci_tbl); static struct saa7146_ext_vv vv_data = { .inputs = MXB_INPUTS, .capabilities = V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE, .stds = &standard[0], .num_stds = sizeof(standard)/sizeof(struct saa7146_standard), .std_callback = &std_callback, .ioctls = &ioctls[0], .ioctl = mxb_ioctl, }; static struct saa7146_extension extension = { .name = MXB_IDENTIFIER, .flags = SAA7146_USE_I2C_IRQ, .pci_tbl = &pci_tbl[0], .module = THIS_MODULE, .probe = mxb_probe, .attach = mxb_attach, .detach = mxb_detach, .irq_mask = 0, .irq_func = NULL, }; static int __init mxb_init_module(void) { if( 0 != saa7146_register_extension(&extension)) { DEB_S(("failed to register extension.\n")); return -ENODEV; } return 0; } static void __exit mxb_cleanup_module(void) { saa7146_unregister_extension(&extension); } module_init(mxb_init_module); module_exit(mxb_cleanup_module); MODULE_DESCRIPTION("video4linux-2 driver for the Siemens-Nixdorf 'Multimedia eXtension board'"); MODULE_AUTHOR("Michael Hunold <michael@mihu.de>"); MODULE_LICENSE("GPL");
ipwndev/DSLinux-Mirror
linux-2.6.x/drivers/media/video/mxb.c
C
gpl-2.0
30,749
/** * @file * Packet buffer management * * Packets are built from the pbuf data structure. It supports dynamic * memory allocation for packet contents or can reference externally * managed packet contents both in RAM and ROM. Quick allocation for * incoming packets is provided through pools with fixed sized pbufs. * * A packet may span over multiple pbufs, chained as a singly linked * list. This is called a "pbuf chain". * * Multiple packets may be queued, also using this singly linked list. * This is called a "packet queue". * * So, a packet queue consists of one or more pbuf chains, each of * which consists of one or more pbufs. CURRENTLY, PACKET QUEUES ARE * NOT SUPPORTED!!! Use helper structs to queue multiple packets. * * The differences between a pbuf chain and a packet queue are very * precise but subtle. * * The last pbuf of a packet has a ->tot_len field that equals the * ->len field. It can be found by traversing the list. If the last * pbuf of a packet has a ->next field other than NULL, more packets * are on the queue. * * Therefore, looping through a pbuf of a single packet has a * loop end condition (tot_len == p->len), NOT (next == NULL). */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #include "lwip/stats.h" #include "lwip/def.h" #include "lwip/mem.h" #include "lwip/memp.h" #include "lwip/pbuf.h" #include "lwip/sys.h" #if LWIP_TCP && TCP_QUEUE_OOSEQ #include "lwip/tcp_impl.h" #endif #if LWIP_CHECKSUM_ON_COPY #include "lwip/inet_chksum.h" #endif #include <lwk/string.h> #define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) /* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here.
*/ #define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE) #if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ #define PBUF_POOL_IS_EMPTY() #else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ #if !NO_SYS #ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL #include "lwip/tcpip.h" #define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \ if(tcpip_callback_with_block(pbuf_free_ooseq_callback, NULL, 0) != ERR_OK) { \ SYS_ARCH_PROTECT(old_level); \ pbuf_free_ooseq_pending = 0; \ SYS_ARCH_UNPROTECT(old_level); \ } } while(0) #endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ #endif /* !NO_SYS */ volatile u8_t pbuf_free_ooseq_pending; #define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty() /** * Attempt to reclaim some memory from queued out-of-sequence TCP segments * if we run out of pool pbufs. It's better to give priority to new packets * if we're running out. * * This must be done in the correct thread context therefore this function * can only be used with NO_SYS=0 and through tcpip_callback. */ #if !NO_SYS static #endif /* !NO_SYS */ void pbuf_free_ooseq(void) { struct tcp_pcb* pcb; SYS_ARCH_DECL_PROTECT(old_level); SYS_ARCH_PROTECT(old_level); pbuf_free_ooseq_pending = 0; SYS_ARCH_UNPROTECT(old_level); for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) { if (NULL != pcb->ooseq) { /** Free the ooseq pbufs of one PCB only */ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n")); tcp_segs_free(pcb->ooseq); pcb->ooseq = NULL; return; } } } #if !NO_SYS /** * Just a callback function for tcpip_timeout() that calls pbuf_free_ooseq(). */ static void pbuf_free_ooseq_callback(void *arg) { LWIP_UNUSED_ARG(arg); pbuf_free_ooseq(); } #endif /* !NO_SYS */ /** Queue a call to pbuf_free_ooseq if not already queued. */ static void pbuf_pool_is_empty(void) { #ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL SYS_ARCH_DECL_PROTECT(old_level); SYS_ARCH_PROTECT(old_level); pbuf_free_ooseq_pending = 1; SYS_ARCH_UNPROTECT(old_level); #else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ u8_t queued; SYS_ARCH_DECL_PROTECT(old_level); SYS_ARCH_PROTECT(old_level); queued = pbuf_free_ooseq_pending; pbuf_free_ooseq_pending = 1; SYS_ARCH_UNPROTECT(old_level); if(!queued) { /* queue a call to pbuf_free_ooseq if not already queued */ PBUF_POOL_FREE_OOSEQ_QUEUE_CALL(); } #endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ } #endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ /** * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type). * * The actual memory allocated for the pbuf is determined by the * layer at which the pbuf is allocated and the requested size * (from the size parameter). * * @param layer flag to define header size * @param length size of the pbuf's payload * @param type this parameter decides how and where the pbuf * should be allocated as follows: * * - PBUF_RAM: buffer memory for pbuf is allocated as one large * chunk. This includes protocol headers as well. * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for * protocol headers. Additional headers must be prepended * by allocating another pbuf and chain in to the front of * the ROM pbuf. It is assumed that the memory used is really * similar to ROM in that it is immutable and will not be * changed. Memory which is dynamic should generally not * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. * - PBUF_REF: no buffer memory is allocated for the pbuf, even for * protocol headers. It is assumed that the pbuf is only * being used in a single thread. 
If the pbuf gets queued, * then pbuf_take should be called to copy the buffer. * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from * the pbuf pool that is allocated during pbuf_init(). * * @return the allocated pbuf. If multiple pbufs were allocated, this * is the first pbuf of a pbuf chain. */ struct pbuf * pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) { struct pbuf *p, *q, *r; u16_t offset; s32_t rem_len; /* remaining length */ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length)); /* determine header offset */ switch (layer) { case PBUF_TRANSPORT: /* add room for transport (often TCP) layer header */ offset = PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN; break; case PBUF_IP: /* add room for IP layer header */ offset = PBUF_LINK_HLEN + PBUF_IP_HLEN; break; case PBUF_LINK: /* add room for link layer header */ offset = PBUF_LINK_HLEN; break; case PBUF_RAW: offset = 0; break; default: LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0); return NULL; } switch (type) { case PBUF_POOL: /* allocate head of pbuf chain into p */ p = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc: allocated pbuf %p\n", (void *)p)); if (p == NULL) { PBUF_POOL_IS_EMPTY(); return NULL; } p->type = type; p->next = NULL; /* make the payload pointer point 'offset' bytes into pbuf data memory */ p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + (SIZEOF_STRUCT_PBUF + offset))); LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned", ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); /* the total length of the pbuf chain is the requested size */ p->tot_len = length; /* set the length of the first pbuf in the chain */ p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)); LWIP_ASSERT("check p->payload + p->len does not overflow pbuf", ((u8_t*)p->payload + p->len <= (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED)); LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT", (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 ); /* set reference count (needed here in case we fail) */ p->ref = 1; /* now allocate the tail of the pbuf chain */ /* remember first pbuf for linkage in next iteration */ r = p; /* remaining length to be allocated */ rem_len = length - p->len; /* any remaining pbufs to be allocated? */ while (rem_len > 0) { q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); if (q == NULL) { PBUF_POOL_IS_EMPTY(); /* free chain so far allocated */ pbuf_free(p); /* bail out unsuccessfully */ return NULL; } q->type = type; q->flags = 0; q->next = NULL; /* make previous pbuf point to this pbuf */ r->next = q; /* set total length of this pbuf and next in chain */ LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff); q->tot_len = (u16_t)rem_len; /* this pbuf length is pool size, unless smaller sized tail */ q->len = LWIP_MIN((u16_t)rem_len, PBUF_POOL_BUFSIZE_ALIGNED); q->payload = (void *)((u8_t *)q + SIZEOF_STRUCT_PBUF); LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned", ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0); LWIP_ASSERT("check p->payload + p->len does not overflow pbuf", ((u8_t*)p->payload + p->len <= (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED)); q->ref = 1; /* calculate remaining length to be allocated */ rem_len -= q->len; /* remember this pbuf for linkage in next iteration */ r = q; } /* end of chain */ /*r->next = NULL;*/ break; case PBUF_RAM: /* If pbuf is to be allocated in RAM, allocate memory for it.
*/ p = (struct pbuf*)mem_malloc(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length)); if (p == NULL) { return NULL; } /* Set up internal structure of the pbuf. */ p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)); p->len = p->tot_len = length; p->next = NULL; p->type = type; LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned", ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); break; /* pbuf references existing (non-volatile static constant) ROM payload? */ case PBUF_ROM: /* pbuf references existing (externally allocated) RAM payload? */ case PBUF_REF: /* only allocate memory for the pbuf structure */ p = (struct pbuf *)memp_malloc(MEMP_PBUF); if (p == NULL) { LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n", (type == PBUF_ROM) ? "ROM" : "REF")); return NULL; } /* caller must set this field properly, afterwards */ p->payload = NULL; p->len = p->tot_len = length; p->next = NULL; p->type = type; break; default: LWIP_ASSERT("pbuf_alloc: erroneous type", 0); return NULL; } /* set reference count */ p->ref = 1; /* set flags */ p->flags = 0; LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p)); return p; } #if LWIP_SUPPORT_CUSTOM_PBUF /** Initialize a custom pbuf (already allocated). * * @param layer flag to define header size * @param length size of the pbuf's payload * @param type type of the pbuf (only used to treat the pbuf accordingly, as * this function allocates no memory) * @param p pointer to the custom pbuf to initialize (already allocated) * @param payload_mem pointer to the buffer that is used for payload and headers, * must be at least big enough to hold 'length' plus the header size, * may be NULL if set later. * ATTENTION: The caller is responsible for correct alignment of this buffer!! * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least * big enough to hold 'length' plus the header size */ struct pbuf* pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p, void *payload_mem, u16_t payload_mem_len) { u16_t offset; LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length)); /* determine header offset */ switch (l) { case PBUF_TRANSPORT: /* add room for transport (often TCP) layer header */ offset = PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN; break; case PBUF_IP: /* add room for IP layer header */ offset = PBUF_LINK_HLEN + PBUF_IP_HLEN; break; case PBUF_LINK: /* add room for link layer header */ offset = PBUF_LINK_HLEN; break; case PBUF_RAW: offset = 0; break; default: LWIP_ASSERT("pbuf_alloced_custom: bad pbuf layer", 0); return NULL; } if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) { LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length)); return NULL; } p->pbuf.next = NULL; if (payload_mem != NULL) { p->pbuf.payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset); } else { p->pbuf.payload = NULL; } p->pbuf.flags = PBUF_FLAG_IS_CUSTOM; p->pbuf.len = p->pbuf.tot_len = length; p->pbuf.type = type; p->pbuf.ref = 1; return &p->pbuf; } #endif /* LWIP_SUPPORT_CUSTOM_PBUF */ /** * Shrink a pbuf chain to a desired length. * * @param p pbuf to shrink. * @param new_len desired new length of pbuf chain * * Depending on the desired length, the first few pbufs in a chain might * be skipped and left unchanged. 
The new last pbuf in the chain will be * resized, and any remaining pbufs will be freed. * * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted. * @note May not be called on a packet queue. * * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain). */ void pbuf_realloc(struct pbuf *p, u16_t new_len) { struct pbuf *q; u16_t rem_len; /* remaining length */ s32_t grow; LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL); LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL || p->type == PBUF_ROM || p->type == PBUF_RAM || p->type == PBUF_REF); /* desired length larger than current length? */ if (new_len >= p->tot_len) { /* enlarging not yet supported */ return; } /* the pbuf chain grows by (new_len - p->tot_len) bytes * (which may be negative in case of shrinking) */ grow = new_len - p->tot_len; /* first, step over any pbufs that should remain in the chain */ rem_len = new_len; q = p; /* should this pbuf be kept? */ while (rem_len > q->len) { /* decrease remaining length by pbuf length */ rem_len -= q->len; /* decrease total length indicator */ LWIP_ASSERT("grow < max_u16_t", grow < 0xffff); q->tot_len += (u16_t)grow; /* proceed to next pbuf in chain */ q = q->next; LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL); } /* we have now reached the new last pbuf (in q) */ /* rem_len == desired length for pbuf q */ /* shrink allocated memory for PBUF_RAM */ /* (other types merely adjust their length fields) */ if ((q->type == PBUF_RAM) && (rem_len != q->len)) { /* reallocate and adjust the length of the pbuf that will be split */ q = (struct pbuf *)mem_trim(q, (u16_t)((u8_t *)q->payload - (u8_t *)q) + rem_len); LWIP_ASSERT("mem_trim returned q == NULL", q != NULL); } /* adjust length fields for new last pbuf */ q->len = rem_len; q->tot_len = q->len; /* any remaining pbufs in chain? */ if (q->next != NULL) { /* free remaining pbufs in chain */ pbuf_free(q->next); } /* q is last packet in chain */ q->next = NULL; } /** * Adjusts the payload pointer to hide or reveal headers in the payload. * * Adjusts the ->payload pointer so that space for a header * (dis)appears in the pbuf payload. * * The ->payload, ->tot_len and ->len fields are adjusted. * * @param p pbuf to change the header size. * @param header_size_increment Number of bytes to increment header size which * increases the size of the pbuf. New space is on the front. * (Using a negative value decreases the header size.) * If header_size_increment is 0, this function does nothing and returns successfully. * * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so * the call will fail. A check is made that the increase in header size does * not move the payload pointer in front of the start of the buffer. * @return non-zero on failure, zero on success. * */ u8_t pbuf_header(struct pbuf *p, s16_t header_size_increment) { u16_t type; void *payload; u16_t increment_magnitude; LWIP_ASSERT("p != NULL", p != NULL); if ((header_size_increment == 0) || (p == NULL)) { return 0; } if (header_size_increment < 0){ increment_magnitude = -header_size_increment; /* Check that we aren't going to move off the end of the pbuf */ LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;); } else { increment_magnitude = header_size_increment; #if 0 /* Can't assert these as some callers speculatively call pbuf_header() to see if it's OK. Will return 1 below instead.
*/ /* Check that we've got the correct type of pbuf to work with */ LWIP_ASSERT("p->type == PBUF_RAM || p->type == PBUF_POOL", p->type == PBUF_RAM || p->type == PBUF_POOL); /* Check that we aren't going to move off the beginning of the pbuf */ LWIP_ASSERT("p->payload - increment_magnitude >= p + SIZEOF_STRUCT_PBUF", (u8_t *)p->payload - increment_magnitude >= (u8_t *)p + SIZEOF_STRUCT_PBUF); #endif } type = p->type; /* remember current payload pointer */ payload = p->payload; /* pbuf types containing payloads? */ if (type == PBUF_RAM || type == PBUF_POOL) { /* set new payload pointer */ p->payload = (u8_t *)p->payload - header_size_increment; /* boundary check fails? */ if ((u8_t *)p->payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) { LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("pbuf_header: failed as %p < %p (not enough space for new header size)\n", (void *)p->payload, (void *)(p + 1))); /* restore old payload pointer */ p->payload = payload; /* bail out unsuccessfully */ return 1; } /* pbuf types referring to external payloads? */ } else if (type == PBUF_REF || type == PBUF_ROM) { /* hide a header in the payload? */ if ((header_size_increment < 0) && (increment_magnitude <= p->len)) { /* increase payload pointer */ p->payload = (u8_t *)p->payload - header_size_increment; } else { /* cannot expand payload to front (yet!) * bail out unsuccessfully */ return 1; } } else { /* Unknown type */ LWIP_ASSERT("bad pbuf type", 0); return 1; } /* modify pbuf length fields */ p->len += header_size_increment; p->tot_len += header_size_increment; LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_header: old %p new %p (%"S16_F")\n", (void *)payload, (void *)p->payload, header_size_increment)); return 0; } /** * Dereference a pbuf chain or queue and deallocate any no-longer-used * pbufs at the head of this chain or queue. * * Decrements the pbuf reference count. If it reaches zero, the pbuf is * deallocated. * * For a pbuf chain, this is repeated for each pbuf in the chain, * up to the first pbuf which has a non-zero reference count after * decrementing. So, when all reference counts are one, the whole * chain is free'd. * * @param p The pbuf (chain) to be dereferenced. * * @return the number of pbufs that were de-allocated * from the head of the chain. * * @note MUST NOT be called on a packet queue (Not verified to work yet). * @note the reference counter of a pbuf equals the number of pointers * that refer to the pbuf (or into the pbuf). * * @internal examples: * * Assuming existing chains a->b->c with the following reference * counts, calling pbuf_free(a) results in: * * 1->2->3 becomes ...1->3 * 3->3->3 becomes 2->3->3 * 1->1->2 becomes ......1 * 2->1->1 becomes 1->1->1 * 1->1->1 becomes ....... * */ u8_t pbuf_free(struct pbuf *p) { u16_t type; struct pbuf *q; u8_t count; if (p == NULL) { LWIP_ASSERT("p != NULL", p != NULL); /* if assertions are disabled, proceed with debug output */ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("pbuf_free(p == NULL) was called.\n")); return 0; } LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p)); PERF_START; LWIP_ASSERT("pbuf_free: sane type", p->type == PBUF_RAM || p->type == PBUF_ROM || p->type == PBUF_REF || p->type == PBUF_POOL); count = 0; /* de-allocate all consecutive pbufs from the head of the chain that * obtain a zero reference count after decrementing*/ while (p != NULL) { u16_t ref; SYS_ARCH_DECL_PROTECT(old_level); /* Since decrementing ref cannot be guaranteed to be a single machine operation * we must protect it.
We put the new ref into a local variable to prevent * further protection. */ SYS_ARCH_PROTECT(old_level); /* all pbufs in a chain are referenced at least once */ LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0); /* decrease reference count (number of pointers to pbuf) */ ref = --(p->ref); SYS_ARCH_UNPROTECT(old_level); /* this pbuf is no longer referenced? */ if (ref == 0) { /* remember next pbuf in chain for next iteration */ q = p->next; LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p)); type = p->type; #if LWIP_SUPPORT_CUSTOM_PBUF /* is this a custom pbuf? */ if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) { struct pbuf_custom *pc = (struct pbuf_custom*)p; LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL); pc->custom_free_function(p); } else #endif /* LWIP_SUPPORT_CUSTOM_PBUF */ { /* is this a pbuf from the pool? */ if (type == PBUF_POOL) { memp_free(MEMP_PBUF_POOL, p); /* is this a ROM or RAM referencing pbuf? */ } else if (type == PBUF_ROM || type == PBUF_REF) { memp_free(MEMP_PBUF, p); /* type == PBUF_RAM */ } else { mem_free(p); } } count++; /* proceed to next pbuf */ p = q; /* p->ref > 0, this pbuf is still referenced */ /* (and so the remaining pbufs in chain as well) */ } else { LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, ref)); /* stop walking through the chain */ p = NULL; } } PERF_STOP("pbuf_free"); /* return number of de-allocated pbufs */ return count; } /** * Count number of pbufs in a chain * * @param p first pbuf of chain * @return the number of pbufs in a chain */ u8_t pbuf_clen(struct pbuf *p) { u8_t len; len = 0; while (p != NULL) { ++len; p = p->next; } return len; } /** * Increment the reference count of the pbuf. * * @param p pbuf to increase reference counter of * */ void pbuf_ref(struct pbuf *p) { SYS_ARCH_DECL_PROTECT(old_level); /* pbuf given? */ if (p != NULL) { SYS_ARCH_PROTECT(old_level); ++(p->ref); SYS_ARCH_UNPROTECT(old_level); } } /** * Concatenate two pbufs (each may be a pbuf chain) and take over * the caller's reference of the tail pbuf. * * @note The caller MAY NOT reference the tail pbuf afterwards. * Use pbuf_chain() for that purpose. * * @see pbuf_chain() */ void pbuf_cat(struct pbuf *h, struct pbuf *t) { struct pbuf *p; LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)", ((h != NULL) && (t != NULL)), return;); /* proceed to last pbuf of chain */ for (p = h; p->next != NULL; p = p->next) { /* add total length of second chain to all totals of first chain */ p->tot_len += t->tot_len; } /* { p is last pbuf of first h chain, p->next == NULL } */ LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len); LWIP_ASSERT("p->next == NULL", p->next == NULL); /* add total length of second chain to last pbuf total of first chain */ p->tot_len += t->tot_len; /* chain last pbuf of head (p) with first of tail (t) */ p->next = t; /* p->next now references t, but the caller will drop its reference to t, * so on net there is no change to the reference count of t. */ } /** * Chain two pbufs (or pbuf chains) together. * * The caller MUST call pbuf_free(t) once it has stopped * using it. Use pbuf_cat() instead if you no longer use t. * * @param h head pbuf (chain) * @param t tail pbuf (chain) * @note The pbufs MUST belong to the same packet. * @note MAY NOT be called on a packet queue. * * The ->tot_len fields of all pbufs of the head chain are adjusted.
* The ->next field of the last pbuf of the head chain is adjusted. * The ->ref field of the first pbuf of the tail chain is adjusted. * */ void pbuf_chain(struct pbuf *h, struct pbuf *t) { pbuf_cat(h, t); /* t is now referenced by h */ pbuf_ref(t); LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t)); } /** * Dechains the first pbuf from its succeeding pbufs in the chain. * * Makes p->tot_len field equal to p->len. * @param p pbuf to dechain * @return remainder of the pbuf chain, or NULL if it was de-allocated. * @note May not be called on a packet queue. */ struct pbuf * pbuf_dechain(struct pbuf *p) { struct pbuf *q; u8_t tail_gone = 1; /* tail */ q = p->next; /* pbuf has successor in chain? */ if (q != NULL) { /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len); /* enforce invariant if assertion is disabled */ q->tot_len = p->tot_len - p->len; /* decouple pbuf from remainder */ p->next = NULL; /* total length of pbuf p is its own length only */ p->tot_len = p->len; /* q is no longer referenced by p, free it */ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q)); tail_gone = pbuf_free(q); if (tail_gone > 0) { LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q)); } /* return remaining tail or NULL if deallocated */ } /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len); return ((tail_gone > 0) ? NULL : q); } /** * * Create PBUF_RAM copies of pbufs. * * Used to queue packets on behalf of the lwIP stack, such as * ARP based queueing. * * @note You MUST explicitly use p = pbuf_take(p); * * @note Only one packet is copied, no packet queue! * * @param p_to pbuf destination of the copy * @param p_from pbuf source of the copy * * @return ERR_OK if pbuf was copied * ERR_ARG if one of the pbufs is NULL or p_to is not big * enough to hold p_from */ err_t pbuf_copy(struct pbuf *p_to, struct pbuf *p_from) { u16_t offset_to=0, offset_from=0, len; LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n", (void*)p_to, (void*)p_from)); /* is the target big enough to hold the source? */ LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) && (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;); /* iterate through pbuf chain */ do { /* copy one part of the original chain */ if ((p_to->len - offset_to) >= (p_from->len - offset_from)) { /* complete current p_from fits into current p_to */ len = p_from->len - offset_from; } else { /* current p_from does not fit into current p_to */ len = p_to->len - offset_to; } MEMCPY((u8_t*)p_to->payload + offset_to, (u8_t*)p_from->payload + offset_from, len); offset_to += len; offset_from += len; LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len); LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len); if (offset_from >= p_from->len) { /* on to next p_from (if any) */ offset_from = 0; p_from = p_from->next; } if (offset_to == p_to->len) { /* on to next p_to (if any) */ offset_to = 0; p_to = p_to->next; LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL) , return ERR_ARG;); } if((p_from != NULL) && (p_from->len == p_from->tot_len)) { /* don't copy more than one packet! 
*/ LWIP_ERROR("pbuf_copy() does not allow packet queues!\n", (p_from->next == NULL), return ERR_VAL;); } if((p_to != NULL) && (p_to->len == p_to->tot_len)) { /* don't copy more than one packet! */ LWIP_ERROR("pbuf_copy() does not allow packet queues!\n", (p_to->next == NULL), return ERR_VAL;); } } while (p_from); LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n")); return ERR_OK; } /** * Copy (part of) the contents of a packet buffer * to an application supplied buffer. * * @param buf the pbuf from which to copy data * @param dataptr the application supplied buffer * @param len length of data to copy (dataptr must be big enough). No more * than buf->tot_len will be copied, irrespective of len * @param offset offset into the packet buffer from where to begin copying len bytes * @return the number of bytes copied, or 0 on failure */ u16_t pbuf_copy_partial(struct pbuf *buf, void *dataptr, u16_t len, u16_t offset) { struct pbuf *p; u16_t left; u16_t buf_copy_len; u16_t copied_total = 0; LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;); LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;); left = 0; if((buf == NULL) || (dataptr == NULL)) { return 0; } /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ for(p = buf; len != 0 && p != NULL; p = p->next) { if ((offset != 0) && (offset >= p->len)) { /* don't copy from this buffer -> on to the next */ offset -= p->len; } else { /* copy from this buffer. maybe only partially. */ buf_copy_len = p->len - offset; if (buf_copy_len > len) buf_copy_len = len; /* copy the necessary parts of the buffer */ MEMCPY(&((char*)dataptr)[left], &((char*)p->payload)[offset], buf_copy_len); copied_total += buf_copy_len; left += buf_copy_len; len -= buf_copy_len; offset = 0; } } return copied_total; } /** * Copy application supplied data into a pbuf. * This function can only be used to copy the equivalent of buf->tot_len data. * * @param buf pbuf to fill with data * @param dataptr application supplied data buffer * @param len length of the application supplied data buffer * * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough */ err_t pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len) { struct pbuf *p; u16_t buf_copy_len; u16_t total_copy_len = len; u16_t copied_total = 0; LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return 0;); LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return 0;); if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) { return ERR_ARG; } /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ for(p = buf; total_copy_len != 0; p = p->next) { LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL); buf_copy_len = total_copy_len; if (buf_copy_len > p->len) { /* this pbuf cannot hold all remaining data */ buf_copy_len = p->len; } /* copy the necessary parts of the buffer */ MEMCPY(p->payload, &((char*)dataptr)[copied_total], buf_copy_len); total_copy_len -= buf_copy_len; copied_total += buf_copy_len; } LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len); return ERR_OK; } /** * Creates a single pbuf out of a queue of pbufs. * * @remark: Either the source pbuf 'p' is freed by this function or the original * pbuf 'p' is returned, therefore the caller has to check the result! 
* * @param p the source pbuf * @param layer pbuf_layer of the new pbuf * * @return a new, single pbuf (p->next is NULL) * or the old pbuf if allocation fails */ struct pbuf* pbuf_coalesce(struct pbuf *p, pbuf_layer layer) { struct pbuf *q; err_t err; if (p->next == NULL) { return p; } q = pbuf_alloc(layer, p->tot_len, PBUF_RAM); if (q == NULL) { /* @todo: what do we do now? */ return p; } err = pbuf_copy(q, p); LWIP_ASSERT("pbuf_copy failed", err == ERR_OK); pbuf_free(p); return q; } #if LWIP_CHECKSUM_ON_COPY /** * Copies data into a single pbuf (*not* into a pbuf queue!) and updates * the checksum while copying * * @param p the pbuf to copy data into * @param start_offset offset of p->payload where to copy the data to * @param dataptr data to copy into the pbuf * @param len length of data to copy into the pbuf * @param chksum pointer to the checksum which is updated * @return ERR_OK if successful, another error if the data does not fit * within the (first) pbuf (no pbuf queues!) */ err_t pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, u16_t len, u16_t *chksum) { u32_t acc; u16_t copy_chksum; char *dst_ptr; LWIP_ASSERT("p != NULL", p != NULL); LWIP_ASSERT("dataptr != NULL", dataptr != NULL); LWIP_ASSERT("chksum != NULL", chksum != NULL); LWIP_ASSERT("len != 0", len != 0); if ((start_offset >= p->len) || (start_offset + len > p->len)) { return ERR_ARG; } dst_ptr = ((char*)p->payload) + start_offset; copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len); if ((start_offset & 1) != 0) { copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum); } acc = *chksum; acc += copy_chksum; *chksum = FOLD_U32T(acc); return ERR_OK; } #endif /* LWIP_CHECKSUM_ON_COPY */ /** Get one byte from the specified position in a pbuf * WARNING: returns zero for offset >= p->tot_len * * @param p pbuf to parse * @param offset offset into p of the byte to return * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len */ u8_t pbuf_get_at(struct pbuf* p, u16_t offset) { u16_t copy_from = offset; struct pbuf* q = p; /* get the correct pbuf */ while ((q != NULL) && (q->len <= copy_from)) { copy_from -= q->len; q = q->next; } /* return requested data if pbuf is OK */ if ((q != NULL) && (q->len > copy_from)) { return ((u8_t*)q->payload)[copy_from]; } return 0; } /** Compare pbuf contents at specified offset with memory s2, both of length n * * @param p pbuf to compare * @param offset offset into p at which to start comparing * @param s2 buffer to compare * @param n length of buffer to compare * @return zero if equal, nonzero otherwise * (0xffff if p is too short, diffoffset+1 otherwise) */ u16_t pbuf_memcmp(struct pbuf* p, u16_t offset, const void* s2, u16_t n) { u16_t start = offset; struct pbuf* q = p; /* get the correct pbuf */ while ((q != NULL) && (q->len <= start)) { start -= q->len; q = q->next; } /* return requested data if pbuf is OK */ if ((q != NULL) && (q->len > start)) { u16_t i; for(i = 0; i < n; i++) { u8_t a = pbuf_get_at(q, start + i); u8_t b = ((u8_t*)s2)[i]; if (a != b) { return i+1; } } return 0; } return 0xffff; } /** Find occurrence of mem (with length mem_len) in pbuf p, starting at offset * start_offset.
* * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as * return value 'not found' * @param mem search for the contents of this buffer * @param mem_len length of 'mem' * @param start_offset offset into p at which to start searching * @return 0xFFFF if substr was not found in p or the index where it was found */ u16_t pbuf_memfind(struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset) { u16_t i; u16_t max = p->tot_len - mem_len; if (p->tot_len >= mem_len + start_offset) { for(i = start_offset; i <= max; ) { u16_t plus = pbuf_memcmp(p, i, mem, mem_len); if (plus == 0) { return i; } else { i += plus; } } } return 0xFFFF; } /** Find occurrence of substr with length substr_len in pbuf p, start at offset * start_offset * WARNING: in contrast to strstr(), this one does not stop at the first \0 in * the pbuf/source string! * * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as * return value 'not found' * @param substr string to search for in p, maximum length is 0xFFFE * @return 0xFFFF if substr was not found in p or the index where it was found */ u16_t pbuf_strstr(struct pbuf* p, const char* substr) { size_t substr_len; if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) { return 0xFFFF; } substr_len = strlen(substr); if (substr_len >= 0xFFFF) { return 0xFFFF; } return pbuf_memfind(p, substr, (u16_t)substr_len, 0); }
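/*
 * Editor's sketch (not part of pbuf.c above): illustrates the chain
 * invariants documented in the file header -- tot_len of the first pbuf
 * covers the whole chain, and the last pbuf of a packet is the one with
 * tot_len == len, which is the documented loop end condition. Assumes an
 * initialized lwIP so the MEMP_PBUF_POOL pool exists.
 */
#include "lwip/pbuf.h"

static u16_t chain_bytes(struct pbuf *p)
{
  struct pbuf *q = p;
  u16_t sum = 0;

  for (;;) {
    sum += q->len;
    /* end of one packet: (q->tot_len == q->len), NOT (q->next == NULL) */
    if (q->tot_len == q->len)
      break;
    q = q->next;
  }
  return sum; /* equals p->tot_len for a well-formed chain */
}

void pbuf_invariant_demo(void)
{
  /* may come back as a chain of pool pbufs if 1000 > PBUF_POOL_BUFSIZE */
  struct pbuf *p = pbuf_alloc(PBUF_RAW, 1000, PBUF_POOL);
  u8_t buf[100];

  if (p == NULL)
    return;
  LWIP_ASSERT("chain invariant holds", chain_bytes(p) == p->tot_len);
  /* copy out the first 100 bytes, crossing pbuf boundaries transparently */
  pbuf_copy_partial(p, buf, sizeof(buf), 0);
  pbuf_free(p); /* one call releases the whole chain */
}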
jnouyang/kitten
net/pbuf.c
C
gpl-2.0
39,009
/* LUFA Library Copyright (C) Dean Camera, 2014. dean [at] fourwalledcubicle [dot] com www.lufa-lib.org */ /* Copyright 2014 Dean Camera (dean [at] fourwalledcubicle [dot] com) Permission to use, copy, modify, distribute, and sell this software and its documentation for any purpose is hereby granted without fee, provided that the above copyright notice appear in all copies and that both that the copyright notice and this permission notice and warranty disclaimer appear in supporting documentation, and that the name of the author not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. The author disclaims all warranties with regard to this software, including all implied warranties of merchantability and fitness. In no event shall the author be liable for any special, indirect or consequential damages or any damages whatsoever resulting from loss of use, data or profits, whether in an action of contract, negligence or other tortious action, arising out of or in connection with the use or performance of this software. */ /** \file * * Functions to manage the physical Dataflash media, including reading and writing of * blocks of data. These functions are called by the SCSI layer when data must be stored * or retrieved to/from the physical storage media. If a different media is used (such * as a SD card or EEPROM), functions similar to these will need to be generated. */ #define INCLUDE_FROM_DATAFLASHMANAGER_C #include "DataflashManager.h" /** Writes blocks (OS blocks, not Dataflash pages) to the storage medium, the board Dataflash IC(s), from * the pre-selected data OUT endpoint. This routine reads in OS sized blocks from the endpoint and writes * them to the Dataflash in Dataflash page sized blocks. * * \param[in] BlockAddress Data block starting address for the write sequence * \param[in] TotalBlocks Number of blocks of data to write */ void DataflashManager_WriteBlocks(const uint32_t BlockAddress, uint16_t TotalBlocks) { uint16_t CurrDFPage = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) / DATAFLASH_PAGE_SIZE); uint16_t CurrDFPageByte = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) % DATAFLASH_PAGE_SIZE); uint8_t CurrDFPageByteDiv16 = (CurrDFPageByte >> 4); bool UsingSecondBuffer = false; /* Select the correct starting Dataflash IC for the block requested */ Dataflash_SelectChipFromPage(CurrDFPage); #if (DATAFLASH_PAGE_SIZE > VIRTUAL_MEMORY_BLOCK_SIZE) /* Copy selected dataflash's current page contents to the Dataflash buffer */ Dataflash_SendByte(DF_CMD_MAINMEMTOBUFF1); Dataflash_SendAddressBytes(CurrDFPage, 0); Dataflash_WaitWhileBusy(); #endif /* Send the Dataflash buffer write command */ Dataflash_SendByte(DF_CMD_BUFF1WRITE); Dataflash_SendAddressBytes(0, CurrDFPageByte); /* Wait until endpoint is ready before continuing */ if (Endpoint_WaitUntilReady()) return; while (TotalBlocks) { uint8_t BytesInBlockDiv16 = 0; /* Write an endpoint packet sized data block to the Dataflash */ while (BytesInBlockDiv16 < (VIRTUAL_MEMORY_BLOCK_SIZE >> 4)) { /* Check if the endpoint is currently empty */ if (!(Endpoint_IsReadWriteAllowed())) { /* Clear the current endpoint bank */ Endpoint_ClearOUT(); /* Wait until the host has sent another packet */ if (Endpoint_WaitUntilReady()) return; } /* Check if end of Dataflash page reached */ if (CurrDFPageByteDiv16 == (DATAFLASH_PAGE_SIZE >> 4)) { /* Write the Dataflash buffer contents back to the Dataflash page */ Dataflash_WaitWhileBusy(); Dataflash_SendByte(UsingSecondBuffer ? 
DF_CMD_BUFF2TOMAINMEMWITHERASE : DF_CMD_BUFF1TOMAINMEMWITHERASE); Dataflash_SendAddressBytes(CurrDFPage, 0); /* Reset the Dataflash buffer counter, increment the page counter */ CurrDFPageByteDiv16 = 0; CurrDFPage++; /* Once all the Dataflash ICs have had their first buffers filled, switch buffers to maintain throughput */ if (Dataflash_GetSelectedChip() == DATAFLASH_CHIP_MASK(DATAFLASH_TOTALCHIPS)) UsingSecondBuffer = !(UsingSecondBuffer); /* Select the next Dataflash chip based on the new Dataflash page index */ Dataflash_SelectChipFromPage(CurrDFPage); #if (DATAFLASH_PAGE_SIZE > VIRTUAL_MEMORY_BLOCK_SIZE) /* If less than one Dataflash page remaining, copy over the existing page to preserve trailing data */ if ((TotalBlocks * (VIRTUAL_MEMORY_BLOCK_SIZE >> 4)) < (DATAFLASH_PAGE_SIZE >> 4)) { /* Copy selected dataflash's current page contents to the Dataflash buffer */ Dataflash_WaitWhileBusy(); Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_MAINMEMTOBUFF2 : DF_CMD_MAINMEMTOBUFF1); Dataflash_SendAddressBytes(CurrDFPage, 0); Dataflash_WaitWhileBusy(); } #endif /* Send the Dataflash buffer write command */ Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_BUFF2WRITE : DF_CMD_BUFF1WRITE); Dataflash_SendAddressBytes(0, 0); } /* Write one 16-byte chunk of data to the Dataflash */ Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); Dataflash_SendByte(Endpoint_Read_8()); /* Increment the Dataflash page 16 byte block counter */ CurrDFPageByteDiv16++; /* Increment the block 16 byte block counter */ BytesInBlockDiv16++; /* Check if the current command is being aborted by the host */ if (IsMassStoreReset) return; } /* Decrement the blocks remaining counter */ TotalBlocks--; } /* Write the Dataflash buffer contents back to the Dataflash page */ Dataflash_WaitWhileBusy(); Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_BUFF2TOMAINMEMWITHERASE : DF_CMD_BUFF1TOMAINMEMWITHERASE); Dataflash_SendAddressBytes(CurrDFPage, 0x00); Dataflash_WaitWhileBusy(); /* If the endpoint is empty, clear it ready for the next packet from the host */ if (!(Endpoint_IsReadWriteAllowed())) Endpoint_ClearOUT(); /* Deselect all Dataflash chips */ Dataflash_DeselectChip(); } /** Reads blocks (OS blocks, not Dataflash pages) from the storage medium, the board Dataflash IC(s), into * the pre-selected data IN endpoint. This routine reads in Dataflash page sized blocks from the Dataflash * and writes them in OS sized blocks to the endpoint. 
* * \param[in] BlockAddress Data block starting address for the read sequence * \param[in] TotalBlocks Number of blocks of data to read */ void DataflashManager_ReadBlocks(const uint32_t BlockAddress, uint16_t TotalBlocks) { uint16_t CurrDFPage = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) / DATAFLASH_PAGE_SIZE); uint16_t CurrDFPageByte = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) % DATAFLASH_PAGE_SIZE); uint8_t CurrDFPageByteDiv16 = (CurrDFPageByte >> 4); /* Select the correct starting Dataflash IC for the block requested */ Dataflash_SelectChipFromPage(CurrDFPage); /* Send the Dataflash main memory page read command */ Dataflash_SendByte(DF_CMD_MAINMEMPAGEREAD); Dataflash_SendAddressBytes(CurrDFPage, CurrDFPageByte); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); /* Wait until endpoint is ready before continuing */ if (Endpoint_WaitUntilReady()) return; while (TotalBlocks) { uint8_t BytesInBlockDiv16 = 0; /* Write an endpoint packet sized data block to the Dataflash */ while (BytesInBlockDiv16 < (VIRTUAL_MEMORY_BLOCK_SIZE >> 4)) { /* Check if the endpoint is currently full */ if (!(Endpoint_IsReadWriteAllowed())) { /* Clear the endpoint bank to send its contents to the host */ Endpoint_ClearIN(); /* Wait until the endpoint is ready for more data */ if (Endpoint_WaitUntilReady()) return; } /* Check if end of Dataflash page reached */ if (CurrDFPageByteDiv16 == (DATAFLASH_PAGE_SIZE >> 4)) { /* Reset the Dataflash buffer counter, increment the page counter */ CurrDFPageByteDiv16 = 0; CurrDFPage++; /* Select the next Dataflash chip based on the new Dataflash page index */ Dataflash_SelectChipFromPage(CurrDFPage); /* Send the Dataflash main memory page read command */ Dataflash_SendByte(DF_CMD_MAINMEMPAGEREAD); Dataflash_SendAddressBytes(CurrDFPage, 0); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); } /* Read one 16-byte chunk of data from the Dataflash */ Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); Endpoint_Write_8(Dataflash_ReceiveByte()); /* Increment the Dataflash page 16 byte block counter */ CurrDFPageByteDiv16++; /* Increment the block 16 byte block counter */ BytesInBlockDiv16++; /* Check if the current command is being aborted by the host */ if (IsMassStoreReset) return; } /* Decrement the blocks remaining counter */ TotalBlocks--; } /* If the endpoint is full, send its contents to the host */ if (!(Endpoint_IsReadWriteAllowed())) Endpoint_ClearIN(); /* Deselect all Dataflash chips */ Dataflash_DeselectChip(); } /** Writes blocks (OS blocks, not Dataflash pages) to the storage medium, the board Dataflash IC(s), from * the given RAM buffer. This routine reads in OS sized blocks from the buffer and writes them to the * Dataflash in Dataflash page sized blocks. This can be linked to FAT libraries to write files to the * Dataflash. 
* * \param[in] BlockAddress Data block starting address for the write sequence * \param[in] TotalBlocks Number of blocks of data to write * \param[in] BufferPtr Pointer to the data source RAM buffer */ void DataflashManager_WriteBlocks_RAM(const uint32_t BlockAddress, uint16_t TotalBlocks, uint8_t* BufferPtr) { uint16_t CurrDFPage = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) / DATAFLASH_PAGE_SIZE); uint16_t CurrDFPageByte = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) % DATAFLASH_PAGE_SIZE); uint8_t CurrDFPageByteDiv16 = (CurrDFPageByte >> 4); bool UsingSecondBuffer = false; /* Select the correct starting Dataflash IC for the block requested */ Dataflash_SelectChipFromPage(CurrDFPage); #if (DATAFLASH_PAGE_SIZE > VIRTUAL_MEMORY_BLOCK_SIZE) /* Copy selected dataflash's current page contents to the Dataflash buffer */ Dataflash_SendByte(DF_CMD_MAINMEMTOBUFF1); Dataflash_SendAddressBytes(CurrDFPage, 0); Dataflash_WaitWhileBusy(); #endif /* Send the Dataflash buffer write command */ Dataflash_SendByte(DF_CMD_BUFF1WRITE); Dataflash_SendAddressBytes(0, CurrDFPageByte); while (TotalBlocks) { uint8_t BytesInBlockDiv16 = 0; /* Write an endpoint packet sized data block to the Dataflash */ while (BytesInBlockDiv16 < (VIRTUAL_MEMORY_BLOCK_SIZE >> 4)) { /* Check if end of Dataflash page reached */ if (CurrDFPageByteDiv16 == (DATAFLASH_PAGE_SIZE >> 4)) { /* Write the Dataflash buffer contents back to the Dataflash page */ Dataflash_WaitWhileBusy(); Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_BUFF2TOMAINMEMWITHERASE : DF_CMD_BUFF1TOMAINMEMWITHERASE); Dataflash_SendAddressBytes(CurrDFPage, 0); /* Reset the Dataflash buffer counter, increment the page counter */ CurrDFPageByteDiv16 = 0; CurrDFPage++; /* Once all the Dataflash ICs have had their first buffers filled, switch buffers to maintain throughput */ if (Dataflash_GetSelectedChip() == DATAFLASH_CHIP_MASK(DATAFLASH_TOTALCHIPS)) UsingSecondBuffer = !(UsingSecondBuffer); /* Select the next Dataflash chip based on the new Dataflash page index */ Dataflash_SelectChipFromPage(CurrDFPage); #if (DATAFLASH_PAGE_SIZE > VIRTUAL_MEMORY_BLOCK_SIZE) /* If less than one Dataflash page remaining, copy over the existing page to preserve trailing data */ if ((TotalBlocks * (VIRTUAL_MEMORY_BLOCK_SIZE >> 4)) < (DATAFLASH_PAGE_SIZE >> 4)) { /* Copy selected dataflash's current page contents to the Dataflash buffer */ Dataflash_WaitWhileBusy(); Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_MAINMEMTOBUFF2 : DF_CMD_MAINMEMTOBUFF1); Dataflash_SendAddressBytes(CurrDFPage, 0); Dataflash_WaitWhileBusy(); } #endif /* Send the Dataflash buffer write command */ Dataflash_ToggleSelectedChipCS(); Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_BUFF2WRITE : DF_CMD_BUFF1WRITE); Dataflash_SendAddressBytes(0, 0); } /* Write one 16-byte chunk of data to the Dataflash */ for (uint8_t ByteNum = 0; ByteNum < 16; ByteNum++) Dataflash_SendByte(*(BufferPtr++)); /* Increment the Dataflash page 16 byte block counter */ CurrDFPageByteDiv16++; /* Increment the block 16 byte block counter */ BytesInBlockDiv16++; } /* Decrement the blocks remaining counter */ TotalBlocks--; } /* Write the Dataflash buffer contents back to the Dataflash page */ Dataflash_WaitWhileBusy(); Dataflash_SendByte(UsingSecondBuffer ? 
DF_CMD_BUFF2TOMAINMEMWITHERASE : DF_CMD_BUFF1TOMAINMEMWITHERASE); Dataflash_SendAddressBytes(CurrDFPage, 0x00); Dataflash_WaitWhileBusy(); /* Deselect all Dataflash chips */ Dataflash_DeselectChip(); } /** Reads blocks (OS blocks, not Dataflash pages) from the storage medium, the board Dataflash IC(s), into * the preallocated RAM buffer. This routine reads in Dataflash page sized blocks from the Dataflash * and writes them in OS sized blocks to the given buffer. This can be linked to FAT libraries to read * the files stored on the Dataflash. * * \param[in] BlockAddress Data block starting address for the read sequence * \param[in] TotalBlocks Number of blocks of data to read * \param[out] BufferPtr Pointer to the data destination RAM buffer */ void DataflashManager_ReadBlocks_RAM(const uint32_t BlockAddress, uint16_t TotalBlocks, uint8_t* BufferPtr) { uint16_t CurrDFPage = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) / DATAFLASH_PAGE_SIZE); uint16_t CurrDFPageByte = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) % DATAFLASH_PAGE_SIZE); uint8_t CurrDFPageByteDiv16 = (CurrDFPageByte >> 4); /* Select the correct starting Dataflash IC for the block requested */ Dataflash_SelectChipFromPage(CurrDFPage); /* Send the Dataflash main memory page read command */ Dataflash_SendByte(DF_CMD_MAINMEMPAGEREAD); Dataflash_SendAddressBytes(CurrDFPage, CurrDFPageByte); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); while (TotalBlocks) { uint8_t BytesInBlockDiv16 = 0; /* Write an endpoint packet sized data block to the Dataflash */ while (BytesInBlockDiv16 < (VIRTUAL_MEMORY_BLOCK_SIZE >> 4)) { /* Check if end of Dataflash page reached */ if (CurrDFPageByteDiv16 == (DATAFLASH_PAGE_SIZE >> 4)) { /* Reset the Dataflash buffer counter, increment the page counter */ CurrDFPageByteDiv16 = 0; CurrDFPage++; /* Select the next Dataflash chip based on the new Dataflash page index */ Dataflash_SelectChipFromPage(CurrDFPage); /* Send the Dataflash main memory page read command */ Dataflash_SendByte(DF_CMD_MAINMEMPAGEREAD); Dataflash_SendAddressBytes(CurrDFPage, 0); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); Dataflash_SendByte(0x00); } /* Read one 16-byte chunk of data from the Dataflash */ for (uint8_t ByteNum = 0; ByteNum < 16; ByteNum++) *(BufferPtr++) = Dataflash_ReceiveByte(); /* Increment the Dataflash page 16 byte block counter */ CurrDFPageByteDiv16++; /* Increment the block 16 byte block counter */ BytesInBlockDiv16++; } /* Decrement the blocks remaining counter */ TotalBlocks--; } /* Deselect all Dataflash chips */ Dataflash_DeselectChip(); } /** Disables the Dataflash memory write protection bits on the board Dataflash ICs, if enabled. 
*/ void DataflashManager_ResetDataflashProtections(void) { /* Select first Dataflash chip, send the read status register command */ Dataflash_SelectChip(DATAFLASH_CHIP1); Dataflash_SendByte(DF_CMD_GETSTATUS); /* Check if sector protection is enabled */ if (Dataflash_ReceiveByte() & DF_STATUS_SECTORPROTECTION_ON) { Dataflash_ToggleSelectedChipCS(); /* Send the commands to disable sector protection */ Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[0]); Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[1]); Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[2]); Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[3]); } /* Select second Dataflash chip (if present on selected board), send read status register command */ #if (DATAFLASH_TOTALCHIPS == 2) Dataflash_SelectChip(DATAFLASH_CHIP2); Dataflash_SendByte(DF_CMD_GETSTATUS); /* Check if sector protection is enabled */ if (Dataflash_ReceiveByte() & DF_STATUS_SECTORPROTECTION_ON) { Dataflash_ToggleSelectedChipCS(); /* Send the commands to disable sector protection */ Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[0]); Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[1]); Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[2]); Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[3]); } #endif /* Deselect current Dataflash chip */ Dataflash_DeselectChip(); } /** Performs a simple test on the attached Dataflash IC(s) to ensure that they are working. * * \return Boolean \c true if all media chips are working, \c false otherwise */ bool DataflashManager_CheckDataflashOperation(void) { uint8_t ReturnByte; /* Test first Dataflash IC is present and responding to commands */ Dataflash_SelectChip(DATAFLASH_CHIP1); Dataflash_SendByte(DF_CMD_READMANUFACTURERDEVICEINFO); ReturnByte = Dataflash_ReceiveByte(); Dataflash_DeselectChip(); /* If returned data is invalid, fail the command */ if (ReturnByte != DF_MANUFACTURER_ATMEL) return false; #if (DATAFLASH_TOTALCHIPS == 2) /* Test second Dataflash IC is present and responding to commands */ Dataflash_SelectChip(DATAFLASH_CHIP2); Dataflash_SendByte(DF_CMD_READMANUFACTURERDEVICEINFO); ReturnByte = Dataflash_ReceiveByte(); Dataflash_DeselectChip(); /* If returned data is invalid, fail the command */ if (ReturnByte != DF_MANUFACTURER_ATMEL) return false; #endif return true; }
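/* Illustrative sketch (not part of the original file): the block-to-page
 * arithmetic shared by DataflashManager_WriteBlocks_RAM() and
 * DataflashManager_ReadBlocks_RAM() above. The worked values assume
 * VIRTUAL_MEMORY_BLOCK_SIZE = 512 and DATAFLASH_PAGE_SIZE = 1024; the real
 * values come from the LUFA board Dataflash headers. */
static inline void Example_MapBlockAddress(const uint32_t BlockAddress,
                                           uint16_t* const Page,
                                           uint16_t* const PageByte,
                                           uint8_t* const ChunkIndex)
{
	/* Linear byte offset of the OS block within the Dataflash array */
	uint32_t ByteOffset = (BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE);

	*Page       = (ByteOffset / DATAFLASH_PAGE_SIZE); /* CurrDFPage */
	*PageByte   = (ByteOffset % DATAFLASH_PAGE_SIZE); /* CurrDFPageByte */
	*ChunkIndex = (*PageByte >> 4);                   /* CurrDFPageByteDiv16 */
}

/* With 512-byte blocks and 1024-byte pages, block 3 maps to page 1,
 * byte 512, chunk 32: two OS blocks fit in each Dataflash page. */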
ruriwo/ErgoThumb072_firmware
tmk_core/protocol/lufa/LUFA-git/Demos/Device/LowLevel/MassStorage/Lib/DataflashManager.c
C
gpl-2.0
19,637
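/* Host-side usage sketch for the DataflashManager RAM-buffer API in the file
 * above (an illustration, not part of the original sources). It assumes a
 * 512-byte VIRTUAL_MEMORY_BLOCK_SIZE and that the prototypes are pulled in
 * from the demo's DataflashManager.h. */
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

static bool Example_DataflashRoundTrip(void)
{
	uint8_t WriteBuf[512];
	uint8_t ReadBuf[512];

	/* Bail out early if the media chips do not answer the ID command */
	if (!DataflashManager_CheckDataflashOperation())
		return false;

	/* Clear any sector protection left enabled on the ICs */
	DataflashManager_ResetDataflashProtections();

	memset(WriteBuf, 0xA5, sizeof(WriteBuf));

	/* Write one OS block at block address 0, then read it back */
	DataflashManager_WriteBlocks_RAM(0, 1, WriteBuf);
	DataflashManager_ReadBlocks_RAM(0, 1, ReadBuf);

	return (memcmp(WriteBuf, ReadBuf, sizeof(WriteBuf)) == 0);
}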
/* * Renesas SuperH DMA Engine support * * base is drivers/dma/flsdma.c * * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * - DMA of SuperH does not have Hardware DMA chain mode. * - MAX DMA size is 16MB. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dmaengine.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/sh_dma.h> #include <linux/notifier.h> #include <linux/kdebug.h> #include <linux/spinlock.h> #include <linux/rculist.h> #include "dmaengine.h" #include "shdma.h" /* */ enum sh_dmae_desc_status { DESC_IDLE, DESC_PREPARED, DESC_SUBMITTED, DESC_COMPLETED, /* */ DESC_WAITING, /* */ }; #define NR_DESCS_PER_CHANNEL 32 /* */ #define LOG2_DEFAULT_XFER_SIZE 2 /* */ static DEFINE_SPINLOCK(sh_dmae_lock); static LIST_HEAD(sh_dmae_devices); /* */ static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan); static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) { struct sh_dmae_device *shdev = to_sh_dev(sh_dc); __raw_writel(data, shdev->chan_reg + shdev->pdata->channel[sh_dc->id].chclr_offset); } static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) { __raw_writel(data, sh_dc->base + reg / sizeof(u32)); } static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) { return __raw_readl(sh_dc->base + reg / sizeof(u32)); } static u16 dmaor_read(struct sh_dmae_device *shdev) { u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); if (shdev->pdata->dmaor_is_32bit) return __raw_readl(addr); else return __raw_readw(addr); } static void dmaor_write(struct sh_dmae_device *shdev, u16 data) { u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); if (shdev->pdata->dmaor_is_32bit) __raw_writel(data, addr); else __raw_writew(data, addr); } static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) { struct sh_dmae_device *shdev = to_sh_dev(sh_dc); __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32)); } static u32 chcr_read(struct sh_dmae_chan *sh_dc) { struct sh_dmae_device *shdev = to_sh_dev(sh_dc); return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32)); } /* */ static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) { unsigned short dmaor; unsigned long flags; spin_lock_irqsave(&sh_dmae_lock, flags); dmaor = dmaor_read(shdev); dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); spin_unlock_irqrestore(&sh_dmae_lock, flags); } static int sh_dmae_rst(struct sh_dmae_device *shdev) { unsigned short dmaor; unsigned long flags; spin_lock_irqsave(&sh_dmae_lock, flags); dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); if (shdev->pdata->chclr_present) { int i; for (i = 0; i < shdev->pdata->channel_num; i++) { struct sh_dmae_chan *sh_chan = shdev->chan[i]; if (sh_chan) chclr_write(sh_chan, 0); } } dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); dmaor = dmaor_read(shdev); spin_unlock_irqrestore(&sh_dmae_lock, flags); if (dmaor & 
(DMAOR_AE | DMAOR_NMIF)) { dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); return -EIO; } if (shdev->pdata->dmaor_init & ~dmaor) dev_warn(shdev->common.dev, "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", dmaor, shdev->pdata->dmaor_init); return 0; } static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) { u32 chcr = chcr_read(sh_chan); if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) return true; /* */ return false; /* */ } static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); if (cnt >= pdata->ts_shift_num) cnt = 0; return pdata->ts_shift[cnt]; } static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; int i; for (i = 0; i < pdata->ts_shift_num; i++) if (pdata->ts_shift[i] == l2size) break; if (i == pdata->ts_shift_num) i = 0; return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | ((i << pdata->ts_high_shift) & pdata->ts_high_mask); } static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) { sh_dmae_writel(sh_chan, hw->sar, SAR); sh_dmae_writel(sh_chan, hw->dar, DAR); sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); } static void dmae_start(struct sh_dmae_chan *sh_chan) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); u32 chcr = chcr_read(sh_chan); if (shdev->pdata->needs_tend_set) sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); chcr |= CHCR_DE | shdev->chcr_ie_bit; chcr_write(sh_chan, chcr & ~CHCR_TE); } static void dmae_halt(struct sh_dmae_chan *sh_chan) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); u32 chcr = chcr_read(sh_chan); chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); chcr_write(sh_chan, chcr); } static void dmae_init(struct sh_dmae_chan *sh_chan) { /* */ u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, LOG2_DEFAULT_XFER_SIZE); sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); chcr_write(sh_chan, chcr); } static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) { /* */ if (dmae_is_busy(sh_chan)) return -EBUSY; sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); chcr_write(sh_chan, val); return 0; } static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; u16 __iomem *addr = shdev->dmars; unsigned int shift = chan_pdata->dmars_bit; if (dmae_is_busy(sh_chan)) return -EBUSY; if (pdata->no_dmars) return 0; /* */ if (!addr) addr = (u16 __iomem *)shdev->chan_reg; addr += chan_pdata->dmars / sizeof(u16); __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), addr); return 0; } static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) { struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); struct sh_dmae_slave *param = tx->chan->private; dma_async_tx_callback callback = tx->callback; dma_cookie_t cookie; bool power_up; spin_lock_irq(&sh_chan->desc_lock); if (list_empty(&sh_chan->ld_queue)) power_up = true; else power_up = false; cookie = dma_cookie_assign(tx); /* */ list_for_each_entry_safe(chunk, c, desc->node.prev, node) { /* */ if (chunk != desc && (chunk->mark == DESC_IDLE || 
chunk->async_tx.cookie > 0 || chunk->async_tx.cookie == -EBUSY || &chunk->node == &sh_chan->ld_free)) break; chunk->mark = DESC_SUBMITTED; /* */ chunk->async_tx.callback = NULL; chunk->cookie = cookie; list_move_tail(&chunk->node, &sh_chan->ld_queue); last = chunk; } last->async_tx.callback = callback; last->async_tx.callback_param = tx->callback_param; dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n", tx->cookie, &last->async_tx, sh_chan->id, desc->hw.sar, desc->hw.tcr, desc->hw.dar); if (power_up) { sh_chan->pm_state = DMAE_PM_BUSY; pm_runtime_get(sh_chan->dev); spin_unlock_irq(&sh_chan->desc_lock); pm_runtime_barrier(sh_chan->dev); spin_lock_irq(&sh_chan->desc_lock); /* */ if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) { dev_dbg(sh_chan->dev, "Bring up channel %d\n", sh_chan->id); if (param) { const struct sh_dmae_slave_config *cfg = param->config; dmae_set_dmars(sh_chan, cfg->mid_rid); dmae_set_chcr(sh_chan, cfg->chcr); } else { dmae_init(sh_chan); } if (sh_chan->pm_state == DMAE_PM_PENDING) sh_chan_xfer_ld_queue(sh_chan); sh_chan->pm_state = DMAE_PM_ESTABLISHED; } } else { sh_chan->pm_state = DMAE_PM_PENDING; } spin_unlock_irq(&sh_chan->desc_lock); return cookie; } /* */ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) { struct sh_desc *desc; list_for_each_entry(desc, &sh_chan->ld_free, node) if (desc->mark != DESC_PREPARED) { BUG_ON(desc->mark != DESC_IDLE); list_del(&desc->node); return desc; } return NULL; } static const struct sh_dmae_slave_config *sh_dmae_find_slave( struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; int i; if (param->slave_id >= SH_DMA_SLAVE_NUMBER) return NULL; for (i = 0; i < pdata->slave_num; i++) if (pdata->slave[i].slave_id == param->slave_id) return pdata->slave + i; return NULL; } static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) { struct sh_dmae_chan *sh_chan = to_sh_chan(chan); struct sh_desc *desc; struct sh_dmae_slave *param = chan->private; int ret; /* */ if (param) { const struct sh_dmae_slave_config *cfg; cfg = sh_dmae_find_slave(sh_chan, param); if (!cfg) { ret = -EINVAL; goto efindslave; } if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) { ret = -EBUSY; goto etestused; } param->config = cfg; } while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); if (!desc) break; dma_async_tx_descriptor_init(&desc->async_tx, &sh_chan->common); desc->async_tx.tx_submit = sh_dmae_tx_submit; desc->mark = DESC_IDLE; list_add(&desc->node, &sh_chan->ld_free); sh_chan->descs_allocated++; } if (!sh_chan->descs_allocated) { ret = -ENOMEM; goto edescalloc; } return sh_chan->descs_allocated; edescalloc: if (param) clear_bit(param->slave_id, sh_dmae_slave_used); etestused: efindslave: chan->private = NULL; return ret; } /* */ static void sh_dmae_free_chan_resources(struct dma_chan *chan) { struct sh_dmae_chan *sh_chan = to_sh_chan(chan); struct sh_desc *desc, *_desc; LIST_HEAD(list); /* */ spin_lock_irq(&sh_chan->desc_lock); dmae_halt(sh_chan); spin_unlock_irq(&sh_chan->desc_lock); /* */ /* */ if (!list_empty(&sh_chan->ld_queue)) sh_dmae_chan_ld_cleanup(sh_chan, true); if (chan->private) { /* */ struct sh_dmae_slave *param = chan->private; clear_bit(param->slave_id, sh_dmae_slave_used); chan->private = NULL; } spin_lock_irq(&sh_chan->desc_lock); list_splice_init(&sh_chan->ld_free, &list); sh_chan->descs_allocated = 0; spin_unlock_irq(&sh_chan->desc_lock); 
list_for_each_entry_safe(desc, _desc, &list, node) kfree(desc); } /* */ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, struct sh_desc **first, enum dma_transfer_direction direction) { struct sh_desc *new; size_t copy_size; if (!*len) return NULL; /* */ new = sh_dmae_get_desc(sh_chan); if (!new) { dev_err(sh_chan->dev, "No free link descriptor available\n"); return NULL; } copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1); new->hw.sar = *src; new->hw.dar = *dest; new->hw.tcr = copy_size; if (!*first) { /* */ new->async_tx.cookie = -EBUSY; *first = new; } else { /* */ new->async_tx.cookie = -EINVAL; } dev_dbg(sh_chan->dev, "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n", copy_size, *len, *src, *dest, &new->async_tx, new->async_tx.cookie, sh_chan->xmit_shift); new->mark = DESC_PREPARED; new->async_tx.flags = flags; new->direction = direction; *len -= copy_size; if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) *src += copy_size; if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) *dest += copy_size; return new; } /* */ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, enum dma_transfer_direction direction, unsigned long flags) { struct scatterlist *sg; struct sh_desc *first = NULL, *new = NULL /* */; LIST_HEAD(tx_list); int chunks = 0; unsigned long irq_flags; int i; if (!sg_len) return NULL; for_each_sg(sgl, sg, sg_len, i) chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1); /* */ spin_lock_irqsave(&sh_chan->desc_lock, irq_flags); /* */ for_each_sg(sgl, sg, sg_len, i) { dma_addr_t sg_addr = sg_dma_address(sg); size_t len = sg_dma_len(sg); if (!len) goto err_get_desc; do { dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", i, sg, len, (unsigned long long)sg_addr); if (direction == DMA_DEV_TO_MEM) new = sh_dmae_add_desc(sh_chan, flags, &sg_addr, addr, &len, &first, direction); else new = sh_dmae_add_desc(sh_chan, flags, addr, &sg_addr, &len, &first, direction); if (!new) goto err_get_desc; new->chunks = chunks--; list_add_tail(&new->node, &tx_list); } while (len); } if (new != first) new->async_tx.cookie = -ENOSPC; /* */ list_splice_tail(&tx_list, &sh_chan->ld_free); spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags); return &first->async_tx; err_get_desc: list_for_each_entry(new, &tx_list, node) new->mark = DESC_IDLE; list_splice(&tx_list, &sh_chan->ld_free); spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags); return NULL; } static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { struct sh_dmae_chan *sh_chan; struct scatterlist sg; if (!chan || !len) return NULL; sh_chan = to_sh_chan(chan); sg_init_table(&sg, 1); sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, offset_in_page(dma_src)); sg_dma_address(&sg) = dma_src; sg_dma_len(&sg) = len; return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags); } static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct sh_dmae_slave *param; struct sh_dmae_chan *sh_chan; dma_addr_t slave_addr; if (!chan) return NULL; sh_chan = to_sh_chan(chan); param = chan->private; /* */ if (!param || !sg_len) { dev_warn(sh_chan->dev, "%s: bad 
parameter: %p, %d, %d\n", __func__, param, sg_len, param ? param->slave_id : -1); return NULL; } slave_addr = param->config->addr; /* */ return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr, direction, flags); } static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { struct sh_dmae_chan *sh_chan = to_sh_chan(chan); unsigned long flags; /* */ if (cmd != DMA_TERMINATE_ALL) return -ENXIO; if (!chan) return -EINVAL; spin_lock_irqsave(&sh_chan->desc_lock, flags); dmae_halt(sh_chan); if (!list_empty(&sh_chan->ld_queue)) { /* */ struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, struct sh_desc, node); desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << sh_chan->xmit_shift; } spin_unlock_irqrestore(&sh_chan->desc_lock, flags); sh_dmae_chan_ld_cleanup(sh_chan, true); return 0; } static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) { struct sh_desc *desc, *_desc; /* */ bool head_acked = false; dma_cookie_t cookie = 0; dma_async_tx_callback callback = NULL; void *param = NULL; unsigned long flags; spin_lock_irqsave(&sh_chan->desc_lock, flags); list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { struct dma_async_tx_descriptor *tx = &desc->async_tx; BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); BUG_ON(desc->mark != DESC_SUBMITTED && desc->mark != DESC_COMPLETED && desc->mark != DESC_WAITING); /* */ if (!all && desc->mark == DESC_SUBMITTED && desc->cookie != cookie) break; if (tx->cookie > 0) cookie = tx->cookie; if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { if (sh_chan->common.completed_cookie != desc->cookie - 1) dev_dbg(sh_chan->dev, "Completing cookie %d, expected %d\n", desc->cookie, sh_chan->common.completed_cookie + 1); sh_chan->common.completed_cookie = desc->cookie; } /* */ if (desc->mark == DESC_COMPLETED && tx->callback) { desc->mark = DESC_WAITING; callback = tx->callback; param = tx->callback_param; dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n", tx->cookie, tx, sh_chan->id); BUG_ON(desc->chunks != 1); break; } if (tx->cookie > 0 || tx->cookie == -EBUSY) { if (desc->mark == DESC_COMPLETED) { BUG_ON(tx->cookie < 0); desc->mark = DESC_WAITING; } head_acked = async_tx_test_ack(tx); } else { switch (desc->mark) { case DESC_COMPLETED: desc->mark = DESC_WAITING; /* */ case DESC_WAITING: if (head_acked) async_tx_ack(&desc->async_tx); } } dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n", tx, tx->cookie); if (((desc->mark == DESC_COMPLETED || desc->mark == DESC_WAITING) && async_tx_test_ack(&desc->async_tx)) || all) { /* */ desc->mark = DESC_IDLE; list_move(&desc->node, &sh_chan->ld_free); if (list_empty(&sh_chan->ld_queue)) { dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id); pm_runtime_put(sh_chan->dev); } } } if (all && !callback) /* */ sh_chan->common.completed_cookie = sh_chan->common.cookie; spin_unlock_irqrestore(&sh_chan->desc_lock, flags); if (callback) callback(param); return callback; } /* */ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) { while (__ld_cleanup(sh_chan, all)) ; } /* */ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) { struct sh_desc *desc; /* */ if (dmae_is_busy(sh_chan)) return; /* */ list_for_each_entry(desc, &sh_chan->ld_queue, node) if (desc->mark == DESC_SUBMITTED) { dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", desc->async_tx.cookie, sh_chan->id, desc->hw.tcr, desc->hw.sar, desc->hw.dar); /* */ dmae_set_reg(sh_chan, &desc->hw); dmae_start(sh_chan); break; } } 
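/*
 * Illustrative note, not in the original driver: one hardware descriptor
 * moves at most SH_DMA_TCR_MAX + 1 bytes, so sh_dmae_prep_sg() above splits
 * each scatterlist entry into ceil(len / (SH_DMA_TCR_MAX + 1)) chunks, and
 * dmae_set_reg() programs TCR in transfer units (hw->tcr >> xmit_shift).
 * A minimal restatement of the chunk-count arithmetic:
 */
static inline unsigned int example_sh_dmae_chunks(size_t len)
{
	/* same rounding-up division as the chunk count in sh_dmae_prep_sg() */
	return (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
}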
static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) { struct sh_dmae_chan *sh_chan = to_sh_chan(chan); spin_lock_irq(&sh_chan->desc_lock); if (sh_chan->pm_state == DMAE_PM_ESTABLISHED) sh_chan_xfer_ld_queue(sh_chan); else sh_chan->pm_state = DMAE_PM_PENDING; spin_unlock_irq(&sh_chan->desc_lock); } static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct sh_dmae_chan *sh_chan = to_sh_chan(chan); enum dma_status status; unsigned long flags; sh_dmae_chan_ld_cleanup(sh_chan, false); spin_lock_irqsave(&sh_chan->desc_lock, flags); status = dma_cookie_status(chan, cookie, txstate); /* */ if (status != DMA_SUCCESS) { struct sh_desc *desc; status = DMA_ERROR; list_for_each_entry(desc, &sh_chan->ld_queue, node) if (desc->cookie == cookie) { status = DMA_IN_PROGRESS; break; } } spin_unlock_irqrestore(&sh_chan->desc_lock, flags); return status; } static irqreturn_t sh_dmae_interrupt(int irq, void *data) { irqreturn_t ret = IRQ_NONE; struct sh_dmae_chan *sh_chan = data; u32 chcr; spin_lock(&sh_chan->desc_lock); chcr = chcr_read(sh_chan); if (chcr & CHCR_TE) { /* */ dmae_halt(sh_chan); ret = IRQ_HANDLED; tasklet_schedule(&sh_chan->tasklet); } spin_unlock(&sh_chan->desc_lock); return ret; } /* */ static bool sh_dmae_reset(struct sh_dmae_device *shdev) { unsigned int handled = 0; int i; /* */ sh_dmae_ctl_stop(shdev); /* */ for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { struct sh_dmae_chan *sh_chan = shdev->chan[i]; struct sh_desc *desc; LIST_HEAD(dl); if (!sh_chan) continue; spin_lock(&sh_chan->desc_lock); /* */ dmae_halt(sh_chan); list_splice_init(&sh_chan->ld_queue, &dl); if (!list_empty(&dl)) { dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id); pm_runtime_put(sh_chan->dev); } sh_chan->pm_state = DMAE_PM_ESTABLISHED; spin_unlock(&sh_chan->desc_lock); /* */ list_for_each_entry(desc, &dl, node) { struct dma_async_tx_descriptor *tx = &desc->async_tx; desc->mark = DESC_IDLE; if (tx->callback) tx->callback(tx->callback_param); } spin_lock(&sh_chan->desc_lock); list_splice(&dl, &sh_chan->ld_free); spin_unlock(&sh_chan->desc_lock); handled++; } sh_dmae_rst(shdev); return !!handled; } static irqreturn_t sh_dmae_err(int irq, void *data) { struct sh_dmae_device *shdev = data; if (!(dmaor_read(shdev) & DMAOR_AE)) return IRQ_NONE; sh_dmae_reset(data); return IRQ_HANDLED; } static void dmae_do_tasklet(unsigned long data) { struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; struct sh_desc *desc; u32 sar_buf = sh_dmae_readl(sh_chan, SAR); u32 dar_buf = sh_dmae_readl(sh_chan, DAR); spin_lock_irq(&sh_chan->desc_lock); list_for_each_entry(desc, &sh_chan->ld_queue, node) { if (desc->mark == DESC_SUBMITTED && ((desc->direction == DMA_DEV_TO_MEM && (desc->hw.dar + desc->hw.tcr) == dar_buf) || (desc->hw.sar + desc->hw.tcr) == sar_buf)) { dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", desc->async_tx.cookie, &desc->async_tx, desc->hw.dar); desc->mark = DESC_COMPLETED; break; } } /* */ sh_chan_xfer_ld_queue(sh_chan); spin_unlock_irq(&sh_chan->desc_lock); sh_dmae_chan_ld_cleanup(sh_chan, false); } static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) { /* */ if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) return false; return sh_dmae_reset(shdev); } static int sh_dmae_nmi_handler(struct notifier_block *self, unsigned long cmd, void *data) { struct sh_dmae_device *shdev; int ret = NOTIFY_DONE; bool triggered; /* */ if (!in_nmi()) return NOTIFY_DONE; rcu_read_lock(); list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { /* */ 
triggered = sh_dmae_nmi_notify(shdev); if (triggered == true) ret = NOTIFY_OK; } rcu_read_unlock(); return ret; } static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { .notifier_call = sh_dmae_nmi_handler, /* */ .priority = 1, }; static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, int irq, unsigned long flags) { int err; const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; struct platform_device *pdev = to_platform_device(shdev->common.dev); struct sh_dmae_chan *new_sh_chan; /* */ new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); if (!new_sh_chan) { dev_err(shdev->common.dev, "No free memory for allocating dma channels!\n"); return -ENOMEM; } new_sh_chan->pm_state = DMAE_PM_ESTABLISHED; /* */ new_sh_chan->common.device = &shdev->common; dma_cookie_init(&new_sh_chan->common); new_sh_chan->dev = shdev->common.dev; new_sh_chan->id = id; new_sh_chan->irq = irq; new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); /* */ tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, (unsigned long)new_sh_chan); spin_lock_init(&new_sh_chan->desc_lock); /* */ INIT_LIST_HEAD(&new_sh_chan->ld_queue); INIT_LIST_HEAD(&new_sh_chan->ld_free); /* */ list_add_tail(&new_sh_chan->common.device_node, &shdev->common.channels); shdev->common.chancnt++; if (pdev->id >= 0) snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), "sh-dmae%d.%d", pdev->id, new_sh_chan->id); else snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), "sh-dma%d", new_sh_chan->id); /* */ err = request_irq(irq, &sh_dmae_interrupt, flags, new_sh_chan->dev_id, new_sh_chan); if (err) { dev_err(shdev->common.dev, "DMA channel %d request_irq error " "with return %d\n", id, err); goto err_no_irq; } shdev->chan[id] = new_sh_chan; return 0; err_no_irq: /* */ list_del(&new_sh_chan->common.device_node); kfree(new_sh_chan); return err; } static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) { int i; for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { if (shdev->chan[i]) { struct sh_dmae_chan *sh_chan = shdev->chan[i]; free_irq(sh_chan->irq, sh_chan); list_del(&sh_chan->common.device_node); kfree(sh_chan); shdev->chan[i] = NULL; } } shdev->common.chancnt = 0; } static int __init sh_dmae_probe(struct platform_device *pdev) { struct sh_dmae_pdata *pdata = pdev->dev.platform_data; unsigned long irqflags = IRQF_DISABLED, chan_flag[SH_DMAC_MAX_CHANNELS] = {}; int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; struct sh_dmae_device *shdev; struct resource *chan, *dmars, *errirq_res, *chanirq_res; /* */ if (!pdata || !pdata->channel_num) return -ENODEV; chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* */ dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); /* */ errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!chan || !errirq_res) return -ENODEV; if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { dev_err(&pdev->dev, "DMAC register region already claimed\n"); return -EBUSY; } if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); err = -EBUSY; goto ermrdmars; } err = -ENOMEM; shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); if (!shdev) { dev_err(&pdev->dev, "Not enough memory\n"); goto ealloc; } shdev->chan_reg = ioremap(chan->start, resource_size(chan)); if (!shdev->chan_reg) goto emapchan; if (dmars) { shdev->dmars = ioremap(dmars->start, resource_size(dmars)); if 
(!shdev->dmars) goto emapdmars; } /* */ shdev->pdata = pdata; if (pdata->chcr_offset) shdev->chcr_offset = pdata->chcr_offset; else shdev->chcr_offset = CHCR; if (pdata->chcr_ie_bit) shdev->chcr_ie_bit = pdata->chcr_ie_bit; else shdev->chcr_ie_bit = CHCR_IE; platform_set_drvdata(pdev, shdev); shdev->common.dev = &pdev->dev; pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); spin_lock_irq(&sh_dmae_lock); list_add_tail_rcu(&shdev->node, &sh_dmae_devices); spin_unlock_irq(&sh_dmae_lock); /* */ err = sh_dmae_rst(shdev); if (err) goto rst_err; INIT_LIST_HEAD(&shdev->common.channels); if (!pdata->slave_only) dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); if (pdata->slave && pdata->slave_num) dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); shdev->common.device_alloc_chan_resources = sh_dmae_alloc_chan_resources; shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; shdev->common.device_tx_status = sh_dmae_tx_status; shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; /* */ shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; shdev->common.device_control = sh_dmae_control; /* */ shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); if (!chanirq_res) chanirq_res = errirq_res; else irqres++; if (chanirq_res == errirq_res || (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) irqflags = IRQF_SHARED; errirq = errirq_res->start; err = request_irq(errirq, sh_dmae_err, irqflags, "DMAC Address Error", shdev); if (err) { dev_err(&pdev->dev, "DMA failed requesting irq #%d, error %d\n", errirq, err); goto eirq_err; } #else chanirq_res = errirq_res; #endif /* */ if (chanirq_res->start == chanirq_res->end && !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { /* */ for (; irq_cnt < pdata->channel_num; irq_cnt++) { if (irq_cnt < SH_DMAC_MAX_CHANNELS) { chan_irq[irq_cnt] = chanirq_res->start; chan_flag[irq_cnt] = IRQF_SHARED; } else { irq_cap = 1; break; } } } else { do { for (i = chanirq_res->start; i <= chanirq_res->end; i++) { if (irq_cnt >= SH_DMAC_MAX_CHANNELS) { irq_cap = 1; break; } if ((errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) chan_flag[irq_cnt] = IRQF_SHARED; else chan_flag[irq_cnt] = IRQF_DISABLED; dev_dbg(&pdev->dev, "Found IRQ %d for channel %d\n", i, irq_cnt); chan_irq[irq_cnt++] = i; } if (irq_cnt >= SH_DMAC_MAX_CHANNELS) break; chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, ++irqres); } while (irq_cnt < pdata->channel_num && chanirq_res); } /* */ for (i = 0; i < irq_cnt; i++) { err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); if (err) goto chan_probe_err; } if (irq_cap) dev_notice(&pdev->dev, "Attempting to register %d DMA " "channels when a maximum of %d are supported.\n", pdata->channel_num, SH_DMAC_MAX_CHANNELS); pm_runtime_put(&pdev->dev); dma_async_device_register(&shdev->common); return err; chan_probe_err: sh_dmae_chan_remove(shdev); #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) free_irq(errirq, shdev); eirq_err: #endif rst_err: spin_lock_irq(&sh_dmae_lock); list_del_rcu(&shdev->node); spin_unlock_irq(&sh_dmae_lock); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); if (dmars) iounmap(shdev->dmars); platform_set_drvdata(pdev, NULL); emapdmars: iounmap(shdev->chan_reg); synchronize_rcu(); emapchan: kfree(shdev); ealloc: if (dmars) release_mem_region(dmars->start, 
resource_size(dmars)); ermrdmars: release_mem_region(chan->start, resource_size(chan)); return err; } static int __exit sh_dmae_remove(struct platform_device *pdev) { struct sh_dmae_device *shdev = platform_get_drvdata(pdev); struct resource *res; int errirq = platform_get_irq(pdev, 0); dma_async_device_unregister(&shdev->common); if (errirq > 0) free_irq(errirq, shdev); spin_lock_irq(&sh_dmae_lock); list_del_rcu(&shdev->node); spin_unlock_irq(&sh_dmae_lock); /* */ sh_dmae_chan_remove(shdev); pm_runtime_disable(&pdev->dev); if (shdev->dmars) iounmap(shdev->dmars); iounmap(shdev->chan_reg); platform_set_drvdata(pdev, NULL); synchronize_rcu(); kfree(shdev); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) release_mem_region(res->start, resource_size(res)); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) release_mem_region(res->start, resource_size(res)); return 0; } static void sh_dmae_shutdown(struct platform_device *pdev) { struct sh_dmae_device *shdev = platform_get_drvdata(pdev); sh_dmae_ctl_stop(shdev); } static int sh_dmae_runtime_suspend(struct device *dev) { return 0; } static int sh_dmae_runtime_resume(struct device *dev) { struct sh_dmae_device *shdev = dev_get_drvdata(dev); return sh_dmae_rst(shdev); } #ifdef CONFIG_PM static int sh_dmae_suspend(struct device *dev) { return 0; } static int sh_dmae_resume(struct device *dev) { struct sh_dmae_device *shdev = dev_get_drvdata(dev); int i, ret; ret = sh_dmae_rst(shdev); if (ret < 0) dev_err(dev, "Failed to reset!\n"); for (i = 0; i < shdev->pdata->channel_num; i++) { struct sh_dmae_chan *sh_chan = shdev->chan[i]; struct sh_dmae_slave *param = sh_chan->common.private; if (!sh_chan->descs_allocated) continue; if (param) { const struct sh_dmae_slave_config *cfg = param->config; dmae_set_dmars(sh_chan, cfg->mid_rid); dmae_set_chcr(sh_chan, cfg->chcr); } else { dmae_init(sh_chan); } } return 0; } #else #define sh_dmae_suspend NULL #define sh_dmae_resume NULL #endif const struct dev_pm_ops sh_dmae_pm = { .suspend = sh_dmae_suspend, .resume = sh_dmae_resume, .runtime_suspend = sh_dmae_runtime_suspend, .runtime_resume = sh_dmae_runtime_resume, }; static struct platform_driver sh_dmae_driver = { .remove = __exit_p(sh_dmae_remove), .shutdown = sh_dmae_shutdown, .driver = { .owner = THIS_MODULE, .name = "sh-dma-engine", .pm = &sh_dmae_pm, }, }; static int __init sh_dmae_init(void) { /* */ int err = register_die_notifier(&sh_dmae_nmi_notifier); if (err) return err; return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); } module_init(sh_dmae_init); static void __exit sh_dmae_exit(void) { platform_driver_unregister(&sh_dmae_driver); unregister_die_notifier(&sh_dmae_nmi_notifier); } module_exit(sh_dmae_exit); MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:sh-dma-engine");
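/* Client-side usage sketch (illustration only, not part of this driver):
 * driving a memcpy through the generic dmaengine API that sh_dmae_probe()
 * registers above. Mapping of src/dst and error handling are elided, hence
 * the block is compiled out. */
#if 0	/* sketch only */
static void example_shdma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (tx) {
		cookie = tx->tx_submit(tx);	/* lands in sh_dmae_tx_submit() */
		dma_async_issue_pending(chan);	/* sh_dmae_memcpy_issue_pending() */
		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
							== DMA_IN_PROGRESS)
			cpu_relax();
	}
	dma_release_channel(chan);
}
#endif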
aicjofs/android_kernel_lge_v500_stock
drivers/dma/shdma.c
C
gpl-2.0
39,306
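/* Slave-channel usage sketch for the sh-dma-engine driver above
 * (illustration only): clients of this era hand a struct sh_dmae_slave to
 * the channel through a dma_request_channel() filter; the slave_id is then
 * matched against pdata->slave[] in sh_dmae_find_slave(). The slave_id
 * below is a board-specific placeholder. */
#if 0	/* sketch only */
static bool example_shdma_filter(struct dma_chan *chan, void *arg)
{
	/* claim the channel and expose the slave config to
	 * sh_dmae_alloc_chan_resources() via chan->private */
	chan->private = arg;
	return true;
}

static struct dma_chan *example_request_slave_chan(void)
{
	static struct sh_dmae_slave param = {
		.slave_id = 0,		/* placeholder slave ID */
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_shdma_filter, &param);
}
#endif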
/* * Core maple bus functionality * * Copyright (C) 2007 - 2009 Adrian McMenamin * Copyright (C) 2001 - 2008 Paul Mundt * Copyright (C) 2000 - 2001 YAEGASHI Takeshi * Copyright (C) 2001 M. R. Brown * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/maple.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/module.h> #include <asm/cacheflush.h> #include <asm/dma.h> #include <asm/io.h> #include <mach/dma.h> #include <mach/sysasic.h> MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); MODULE_LICENSE("GPL v2"); MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}"); static void maple_dma_handler(struct work_struct *work); static void maple_vblank_handler(struct work_struct *work); static DECLARE_WORK(maple_dma_process, maple_dma_handler); static DECLARE_WORK(maple_vblank_process, maple_vblank_handler); static LIST_HEAD(maple_waitq); static LIST_HEAD(maple_sentq); /* */ static DEFINE_MUTEX(maple_wlist_lock); static struct maple_driver maple_unsupported_device; static struct device maple_bus; static int subdevice_map[MAPLE_PORTS]; static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; static unsigned long maple_pnp_time; static int started, scanning, fullscan; static struct kmem_cache *maple_queue_cache; struct maple_device_specify { int port; int unit; }; static bool checked[MAPLE_PORTS]; static bool empty[MAPLE_PORTS]; static struct maple_device *baseunits[MAPLE_PORTS]; /* */ int maple_driver_register(struct maple_driver *drv) { if (!drv) return -EINVAL; drv->drv.bus = &maple_bus_type; return driver_register(&drv->drv); } EXPORT_SYMBOL_GPL(maple_driver_register); /* */ void maple_driver_unregister(struct maple_driver *drv) { driver_unregister(&drv->drv); } EXPORT_SYMBOL_GPL(maple_driver_unregister); /* */ static void maple_dma_reset(void) { __raw_writel(MAPLE_MAGIC, MAPLE_RESET); /* */ __raw_writel(1, MAPLE_TRIGTYPE); /* */ __raw_writel(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED); __raw_writel(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR); __raw_writel(1, MAPLE_ENABLE); } /* */ void maple_getcond_callback(struct maple_device *dev, void (*callback) (struct mapleq *mq), unsigned long interval, unsigned long function) { dev->callback = callback; dev->interval = interval; dev->function = cpu_to_be32(function); dev->when = jiffies; } EXPORT_SYMBOL_GPL(maple_getcond_callback); static int maple_dma_done(void) { return (__raw_readl(MAPLE_STATE) & 1) == 0; } static void maple_release_device(struct device *dev) { struct maple_device *mdev; struct mapleq *mq; mdev = to_maple_dev(dev); mq = mdev->mq; kmem_cache_free(maple_queue_cache, mq->recvbuf); kfree(mq); kfree(mdev); } /* */ int maple_add_packet(struct maple_device *mdev, u32 function, u32 command, size_t length, void *data) { int ret = 0; void *sendbuf = NULL; if (length) { sendbuf = kzalloc(length * 4, GFP_KERNEL); if (!sendbuf) { ret = -ENOMEM; goto out; } ((__be32 *)sendbuf)[0] = cpu_to_be32(function); } mdev->mq->command = command; mdev->mq->length = length; if (length > 1) memcpy(sendbuf + 4, data, (length - 1) * 4); mdev->mq->sendbuf = sendbuf; mutex_lock(&maple_wlist_lock); list_add_tail(&mdev->mq->list, &maple_waitq); 
mutex_unlock(&maple_wlist_lock); out: return ret; } EXPORT_SYMBOL_GPL(maple_add_packet); static struct mapleq *maple_allocq(struct maple_device *mdev) { struct mapleq *mq; mq = kzalloc(sizeof(*mq), GFP_KERNEL); if (!mq) goto failed_nomem; INIT_LIST_HEAD(&mq->list); mq->dev = mdev; mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); if (!mq->recvbuf) goto failed_p2; mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]); return mq; failed_p2: kfree(mq); failed_nomem: dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n", mdev->port, mdev->unit); return NULL; } static struct maple_device *maple_alloc_dev(int port, int unit) { struct maple_device *mdev; /* */ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return NULL; mdev->port = port; mdev->unit = unit; mdev->mq = maple_allocq(mdev); if (!mdev->mq) { kfree(mdev); return NULL; } mdev->dev.bus = &maple_bus_type; mdev->dev.parent = &maple_bus; init_waitqueue_head(&mdev->maple_wait); return mdev; } static void maple_free_dev(struct maple_device *mdev) { kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf); kfree(mdev->mq); kfree(mdev); } /* */ static void maple_build_block(struct mapleq *mq) { int port, unit, from, to, len; unsigned long *lsendbuf = mq->sendbuf; port = mq->dev->port & 3; unit = mq->dev->unit; len = mq->length; from = port << 6; to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20); *maple_lastptr &= 0x7fffffff; maple_lastptr = maple_sendptr; *maple_sendptr++ = (port << 16) | len | 0x80000000; *maple_sendptr++ = virt_to_phys(mq->recvbuf->buf); *maple_sendptr++ = mq->command | (to << 8) | (from << 16) | (len << 24); while (len-- > 0) *maple_sendptr++ = *lsendbuf++; } /* */ static void maple_send(void) { int i, maple_packets = 0; struct mapleq *mq, *nmq; if (!maple_dma_done()) return; /* */ __raw_writel(0, MAPLE_ENABLE); if (!list_empty(&maple_sentq)) goto finish; mutex_lock(&maple_wlist_lock); if (list_empty(&maple_waitq)) { mutex_unlock(&maple_wlist_lock); goto finish; } maple_lastptr = maple_sendbuf; maple_sendptr = maple_sendbuf; list_for_each_entry_safe(mq, nmq, &maple_waitq, list) { maple_build_block(mq); list_del_init(&mq->list); list_add_tail(&mq->list, &maple_sentq); if (maple_packets++ > MAPLE_MAXPACKETS) break; } mutex_unlock(&maple_wlist_lock); if (maple_packets > 0) { for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, PAGE_SIZE, DMA_BIDIRECTIONAL); } finish: maple_dma_reset(); } /* */ static int maple_check_matching_driver(struct device_driver *driver, void *devptr) { struct maple_driver *maple_drv; struct maple_device *mdev; mdev = devptr; maple_drv = to_maple_driver(driver); if (mdev->devinfo.function & cpu_to_be32(maple_drv->function)) return 1; return 0; } static void maple_detach_driver(struct maple_device *mdev) { device_unregister(&mdev->dev); } /* */ static void maple_attach_driver(struct maple_device *mdev) { char *p, *recvbuf; unsigned long function; int matched, error; recvbuf = mdev->mq->recvbuf->buf; /* */ memcpy(&mdev->devinfo.function, recvbuf + 4, 4); memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12); memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1); memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1); memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30); memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2); memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2); memcpy(mdev->product_name, mdev->devinfo.product_name, 30); mdev->product_name[30] = '\0'; memcpy(mdev->product_licence, 
mdev->devinfo.product_licence, 60); mdev->product_licence[60] = '\0'; for (p = mdev->product_name + 29; mdev->product_name <= p; p--) if (*p == ' ') *p = '\0'; else break; for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--) if (*p == ' ') *p = '\0'; else break; function = be32_to_cpu(mdev->devinfo.function); dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n", mdev->product_name, function, mdev->port, mdev->unit); if (function > 0x200) { /* */ function = 0; mdev->driver = &maple_unsupported_device; dev_set_name(&mdev->dev, "%d:0.port", mdev->port); } else { matched = bus_for_each_drv(&maple_bus_type, NULL, mdev, maple_check_matching_driver); if (matched == 0) { /* */ dev_info(&mdev->dev, "no driver found\n"); mdev->driver = &maple_unsupported_device; } dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port, mdev->unit, function); } mdev->function = function; mdev->dev.release = &maple_release_device; atomic_set(&mdev->busy, 0); error = device_register(&mdev->dev); if (error) { dev_warn(&mdev->dev, "could not register device at" " (%d, %d), with error 0x%X\n", mdev->unit, mdev->port, error); maple_free_dev(mdev); mdev = NULL; return; } } /* */ static int check_maple_device(struct device *device, void *portptr) { struct maple_device_specify *ds; struct maple_device *mdev; ds = portptr; mdev = to_maple_dev(device); if (mdev->port == ds->port && mdev->unit == ds->unit) return 1; return 0; } static int setup_maple_commands(struct device *device, void *ignored) { int add; struct maple_device *mdev = to_maple_dev(device); if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 && time_after(jiffies, mdev->when)) { /* */ add = maple_add_packet(mdev, be32_to_cpu(mdev->devinfo.function), MAPLE_COMMAND_GETCOND, 1, NULL); if (!add) mdev->when = jiffies + mdev->interval; } else { if (time_after(jiffies, maple_pnp_time)) /* */ if (atomic_read(&mdev->busy) == 0) { atomic_set(&mdev->busy, 1); maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 0, NULL); } } return 0; } /* */ static void maple_vblank_handler(struct work_struct *work) { int x, locking; struct maple_device *mdev; if (!maple_dma_done()) return; __raw_writel(0, MAPLE_ENABLE); if (!list_empty(&maple_sentq)) goto finish; /* */ bus_for_each_dev(&maple_bus_type, NULL, NULL, setup_maple_commands); if (time_after(jiffies, maple_pnp_time)) { /* */ for (x = 0; x < MAPLE_PORTS; x++) { if (checked[x] && empty[x]) { mdev = baseunits[x]; if (!mdev) break; atomic_set(&mdev->busy, 1); locking = maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 0, NULL); if (!locking) break; } } maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL; } finish: maple_send(); } /* */ static void maple_map_subunits(struct maple_device *mdev, int submask) { int retval, k, devcheck; struct maple_device *mdev_add; struct maple_device_specify ds; ds.port = mdev->port; for (k = 0; k < 5; k++) { ds.unit = k + 1; retval = bus_for_each_dev(&maple_bus_type, NULL, &ds, check_maple_device); if (retval) { submask = submask >> 1; continue; } devcheck = submask & 0x01; if (devcheck) { mdev_add = maple_alloc_dev(mdev->port, k + 1); if (!mdev_add) return; atomic_set(&mdev_add->busy, 1); maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO, 0, NULL); /* */ scanning = 1; } submask = submask >> 1; } } /* */ static void maple_clean_submap(struct maple_device *mdev) { int killbit; killbit = (mdev->unit > 0 ? 
(1 << (mdev->unit - 1)) & 0x1f : 0x20); killbit = ~killbit; killbit &= 0xFF; subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit; } /* */ static void maple_response_none(struct maple_device *mdev) { maple_clean_submap(mdev); if (likely(mdev->unit != 0)) { /* */ if (mdev->can_unload) { if (!mdev->can_unload(mdev)) { atomic_set(&mdev->busy, 2); wake_up(&mdev->maple_wait); return; } } dev_info(&mdev->dev, "detaching device at (%d, %d)\n", mdev->port, mdev->unit); maple_detach_driver(mdev); return; } else { if (!started || !fullscan) { if (checked[mdev->port] == false) { checked[mdev->port] = true; empty[mdev->port] = true; dev_info(&mdev->dev, "no devices" " to port %d\n", mdev->port); } return; } } /* */ atomic_set(&mdev->busy, 0); } /* */ static void maple_response_devinfo(struct maple_device *mdev, char *recvbuf) { char submask; if (!started || (scanning == 2) || !fullscan) { if ((mdev->unit == 0) && (checked[mdev->port] == false)) { checked[mdev->port] = true; maple_attach_driver(mdev); } else { if (mdev->unit != 0) maple_attach_driver(mdev); if (mdev->unit == 0) { empty[mdev->port] = false; maple_attach_driver(mdev); } } } if (mdev->unit == 0) { submask = recvbuf[2] & 0x1F; if (submask ^ subdevice_map[mdev->port]) { maple_map_subunits(mdev, submask); subdevice_map[mdev->port] = submask; } } } static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf) { if (mdev->fileerr_handler) { mdev->fileerr_handler(mdev, recvbuf); return; } else dev_warn(&mdev->dev, "device at (%d, %d) reports" "file error 0x%X\n", mdev->port, mdev->unit, ((int *)recvbuf)[1]); } static void maple_port_rescan(void) { int i; struct maple_device *mdev; fullscan = 1; for (i = 0; i < MAPLE_PORTS; i++) { if (checked[i] == false) { fullscan = 0; mdev = baseunits[i]; maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 0, NULL); } } } /* */ static void maple_dma_handler(struct work_struct *work) { struct mapleq *mq, *nmq; struct maple_device *mdev; char *recvbuf; enum maple_code code; if (!maple_dma_done()) return; __raw_writel(0, MAPLE_ENABLE); if (!list_empty(&maple_sentq)) { list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { mdev = mq->dev; recvbuf = mq->recvbuf->buf; dma_cache_sync(&mdev->dev, recvbuf, 0x400, DMA_FROM_DEVICE); code = recvbuf[0]; kfree(mq->sendbuf); list_del_init(&mq->list); switch (code) { case MAPLE_RESPONSE_NONE: maple_response_none(mdev); break; case MAPLE_RESPONSE_DEVINFO: maple_response_devinfo(mdev, recvbuf); atomic_set(&mdev->busy, 0); break; case MAPLE_RESPONSE_DATATRF: if (mdev->callback) mdev->callback(mq); atomic_set(&mdev->busy, 0); wake_up(&mdev->maple_wait); break; case MAPLE_RESPONSE_FILEERR: maple_response_fileerr(mdev, recvbuf); atomic_set(&mdev->busy, 0); wake_up(&mdev->maple_wait); break; case MAPLE_RESPONSE_AGAIN: case MAPLE_RESPONSE_BADCMD: case MAPLE_RESPONSE_BADFUNC: dev_warn(&mdev->dev, "non-fatal error" " 0x%X at (%d, %d)\n", code, mdev->port, mdev->unit); atomic_set(&mdev->busy, 0); break; case MAPLE_RESPONSE_ALLINFO: dev_notice(&mdev->dev, "extended" " device information request for (%d, %d)" " but call is not supported\n", mdev->port, mdev->unit); atomic_set(&mdev->busy, 0); break; case MAPLE_RESPONSE_OK: atomic_set(&mdev->busy, 0); wake_up(&mdev->maple_wait); break; default: break; } } /* */ if (scanning == 1) { maple_send(); scanning = 2; } else scanning = 0; /* */ if (!fullscan) maple_port_rescan(); /* */ started = 1; } maple_send(); } static irqreturn_t maple_dma_interrupt(int irq, void *dev_id) { /* */ schedule_work(&maple_dma_process); 
return IRQ_HANDLED; } static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id) { schedule_work(&maple_vblank_process); return IRQ_HANDLED; } static int maple_set_dma_interrupt_handler(void) { return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt, IRQF_SHARED, "maple bus DMA", &maple_unsupported_device); } static int maple_set_vblank_interrupt_handler(void) { return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt, IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device); } static int maple_get_dma_buffer(void) { maple_sendbuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, MAPLE_DMA_PAGES); if (!maple_sendbuf) return -ENOMEM; return 0; } static int maple_match_bus_driver(struct device *devptr, struct device_driver *drvptr) { struct maple_driver *maple_drv = to_maple_driver(drvptr); struct maple_device *maple_dev = to_maple_dev(devptr); /* */ if (maple_dev->devinfo.function == 0xFFFFFFFF) return 0; else if (maple_dev->devinfo.function & cpu_to_be32(maple_drv->function)) return 1; return 0; } static int maple_bus_uevent(struct device *dev, struct kobj_uevent_env *env) { return 0; } static void maple_bus_release(struct device *dev) { } static struct maple_driver maple_unsupported_device = { .drv = { .name = "maple_unsupported_device", .bus = &maple_bus_type, }, }; /* */ struct bus_type maple_bus_type = { .name = "maple", .match = maple_match_bus_driver, .uevent = maple_bus_uevent, }; EXPORT_SYMBOL_GPL(maple_bus_type); static struct device maple_bus = { .init_name = "maple", .release = maple_bus_release, }; static int __init maple_bus_init(void) { int retval, i; struct maple_device *mdev[MAPLE_PORTS]; __raw_writel(0, MAPLE_ENABLE); retval = device_register(&maple_bus); if (retval) goto cleanup; retval = bus_register(&maple_bus_type); if (retval) goto cleanup_device; retval = driver_register(&maple_unsupported_device.drv); if (retval) goto cleanup_bus; /* */ retval = maple_get_dma_buffer(); if (retval) { dev_err(&maple_bus, "failed to allocate DMA buffers\n"); goto cleanup_basic; } /* */ retval = maple_set_dma_interrupt_handler(); if (retval) { dev_err(&maple_bus, "bus failed to grab maple " "DMA IRQ\n"); goto cleanup_dma; } /* */ retval = maple_set_vblank_interrupt_handler(); if (retval) { dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n"); goto cleanup_irq; } maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN); if (!maple_queue_cache) goto cleanup_bothirqs; INIT_LIST_HEAD(&maple_waitq); INIT_LIST_HEAD(&maple_sentq); /* */ for (i = 0; i < MAPLE_PORTS; i++) { checked[i] = false; empty[i] = false; mdev[i] = maple_alloc_dev(i, 0); if (!mdev[i]) { while (i-- > 0) maple_free_dev(mdev[i]); goto cleanup_cache; } baseunits[i] = mdev[i]; atomic_set(&mdev[i]->busy, 1); maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL); subdevice_map[i] = 0; } maple_pnp_time = jiffies + HZ; /* */ maple_send(); dev_info(&maple_bus, "bus core now registered\n"); return 0; cleanup_cache: kmem_cache_destroy(maple_queue_cache); cleanup_bothirqs: free_irq(HW_EVENT_VSYNC, 0); cleanup_irq: free_irq(HW_EVENT_MAPLE_DMA, 0); cleanup_dma: free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES); cleanup_basic: driver_unregister(&maple_unsupported_device.drv); cleanup_bus: bus_unregister(&maple_bus_type); cleanup_device: device_unregister(&maple_bus); cleanup: printk(KERN_ERR "Maple bus registration failed\n"); return retval; } /* */ fs_initcall(maple_bus_init);
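/* Minimal client sketch for the maple bus core above (illustration, not
 * part of the file): a maple_driver binds by function bits, and a bound
 * device can ask the bus to poll its condition via maple_getcond_callback().
 * The function code and probe wiring are assumptions modelled on the
 * in-tree Dreamcast input drivers. */
#if 0	/* sketch only */
#define EXAMPLE_MAPLE_FUNC_CONTROLLER	0x001	/* assumed function bit */

static void example_maple_condition(struct mapleq *mq)
{
	/* mq->recvbuf->buf carries the GETCOND response for the device */
}

static int example_maple_probe(struct device *dev)
{
	struct maple_device *mdev = to_maple_dev(dev);

	/* have maple_vblank_handler() queue GETCOND roughly every 50ms */
	maple_getcond_callback(mdev, example_maple_condition, HZ / 20,
			       EXAMPLE_MAPLE_FUNC_CONTROLLER);
	return 0;
}

static struct maple_driver example_maple_driver = {
	.function = EXAMPLE_MAPLE_FUNC_CONTROLLER,
	.drv = {
		.name  = "example_maple",
		.probe = example_maple_probe,
	},
};

static int __init example_maple_init(void)
{
	return maple_driver_register(&example_maple_driver);
}
#endif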
holyangel/LGE_G3
drivers/sh/maple/maple.c
C
gpl-2.0
21,950
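/* Worked example (illustration only) of the maple frame addressing used by
 * maple_build_block() in the bus core above: the top two bits of the
 * address byte carry the port, 0x20 selects the port's main unit, and bits
 * 0-4 select one of up to five subunits. */
#include <stdint.h>

static uint8_t example_maple_addr(int port, int unit)
{
	/* same encoding as 'to' in maple_build_block() */
	return (uint8_t)((port << 6) |
			 (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20));
}

/* e.g. port 1, unit 0 -> 0x60 (main unit); port 0, unit 2 -> 0x02 */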
/* * OMAP5 thermal driver. * * Copyright (C) 2011-2012 Texas Instruments Inc. * Contact: * Eduardo Valentin <eduardo.valentin@ti.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <mach/ctrl_module_core_54xx.h> #include "omap-bandgap.h" /* TODO: Remove this, ES2.0 samples won't allow this to be programmable*/ #define OMAP5430_MPU_TSHUT_HOT 928 /* 119 degC */ #define OMAP5430_MPU_TSHUT_COLD 900 #define OMAP5430_GPU_TSHUT_HOT 916 /* 114 degC */ #define OMAP5430_GPU_TSHUT_COLD 900 #define OMAP5430_CORE_TSHUT_HOT 943 /* 125 degC */ #define OMAP5430_CORE_TSHUT_COLD 900 /* * OMAP5430 has three instances of thermal sensor for MPU, GPU & CORE, * need to describe the individual registers and bit fields. */ /* * OMAP5430 MPU thermal sensor register offset and bit-fields */ static struct temp_sensor_registers omap5430_mpu_temp_sensor_registers = { .temp_sensor_ctrl = OMAP5_CTRL_MODULE_CORE_TEMP_SENSOR_MPU, .bgap_tempsoff_mask = OMAP5_BGAP_TMPSOFF_MPU_MASK, .bgap_eocz_mask = OMAP5_BGAP_EOCZ_MPU_MASK, .bgap_dtemp_mask = OMAP5_BGAP_DTEMP_MPU_MASK, .bgap_mask_ctrl = OMAP5_CTRL_MODULE_CORE_BANDGAP_MASK, .mask_hot_mask = OMAP5_MASK_HOT_MPU_MASK, .mask_cold_mask = OMAP5_MASK_COLD_MPU_MASK, .mask_sidlemode_mask = OMAP5_SIDLEMODE_MASK, .mask_freeze_mask = OMAP5_FREEZE_MPU_MASK, .mask_clear_mask = OMAP5_CLEAR_MPU_MASK, .mask_clear_accum_mask = OMAP5_CLEAR_ACCUM_MPU_MASK, .bgap_counter = OMAP5_CTRL_MODULE_CORE_BANDGAP_MASK, .counter_mask = OMAP5_COUNTER_DELAY_MASK, .bgap_threshold = OMAP5_CTRL_MODULE_CORE_BANDGAP_THRESHOLD_MPU, .threshold_thot_mask = OMAP5_THOLD_HOT_MPU_MASK, .threshold_tcold_mask = OMAP5_THOLD_COLD_MPU_MASK, .tshut_threshold = OMAP5_CTRL_MODULE_CORE_BANDGAP_TSHUT_MPU, .tshut_efuse_shift = OMAP5_TSHUT_MUXCTRL_MPU_SHIFT, .tshut_efuse_mask = OMAP5_TSHUT_MUXCTRL_MPU_MASK, .tshut_hot_mask = OMAP5_TSHUT_HOT_MPU_MASK, .tshut_cold_mask = OMAP5_TSHUT_COLD_MPU_MASK, .bgap_status = OMAP5_CTRL_MODULE_CORE_BANDGAP_STATUS, .status_clean_stop_mask = 0x0, .status_bgap_alert_mask = OMAP5_ALERT_MASK, .status_hot_mask = OMAP5_HOT_MPU_MASK, .status_cold_mask = OMAP5_COLD_MPU_MASK, .bgap_cumul_dtemp = OMAP5_CTRL_MODULE_CORE_BANDGAP_CUMUL_DTEMP_MPU, .ctrl_dtemp_0 = OMAP5_CTRL_MODULE_CORE_DTEMP_MPU_0, .ctrl_dtemp_1 = OMAP5_CTRL_MODULE_CORE_DTEMP_MPU_1, .ctrl_dtemp_2 = OMAP5_CTRL_MODULE_CORE_DTEMP_MPU_2, .ctrl_dtemp_3 = OMAP5_CTRL_MODULE_CORE_DTEMP_MPU_3, .ctrl_dtemp_4 = OMAP5_CTRL_MODULE_CORE_DTEMP_MPU_4, .bgap_efuse = OMAP5_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP_MPU, }; /* * OMAP5430 GPU thermal sensor register offset and bit-fields */ static struct temp_sensor_registers omap5430_gpu_temp_sensor_registers = { .temp_sensor_ctrl = OMAP5_CTRL_MODULE_CORE_TEMP_SENSOR_MM, .bgap_tempsoff_mask = OMAP5_BGAP_TMPSOFF_MM_MASK, .bgap_eocz_mask = OMAP5_BGAP_EOCZ_MM_MASK, .bgap_dtemp_mask = OMAP5_BGAP_DTEMP_MM_MASK, .bgap_mask_ctrl = OMAP5_CTRL_MODULE_CORE_BANDGAP_MASK, .mask_hot_mask = OMAP5_MASK_HOT_MM_MASK, .mask_cold_mask = OMAP5_MASK_COLD_MM_MASK, .mask_sidlemode_mask = OMAP5_SIDLEMODE_MASK, .mask_freeze_mask = OMAP5_FREEZE_MM_MASK, .mask_clear_mask = OMAP5_CLEAR_MM_MASK, .mask_clear_accum_mask = 
OMAP5_CLEAR_ACCUM_MM_MASK, .bgap_counter = OMAP5_CTRL_MODULE_CORE_BANDGAP_MASK, .counter_mask = OMAP5_COUNTER_DELAY_MASK, .bgap_threshold = OMAP5_CTRL_MODULE_CORE_BANDGAP_THRESHOLD_MM, .threshold_thot_mask = OMAP5_THOLD_HOT_MM_MASK, .threshold_tcold_mask = OMAP5_THOLD_COLD_MM_MASK, .tshut_threshold = OMAP5_CTRL_MODULE_CORE_BANDGAP_TSHUT_MM, .tshut_efuse_shift = OMAP5_TSHUT_MUXCTRL_MM_SHIFT, .tshut_efuse_mask = OMAP5_TSHUT_MUXCTRL_MM_MASK, .tshut_hot_mask = OMAP5_TSHUT_HOT_MM_MASK, .tshut_cold_mask = OMAP5_TSHUT_COLD_MM_MASK, .bgap_status = OMAP5_CTRL_MODULE_CORE_BANDGAP_STATUS, .status_clean_stop_mask = 0x0, .status_bgap_alert_mask = OMAP5_ALERT_MASK, .status_hot_mask = OMAP5_HOT_MM_MASK, .status_cold_mask = OMAP5_COLD_MM_MASK, .bgap_cumul_dtemp = OMAP5_CTRL_MODULE_CORE_BANDGAP_CUMUL_DTEMP_MM, .ctrl_dtemp_0 = OMAP5_CTRL_MODULE_CORE_DTEMP_MM_0, .ctrl_dtemp_1 = OMAP5_CTRL_MODULE_CORE_DTEMP_MM_1, .ctrl_dtemp_2 = OMAP5_CTRL_MODULE_CORE_DTEMP_MM_2, .ctrl_dtemp_3 = OMAP5_CTRL_MODULE_CORE_DTEMP_MM_3, .ctrl_dtemp_4 = OMAP5_CTRL_MODULE_CORE_DTEMP_MM_4, .bgap_efuse = OMAP5_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP_MM, }; /* * OMAP5430 CORE thermal sensor register offset and bit-fields */ static struct temp_sensor_registers omap5430_core_temp_sensor_registers = { .temp_sensor_ctrl = OMAP5_CTRL_MODULE_CORE_TEMP_SENSOR_CORE, .bgap_tempsoff_mask = OMAP5_BGAP_TMPSOFF_CORE_MASK, .bgap_eocz_mask = OMAP5_BGAP_EOCZ_CORE_MASK, .bgap_dtemp_mask = OMAP5_BGAP_DTEMP_CORE_MASK, .bgap_mask_ctrl = OMAP5_CTRL_MODULE_CORE_BANDGAP_MASK, .mask_hot_mask = OMAP5_MASK_HOT_CORE_MASK, .mask_cold_mask = OMAP5_MASK_COLD_CORE_MASK, .mask_sidlemode_mask = OMAP5_SIDLEMODE_MASK, .mask_freeze_mask = OMAP5_FREEZE_CORE_MASK, .mask_clear_mask = OMAP5_CLEAR_CORE_MASK, .mask_clear_accum_mask = OMAP5_CLEAR_ACCUM_CORE_MASK, .bgap_counter = OMAP5_CTRL_MODULE_CORE_BANDGAP_MASK, .counter_mask = OMAP5_COUNTER_DELAY_MASK, .bgap_threshold = OMAP5_CTRL_MODULE_CORE_BANDGAP_THRESHOLD_CORE, .threshold_thot_mask = OMAP5_THOLD_HOT_CORE_MASK, .threshold_tcold_mask = OMAP5_THOLD_COLD_CORE_MASK, .tshut_threshold = OMAP5_CTRL_MODULE_CORE_BANDGAP_TSHUT_CORE, .tshut_efuse_shift = OMAP5_TSHUT_MUXCTRL_CORE_SHIFT, .tshut_efuse_mask = OMAP5_TSHUT_MUXCTRL_CORE_MASK, .tshut_hot_mask = OMAP5_TSHUT_HOT_CORE_MASK, .tshut_cold_mask = OMAP5_TSHUT_COLD_CORE_MASK, .bgap_status = OMAP5_CTRL_MODULE_CORE_BANDGAP_STATUS, .status_clean_stop_mask = 0x0, .status_bgap_alert_mask = OMAP5_ALERT_MASK, .status_hot_mask = OMAP5_HOT_CORE_MASK, .status_cold_mask = OMAP5_COLD_CORE_MASK, .bgap_cumul_dtemp = OMAP5_CTRL_MODULE_CORE_BANDGAP_CUMUL_DTEMP_CORE, .ctrl_dtemp_0 = OMAP5_CTRL_MODULE_CORE_DTEMP_CORE_0, .ctrl_dtemp_1 = OMAP5_CTRL_MODULE_CORE_DTEMP_CORE_1, .ctrl_dtemp_2 = OMAP5_CTRL_MODULE_CORE_DTEMP_CORE_2, .ctrl_dtemp_3 = OMAP5_CTRL_MODULE_CORE_DTEMP_CORE_3, .ctrl_dtemp_4 = OMAP5_CTRL_MODULE_CORE_DTEMP_CORE_4, .bgap_efuse = OMAP5_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP_CORE, }; /* Thresholds and limits for OMAP5430 MPU temperature sensor */ static struct temp_sensor_data omap5430_mpu_temp_sensor_data = { .tshut_hot = OMAP5430_MPU_TSHUT_HOT, .tshut_cold = OMAP5430_MPU_TSHUT_COLD, .t_hot = OMAP5430_MPU_T_HOT, .t_cold = OMAP5430_MPU_T_COLD, .min_freq = OMAP5430_MPU_MIN_FREQ, .max_freq = OMAP5430_MPU_MAX_FREQ, .max_temp = OMAP5430_MPU_MAX_TEMP, .min_temp = OMAP5430_MPU_MIN_TEMP, .hyst_val = OMAP5430_MPU_HYST_VAL, .adc_start_val = OMAP5430_ES2_ADC_START_VALUE, .adc_end_val = OMAP5430_ES2_ADC_END_VALUE, .update_int1 = 1000, .update_int2 = 2000, .stats_en = 1, .avg_number = 20, .avg_period = 100, 
.safe_temp_trend = 50, }; /* Thresholds and limits for OMAP5430 GPU temperature sensor */ static struct temp_sensor_data omap5430_gpu_temp_sensor_data = { .tshut_hot = OMAP5430_GPU_TSHUT_HOT, .tshut_cold = OMAP5430_GPU_TSHUT_COLD, .t_hot = OMAP5430_GPU_T_HOT, .t_cold = OMAP5430_GPU_T_COLD, .min_freq = OMAP5430_GPU_MIN_FREQ, .max_freq = OMAP5430_GPU_MAX_FREQ, .max_temp = OMAP5430_GPU_MAX_TEMP, .min_temp = OMAP5430_GPU_MIN_TEMP, .hyst_val = OMAP5430_GPU_HYST_VAL, .adc_start_val = OMAP5430_ES2_ADC_START_VALUE, .adc_end_val = OMAP5430_ES2_ADC_END_VALUE, .update_int1 = 1000, .update_int2 = 2000, .stats_en = 1, .avg_number = 20, .avg_period = 100, .safe_temp_trend = 50, }; /* Thresholds and limits for OMAP5430 CORE temperature sensor */ static struct temp_sensor_data omap5430_core_temp_sensor_data = { .tshut_hot = OMAP5430_CORE_TSHUT_HOT, .tshut_cold = OMAP5430_CORE_TSHUT_COLD, .t_hot = OMAP5430_CORE_T_HOT, .t_cold = OMAP5430_CORE_T_COLD, .min_freq = OMAP5430_CORE_MIN_FREQ, .max_freq = OMAP5430_CORE_MAX_FREQ, .max_temp = OMAP5430_CORE_MAX_TEMP, .min_temp = OMAP5430_CORE_MIN_TEMP, .hyst_val = OMAP5430_CORE_HYST_VAL, .adc_start_val = OMAP5430_ES2_ADC_START_VALUE, .adc_end_val = OMAP5430_ES2_ADC_END_VALUE, .update_int1 = 1000, .update_int2 = 2000, .stats_en = 1, .avg_number = 20, .avg_period = 100, .safe_temp_trend = 50, }; /* * OMAP54xx ES2.0 : Temperature values in milli degree celsius * ADC code values from 540 to 945 */ static int omap5430_adc_to_temp[ OMAP5430_ES2_ADC_END_VALUE - OMAP5430_ES2_ADC_START_VALUE + 1] = { /* Index 540 - 549 */ -40000, -40000, -40000, -40000, -39800, -39400, -39000, -38600, -38200, -37800, /* Index 550 - 559 */ -37400, -37000, -36600, -36200, -35800, -35300, -34700, -34200, -33800, -33400, /* Index 560 - 569 */ -33000, -32600, -32200, -31800, -31400, -31000, -30600, -30200, -29800, -29400, /* Index 570 - 579 */ -29000, -28600, -28200, -27700, -27100, -26600, -26200, -25800, -25400, -25000, /* Index 580 - 589 */ -24600, -24200, -23800, -23400, -23000, -22600, -22200, -21600, -21400, -21000, /* Index 590 - 599 */ -20500, -19900, -19400, -19000, -18600, -18200, -17800, -17400, -17000, -16600, /* Index 600 - 609 */ -16200, -15800, -15400, -15000, -14600, -14200, -13800, -13400, -13000, -12500, /* Index 610 - 619 */ -11900, -11400, -11000, -10600, -10200, -9800, -9400, -9000, -8600, -8200, /* Index 620 - 629 */ -7800, -7400, -7000, -6600, -6200, -5800, -5400, -5000, -4500, -3900, /* Index 630 - 639 */ -3400, -3000, -2600, -2200, -1800, -1400, -1000, -600, -200, 200, /* Index 640 - 649 */ 600, 1000, 1400, 1800, 2200, 2600, 3000, 3400, 3900, 4500, /* Index 650 - 659 */ 5000, 5400, 5800, 6200, 6600, 7000, 7400, 7800, 8200, 8600, /* Index 660 - 669 */ 9000, 9400, 9800, 10200, 10600, 11000, 11400, 11800, 12200, 12700, /* Index 670 - 679 */ 13300, 13800, 14200, 14600, 15000, 15400, 15800, 16200, 16600, 17000, /* Index 680 - 689 */ 17400, 17800, 18200, 18600, 19000, 19400, 19800, 20200, 20600, 21100, /* Index 690 - 699 */ 21400, 21900, 22500, 23000, 23400, 23800, 24200, 24600, 25000, 25400, /* Index 700 - 709 */ 25800, 26200, 26600, 27000, 27400, 27800, 28200, 28600, 29000, 29400, /* Index 710 - 719 */ 29800, 30200, 30600, 31000, 31400, 31900, 32500, 33000, 33400, 33800, /* Index 720 - 729 */ 34200, 34600, 35000, 35400, 35800, 36200, 36600, 37000, 37400, 37800, /* Index 730 - 739 */ 38200, 38600, 39000, 39400, 39800, 40200, 40600, 41000, 41400, 41800, /* Index 740 - 749 */ 42200, 42600, 43100, 43700, 44200, 44600, 45000, 45400, 45800, 46200, /* Index 750 - 759 */ 46600, 47000, 
47400, 47800, 48200, 48600, 49000, 49400, 49800, 50200, /* Index 760 - 769 */ 50600, 51000, 51400, 51800, 52200, 52600, 53000, 53400, 53800, 54200, /* Index 770 - 779 */ 54600, 55000, 55400, 55900, 56500, 57000, 57400, 57800, 58200, 58600, /* Index 780 - 789 */ 59000, 59400, 59800, 60200, 60600, 61000, 61400, 61800, 62200, 62600, /* Index 790 - 799 */ 63000, 63400, 63800, 64200, 64600, 65000, 65400, 65800, 66200, 66600, /* Index 800 - 809 */ 67000, 67400, 67800, 68200, 68600, 69000, 69400, 69800, 70200, 70600, /* Index 810 - 819 */ 71000, 71500, 72100, 72600, 73000, 73400, 73800, 74200, 74600, 75000, /* Index 820 - 829 */ 75400, 75800, 76200, 76600, 77000, 77400, 77800, 78200, 78600, 79000, /* Index 830 - 839 */ 79400, 79800, 80200, 80600, 81000, 81400, 81800, 82200, 82600, 83000, /* Index 840 - 849 */ 83400, 83800, 84200, 84600, 85000, 85400, 85800, 86200, 86600, 87000, /* Index 850 - 859 */ 87400, 87800, 88200, 88600, 89000, 89400, 89800, 90200, 90600, 91000, /* Index 860 - 869 */ 91400, 91800, 92200, 92600, 93000, 93400, 93800, 94200, 94600, 95000, /* Index 870 - 879 */ 95400, 95800, 96200, 96600, 97000, 97500, 98100, 98600, 99000, 99400, /* Index 880 - 889 */ 99800, 100200, 100600, 101000, 101400, 101800, 102200, 102600, 103000, 103400, /* Index 890 - 899 */ 103800, 104200, 104600, 105000, 105400, 105800, 106200, 106600, 107000, 107400, /* Index 900 - 909 */ 107800, 108200, 108600, 109000, 109400, 109800, 110200, 110600, 111000, 111400, /* Index 910 - 919 */ 111800, 112200, 112600, 113000, 113400, 113800, 114200, 114600, 115000, 115400, /* Index 920 - 929 */ 115800, 116200, 116600, 117000, 117400, 117800, 118200, 118600, 119000, 119400, /* Index 930 - 939 */ 119800, 120200, 120600, 121000, 121400, 121800, 122400, 122600, 123000, 123400, /* Index 940 - 945 */ 123800, 124200, 124600, 124900, 125000, 125000, }; /* OMAP54xx ES2.0 data */ /* TODO: Need to update the slope/constant for ES2.0 silicon */ struct omap_bandgap_data omap5430_data = { .features = OMAP_BANDGAP_FEATURE_TSHUT_CONFIG | OMAP_BANDGAP_FEATURE_FREEZE_BIT | OMAP_BANDGAP_FEATURE_TALERT, .fclock_name = "l3instr_ts_gclk_div", .div_ck_name = "l3instr_ts_gclk_div", .conv_table = omap5430_adc_to_temp, .report_temperature = omap_thermal_report_temperature, .expose_sensor = omap_thermal_expose_sensor, .remove_sensor = omap_thermal_remove_sensor, .sensors = { { .registers = &omap5430_mpu_temp_sensor_registers, .ts_data = &omap5430_mpu_temp_sensor_data, .domain = "cpu", .slope = 118, .constant = -2992, }, { .registers = &omap5430_gpu_temp_sensor_registers, .ts_data = &omap5430_gpu_temp_sensor_data, .domain = "gpu", .slope = 61, .constant = -1558, }, { .registers = &omap5430_core_temp_sensor_registers, .ts_data = &omap5430_core_temp_sensor_data, .domain = "core", .slope = 0, .constant = 0, }, }, .sensor_count = 3, };
bsmitty83/kernel_omap
drivers/thermal/omap5-bg-sen-data.c
C
gpl-2.0
13,846
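The omap5430_adc_to_temp table in the file above converts raw bandgap ADC codes to milli-degrees Celsius by plain array indexing, with index 0 corresponding to ADC code 540. Below is a minimal standalone sketch of that lookup: ADC_START/ADC_END mirror the 540..945 range stated in the file's comments, while adc_code_to_mcelsius and the stand-in conv_table are hypothetical names for illustration, not symbols from the driver.

#include <stdio.h>

/* Calibrated ADC code range, per the table comments above (assumption). */
#define ADC_START 540
#define ADC_END   945

/* Stand-in for omap5430_adc_to_temp[]; only the indexing math matters here. */
static int conv_table[ADC_END - ADC_START + 1];

/* Hypothetical helper: convert a raw DTEMP ADC code to milli-degrees C. */
static int adc_code_to_mcelsius(int adc)
{
	if (adc < ADC_START)
		adc = ADC_START;	/* clamp below the calibrated range */
	if (adc > ADC_END)
		adc = ADC_END;		/* clamp above it */
	return conv_table[adc - ADC_START];
}

int main(void)
{
	conv_table[640 - ADC_START] = 600;	/* from the real table: code 640 -> 0.6 degC */
	printf("%d mC\n", adc_code_to_mcelsius(640));
	return 0;
}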
/* * linux/fs/ext4/xattr.c * * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de> * * Fix by Harrison Xing <harrison@mountainviewdata.com>. * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>. * Extended attributes for symlinks and special files added per * suggestion of Luka Renko <luka.renko@hermes.si>. * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>, * Red Hat Inc. * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz * and Andreas Gruenbacher <agruen@suse.de>. */ /* * Extended attributes are stored directly in inodes (on file systems with * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl * field contains the block number if an inode uses an additional block. All * attributes must fit in the inode and one additional block. Blocks that * contain the identical set of attributes may be shared among several inodes. * Identical blocks are detected by keeping a cache of blocks that have * recently been accessed. * * The attributes in inodes and on blocks have a different header; the entries * are stored in the same format: * * +------------------+ * | header | * | entry 1 | | * | entry 2 | | growing downwards * | entry 3 | v * | four null bytes | * | . . . | * | value 1 | ^ * | value 3 | | growing upwards * | value 2 | | * +------------------+ * * The header is followed by multiple entry descriptors. In disk blocks, the * entry descriptors are kept sorted. In inodes, they are unsorted. The * attribute values are aligned to the end of the block in no specific order. * * Locking strategy * ---------------- * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem. * EA blocks are only changed if they are exclusive to an inode, so * holding xattr_sem also means that nothing but the EA block's reference * count can change. Multiple writers to the same block are synchronized * by the buffer lock. */ #include <linux/init.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/mbcache.h> #include <linux/quotaops.h> #include <linux/rwsem.h> #include "ext4_jbd2.h" #include "ext4.h" #include "xattr.h" #include "acl.h" #define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data)) #define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr)) #define BFIRST(bh) ENTRY(BHDR(bh)+1) #define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0) #ifdef EXT4_XATTR_DEBUG # define ea_idebug(inode, f...) do { \ printk(KERN_DEBUG "inode %s:%lu: ", \ inode->i_sb->s_id, inode->i_ino); \ printk(f); \ printk("\n"); \ } while (0) # define ea_bdebug(bh, f...) do { \ char b[BDEVNAME_SIZE]; \ printk(KERN_DEBUG "block %s:%lu: ", \ bdevname(bh->b_bdev, b), \ (unsigned long) bh->b_blocknr); \ printk(f); \ printk("\n"); \ } while (0) #else # define ea_idebug(inode, fmt, ...) no_printk(fmt, ##__VA_ARGS__) # define ea_bdebug(bh, fmt, ...) 
no_printk(fmt, ##__VA_ARGS__) #endif static void ext4_xattr_cache_insert(struct buffer_head *); static struct buffer_head *ext4_xattr_cache_find(struct inode *, struct ext4_xattr_header *, struct mb_cache_entry **); static void ext4_xattr_rehash(struct ext4_xattr_header *, struct ext4_xattr_entry *); static int ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size); static struct mb_cache *ext4_xattr_cache; static const struct xattr_handler *ext4_xattr_handler_map[] = { [EXT4_XATTR_INDEX_USER] = &ext4_xattr_user_handler, #ifdef CONFIG_EXT4_FS_POSIX_ACL [EXT4_XATTR_INDEX_POSIX_ACL_ACCESS] = &ext4_xattr_acl_access_handler, [EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext4_xattr_acl_default_handler, #endif [EXT4_XATTR_INDEX_TRUSTED] = &ext4_xattr_trusted_handler, #ifdef CONFIG_EXT4_FS_SECURITY [EXT4_XATTR_INDEX_SECURITY] = &ext4_xattr_security_handler, #endif }; const struct xattr_handler *ext4_xattr_handlers[] = { &ext4_xattr_user_handler, &ext4_xattr_trusted_handler, #ifdef CONFIG_EXT4_FS_POSIX_ACL &ext4_xattr_acl_access_handler, &ext4_xattr_acl_default_handler, #endif #ifdef CONFIG_EXT4_FS_SECURITY &ext4_xattr_security_handler, #endif NULL }; static inline const struct xattr_handler * ext4_xattr_handler(int name_index) { const struct xattr_handler *handler = NULL; if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map)) handler = ext4_xattr_handler_map[name_index]; return handler; } /* * Inode operation listxattr() * * dentry->d_inode->i_mutex: don't care */ ssize_t ext4_listxattr(struct dentry *dentry, char *buffer, size_t size) { return ext4_xattr_list(dentry, buffer, size); } static int ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end) { while (!IS_LAST_ENTRY(entry)) { struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry); if ((void *)next >= end) return -EIO; entry = next; } return 0; } static inline int ext4_xattr_check_block(struct buffer_head *bh) { if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || BHDR(bh)->h_blocks != cpu_to_le32(1)) return -EIO; return ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size); } static inline int ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size) { size_t value_size = le32_to_cpu(entry->e_value_size); if (entry->e_value_block != 0 || value_size > size || le16_to_cpu(entry->e_value_offs) + value_size > size) return -EIO; return 0; } static int ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index, const char *name, size_t size, int sorted) { struct ext4_xattr_entry *entry; size_t name_len; int cmp = 1; if (name == NULL) return -EINVAL; name_len = strlen(name); entry = *pentry; for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { cmp = name_index - entry->e_name_index; if (!cmp) cmp = name_len - entry->e_name_len; if (!cmp) cmp = memcmp(name, entry->e_name, name_len); if (cmp <= 0 && (sorted || cmp == 0)) break; } *pentry = entry; if (!cmp && ext4_xattr_check_entry(entry, size)) return -EIO; return cmp ? 
-ENODATA : 0; } static int ext4_xattr_block_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { struct buffer_head *bh = NULL; struct ext4_xattr_entry *entry; size_t size; int error; ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", name_index, name, buffer, (long)buffer_size); error = -ENODATA; if (!EXT4_I(inode)->i_file_acl) goto cleanup; ea_idebug(inode, "reading block %llu", (unsigned long long)EXT4_I(inode)->i_file_acl); bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); if (!bh) goto cleanup; ea_bdebug(bh, "b_count=%d, refcount=%d", atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); if (ext4_xattr_check_block(bh)) { bad_block: EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); error = -EIO; goto cleanup; } ext4_xattr_cache_insert(bh); entry = BFIRST(bh); error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1); if (error == -EIO) goto bad_block; if (error) goto cleanup; size = le32_to_cpu(entry->e_value_size); if (buffer) { error = -ERANGE; if (size > buffer_size) goto cleanup; memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs), size); } error = size; cleanup: brelse(bh); return error; } static int ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { struct ext4_xattr_ibody_header *header; struct ext4_xattr_entry *entry; struct ext4_inode *raw_inode; struct ext4_iloc iloc; size_t size; void *end; int error; if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR)) return -ENODATA; error = ext4_get_inode_loc(inode, &iloc); if (error) return error; raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); entry = IFIRST(header); end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; error = ext4_xattr_check_names(entry, end); if (error) goto cleanup; error = ext4_xattr_find_entry(&entry, name_index, name, end - (void *)entry, 0); if (error) goto cleanup; size = le32_to_cpu(entry->e_value_size); if (buffer) { error = -ERANGE; if (size > buffer_size) goto cleanup; memcpy(buffer, (void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs), size); } error = size; cleanup: brelse(iloc.bh); return error; } /* * ext4_xattr_get() * * Copy an extended attribute into the buffer * provided, or compute the buffer size required. * Buffer is NULL to compute the size of the buffer required. * * Returns a negative error number on failure, or the number of bytes * used / required on success. 
*/ int ext4_xattr_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { int error; down_read(&EXT4_I(inode)->xattr_sem); error = ext4_xattr_ibody_get(inode, name_index, name, buffer, buffer_size); if (error == -ENODATA) error = ext4_xattr_block_get(inode, name_index, name, buffer, buffer_size); up_read(&EXT4_I(inode)->xattr_sem); return error; } static int ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry, char *buffer, size_t buffer_size) { size_t rest = buffer_size; for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { const struct xattr_handler *handler = ext4_xattr_handler(entry->e_name_index); if (handler) { size_t size = handler->list(dentry, buffer, rest, entry->e_name, entry->e_name_len, handler->flags); if (buffer) { if (size > rest) return -ERANGE; buffer += size; } rest -= size; } } return buffer_size - rest; } static int ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = dentry->d_inode; struct buffer_head *bh = NULL; int error; ea_idebug(inode, "buffer=%p, buffer_size=%ld", buffer, (long)buffer_size); error = 0; if (!EXT4_I(inode)->i_file_acl) goto cleanup; ea_idebug(inode, "reading block %llu", (unsigned long long)EXT4_I(inode)->i_file_acl); bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); error = -EIO; if (!bh) goto cleanup; ea_bdebug(bh, "b_count=%d, refcount=%d", atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); if (ext4_xattr_check_block(bh)) { EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); error = -EIO; goto cleanup; } ext4_xattr_cache_insert(bh); error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size); cleanup: brelse(bh); return error; } static int ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = dentry->d_inode; struct ext4_xattr_ibody_header *header; struct ext4_inode *raw_inode; struct ext4_iloc iloc; void *end; int error; if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR)) return 0; error = ext4_get_inode_loc(inode, &iloc); if (error) return error; raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; error = ext4_xattr_check_names(IFIRST(header), end); if (error) goto cleanup; error = ext4_xattr_list_entries(dentry, IFIRST(header), buffer, buffer_size); cleanup: brelse(iloc.bh); return error; } /* * ext4_xattr_list() * * Copy a list of attribute names into the buffer * provided, or compute the buffer size required. * Buffer is NULL to compute the size of the buffer required. * * Returns a negative error number on failure, or the number of bytes * used / required on success. */ static int ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) { int ret, ret2; down_read(&EXT4_I(dentry->d_inode)->xattr_sem); ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size); if (ret < 0) goto errout; if (buffer) { buffer += ret; buffer_size -= ret; } ret = ext4_xattr_block_list(dentry, buffer, buffer_size); if (ret < 0) goto errout; ret += ret2; errout: up_read(&EXT4_I(dentry->d_inode)->xattr_sem); return ret; } /* * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is * not set, set it. 
*/ static void ext4_xattr_update_super_block(handle_t *handle, struct super_block *sb) { if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR)) return; if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) { EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR); ext4_handle_dirty_super(handle, sb); } } /* * Release the xattr block BH: If the reference count is > 1, decrement * it; otherwise free the block. */ static void ext4_xattr_release_block(handle_t *handle, struct inode *inode, struct buffer_head *bh) { struct mb_cache_entry *ce = NULL; int error = 0; ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr); error = ext4_journal_get_write_access(handle, bh); if (error) goto out; lock_buffer(bh); if (BHDR(bh)->h_refcount == cpu_to_le32(1)) { ea_bdebug(bh, "refcount now=0; freeing"); if (ce) mb_cache_entry_free(ce); get_bh(bh); ext4_free_blocks(handle, inode, bh, 0, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); unlock_buffer(bh); } else { le32_add_cpu(&BHDR(bh)->h_refcount, -1); if (ce) mb_cache_entry_release(ce); unlock_buffer(bh); error = ext4_handle_dirty_metadata(handle, inode, bh); if (IS_SYNC(inode)) ext4_handle_sync(handle); dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); ea_bdebug(bh, "refcount now=%d; releasing", le32_to_cpu(BHDR(bh)->h_refcount)); } out: ext4_std_error(inode->i_sb, error); return; } /* * Find the available free space for EAs. This also returns the total number of * bytes used by EA entries. */ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last, size_t *min_offs, void *base, int *total) { for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { *total += EXT4_XATTR_LEN(last->e_name_len); if (!last->e_value_block && last->e_value_size) { size_t offs = le16_to_cpu(last->e_value_offs); if (offs < *min_offs) *min_offs = offs; } } return (*min_offs - ((void *)last - base) - sizeof(__u32)); } struct ext4_xattr_info { int name_index; const char *name; const void *value; size_t value_len; }; struct ext4_xattr_search { struct ext4_xattr_entry *first; void *base; void *end; struct ext4_xattr_entry *here; int not_found; }; static int ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s) { struct ext4_xattr_entry *last; size_t free, min_offs = s->end - s->base, name_len = strlen(i->name); /* Compute min_offs and last. */ last = s->first; for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { if (!last->e_value_block && last->e_value_size) { size_t offs = le16_to_cpu(last->e_value_offs); if (offs < min_offs) min_offs = offs; } } free = min_offs - ((void *)last - s->base) - sizeof(__u32); if (!s->not_found) { if (!s->here->e_value_block && s->here->e_value_size) { size_t size = le32_to_cpu(s->here->e_value_size); free += EXT4_XATTR_SIZE(size); } free += EXT4_XATTR_LEN(name_len); } if (i->value) { if (free < EXT4_XATTR_SIZE(i->value_len) || free < EXT4_XATTR_LEN(name_len) + EXT4_XATTR_SIZE(i->value_len)) return -ENOSPC; } if (i->value && s->not_found) { /* Insert the new name. 
*/ size_t size = EXT4_XATTR_LEN(name_len); size_t rest = (void *)last - (void *)s->here + sizeof(__u32); memmove((void *)s->here + size, s->here, rest); memset(s->here, 0, size); s->here->e_name_index = i->name_index; s->here->e_name_len = name_len; memcpy(s->here->e_name, i->name, name_len); } else { if (!s->here->e_value_block && s->here->e_value_size) { void *first_val = s->base + min_offs; size_t offs = le16_to_cpu(s->here->e_value_offs); void *val = s->base + offs; size_t size = EXT4_XATTR_SIZE( le32_to_cpu(s->here->e_value_size)); if (i->value && size == EXT4_XATTR_SIZE(i->value_len)) { /* The old and the new value have the same size. Just replace. */ s->here->e_value_size = cpu_to_le32(i->value_len); memset(val + size - EXT4_XATTR_PAD, 0, EXT4_XATTR_PAD); /* Clear pad bytes. */ memcpy(val, i->value, i->value_len); return 0; } /* Remove the old value. */ memmove(first_val + size, first_val, val - first_val); memset(first_val, 0, size); s->here->e_value_size = 0; s->here->e_value_offs = 0; min_offs += size; /* Adjust all value offsets. */ last = s->first; while (!IS_LAST_ENTRY(last)) { size_t o = le16_to_cpu(last->e_value_offs); if (!last->e_value_block && last->e_value_size && o < offs) last->e_value_offs = cpu_to_le16(o + size); last = EXT4_XATTR_NEXT(last); } } if (!i->value) { /* Remove the old name. */ size_t size = EXT4_XATTR_LEN(name_len); last = ENTRY((void *)last - size); memmove(s->here, (void *)s->here + size, (void *)last - (void *)s->here + sizeof(__u32)); memset(last, 0, size); } } if (i->value) { /* Insert the new value. */ s->here->e_value_size = cpu_to_le32(i->value_len); if (i->value_len) { size_t size = EXT4_XATTR_SIZE(i->value_len); void *val = s->base + min_offs - size; s->here->e_value_offs = cpu_to_le16(min_offs - size); memset(val + size - EXT4_XATTR_PAD, 0, EXT4_XATTR_PAD); /* Clear the pad bytes. */ memcpy(val, i->value, i->value_len); } } return 0; } struct ext4_xattr_block_find { struct ext4_xattr_search s; struct buffer_head *bh; }; static int ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_block_find *bs) { struct super_block *sb = inode->i_sb; int error; ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld", i->name_index, i->name, i->value, (long)i->value_len); if (EXT4_I(inode)->i_file_acl) { /* The inode already has an extended attribute block. */ bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl); error = -EIO; if (!bs->bh) goto cleanup; ea_bdebug(bs->bh, "b_count=%d, refcount=%d", atomic_read(&(bs->bh->b_count)), le32_to_cpu(BHDR(bs->bh)->h_refcount)); if (ext4_xattr_check_block(bs->bh)) { EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); error = -EIO; goto cleanup; } /* Find the named attribute. 
*/ bs->s.base = BHDR(bs->bh); bs->s.first = BFIRST(bs->bh); bs->s.end = bs->bh->b_data + bs->bh->b_size; bs->s.here = bs->s.first; error = ext4_xattr_find_entry(&bs->s.here, i->name_index, i->name, bs->bh->b_size, 1); if (error && error != -ENODATA) goto cleanup; bs->s.not_found = error; } error = 0; cleanup: return error; } static int ext4_xattr_block_set(handle_t *handle, struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_block_find *bs) { struct super_block *sb = inode->i_sb; struct buffer_head *new_bh = NULL; struct ext4_xattr_search *s = &bs->s; struct mb_cache_entry *ce = NULL; int error = 0; #define header(x) ((struct ext4_xattr_header *)(x)) if (i->value && i->value_len > sb->s_blocksize) return -ENOSPC; if (s->base) { ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev, bs->bh->b_blocknr); error = ext4_journal_get_write_access(handle, bs->bh); if (error) goto cleanup; lock_buffer(bs->bh); if (header(s->base)->h_refcount == cpu_to_le32(1)) { if (ce) { mb_cache_entry_free(ce); ce = NULL; } ea_bdebug(bs->bh, "modifying in-place"); error = ext4_xattr_set_entry(i, s); if (!error) { if (!IS_LAST_ENTRY(s->first)) ext4_xattr_rehash(header(s->base), s->here); ext4_xattr_cache_insert(bs->bh); } unlock_buffer(bs->bh); if (error == -EIO) goto bad_block; if (!error) error = ext4_handle_dirty_metadata(handle, inode, bs->bh); if (error) goto cleanup; goto inserted; } else { int offset = (char *)s->here - bs->bh->b_data; unlock_buffer(bs->bh); ext4_handle_release_buffer(handle, bs->bh); if (ce) { mb_cache_entry_release(ce); ce = NULL; } ea_bdebug(bs->bh, "cloning"); s->base = kmalloc(bs->bh->b_size, GFP_NOFS); error = -ENOMEM; if (s->base == NULL) goto cleanup; memcpy(s->base, BHDR(bs->bh), bs->bh->b_size); s->first = ENTRY(header(s->base)+1); header(s->base)->h_refcount = cpu_to_le32(1); s->here = ENTRY(s->base + offset); s->end = s->base + bs->bh->b_size; } } else { /* Allocate a buffer where we construct the new block. */ s->base = kzalloc(sb->s_blocksize, GFP_NOFS); /* assert(header == s->base) */ error = -ENOMEM; if (s->base == NULL) goto cleanup; header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); header(s->base)->h_blocks = cpu_to_le32(1); header(s->base)->h_refcount = cpu_to_le32(1); s->first = ENTRY(header(s->base)+1); s->here = ENTRY(header(s->base)+1); s->end = s->base + sb->s_blocksize; } error = ext4_xattr_set_entry(i, s); if (error == -EIO) goto bad_block; if (error) goto cleanup; if (!IS_LAST_ENTRY(s->first)) ext4_xattr_rehash(header(s->base), s->here); inserted: if (!IS_LAST_ENTRY(s->first)) { new_bh = ext4_xattr_cache_find(inode, header(s->base), &ce); if (new_bh) { /* We found an identical block in the cache. */ if (new_bh == bs->bh) ea_bdebug(new_bh, "keeping"); else { /* The old block is released after updating the inode. */ error = dquot_alloc_block(inode, EXT4_C2B(EXT4_SB(sb), 1)); if (error) goto cleanup; error = ext4_journal_get_write_access(handle, new_bh); if (error) goto cleanup_dquot; lock_buffer(new_bh); le32_add_cpu(&BHDR(new_bh)->h_refcount, 1); ea_bdebug(new_bh, "reusing; refcount now=%d", le32_to_cpu(BHDR(new_bh)->h_refcount)); unlock_buffer(new_bh); error = ext4_handle_dirty_metadata(handle, inode, new_bh); if (error) goto cleanup_dquot; } mb_cache_entry_release(ce); ce = NULL; } else if (bs->bh && s->base == bs->bh->b_data) { /* We were modifying this block in-place. 
*/ ea_bdebug(bs->bh, "keeping this block"); new_bh = bs->bh; get_bh(new_bh); } else { /* We need to allocate a new block */ ext4_fsblk_t goal, block; goal = ext4_group_first_block_no(sb, EXT4_I(inode)->i_block_group); /* non-extent files can't have physical blocks past 2^32 */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; /* * take i_data_sem because we will test * i_delalloc_reserved_flag in ext4_mb_new_blocks */ down_read((&EXT4_I(inode)->i_data_sem)); block = ext4_new_meta_blocks(handle, inode, goal, 0, NULL, &error); up_read((&EXT4_I(inode)->i_data_sem)); if (error) goto cleanup; if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS); ea_idebug(inode, "creating block %llu", (unsigned long long)block); new_bh = sb_getblk(sb, block); if (!new_bh) { error = -ENOMEM; getblk_failed: ext4_free_blocks(handle, inode, NULL, block, 1, EXT4_FREE_BLOCKS_METADATA); goto cleanup; } lock_buffer(new_bh); error = ext4_journal_get_create_access(handle, new_bh); if (error) { unlock_buffer(new_bh); error = -EIO; goto getblk_failed; } memcpy(new_bh->b_data, s->base, new_bh->b_size); set_buffer_uptodate(new_bh); unlock_buffer(new_bh); ext4_xattr_cache_insert(new_bh); error = ext4_handle_dirty_metadata(handle, inode, new_bh); if (error) goto cleanup; } } /* Update the inode. */ EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0; /* Drop the previous xattr block. */ if (bs->bh && bs->bh != new_bh) ext4_xattr_release_block(handle, inode, bs->bh); error = 0; cleanup: if (ce) mb_cache_entry_release(ce); brelse(new_bh); if (!(bs->bh && s->base == bs->bh->b_data)) kfree(s->base); return error; cleanup_dquot: dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1)); goto cleanup; bad_block: EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); goto cleanup; #undef header } struct ext4_xattr_ibody_find { struct ext4_xattr_search s; struct ext4_iloc iloc; }; static int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_ibody_find *is) { struct ext4_xattr_ibody_header *header; struct ext4_inode *raw_inode; int error; if (EXT4_I(inode)->i_extra_isize == 0) return 0; raw_inode = ext4_raw_inode(&is->iloc); header = IHDR(inode, raw_inode); is->s.base = is->s.first = IFIRST(header); is->s.here = is->s.first; is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { error = ext4_xattr_check_names(IFIRST(header), is->s.end); if (error) return error; /* Find the named attribute. */ error = ext4_xattr_find_entry(&is->s.here, i->name_index, i->name, is->s.end - (void *)is->s.base, 0); if (error && error != -ENODATA) return error; is->s.not_found = error; } return 0; } static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_ibody_find *is) { struct ext4_xattr_ibody_header *header; struct ext4_xattr_search *s = &is->s; int error; if (EXT4_I(inode)->i_extra_isize == 0) return -ENOSPC; error = ext4_xattr_set_entry(i, s); if (error) return error; header = IHDR(inode, ext4_raw_inode(&is->iloc)); if (!IS_LAST_ENTRY(s->first)) { header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); ext4_set_inode_state(inode, EXT4_STATE_XATTR); } else { header->h_magic = cpu_to_le32(0); ext4_clear_inode_state(inode, EXT4_STATE_XATTR); } return 0; } /* * ext4_xattr_set_handle() * * Create, replace or remove an extended attribute for this inode. 
Value * is NULL to remove an existing extended attribute, and non-NULL to * either replace an existing extended attribute, or create a new extended * attribute. The flags XATTR_REPLACE and XATTR_CREATE * specify that an extended attribute must exist and must not exist * previous to the call, respectively. * * Returns 0, or a negative error number on failure. */ int ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, const char *name, const void *value, size_t value_len, int flags) { struct ext4_xattr_info i = { .name_index = name_index, .name = name, .value = value, .value_len = value_len, }; struct ext4_xattr_ibody_find is = { .s = { .not_found = -ENODATA, }, }; struct ext4_xattr_block_find bs = { .s = { .not_found = -ENODATA, }, }; unsigned long no_expand; int error; if (!name) return -EINVAL; if (strlen(name) > 255) return -ERANGE; down_write(&EXT4_I(inode)->xattr_sem); no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND); ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); error = ext4_reserve_inode_write(handle, inode, &is.iloc); if (error) goto cleanup; if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) { struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); ext4_clear_inode_state(inode, EXT4_STATE_NEW); } error = ext4_xattr_ibody_find(inode, &i, &is); if (error) goto cleanup; if (is.s.not_found) error = ext4_xattr_block_find(inode, &i, &bs); if (error) goto cleanup; if (is.s.not_found && bs.s.not_found) { error = -ENODATA; if (flags & XATTR_REPLACE) goto cleanup; error = 0; if (!value) goto cleanup; } else { error = -EEXIST; if (flags & XATTR_CREATE) goto cleanup; } if (!value) { if (!is.s.not_found) error = ext4_xattr_ibody_set(handle, inode, &i, &is); else if (!bs.s.not_found) error = ext4_xattr_block_set(handle, inode, &i, &bs); } else { error = ext4_xattr_ibody_set(handle, inode, &i, &is); if (!error && !bs.s.not_found) { i.value = NULL; error = ext4_xattr_block_set(handle, inode, &i, &bs); } else if (error == -ENOSPC) { if (EXT4_I(inode)->i_file_acl && !bs.s.base) { error = ext4_xattr_block_find(inode, &i, &bs); if (error) goto cleanup; } error = ext4_xattr_block_set(handle, inode, &i, &bs); if (error) goto cleanup; if (!is.s.not_found) { i.value = NULL; error = ext4_xattr_ibody_set(handle, inode, &i, &is); } } } if (!error) { ext4_xattr_update_super_block(handle, inode->i_sb); inode->i_ctime = ext4_current_time(inode); if (!value) ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); /* * The bh is consumed by ext4_mark_iloc_dirty, even with * error != 0. */ is.iloc.bh = NULL; if (IS_SYNC(inode)) ext4_handle_sync(handle); } cleanup: brelse(is.iloc.bh); brelse(bs.bh); if (no_expand == 0) ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); up_write(&EXT4_I(inode)->xattr_sem); return error; } /* * ext4_xattr_set() * * Like ext4_xattr_set_handle, but start from an inode. This extended * attribute modification is a filesystem transaction by itself. * * Returns 0, or a negative error number on failure. 
*/ int ext4_xattr_set(struct inode *inode, int name_index, const char *name, const void *value, size_t value_len, int flags) { handle_t *handle; int error, retries = 0; retry: handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb)); if (IS_ERR(handle)) { error = PTR_ERR(handle); } else { int error2; error = ext4_xattr_set_handle(handle, inode, name_index, name, value, value_len, flags); error2 = ext4_journal_stop(handle); if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; if (error == 0) error = error2; } return error; } /* * Shift the EA entries in the inode to create space for the increased * i_extra_isize. */ static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry, int value_offs_shift, void *to, void *from, size_t n, int blocksize) { struct ext4_xattr_entry *last = entry; int new_offs; /* Adjust the value offsets of the entries */ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { if (!last->e_value_block && last->e_value_size) { new_offs = le16_to_cpu(last->e_value_offs) + value_offs_shift; BUG_ON(new_offs + le32_to_cpu(last->e_value_size) > blocksize); last->e_value_offs = cpu_to_le16(new_offs); } } /* Shift the entries by n bytes */ memmove(to, from, n); } /* * Expand an inode by new_extra_isize bytes when EAs are present. * Returns 0 on success or negative error number on failure. */ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, struct ext4_inode *raw_inode, handle_t *handle) { struct ext4_xattr_ibody_header *header; struct ext4_xattr_entry *entry, *last, *first; struct buffer_head *bh = NULL; struct ext4_xattr_ibody_find *is = NULL; struct ext4_xattr_block_find *bs = NULL; char *buffer = NULL, *b_entry_name = NULL; size_t min_offs, free; int total_ino, total_blk; void *base, *start, *end; int extra_isize = 0, error = 0, tried_min_extra_isize = 0; int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize); down_write(&EXT4_I(inode)->xattr_sem); retry: if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) { up_write(&EXT4_I(inode)->xattr_sem); return 0; } header = IHDR(inode, raw_inode); entry = IFIRST(header); /* * Check if enough free space is available in the inode to shift the * entries ahead by new_extra_isize. */ base = start = entry; end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; min_offs = end - base; last = entry; total_ino = sizeof(struct ext4_xattr_ibody_header); free = ext4_xattr_free_space(last, &min_offs, base, &total_ino); if (free >= new_extra_isize) { entry = IFIRST(header); ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize - new_extra_isize, (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize, (void *)header, total_ino, inode->i_sb->s_blocksize); EXT4_I(inode)->i_extra_isize = new_extra_isize; error = 0; goto cleanup; } /* * Enough free space isn't available in the inode, check if * EA block can hold new_extra_isize bytes. 
*/ if (EXT4_I(inode)->i_file_acl) { bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); error = -EIO; if (!bh) goto cleanup; if (ext4_xattr_check_block(bh)) { EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); error = -EIO; goto cleanup; } base = BHDR(bh); first = BFIRST(bh); end = bh->b_data + bh->b_size; min_offs = end - base; free = ext4_xattr_free_space(first, &min_offs, base, &total_blk); if (free < new_extra_isize) { if (!tried_min_extra_isize && s_min_extra_isize) { tried_min_extra_isize++; new_extra_isize = s_min_extra_isize; brelse(bh); goto retry; } error = -1; goto cleanup; } } else { free = inode->i_sb->s_blocksize; } while (new_extra_isize > 0) { size_t offs, size, entry_size; struct ext4_xattr_entry *small_entry = NULL; struct ext4_xattr_info i = { .value = NULL, .value_len = 0, }; unsigned int total_size; /* EA entry size + value size */ unsigned int shift_bytes; /* No. of bytes to shift EAs by? */ unsigned int min_total_size = ~0U; is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS); bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS); if (!is || !bs) { error = -ENOMEM; goto cleanup; } is->s.not_found = -ENODATA; bs->s.not_found = -ENODATA; is->iloc.bh = NULL; bs->bh = NULL; last = IFIRST(header); /* Find the entry best suited to be pushed into EA block */ entry = NULL; for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { total_size = EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) + EXT4_XATTR_LEN(last->e_name_len); if (total_size <= free && total_size < min_total_size) { if (total_size < new_extra_isize) { small_entry = last; } else { entry = last; min_total_size = total_size; } } } if (entry == NULL) { if (small_entry) { entry = small_entry; } else { if (!tried_min_extra_isize && s_min_extra_isize) { tried_min_extra_isize++; new_extra_isize = s_min_extra_isize; kfree(is); is = NULL; kfree(bs); bs = NULL; goto retry; } error = -1; goto cleanup; } } offs = le16_to_cpu(entry->e_value_offs); size = le32_to_cpu(entry->e_value_size); entry_size = EXT4_XATTR_LEN(entry->e_name_len); i.name_index = entry->e_name_index, buffer = kmalloc(EXT4_XATTR_SIZE(size), GFP_NOFS); b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS); if (!buffer || !b_entry_name) { error = -ENOMEM; goto cleanup; } /* Save the entry name and the entry value */ memcpy(buffer, (void *)IFIRST(header) + offs, EXT4_XATTR_SIZE(size)); memcpy(b_entry_name, entry->e_name, entry->e_name_len); b_entry_name[entry->e_name_len] = '\0'; i.name = b_entry_name; error = ext4_get_inode_loc(inode, &is->iloc); if (error) goto cleanup; error = ext4_xattr_ibody_find(inode, &i, is); if (error) goto cleanup; /* Remove the chosen entry from the inode */ error = ext4_xattr_ibody_set(handle, inode, &i, is); if (error) goto cleanup; entry = IFIRST(header); if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize) shift_bytes = new_extra_isize; else shift_bytes = entry_size + size; /* Adjust the offsets and shift the remaining entries ahead */ ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize - shift_bytes, (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes, (void *)header, total_ino - entry_size, inode->i_sb->s_blocksize); extra_isize += shift_bytes; new_extra_isize -= shift_bytes; EXT4_I(inode)->i_extra_isize = extra_isize; i.name = b_entry_name; i.value = buffer; i.value_len = size; error = ext4_xattr_block_find(inode, &i, bs); if (error) goto cleanup; /* Add entry which was removed from the inode into the block */ error = 
ext4_xattr_block_set(handle, inode, &i, bs); if (error) goto cleanup; kfree(b_entry_name); kfree(buffer); b_entry_name = NULL; buffer = NULL; brelse(is->iloc.bh); kfree(is); kfree(bs); } brelse(bh); up_write(&EXT4_I(inode)->xattr_sem); return 0; cleanup: kfree(b_entry_name); kfree(buffer); if (is) brelse(is->iloc.bh); kfree(is); kfree(bs); brelse(bh); up_write(&EXT4_I(inode)->xattr_sem); return error; } /* * ext4_xattr_delete_inode() * * Free extended attribute resources associated with this inode. This * is called immediately before an inode is freed. We have exclusive * access to the inode. */ void ext4_xattr_delete_inode(handle_t *handle, struct inode *inode) { struct buffer_head *bh = NULL; if (!EXT4_I(inode)->i_file_acl) goto cleanup; bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); if (!bh) { EXT4_ERROR_INODE(inode, "block %llu read error", EXT4_I(inode)->i_file_acl); goto cleanup; } if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || BHDR(bh)->h_blocks != cpu_to_le32(1)) { EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); goto cleanup; } ext4_xattr_release_block(handle, inode, bh); EXT4_I(inode)->i_file_acl = 0; cleanup: brelse(bh); } /* * ext4_xattr_put_super() * * This is called when a file system is unmounted. */ void ext4_xattr_put_super(struct super_block *sb) { mb_cache_shrink(sb->s_bdev); } /* * ext4_xattr_cache_insert() * * Create a new entry in the extended attribute cache, and insert * it unless such an entry is already in the cache. * * Returns 0, or a negative error number on failure. */ static void ext4_xattr_cache_insert(struct buffer_head *bh) { __u32 hash = le32_to_cpu(BHDR(bh)->h_hash); struct mb_cache_entry *ce; int error; ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS); if (!ce) { ea_bdebug(bh, "out of memory"); return; } error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash); if (error) { mb_cache_entry_free(ce); if (error == -EBUSY) { ea_bdebug(bh, "already in cache"); error = 0; } } else { ea_bdebug(bh, "inserting [%x]", (int)hash); mb_cache_entry_release(ce); } } /* * ext4_xattr_cmp() * * Compare two extended attribute blocks for equality. * * Returns 0 if the blocks are equal, 1 if they differ, and * a negative error number on errors. */ static int ext4_xattr_cmp(struct ext4_xattr_header *header1, struct ext4_xattr_header *header2) { struct ext4_xattr_entry *entry1, *entry2; entry1 = ENTRY(header1+1); entry2 = ENTRY(header2+1); while (!IS_LAST_ENTRY(entry1)) { if (IS_LAST_ENTRY(entry2)) return 1; if (entry1->e_hash != entry2->e_hash || entry1->e_name_index != entry2->e_name_index || entry1->e_name_len != entry2->e_name_len || entry1->e_value_size != entry2->e_value_size || memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len)) return 1; if (entry1->e_value_block != 0 || entry2->e_value_block != 0) return -EIO; if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs), (char *)header2 + le16_to_cpu(entry2->e_value_offs), le32_to_cpu(entry1->e_value_size))) return 1; entry1 = EXT4_XATTR_NEXT(entry1); entry2 = EXT4_XATTR_NEXT(entry2); } if (!IS_LAST_ENTRY(entry2)) return 1; return 0; } /* * ext4_xattr_cache_find() * * Find an identical extended attribute block. * * Returns a pointer to the block found, or NULL if such a block was * not found or an error occurred. 
*/ static struct buffer_head * ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header, struct mb_cache_entry **pce) { __u32 hash = le32_to_cpu(header->h_hash); struct mb_cache_entry *ce; if (!header->h_hash) return NULL; /* never share */ ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); again: ce = mb_cache_entry_find_first(ext4_xattr_cache, inode->i_sb->s_bdev, hash); while (ce) { struct buffer_head *bh; if (IS_ERR(ce)) { if (PTR_ERR(ce) == -EAGAIN) goto again; break; } bh = sb_bread(inode->i_sb, ce->e_block); if (!bh) { EXT4_ERROR_INODE(inode, "block %lu read error", (unsigned long) ce->e_block); } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= EXT4_XATTR_REFCOUNT_MAX) { ea_idebug(inode, "block %lu refcount %d>=%d", (unsigned long) ce->e_block, le32_to_cpu(BHDR(bh)->h_refcount), EXT4_XATTR_REFCOUNT_MAX); } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) { *pce = ce; return bh; } brelse(bh); ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash); } return NULL; } #define NAME_HASH_SHIFT 5 #define VALUE_HASH_SHIFT 16 /* * ext4_xattr_hash_entry() * * Compute the hash of an extended attribute. */ static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header, struct ext4_xattr_entry *entry) { __u32 hash = 0; char *name = entry->e_name; int n; for (n = 0; n < entry->e_name_len; n++) { hash = (hash << NAME_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ *name++; } if (entry->e_value_block == 0 && entry->e_value_size != 0) { __le32 *value = (__le32 *)((char *)header + le16_to_cpu(entry->e_value_offs)); for (n = (le32_to_cpu(entry->e_value_size) + EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) { hash = (hash << VALUE_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^ le32_to_cpu(*value++); } } entry->e_hash = cpu_to_le32(hash); } #undef NAME_HASH_SHIFT #undef VALUE_HASH_SHIFT #define BLOCK_HASH_SHIFT 16 /* * ext4_xattr_rehash() * * Re-compute the extended attribute hash value after an entry has changed. */ static void ext4_xattr_rehash(struct ext4_xattr_header *header, struct ext4_xattr_entry *entry) { struct ext4_xattr_entry *here; __u32 hash = 0; ext4_xattr_hash_entry(header, entry); here = ENTRY(header+1); while (!IS_LAST_ENTRY(here)) { if (!here->e_hash) { /* Block is not shared if an entry's hash value == 0 */ hash = 0; break; } hash = (hash << BLOCK_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^ le32_to_cpu(here->e_hash); here = EXT4_XATTR_NEXT(here); } header->h_hash = cpu_to_le32(hash); } #undef BLOCK_HASH_SHIFT int __init ext4_init_xattr(void) { ext4_xattr_cache = mb_cache_create("ext4_xattr", 6); if (!ext4_xattr_cache) return -ENOMEM; return 0; } void ext4_exit_xattr(void) { if (ext4_xattr_cache) mb_cache_destroy(ext4_xattr_cache); ext4_xattr_cache = NULL; }
davidmueller13/AK-Flo
fs/ext4/xattr.c
C
gpl-2.0
43,253
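ext4_xattr_get() and ext4_xattr_list() above document the same contract: pass a NULL buffer to learn the required size, then call again with a buffer at least that large. The same two-call pattern is visible from userspace through the Linux getxattr(2) wrapper, where a size of 0 plays the role of the NULL buffer. A minimal sketch, assuming the target file carries a user.comment attribute:

#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "testfile";
	const char *name = "user.comment";	/* assumed attribute name */

	/* Size probe: mirrors ext4_xattr_get() invoked without a buffer. */
	ssize_t len = getxattr(path, name, NULL, 0);
	if (len < 0) {
		perror("getxattr size probe");
		return 1;
	}

	char *buf = malloc(len + 1);
	if (!buf)
		return 1;

	/* Second call copies the value; the kernel reports ERANGE here if
	 * the attribute grew between the two calls. */
	len = getxattr(path, name, buf, (size_t)len);
	if (len < 0) {
		perror("getxattr");
		free(buf);
		return 1;
	}
	buf[len] = '\0';
	printf("%s = %s\n", name, buf);
	free(buf);
	return 0;
}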
/* * SuperH interrupt controller module * * Copyright (c) 2007 Magnus Damm * Based on sh_timer.c and arm_timer.c by Paul Brook * Copyright (c) 2005-2006 CodeSourcery. * * This code is licensed under the GPL. */ #include "sh_intc.h" #include "hw.h" #include "sh.h" //#define DEBUG_INTC //#define DEBUG_INTC_SOURCES #define INTC_A7(x) ((x) & 0x1fffffff) void sh_intc_toggle_source(struct intc_source *source, int enable_adj, int assert_adj) { int enable_changed = 0; int pending_changed = 0; int old_pending; if ((source->enable_count == source->enable_max) && (enable_adj == -1)) enable_changed = -1; source->enable_count += enable_adj; if (source->enable_count == source->enable_max) enable_changed = 1; source->asserted += assert_adj; old_pending = source->pending; source->pending = source->asserted && (source->enable_count == source->enable_max); if (old_pending != source->pending) pending_changed = 1; if (pending_changed) { if (source->pending) { source->parent->pending++; if (source->parent->pending == 1) cpu_interrupt(first_cpu, CPU_INTERRUPT_HARD); } else { source->parent->pending--; if (source->parent->pending == 0) cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD); } } if (enable_changed || assert_adj || pending_changed) { #ifdef DEBUG_INTC_SOURCES printf("sh_intc: (%d/%d/%d/%d) interrupt source 0x%x %s%s%s\n", source->parent->pending, source->asserted, source->enable_count, source->enable_max, source->vect, source->asserted ? "asserted " : assert_adj ? "deasserted" : "", enable_changed == 1 ? "enabled " : enable_changed == -1 ? "disabled " : "", source->pending ? "pending" : ""); #endif } } static void sh_intc_set_irq (void *opaque, int n, int level) { struct intc_desc *desc = opaque; struct intc_source *source = &(desc->sources[n]); if (level && !source->asserted) sh_intc_toggle_source(source, 0, 1); else if (!level && source->asserted) sh_intc_toggle_source(source, 0, -1); } int sh_intc_get_pending_vector(struct intc_desc *desc, int imask) { unsigned int i; /* slow: use a linked lists of pending sources instead */ /* wrong: take interrupt priority into account (one list per priority) */ if (imask == 0x0f) { return -1; /* FIXME, update code to include priority per source */ } for (i = 0; i < desc->nr_sources; i++) { struct intc_source *source = desc->sources + i; if (source->pending) { #ifdef DEBUG_INTC_SOURCES printf("sh_intc: (%d) returning interrupt source 0x%x\n", desc->pending, source->vect); #endif return source->vect; } } abort(); } #define INTC_MODE_NONE 0 #define INTC_MODE_DUAL_SET 1 #define INTC_MODE_DUAL_CLR 2 #define INTC_MODE_ENABLE_REG 3 #define INTC_MODE_MASK_REG 4 #define INTC_MODE_IS_PRIO 8 static unsigned int sh_intc_mode(unsigned long address, unsigned long set_reg, unsigned long clr_reg) { if ((address != INTC_A7(set_reg)) && (address != INTC_A7(clr_reg))) return INTC_MODE_NONE; if (set_reg && clr_reg) { if (address == INTC_A7(set_reg)) return INTC_MODE_DUAL_SET; else return INTC_MODE_DUAL_CLR; } if (set_reg) return INTC_MODE_ENABLE_REG; else return INTC_MODE_MASK_REG; } static void sh_intc_locate(struct intc_desc *desc, unsigned long address, unsigned long **datap, intc_enum **enums, unsigned int *first, unsigned int *width, unsigned int *modep) { unsigned int i, mode; /* this is slow but works for now */ if (desc->mask_regs) { for (i = 0; i < desc->nr_mask_regs; i++) { struct intc_mask_reg *mr = desc->mask_regs + i; mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg); if (mode == INTC_MODE_NONE) continue; *modep = mode; *datap = &mr->value; *enums = mr->enum_ids; 
*first = mr->reg_width - 1; *width = 1; return; } } if (desc->prio_regs) { for (i = 0; i < desc->nr_prio_regs; i++) { struct intc_prio_reg *pr = desc->prio_regs + i; mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg); if (mode == INTC_MODE_NONE) continue; *modep = mode | INTC_MODE_IS_PRIO; *datap = &pr->value; *enums = pr->enum_ids; *first = (pr->reg_width / pr->field_width) - 1; *width = pr->field_width; return; } } abort(); } static void sh_intc_toggle_mask(struct intc_desc *desc, intc_enum id, int enable, int is_group) { struct intc_source *source = desc->sources + id; if (!id) return; if (!source->next_enum_id && (!source->enable_max || !source->vect)) { #ifdef DEBUG_INTC_SOURCES printf("sh_intc: reserved interrupt source %d modified\n", id); #endif return; } if (source->vect) sh_intc_toggle_source(source, enable ? 1 : -1, 0); #ifdef DEBUG_INTC else { printf("setting interrupt group %d to %d\n", id, !!enable); } #endif if ((is_group || !source->vect) && source->next_enum_id) { sh_intc_toggle_mask(desc, source->next_enum_id, enable, 1); } #ifdef DEBUG_INTC if (!source->vect) { printf("setting interrupt group %d to %d - done\n", id, !!enable); } #endif } static uint32_t sh_intc_read(void *opaque, target_phys_addr_t offset) { struct intc_desc *desc = opaque; intc_enum *enum_ids = NULL; unsigned int first = 0; unsigned int width = 0; unsigned int mode = 0; unsigned long *valuep; #ifdef DEBUG_INTC printf("sh_intc_read 0x%lx\n", (unsigned long) offset); #endif sh_intc_locate(desc, (unsigned long)offset, &valuep, &enum_ids, &first, &width, &mode); return *valuep; } static void sh_intc_write(void *opaque, target_phys_addr_t offset, uint32_t value) { struct intc_desc *desc = opaque; intc_enum *enum_ids = NULL; unsigned int first = 0; unsigned int width = 0; unsigned int mode = 0; unsigned int k; unsigned long *valuep; unsigned long mask; #ifdef DEBUG_INTC printf("sh_intc_write 0x%lx 0x%08x\n", (unsigned long) offset, value); #endif sh_intc_locate(desc, (unsigned long)offset, &valuep, &enum_ids, &first, &width, &mode); switch (mode) { case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO: break; case INTC_MODE_DUAL_SET: value |= *valuep; break; case INTC_MODE_DUAL_CLR: value = *valuep & ~value; break; default: abort(); } for (k = 0; k <= first; k++) { mask = ((1 << width) - 1) << ((first - k) * width); if ((*valuep & mask) == (value & mask)) continue; #if 0 printf("k = %d, first = %d, enum = %d, mask = 0x%08x\n", k, first, enum_ids[k], (unsigned int)mask); #endif sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0); } *valuep = value; #ifdef DEBUG_INTC printf("sh_intc_write 0x%lx -> 0x%08x\n", (unsigned long) offset, value); #endif } static CPUReadMemoryFunc * const sh_intc_readfn[] = { sh_intc_read, sh_intc_read, sh_intc_read }; static CPUWriteMemoryFunc * const sh_intc_writefn[] = { sh_intc_write, sh_intc_write, sh_intc_write }; struct intc_source *sh_intc_source(struct intc_desc *desc, intc_enum id) { if (id) return desc->sources + id; return NULL; } static void sh_intc_register(struct intc_desc *desc, unsigned long address) { if (address) { cpu_register_physical_memory_offset(P4ADDR(address), 4, desc->iomemtype, INTC_A7(address)); cpu_register_physical_memory_offset(A7ADDR(address), 4, desc->iomemtype, INTC_A7(address)); } } static void sh_intc_register_source(struct intc_desc *desc, intc_enum source, struct intc_group *groups, int nr_groups) { unsigned int i, k; struct intc_source *s; if (desc->mask_regs) { for (i = 0; i < desc->nr_mask_regs; i++) { struct intc_mask_reg *mr = desc->mask_regs 
+ i; for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) { if (mr->enum_ids[k] != source) continue; s = sh_intc_source(desc, mr->enum_ids[k]); if (s) s->enable_max++; } } } if (desc->prio_regs) { for (i = 0; i < desc->nr_prio_regs; i++) { struct intc_prio_reg *pr = desc->prio_regs + i; for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) { if (pr->enum_ids[k] != source) continue; s = sh_intc_source(desc, pr->enum_ids[k]); if (s) s->enable_max++; } } } if (groups) { for (i = 0; i < nr_groups; i++) { struct intc_group *gr = groups + i; for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) { if (gr->enum_ids[k] != source) continue; s = sh_intc_source(desc, gr->enum_ids[k]); if (s) s->enable_max++; } } } } void sh_intc_register_sources(struct intc_desc *desc, struct intc_vect *vectors, int nr_vectors, struct intc_group *groups, int nr_groups) { unsigned int i, k; struct intc_source *s; for (i = 0; i < nr_vectors; i++) { struct intc_vect *vect = vectors + i; sh_intc_register_source(desc, vect->enum_id, groups, nr_groups); s = sh_intc_source(desc, vect->enum_id); if (s) s->vect = vect->vect; #ifdef DEBUG_INTC_SOURCES printf("sh_intc: registered source %d -> 0x%04x (%d/%d)\n", vect->enum_id, s->vect, s->enable_count, s->enable_max); #endif } if (groups) { for (i = 0; i < nr_groups; i++) { struct intc_group *gr = groups + i; s = sh_intc_source(desc, gr->enum_id); s->next_enum_id = gr->enum_ids[0]; for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) { if (!gr->enum_ids[k]) continue; s = sh_intc_source(desc, gr->enum_ids[k - 1]); s->next_enum_id = gr->enum_ids[k]; } #ifdef DEBUG_INTC_SOURCES printf("sh_intc: registered group %d (%d/%d)\n", gr->enum_id, s->enable_count, s->enable_max); #endif } } } int sh_intc_init(struct intc_desc *desc, int nr_sources, struct intc_mask_reg *mask_regs, int nr_mask_regs, struct intc_prio_reg *prio_regs, int nr_prio_regs) { unsigned int i; desc->pending = 0; desc->nr_sources = nr_sources; desc->mask_regs = mask_regs; desc->nr_mask_regs = nr_mask_regs; desc->prio_regs = prio_regs; desc->nr_prio_regs = nr_prio_regs; i = sizeof(struct intc_source) * nr_sources; desc->sources = qemu_mallocz(i); for (i = 0; i < desc->nr_sources; i++) { struct intc_source *source = desc->sources + i; source->parent = desc; } desc->irqs = qemu_allocate_irqs(sh_intc_set_irq, desc, nr_sources); desc->iomemtype = cpu_register_io_memory(sh_intc_readfn, sh_intc_writefn, desc, DEVICE_NATIVE_ENDIAN); if (desc->mask_regs) { for (i = 0; i < desc->nr_mask_regs; i++) { struct intc_mask_reg *mr = desc->mask_regs + i; sh_intc_register(desc, mr->set_reg); sh_intc_register(desc, mr->clr_reg); } } if (desc->prio_regs) { for (i = 0; i < desc->nr_prio_regs; i++) { struct intc_prio_reg *pr = desc->prio_regs + i; sh_intc_register(desc, pr->set_reg); sh_intc_register(desc, pr->clr_reg); } } return 0; } /* Assert level <n> IRL interrupt. 0:deassert. 1:lowest priority,... 15:highest priority. */ void sh_intc_set_irl(void *opaque, int n, int level) { struct intc_source *s = opaque; int i, irl = level ^ 15; for (i = 0; (s = sh_intc_source(s->parent, s->next_enum_id)); i++) { if (i == irl) sh_intc_toggle_source(s, s->enable_count?0:1, s->asserted?0:1); else if (s->asserted) sh_intc_toggle_source(s, 0, -1); } }
SSLab-NTHU/qemu-guest-armvisor
hw/sh_intc.c
C
gpl-2.0
12,189
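A minimal standalone sketch of the INTC_MODE_DUAL_SET / INTC_MODE_DUAL_CLR write semantics implemented by sh_intc_write() above: a write to the set register ORs the written bits into the backing mask, a write to the clear register removes them. This is illustrative C, not QEMU code; every name below is invented for the example.

#include <stdio.h>
#include <stdint.h>

enum dual_mode { DUAL_SET, DUAL_CLR };

/* Mirrors the DUAL_SET/DUAL_CLR cases in sh_intc_write(). */
static uint32_t dual_reg_write(uint32_t current, uint32_t value,
                               enum dual_mode mode)
{
    if (mode == DUAL_SET)
        return current | value;    /* set register: enable written bits */
    return current & ~value;       /* clear register: disable written bits */
}

int main(void)
{
    uint32_t mask = 0;
    mask = dual_reg_write(mask, 0x05, DUAL_SET); /* enable bits 0 and 2 */
    mask = dual_reg_write(mask, 0x01, DUAL_CLR); /* disable bit 0 */
    printf("mask = 0x%02x\n", (unsigned)mask);   /* prints: mask = 0x04 */
    return 0;
}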
/* radare2 - LGPL - Copyright 2017 - condret */ #include <r_util.h> #include <r_types.h> ut32 get_msb(ut32 v) { int i; for (i = 31; i > (-1); i--) { if (v & (0x1U << i)) { return (v & (0x1U << i)); } } return 0; } R_API RIDPool* r_id_pool_new(ut32 start_id, ut32 last_id) { RIDPool* pool = NULL; if (start_id < last_id) { pool = R_NEW0 (RIDPool); if (!pool) { return NULL; } pool->next_id = pool->start_id = start_id; pool->last_id = last_id; } return pool; } R_API bool r_id_pool_grab_id(RIDPool* pool, ut32* grabber) { if (!pool || !grabber) { return false; } if (pool->freed_ids) { ut32 grab = (ut32) (size_t)r_queue_dequeue (pool->freed_ids); *grabber = (ut32) grab; if (r_queue_is_empty (pool->freed_ids)) { r_queue_free (pool->freed_ids); pool->freed_ids = NULL; } return true; } if (pool->next_id < pool->last_id) { *grabber = pool->next_id; pool->next_id++; return true; } return false; } R_API bool r_id_pool_kick_id(RIDPool* pool, ut32 kick) { if (!pool || (kick < pool->start_id) || (pool->start_id == pool->next_id)) { return false; } if (kick == (pool->next_id - 1)) { pool->next_id--; return true; } if (!pool->freed_ids) { pool->freed_ids = r_queue_new (2); } r_queue_enqueue (pool->freed_ids, (void*) (size_t) kick); return true; } R_API void r_id_pool_free(RIDPool* pool) { if (pool && pool->freed_ids) { r_queue_free (pool->freed_ids); } free (pool); } R_API RIDStorage* r_id_storage_new(ut32 start_id, ut32 last_id) { RIDPool* pool; RIDStorage* storage = NULL; if ((start_id < 16) && (pool = r_id_pool_new (start_id, last_id))) { storage = R_NEW0 (RIDStorage); if (!storage) { r_id_pool_free (pool); /* don't leak the pool on OOM */ return NULL; } storage->pool = pool; } return storage; } static bool id_storage_reallocate(RIDStorage* storage, ut32 size) { void* data; if (!storage) { return false; } if (storage->size == size) { return true; } if (storage->size > size) { storage->data = realloc (storage->data, size * sizeof(void*)); storage->size = size; return true; } data = storage->data; storage->data = R_NEWS0 (void*, size); if (data) { memcpy (storage->data, data, storage->size * sizeof(void*)); free (data); /* the old block is no longer referenced */ } storage->size = size; return true; } R_API bool r_id_storage_set(RIDStorage* storage, void* data, ut32 id) { ut32 n; if (!storage || !storage->pool || (id >= storage->pool->next_id)) { return false; } n = get_msb (id + 1); if (n > (storage->size - (storage->size / 4))) { if (n < (storage->pool->last_id / 2)) { if (!id_storage_reallocate (storage, n * 2)) { return false; } } else if (n != (storage->pool->last_id)) { if (!id_storage_reallocate (storage, storage->pool->last_id)) { return false; } } } storage->data[id] = data; if (id > storage->top_id) { storage->top_id = id; } return true; } R_API bool r_id_storage_add(RIDStorage* storage, void* data, ut32* id) { if (!storage || !r_id_pool_grab_id (storage->pool, id)) { return false; } return r_id_storage_set (storage, data, *id); } R_API void* r_id_storage_get(RIDStorage* storage, ut32 id) { if (!storage || !storage->data || (storage->size <= id)) { return NULL; } return storage->data[id]; } R_API void r_id_storage_delete(RIDStorage* storage, ut32 id) { if (!storage || !storage->data || (storage->size <= id)) { return; } storage->data[id] = NULL; if (id == storage->top_id) { while (storage->top_id && !storage->data[storage->top_id]) { storage->top_id--; } if (!storage->top_id) { if(storage->data[storage->top_id]) { id_storage_reallocate (storage, 2); } else { RIDPool* pool = r_id_pool_new (storage->pool->start_id, storage->pool->last_id); R_FREE (storage->data); storage->size = 0; r_id_pool_free
(storage->pool); storage->pool = pool; return; } } else if ((storage->top_id + 1) < (storage->size / 4)) { id_storage_reallocate (storage, storage->size / 2); } } r_id_pool_kick_id (storage->pool, id); } R_API void* r_id_storage_take(RIDStorage* storage, ut32 id) { void* ret = r_id_storage_get (storage, id); r_id_storage_delete (storage, id); return ret; } R_API bool r_id_storage_foreach(RIDStorage* storage, RIDStorageForeachCb cb, void* user) { ut32 i; if (!cb || !storage || !storage->data) { return false; } for (i = 0; i < storage->top_id; i++) { if (storage->data[i]) { if (!cb (user, storage->data[i], i)) { return false; } } } if (storage->data[i]) { return cb (user, storage->data[i], i); } return true; } R_API void r_id_storage_free(RIDStorage* storage) { if (storage) { r_id_pool_free (storage->pool); free (storage->data); } free (storage); }
Maijin/radare2
libr/util/idpool.c
C
gpl-3.0
4,681
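A usage sketch for the id-pool half of the API above, assuming a program compiled and linked against r_util. It exercises the recycling behavior visible in the code: a kicked id goes onto the freed_ids queue and is handed out again before next_id advances.

#include <r_util.h>
#include <stdio.h>

int main(void) {
	RIDPool *pool = r_id_pool_new (0, 32);
	ut32 a, b, c;
	if (!pool) {
		return 1;
	}
	r_id_pool_grab_id (pool, &a);	/* a == 0 */
	r_id_pool_grab_id (pool, &b);	/* b == 1 */
	r_id_pool_kick_id (pool, a);	/* 0 is queued on freed_ids */
	r_id_pool_grab_id (pool, &c);	/* freed ids are reused first: c == 0 */
	printf ("a=%u b=%u c=%u\n", a, b, c);
	r_id_pool_free (pool);
	return 0;
}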
/* Part of SWI-Prolog Author: Jan Wielemaker E-mail: J.Wielemaker@vu.nl WWW: http://www.swi-prolog.org Copyright (c) 1999-2011, University of Amsterdam All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <windows.h> #include <tchar.h> #define _MAKE_DLL 1 #undef _export #include "console.h" #include "console_i.h" #include "common.h" #include <memory.h> #include <string.h> #include <ctype.h> #ifndef EOF #define EOF -1 #endif typedef void (*function)(Line ln, int chr); /* edit-function */ static function dispatch_table[256]; /* general dispatch-table */ static function dispatch_meta[256]; /* ESC-char dispatch */ static RlcCompleteFunc _rlc_complete_function = rlc_complete_file_function; static void init_line_package(RlcData b); static void bind_actions(void); #ifndef min #define min(a, b) ((a) < (b) ? (a) : (b)) #define max(a, b) ((a) > (b) ? (a) : (b)) #endif #ifndef TRUE #define TRUE 1 #define FALSE 0 #endif #ifndef EOS #define EOS 0 #endif #ifndef ESC #define ESC 27 #endif #define COMPLETE_NEWLINE 1 #define COMPLETE_EOF 2 #define ctrl(c) ((c) - '@') #define META_OFFSET 128 #define meta(c) ((c) + META_OFFSET) /******************************* * BUFFER * *******************************/ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - make_room(Line, int room) Make n-characters space after the point. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ static void make_room(Line ln, size_t room) { while ( ln->size + room + 1 > ln->allocated ) { if ( !ln->data ) { ln->data = rlc_malloc(256 * sizeof(TCHAR)); ln->allocated = 256; } else { ln->allocated *= 2; ln->data = rlc_realloc(ln->data, ln->allocated * sizeof(TCHAR)); } } memmove(&ln->data[ln->point + room], &ln->data[ln->point], (ln->size - ln->point)*sizeof(TCHAR)); ln->size += room; if ( room > 0 ) ln->change_start = min(ln->change_start, ln->point); } static void set_line(Line ln, const TCHAR *s) { size_t len = _tcslen(s); ln->size = ln->point = 0; make_room(ln, len); _tcsncpy(ln->data, s, len); } static void terminate(Line ln) { if ( !ln->data ) { ln->data = rlc_malloc(sizeof(TCHAR)); ln->allocated = 1; } ln->data[ln->size] = EOS; } static void delete(Line ln, size_t from, size_t len) { if ( from < 0 || from > ln->size || len < 0 || from + len > ln->size ) return; _tcsncpy(&ln->data[from], &ln->data[from+len], ln->size - (from+len)); ln->size -= len; } /******************************* * POSITIONING * *******************************/ static size_t back_word(Line ln, size_t from) { from = min(from, ln->size); from = max(0, from); if ( ln->data ) { while(!rlc_is_word_char(ln->data[from-1]) && from > 0 ) from--; while(rlc_is_word_char(ln->data[from-1]) && from > 0 ) from--; } return from; } static size_t forw_word(Line ln, size_t from) { from = min(from, ln->size); from = max(0, from); if ( ln->data ) { while(!rlc_is_word_char(ln->data[from]) && from < ln->size ) from++; while(rlc_is_word_char(ln->data[from]) && from < ln->size ) from++; } return from; } /******************************* * EDITING FUNCTIONS * *******************************/ static __inline void changed(Line ln, size_t from) { ln->change_start = min(ln->change_start, from); } static void insert_self(Line ln, int chr) { make_room(ln, 1); ln->data[ln->point++] = chr; } static void backward_delete_character(Line ln, int chr) { if ( ln->point > 0 ) { memmove(&ln->data[ln->point-1], &ln->data[ln->point], (ln->size - ln->point)*sizeof(TCHAR)); ln->size--; ln->point--; } changed(ln, ln->point); } static void delete_character(Line ln, int chr) { if ( ln->point < ln->size ) { ln->point++; backward_delete_character(ln, chr); } } static void backward_character(Line ln, int chr) { if ( ln->point > 0 ) ln->point--; } static void forward_character(Line ln, int chr) { if ( ln->point < ln->size ) ln->point++; } static void backward_word(Line ln, int chr) { ln->point = back_word(ln, ln->point); } static void forward_word(Line ln, int chr) { ln->point = forw_word(ln, ln->point); } static void backward_delete_word(Line ln, int chr) { size_t from = back_word(ln, ln->point); memmove(&ln->data[from], &ln->data[ln->point], (ln->size - ln->point)*sizeof(TCHAR)); ln->size -= ln->point - from; ln->point = from; changed(ln, from); } static void forward_delete_word(Line ln, int chr) { size_t to = forw_word(ln, ln->point); memmove(&ln->data[ln->point], &ln->data[to], (ln->size - to)*sizeof(TCHAR)); ln->size -= to - ln->point; changed(ln, ln->point); } static void transpose_chars(Line ln, int chr) { if ( ln->point > 0 && ln->point < ln->size ) { int c0 = ln->data[ln->point-1]; ln->data[ln->point-1] = ln->data[ln->point]; ln->data[ln->point] = c0; changed(ln, ln->point-1); } } static void start_of_line(Line ln, int chr) { ln->point = 0; } static void end_of_line(Line ln, int chr) { ln->point = ln->size; } static void kill_line(Line ln, int chr) { ln->size = ln->point; 
changed(ln, ln->size); } static void empty_line(Line ln, int chr) { ln->size = ln->point = 0; changed(ln, 0); } static void enter(Line ln, int chr) { ln->point = ln->size; #ifdef DOS_CRNL make_room(ln, 2); ln->data[ln->point++] = '\r'; ln->data[ln->point++] = '\n'; #else make_room(ln, 1); ln->data[ln->point++] = '\n'; #endif terminate(ln); ln->complete = COMPLETE_NEWLINE; } static void eof(Line ln, int chr) { ln->point = ln->size; terminate(ln); ln->complete = COMPLETE_EOF; } static void delete_character_or_eof(Line ln, int chr) { if ( ln->size == 0 ) { ln->point = ln->size; terminate(ln); ln->complete = COMPLETE_EOF; } else delete_character(ln, chr); } static void undefined(Line ln, int chr) { } static void interrupt(Line ln, int chr) { raise(SIGINT); } /******************************* * HISTORY * *******************************/ static void add_history(rlc_console c, const TCHAR *data) { const TCHAR *s = data; while(*s && *s <= ' ') s++; if ( *s ) rlc_add_history(c, s); } static void backward_history(Line ln, int chr) { const TCHAR *h; if ( rlc_at_head_history(ln->console) && ln->size > 0 ) { terminate(ln); add_history(ln->console, ln->data); } if ( (h = rlc_bwd_history(ln->console)) ) { set_line(ln, h); ln->point = ln->size; } } static void forward_history(Line ln, int chr) { if ( !rlc_at_head_history(ln->console) ) { const TCHAR *h = rlc_fwd_history(ln->console); if ( h ) { set_line(ln, h); ln->point = ln->size; } } else empty_line(ln, chr); } /******************************* * COMPLETE * *******************************/ RlcCompleteFunc rlc_complete_hook(RlcCompleteFunc new) { RlcCompleteFunc old = _rlc_complete_function; _rlc_complete_function = new; return old; } static int common(const TCHAR *s1, const TCHAR *s2, int insensitive) { int n = 0; if ( !insensitive ) { while(*s1 && *s1 == *s2) { s1++, s2++; n++; } return n; } else { while(*s1) { if ( _totlower(*s1) == _totlower(*s2) ) { s1++, s2++; n++; } else break; } return n; } } static void complete(Line ln, int chr) { if ( _rlc_complete_function ) { rlc_complete_data dbuf; RlcCompleteData data = &dbuf; memset(data, 0, sizeof(dbuf)); data->line = ln; data->call_type = COMPLETE_INIT; if ( (*_rlc_complete_function)(data) ) { TCHAR match[COMPLETE_MAX_WORD_LEN]; int nmatches = 1; size_t ncommon = _tcslen(data->candidate); size_t patlen = ln->point - data->replace_from; _tcscpy(match, data->candidate); data->call_type = COMPLETE_ENUMERATE; while( (*data->function)(data) ) { ncommon = common(match, data->candidate, data->case_insensitive); match[ncommon] = EOS; nmatches++; } data->call_type = COMPLETE_CLOSE; (*data->function)(data); delete(ln, data->replace_from, patlen); ln->point = data->replace_from; make_room(ln, ncommon); _tcsncpy(&ln->data[data->replace_from], match, ncommon); ln->point += ncommon; if ( nmatches == 1 && data->quote ) insert_self(ln, data->quote); } } } #define MAX_LIST_COMPLETIONS 256 static void list_completions(Line ln, int chr) { if ( _rlc_complete_function ) { rlc_complete_data dbuf; RlcCompleteData data = &dbuf; memset(data, 0, sizeof(dbuf)); data->line = ln; data->call_type = COMPLETE_INIT; if ( (*_rlc_complete_function)(data) ) { TCHAR *buf[COMPLETE_MAX_MATCHES]; int n, nmatches = 0; size_t len = _tcslen(data->candidate) + 1; size_t longest = len; size_t cols; buf[nmatches] = rlc_malloc(len*sizeof(TCHAR)); _tcsncpy(buf[nmatches], data->candidate, len); nmatches++; data->call_type = COMPLETE_ENUMERATE; while( (*data->function)(data) ) { len = _tcslen(data->candidate) + 1; buf[nmatches] = 
rlc_malloc(len*sizeof(TCHAR)); _tcsncpy(buf[nmatches], data->candidate, len); nmatches++; longest = max(longest, len); if ( nmatches > COMPLETE_MAX_MATCHES ) { TCHAR *msg = _T("\r\n! Too many matches\r\n"); while(*msg) rlc_putchar(ln->console, *msg++); ln->reprompt = TRUE; data->call_type = COMPLETE_CLOSE; (*data->function)(data); return; } } data->call_type = COMPLETE_CLOSE; (*data->function)(data); cols = ScreenCols(ln->console) / longest; rlc_putchar(ln->console, '\r'); rlc_putchar(ln->console, '\n'); for(n=0; n<nmatches; ) { TCHAR *s = buf[n]; len = 0; while(*s) { len++; rlc_putchar(ln->console, *s++); } rlc_free(buf[n++]); if ( n % cols == 0 ) { rlc_putchar(ln->console, '\r'); rlc_putchar(ln->console, '\n'); } else { while( len++ < longest ) rlc_putchar(ln->console, ' '); } } if ( nmatches % cols != 0 ) { rlc_putchar(ln->console, '\r'); rlc_putchar(ln->console, '\n'); } ln->reprompt = TRUE; } } } /******************************* * REPAINT * *******************************/ static void output(rlc_console b, TCHAR *s, size_t len) { while(len-- > 0) { if ( *s == '\n' ) rlc_putchar(b, '\r'); rlc_putchar(b, *s++); } } static void update_display(Line ln) { if ( ln->reprompt ) { const TCHAR *prompt = rlc_prompt(ln->console, NULL); const TCHAR *s = prompt; rlc_putchar(ln->console, '\r'); while(*s) rlc_putchar(ln->console, *s++); rlc_get_mark(ln->console, &ln->origin); ln->change_start = 0; ln->reprompt = FALSE; } rlc_goto_mark(ln->console, &ln->origin, ln->data, ln->change_start); output(ln->console, &ln->data[ln->change_start], ln->size - ln->change_start); rlc_erase_from_caret(ln->console); rlc_goto_mark(ln->console, &ln->origin, ln->data, ln->point); rlc_update(ln->console); ln->change_start = ln->size; } /******************************* * TOPLEVEL * *******************************/ TCHAR * read_line(rlc_console b) { line ln; init_line_package(b); memset(&ln, 0, sizeof(line)); ln.console = b; rlc_get_mark(b, &ln.origin); while(!ln.complete) { int c; rlc_mark m0, m1; function func; rlc_get_mark(b, &m0); if ( (c = getch(b)) == IMODE_SWITCH_CHAR ) return RL_CANCELED_CHARP; if ( c == EOF ) { eof(&ln, c); update_display(&ln); break; } else if ( c == ESC ) { if ( (c = getch(b)) == IMODE_SWITCH_CHAR ) return RL_CANCELED_CHARP; if ( c > 256 ) func = undefined; else func = dispatch_meta[c&0xff]; } else { if ( c >= 256 ) func = insert_self; else func = dispatch_table[c&0xff]; } rlc_get_mark(b, &m1); (*func)(&ln, c); if ( m0.mark_x != m1.mark_x || m0.mark_y != m1.mark_y ) ln.reprompt = TRUE; update_display(&ln); } rlc_clearprompt(b); add_history(b, ln.data); return ln.data; } /******************************* * DISPATCH * *******************************/ static void init_dispatch_table() { static int done; if ( !done ) { int n; for(n=0; n<32; n++) dispatch_table[n] = undefined; for(n=32; n<256; n++) dispatch_table[n] = insert_self; for(n=0; n<256; n++) dispatch_meta[n] = undefined; bind_actions(); done = TRUE; } } static void init_line_package(RlcData b) { init_dispatch_table(); rlc_init_history(b, 50); } /******************************* * BIND * *******************************/ typedef struct _action { char *name; function function; unsigned char keys[4]; } action, *Action; #define ACTION(n, f, k) { n, f, k } static action actions[] = { ACTION("insert_self", insert_self, ""), ACTION("backward_delete_character", backward_delete_character, "\b"), ACTION("complete", complete, "\t"), ACTION("enter", enter, "\r\n"), ACTION("start_of_line", start_of_line, {ctrl('A')}), ACTION("backward_character", 
backward_character, {ctrl('B')}), ACTION("interrupt", interrupt, {ctrl('C')}), ACTION("end_of_line", end_of_line, {ctrl('E')}), ACTION("forward_character", forward_character, {ctrl('F')}), ACTION("transpose_chars", transpose_chars, {ctrl('T')}), ACTION("kill_line", kill_line, {ctrl('K')}), ACTION("backward_history", backward_history, {ctrl('P')}), ACTION("forward_history", forward_history, {ctrl('N')}), ACTION("empty_line", empty_line, {ctrl('U')}), ACTION("eof", eof, {ctrl('Z')}), ACTION("delete_character_or_eof", delete_character_or_eof, {ctrl('D')}), ACTION("delete_character", delete_character, {127}), { "forward_word", forward_word, {meta(ctrl('F')), meta('f')}}, { "backward_word", backward_word, {meta(ctrl('B')), meta('b')}}, { "forward_delete_word", forward_delete_word, {meta(127), meta('d')}}, ACTION("list_completions", list_completions, {meta('?')}), ACTION("backward_delete_word", backward_delete_word, {meta('\b')}), ACTION(NULL, NULL, "") }; int rlc_bind(int chr, const char *fname) { if ( chr >= 0 && chr <= 256 ) { Action a = actions; for( ; a->name; a++ ) { if ( strcmp(a->name, fname) == 0 ) { if ( chr > META_OFFSET ) dispatch_meta[chr-META_OFFSET] = a->function; else dispatch_table[chr] = a->function; return TRUE; } } } return FALSE; } static void bind_actions() { Action a = actions; for( ; a->name; a++ ) { unsigned char *k = a->keys; for( ; *k; k++ ) { int chr = *k & 0xff; if ( chr > META_OFFSET ) dispatch_meta[chr-META_OFFSET] = a->function; else dispatch_table[chr] = a->function; } } }
edechter/swipl-devel
src/win32/console/edit.c
C
lgpl-2.1
16,384
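The line editor above routes every keystroke through two 256-entry dispatch tables (plain keys and ESC-prefixed meta keys). A self-contained sketch of that table-driven pattern, separate from the SWI-Prolog console; the edit functions here are invented placeholders, only the dispatch idea is taken from the code above.

#include <stdio.h>

#define ctrl(c) ((c) - '@')

typedef void (*edit_fn)(int chr);

static void undefined(int chr)     { (void)chr; }  /* unbound key: no-op */
static void start_of_line(int chr) { (void)chr; printf("^A -> start_of_line\n"); }
static void end_of_line(int chr)   { (void)chr; printf("^E -> end_of_line\n"); }

static edit_fn dispatch_table[256];

int main(void)
{
    int n;
    for (n = 0; n < 256; n++)
        dispatch_table[n] = undefined;           /* default every key */
    dispatch_table[ctrl('A')] = start_of_line;   /* bind Ctrl-A */
    dispatch_table[ctrl('E')] = end_of_line;     /* bind Ctrl-E */

    (*dispatch_table[ctrl('A')])(ctrl('A'));     /* simulate keystrokes */
    (*dispatch_table[ctrl('E')])(ctrl('E'));
    return 0;
}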
/**************************************************************************/ /* */ /* OCaml */ /* */ /* Xavier Leroy, projet Cristal, INRIA Rocquencourt */ /* */ /* Copyright 2001 Institut National de Recherche en Informatique et */ /* en Automatique. */ /* */ /* All rights reserved. This file is distributed under the terms of */ /* the GNU Lesser General Public License version 2.1, with the */ /* special exception on linking described in the file LICENSE. */ /* */ /**************************************************************************/ #define CAML_INTERNALS /* Registration of global memory roots */ #include "caml/mlvalues.h" #include "caml/roots.h" #include "caml/globroots.h" #include "caml/skiplist.h" /* The three global root lists. Each is represented by a skip list with the key being the address of the root. (The associated data field is unused.) */ struct skiplist caml_global_roots = SKIPLIST_STATIC_INITIALIZER; /* mutable roots, don't know whether old or young */ struct skiplist caml_global_roots_young = SKIPLIST_STATIC_INITIALIZER; /* generational roots pointing to minor or major heap */ struct skiplist caml_global_roots_old = SKIPLIST_STATIC_INITIALIZER; /* generational roots pointing to major heap */ /* The invariant of the generational roots is the following: - If the global root contains a pointer to the minor heap, then the root is in [caml_global_roots_young]; - If the global root contains a pointer to the major heap, then the root is in [caml_global_roots_old] or in [caml_global_roots_young]; - Otherwise (the root contains a pointer outside of the heap or an integer), then neither [caml_global_roots_young] nor [caml_global_roots_old] contain it. */ /* Insertion and deletion */ Caml_inline void caml_insert_global_root(struct skiplist * list, value * r) { caml_skiplist_insert(list, (uintnat) r, 0); } Caml_inline void caml_delete_global_root(struct skiplist * list, value * r) { caml_skiplist_remove(list, (uintnat) r); } /* Iterate a GC scanning action over a global root list */ static void caml_iterate_global_roots(scanning_action f, struct skiplist * rootlist) { FOREACH_SKIPLIST_ELEMENT(e, rootlist, { value * r = (value *) (e->key); f(*r, r); }) } /* Register a global C root of the mutable kind */ CAMLexport void caml_register_global_root(value *r) { CAMLassert (((intnat) r & 3) == 0); /* compact.c demands this (for now) */ caml_insert_global_root(&caml_global_roots, r); } /* Un-register a global C root of the mutable kind */ CAMLexport void caml_remove_global_root(value *r) { caml_delete_global_root(&caml_global_roots, r); } enum gc_root_class { YOUNG, OLD, UNTRACKED }; static enum gc_root_class classify_gc_root(value v) { if(!Is_block(v)) return UNTRACKED; if(Is_young(v)) return YOUNG; #ifndef NO_NAKED_POINTERS if(!Is_in_heap(v)) return UNTRACKED; #endif return OLD; } /* Register a global C root of the generational kind */ CAMLexport void caml_register_generational_global_root(value *r) { CAMLassert (((intnat) r & 3) == 0); /* compact.c demands this (for now) */ switch(classify_gc_root(*r)) { case YOUNG: caml_insert_global_root(&caml_global_roots_young, r); break; case OLD: caml_insert_global_root(&caml_global_roots_old, r); break; case UNTRACKED: break; } } /* Un-register a global C root of the generational kind */ CAMLexport void caml_remove_generational_global_root(value *r) { switch(classify_gc_root(*r)) { case OLD: caml_delete_global_root(&caml_global_roots_old, r); /* Fallthrough: the root can be in the young list while actually being in the major heap. 
*/ case YOUNG: caml_delete_global_root(&caml_global_roots_young, r); break; case UNTRACKED: break; } } /* Modify the value of a global C root of the generational kind */ CAMLexport void caml_modify_generational_global_root(value *r, value newval) { enum gc_root_class c; /* See PRs #4704, #607 and #8656 */ switch(classify_gc_root(newval)) { case YOUNG: c = classify_gc_root(*r); if(c == OLD) caml_delete_global_root(&caml_global_roots_old, r); if(c != YOUNG) caml_insert_global_root(&caml_global_roots_young, r); break; case OLD: /* If the old class is YOUNG, then we do not need to do anything: It is OK to have a root in roots_young that suddenly points to the old generation -- the next minor GC will take care of that. */ if(classify_gc_root(*r) == UNTRACKED) caml_insert_global_root(&caml_global_roots_old, r); break; case UNTRACKED: caml_remove_generational_global_root(r); break; } *r = newval; } /* Scan all global roots */ void caml_scan_global_roots(scanning_action f) { caml_iterate_global_roots(f, &caml_global_roots); caml_iterate_global_roots(f, &caml_global_roots_young); caml_iterate_global_roots(f, &caml_global_roots_old); } /* Scan global roots for a minor collection */ void caml_scan_global_young_roots(scanning_action f) { caml_iterate_global_roots(f, &caml_global_roots); caml_iterate_global_roots(f, &caml_global_roots_young); /* Move young roots to old roots */ FOREACH_SKIPLIST_ELEMENT(e, &caml_global_roots_young, { value * r = (value *) (e->key); caml_insert_global_root(&caml_global_roots_old, r); }); caml_skiplist_empty(&caml_global_roots_young); }
gerdstolpmann/ocaml
runtime/globroots.c
C
lgpl-2.1
6,109
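A usage sketch for the generational-root API above, in the shape of an OCaml C stub. caml_register_generational_global_root() and caml_modify_generational_global_root() are the real runtime entry points (declared in caml/memory.h); saved_callback and the stub names are invented for the example.

#include <caml/mlvalues.h>
#include <caml/memory.h>

static value saved_callback = Val_unit;  /* cell kept alive as a GC root */

/* Called once from OCaml: stash a value and register its cell. */
CAMLprim value stash_callback(value v)
{
    CAMLparam1(v);
    saved_callback = v;
    caml_register_generational_global_root(&saved_callback);
    CAMLreturn(Val_unit);
}

/* Later updates must go through the modify entry point so the root
 * migrates between the young and old lists as classified above. */
CAMLprim value replace_callback(value v)
{
    CAMLparam1(v);
    caml_modify_generational_global_root(&saved_callback, v);
    CAMLreturn(Val_unit);
}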
/**************************************************************************** * sched/pthread_condwait.c * * Copyright (C) 2007-2009, 2012 Gregory Nutt. All rights reserved. * Author: Gregory Nutt <gnutt@nuttx.org> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name NuttX nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include <nuttx/config.h> #include <unistd.h> #include <pthread.h> #include <sched.h> #include <errno.h> #include <debug.h> #include "pthread_internal.h" /**************************************************************************** * Definitions ****************************************************************************/ /**************************************************************************** * Private Type Declarations ****************************************************************************/ /**************************************************************************** * Global Variables ****************************************************************************/ /**************************************************************************** * Private Variables ****************************************************************************/ /**************************************************************************** * Private Functions ****************************************************************************/ /**************************************************************************** * Public Functions ****************************************************************************/ /**************************************************************************** * Name: int pthread_cond_wait * * Description: * A thread can wait for a condition variable to be signalled or broadcast. 
* * Parameters: * cond - the condition variable to wait on * mutex - the mutex held by the calling thread * * Return Value: * Zero (OK) on success; EINVAL or EPERM on failure * * Assumptions: * ****************************************************************************/ int pthread_cond_wait(FAR pthread_cond_t *cond, FAR pthread_mutex_t *mutex) { int ret; sdbg("cond=0x%p mutex=0x%p\n", cond, mutex); /* Make sure that non-NULL references were provided. */ if (!cond || !mutex) { ret = EINVAL; } /* Make sure that the caller holds the mutex */ else if (mutex->pid != (int)getpid()) { ret = EPERM; } else { /* Give up the mutex */ sdbg("Give up mutex / take cond\n"); sched_lock(); mutex->pid = 0; ret = pthread_givesemaphore((sem_t*)&mutex->sem); /* Take the semaphore */ ret |= pthread_takesemaphore((sem_t*)&cond->sem); sched_unlock(); /* Reacquire the mutex */ sdbg("Reacquire mutex...\n"); ret |= pthread_takesemaphore((sem_t*)&mutex->sem); if (!ret) { mutex->pid = getpid(); } } sdbg("Returning %d\n", ret); return ret; }
gcds/project_xxx
nuttx/sched/pthread_condwait.c
C
bsd-3-clause
4,507
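The implementation above releases the mutex and blocks on the condition semaphore inside a scheduler-locked region, which is why callers must follow the standard predicate-loop pattern. The sketch below is generic POSIX usage, not NuttX-specific code.

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int ready;

void *consumer(void *arg)
{
    pthread_mutex_lock(&lock);
    while (!ready)                  /* re-test: wakeups can be spurious */
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
    return arg;
}

void producer(void)
{
    pthread_mutex_lock(&lock);
    ready = 1;
    pthread_cond_signal(&cond);     /* wake one waiter */
    pthread_mutex_unlock(&lock);
}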
/* * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ #include <assert.h> #include <stdint.h> #include <string.h> #include <lib/mmio.h> #include <lib/fconf/fconf.h> #include <plat/arm/common/plat_arm.h> #include <plat/arm/common/fconf_nv_cntr_getter.h> #include <plat/common/platform.h> #include <platform_def.h> #include <tools_share/tbbr_oid.h> /* * Return the ROTPK hash in the following ASN.1 structure in DER format: * * AlgorithmIdentifier ::= SEQUENCE { * algorithm OBJECT IDENTIFIER, * parameters ANY DEFINED BY algorithm OPTIONAL * } * * DigestInfo ::= SEQUENCE { * digestAlgorithm AlgorithmIdentifier, * digest OCTET STRING * } */ int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len, unsigned int *flags) { return arm_get_rotpk_info(cookie, key_ptr, key_len, flags); } /* * Store a new non-volatile counter value. * * On some FVP versions, the non-volatile counters are read-only so this * function will always fail. * * Return: 0 = success, Otherwise = error */ int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr) { const char *oid; uintptr_t nv_ctr_addr; assert(cookie != NULL); oid = (const char *)cookie; if (strcmp(oid, TRUSTED_FW_NVCOUNTER_OID) == 0) { nv_ctr_addr = FCONF_GET_PROPERTY(cot, nv_cntr_addr, TRUSTED_NV_CTR_ID); } else if (strcmp(oid, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) { nv_ctr_addr = FCONF_GET_PROPERTY(cot, nv_cntr_addr, NON_TRUSTED_NV_CTR_ID); } else { return 1; } mmio_write_32(nv_ctr_addr, nv_ctr); /* * If the FVP models a locked counter then its value cannot be updated * and the above write operation has been silently ignored. */ return (mmio_read_32(nv_ctr_addr) == nv_ctr) ? 0 : 1; }
jenswi-linaro/arm-trusted-firmware
plat/arm/board/fvp/fvp_trusted_boot.c
C
bsd-3-clause
1,842
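plat_set_nv_ctr() above cannot distinguish a locked counter from a writable one except by writing and then reading the value back. A standalone sketch of that write-then-verify pattern, with the mmio counter modeled as a plain variable; every name below is invented for illustration, this is not TF-A code.

#include <stdio.h>
#include <stdint.h>

static uint32_t nv_ctr_reg = 3;     /* simulated counter register */
static int ctr_locked = 1;          /* models a read-only FVP counter */

static void reg_write(uint32_t v) { if (!ctr_locked) nv_ctr_reg = v; }
static uint32_t reg_read(void)    { return nv_ctr_reg; }

/* Returns 0 on success, 1 if the write was silently ignored. */
static int set_nv_ctr(uint32_t new_ctr)
{
    reg_write(new_ctr);
    return (reg_read() == new_ctr) ? 0 : 1;
}

int main(void)
{
    printf("update to 4: %s\n", set_nv_ctr(4) ? "rejected" : "ok");
    ctr_locked = 0;
    printf("after unlock: %s\n", set_nv_ctr(4) ? "rejected" : "ok");
    return 0;
}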
/** * @file * lwIP network interface abstraction * * @defgroup netif Network interface (NETIF) * @ingroup callbackstyle_api * * @defgroup netif_ip4 IPv4 address handling * @ingroup netif * * @defgroup netif_ip6 IPv6 address handling * @ingroup netif * * @defgroup netif_cd Client data handling * Store data (void*) on a netif for application usage. * @see @ref LWIP_NUM_NETIF_CLIENT_DATA * @ingroup netif */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. 
* * Author: Adam Dunkels <adam@sics.se> */ #include "lwip/opt.h" #include <string.h> #include "lwip/def.h" #include "lwip/ip_addr.h" #include "lwip/ip6_addr.h" #include "lwip/netif.h" #include "lwip/priv/tcp_priv.h" #include "lwip/udp.h" #include "lwip/raw.h" #include "lwip/snmp.h" #include "lwip/igmp.h" #include "lwip/etharp.h" #include "lwip/stats.h" #include "lwip/sys.h" #include "lwip/ip.h" #if ENABLE_LOOPBACK #if LWIP_NETIF_LOOPBACK_MULTITHREADING #include "lwip/tcpip.h" #endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ #endif /* ENABLE_LOOPBACK */ #include "netif/ethernet.h" #if LWIP_AUTOIP #include "lwip/autoip.h" #endif /* LWIP_AUTOIP */ #if LWIP_DHCP #include "lwip/dhcp.h" #endif /* LWIP_DHCP */ #if LWIP_IPV6_DHCP6 #include "lwip/dhcp6.h" #endif /* LWIP_IPV6_DHCP6 */ #if LWIP_IPV6_MLD #include "lwip/mld6.h" #endif /* LWIP_IPV6_MLD */ #if LWIP_IPV6 #include "lwip/nd6.h" #endif #if LWIP_NETIF_STATUS_CALLBACK #define NETIF_STATUS_CALLBACK(n) do{ if (n->status_callback) { (n->status_callback)(n); }}while(0) #else #define NETIF_STATUS_CALLBACK(n) #endif /* LWIP_NETIF_STATUS_CALLBACK */ #if LWIP_NETIF_LINK_CALLBACK #define NETIF_LINK_CALLBACK(n) do{ if (n->link_callback) { (n->link_callback)(n); }}while(0) #else #define NETIF_LINK_CALLBACK(n) #endif /* LWIP_NETIF_LINK_CALLBACK */ struct netif *netif_list; struct netif *netif_default; static u8_t netif_num; #if LWIP_NUM_NETIF_CLIENT_DATA > 0 static u8_t netif_client_id; #endif #define NETIF_REPORT_TYPE_IPV4 0x01 #define NETIF_REPORT_TYPE_IPV6 0x02 static void netif_issue_reports(struct netif* netif, u8_t report_type); #if LWIP_IPV6 static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr); #endif /* LWIP_IPV6 */ #if LWIP_HAVE_LOOPIF #if LWIP_IPV4 static err_t netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t* addr); #endif #if LWIP_IPV6 static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t* addr); #endif static struct netif loop_netif; /** * Initialize a lwip network interface structure for a loopback interface * * @param netif the lwip network interface structure for this loopif * @return ERR_OK if the loopif is initialized * ERR_MEM if private data couldn't be allocated */ static err_t netif_loopif_init(struct netif *netif) { /* initialize the snmp variables and counters inside the struct netif * ifSpeed: no assumption can be made! 
*/ MIB2_INIT_NETIF(netif, snmp_ifType_softwareLoopback, 0); netif->name[0] = 'l'; netif->name[1] = 'o'; #if LWIP_IPV4 netif->output = netif_loop_output_ipv4; #endif #if LWIP_IPV6 netif->output_ip6 = netif_loop_output_ipv6; #endif #if LWIP_LOOPIF_MULTICAST netif->flags |= NETIF_FLAG_IGMP; #endif return ERR_OK; } #endif /* LWIP_HAVE_LOOPIF */ void netif_init(void) { #if LWIP_HAVE_LOOPIF #if LWIP_IPV4 #define LOOPIF_ADDRINIT &loop_ipaddr, &loop_netmask, &loop_gw, ip4_addr_t loop_ipaddr, loop_netmask, loop_gw; IP4_ADDR(&loop_gw, 127,0,0,1); IP4_ADDR(&loop_ipaddr, 127,0,0,1); IP4_ADDR(&loop_netmask, 255,0,0,0); #else /* LWIP_IPV4 */ #define LOOPIF_ADDRINIT #endif /* LWIP_IPV4 */ #if NO_SYS netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, ip_input); #else /* NO_SYS */ netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, tcpip_input); #endif /* NO_SYS */ #if LWIP_IPV6 IP_ADDR6(loop_netif.ip6_addr, 0, 0, 0, PP_HTONL(0x00000001UL)); loop_netif.ip6_addr_state[0] = IP6_ADDR_VALID; #endif /* LWIP_IPV6 */ netif_set_link_up(&loop_netif); netif_set_up(&loop_netif); #endif /* LWIP_HAVE_LOOPIF */ } /** * @ingroup lwip_nosys * Forwards a received packet for input processing with * ethernet_input() or ip_input() depending on netif flags. * Don't call directly, pass to netif_add() and call * netif->input(). * Only works if the netif driver correctly sets * NETIF_FLAG_ETHARP and/or NETIF_FLAG_ETHERNET flag! */ err_t netif_input(struct pbuf *p, struct netif *inp) { #if LWIP_ETHERNET if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { return ethernet_input(p, inp); } else #endif /* LWIP_ETHERNET */ return ip_input(p, inp); } /** * @ingroup netif * Add a network interface to the list of lwIP netifs. * * @param netif a pre-allocated netif structure * @param ipaddr IP address for the new netif * @param netmask network mask for the new netif * @param gw default gateway IP address for the new netif * @param state opaque data passed to the new netif * @param init callback function that initializes the interface * @param input callback function that is called to pass * ingress packets up in the protocol layer stack.\n * It is recommended to use a function that passes the input directly * to the stack (netif_input(), NO_SYS=1 mode) or via sending a * message to TCPIP thread (tcpip_input(), NO_SYS=0 mode).\n * These functions use netif flags NETIF_FLAG_ETHARP and NETIF_FLAG_ETHERNET * to decide whether to forward to ethernet_input() or ip_input(). * In other words, the functions only work when the netif * driver is implemented correctly!\n * Most members of struct netif should be be initialized by the * netif init function = netif driver (init parameter of this function).\n * IPv6: Don't forget to call netif_create_ip6_linklocal_address() after * setting the MAC address in struct netif.hwaddr * (IPv6 requires a link-local address). * * @return netif, or NULL if failed. 
*/ struct netif * netif_add(struct netif *netif, #if LWIP_IPV4 const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, #endif /* LWIP_IPV4 */ void *state, netif_init_fn init, netif_input_fn input) { #if LWIP_IPV6 s8_t i; #endif LWIP_ASSERT("No init function given", init != NULL); /* reset new interface configuration state */ #if LWIP_IPV4 ip_addr_set_zero_ip4(&netif->ip_addr); ip_addr_set_zero_ip4(&netif->netmask); ip_addr_set_zero_ip4(&netif->gw); #endif /* LWIP_IPV4 */ #if LWIP_IPV6 for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { ip_addr_set_zero_ip6(&netif->ip6_addr[i]); netif->ip6_addr_state[i] = IP6_ADDR_INVALID; } netif->output_ip6 = netif_null_output_ip6; #endif /* LWIP_IPV6 */ NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_ENABLE_ALL); netif->flags = 0; #ifdef netif_get_client_data memset(netif->client_data, 0, sizeof(netif->client_data)); #endif /* LWIP_NUM_NETIF_CLIENT_DATA */ #if LWIP_IPV6_AUTOCONFIG /* IPv6 address autoconfiguration not enabled by default */ netif->ip6_autoconfig_enabled = 0; #endif /* LWIP_IPV6_AUTOCONFIG */ #if LWIP_IPV6_SEND_ROUTER_SOLICIT netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; #endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ #if LWIP_NETIF_STATUS_CALLBACK netif->status_callback = NULL; #endif /* LWIP_NETIF_STATUS_CALLBACK */ #if LWIP_NETIF_LINK_CALLBACK netif->link_callback = NULL; #endif /* LWIP_NETIF_LINK_CALLBACK */ #if LWIP_IGMP netif->igmp_mac_filter = NULL; #endif /* LWIP_IGMP */ #if LWIP_IPV6 && LWIP_IPV6_MLD netif->mld_mac_filter = NULL; #endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ #if ENABLE_LOOPBACK netif->loop_first = NULL; netif->loop_last = NULL; #endif /* ENABLE_LOOPBACK */ /* remember netif specific state information data */ netif->state = state; netif->num = netif_num++; netif->input = input; NETIF_SET_HWADDRHINT(netif, NULL); #if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS netif->loop_cnt_current = 0; #endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */ #if LWIP_IPV4 netif_set_addr(netif, ipaddr, netmask, gw); #endif /* LWIP_IPV4 */ /* call user specified initialization function for netif */ if (init(netif) != ERR_OK) { return NULL; } /* add this netif to the list */ netif->next = netif_list; netif_list = netif; mib2_netif_added(netif); #if LWIP_IGMP /* start IGMP processing */ if (netif->flags & NETIF_FLAG_IGMP) { igmp_start(netif); } #endif /* LWIP_IGMP */ LWIP_DEBUGF(NETIF_DEBUG, ("netif: added interface %c%c IP", netif->name[0], netif->name[1])); #if LWIP_IPV4 LWIP_DEBUGF(NETIF_DEBUG, (" addr ")); ip4_addr_debug_print(NETIF_DEBUG, ipaddr); LWIP_DEBUGF(NETIF_DEBUG, (" netmask ")); ip4_addr_debug_print(NETIF_DEBUG, netmask); LWIP_DEBUGF(NETIF_DEBUG, (" gw ")); ip4_addr_debug_print(NETIF_DEBUG, gw); #endif /* LWIP_IPV4 */ LWIP_DEBUGF(NETIF_DEBUG, ("\n")); return netif; } #if LWIP_IPV4 /** * @ingroup netif_ip4 * Change IP address configuration for a network interface (including netmask * and default gateway). 
* * @param netif the network interface to change * @param ipaddr the new IP address * @param netmask the new netmask * @param gw the new default gateway */ void netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw) { if (ip4_addr_isany(ipaddr)) { /* when removing an address, we have to remove it *before* changing netmask/gw to ensure that tcp RST segment can be sent correctly */ netif_set_ipaddr(netif, ipaddr); netif_set_netmask(netif, netmask); netif_set_gw(netif, gw); } else { netif_set_netmask(netif, netmask); netif_set_gw(netif, gw); /* set ipaddr last to ensure netmask/gw have been set when status callback is called */ netif_set_ipaddr(netif, ipaddr); } } #endif /* LWIP_IPV4*/ /** * @ingroup netif * Remove a network interface from the list of lwIP netifs. * * @param netif the network interface to remove */ void netif_remove(struct netif *netif) { #if LWIP_IPV6 int i; #endif if (netif == NULL) { return; } #if LWIP_IPV4 if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); #endif /* LWIP_RAW */ } #if LWIP_IGMP /* stop IGMP processing */ if (netif->flags & NETIF_FLAG_IGMP) { igmp_stop(netif); } #endif /* LWIP_IGMP */ #endif /* LWIP_IPV4*/ #if LWIP_IPV6 for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); #endif /* LWIP_RAW */ } } #if LWIP_IPV6_MLD /* stop MLD processing */ mld6_stop(netif); #endif /* LWIP_IPV6_MLD */ #endif /* LWIP_IPV6 */ if (netif_is_up(netif)) { /* set netif down before removing (call callback function) */ netif_set_down(netif); } mib2_remove_ip4(netif); /* this netif is default? */ if (netif_default == netif) { /* reset default netif */ netif_set_default(NULL); } /* is it the first netif? */ if (netif_list == netif) { netif_list = netif->next; } else { /* look for netif further down the list */ struct netif * tmp_netif; for (tmp_netif = netif_list; tmp_netif != NULL; tmp_netif = tmp_netif->next) { if (tmp_netif->next == netif) { tmp_netif->next = netif->next; break; } } if (tmp_netif == NULL) { return; /* netif is not on the list */ } } mib2_netif_removed(netif); #if LWIP_NETIF_REMOVE_CALLBACK if (netif->remove_callback) { netif->remove_callback(netif); } #endif /* LWIP_NETIF_REMOVE_CALLBACK */ LWIP_DEBUGF( NETIF_DEBUG, ("netif_remove: removed netif\n") ); } /** * @ingroup netif * Find a network interface by searching for its name * * @param name the name of the netif (like netif->name) plus concatenated number * in ascii representation (e.g. 
'en0') */ struct netif * netif_find(const char *name) { struct netif *netif; u8_t num; if (name == NULL) { return NULL; } num = name[2] - '0'; for (netif = netif_list; netif != NULL; netif = netif->next) { if (num == netif->num && name[0] == netif->name[0] && name[1] == netif->name[1]) { LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: found %c%c\n", name[0], name[1])); return netif; } } LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: didn't find %c%c\n", name[0], name[1])); return NULL; } #if LWIP_IPV4 /** * @ingroup netif_ip4 * Change the IP address of a network interface * * @param netif the network interface to change * @param ipaddr the new IP address * * @note call netif_set_addr() if you also want to change netmask and * default gateway */ void netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr) { ip_addr_t new_addr; *ip_2_ip4(&new_addr) = (ipaddr ? *ipaddr : *IP4_ADDR_ANY4); IP_SET_TYPE_VAL(new_addr, IPADDR_TYPE_V4); /* address is actually being changed? */ if (ip4_addr_cmp(ip_2_ip4(&new_addr), netif_ip4_addr(netif)) == 0) { LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: netif address being changed\n")); #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); #endif /* LWIP_RAW */ mib2_remove_ip4(netif); mib2_remove_route_ip4(0, netif); /* set new IP address to netif */ ip4_addr_set(ip_2_ip4(&netif->ip_addr), ipaddr); IP_SET_TYPE_VAL(netif->ip_addr, IPADDR_TYPE_V4); mib2_add_ip4(netif); mib2_add_route_ip4(0, netif); netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4); NETIF_STATUS_CALLBACK(netif); } LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IP address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", netif->name[0], netif->name[1], ip4_addr1_16(netif_ip4_addr(netif)), ip4_addr2_16(netif_ip4_addr(netif)), ip4_addr3_16(netif_ip4_addr(netif)), ip4_addr4_16(netif_ip4_addr(netif)))); } /** * @ingroup netif_ip4 * Change the default gateway for a network interface * * @param netif the network interface to change * @param gw the new default gateway * * @note call netif_set_addr() if you also want to change ip address and netmask */ void netif_set_gw(struct netif *netif, const ip4_addr_t *gw) { ip4_addr_set(ip_2_ip4(&netif->gw), gw); IP_SET_TYPE_VAL(netif->gw, IPADDR_TYPE_V4); LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: GW address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", netif->name[0], netif->name[1], ip4_addr1_16(netif_ip4_gw(netif)), ip4_addr2_16(netif_ip4_gw(netif)), ip4_addr3_16(netif_ip4_gw(netif)), ip4_addr4_16(netif_ip4_gw(netif)))); } /** * @ingroup netif_ip4 * Change the netmask of a network interface * * @param netif the network interface to change * @param netmask the new netmask * * @note call netif_set_addr() if you also want to change ip address and * default gateway */ void netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask) { mib2_remove_route_ip4(0, netif); /* set new netmask to netif */ ip4_addr_set(ip_2_ip4(&netif->netmask), netmask); IP_SET_TYPE_VAL(netif->netmask, IPADDR_TYPE_V4); mib2_add_route_ip4(0, netif); LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: netmask of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", netif->name[0], netif->name[1], ip4_addr1_16(netif_ip4_netmask(netif)), 
ip4_addr2_16(netif_ip4_netmask(netif)), ip4_addr3_16(netif_ip4_netmask(netif)), ip4_addr4_16(netif_ip4_netmask(netif)))); } #endif /* LWIP_IPV4 */ /** * @ingroup netif * Set a network interface as the default network interface * (used to output all packets for which no specific route is found) * * @param netif the default network interface */ void netif_set_default(struct netif *netif) { if (netif == NULL) { /* remove default route */ mib2_remove_route_ip4(1, netif); } else { /* install default route */ mib2_add_route_ip4(1, netif); } netif_default = netif; LWIP_DEBUGF(NETIF_DEBUG, ("netif: setting default interface %c%c\n", netif ? netif->name[0] : '\'', netif ? netif->name[1] : '\'')); } /** * @ingroup netif * Bring an interface up, available for processing * traffic. */ void netif_set_up(struct netif *netif) { if (!(netif->flags & NETIF_FLAG_UP)) { netif->flags |= NETIF_FLAG_UP; MIB2_COPY_SYSUPTIME_TO(&netif->ts); NETIF_STATUS_CALLBACK(netif); if (netif->flags & NETIF_FLAG_LINK_UP) { netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4|NETIF_REPORT_TYPE_IPV6); } } } /** Send ARP/IGMP/MLD/RS events, e.g. on link-up/netif-up or addr-change */ static void netif_issue_reports(struct netif* netif, u8_t report_type) { #if LWIP_IPV4 if ((report_type & NETIF_REPORT_TYPE_IPV4) && !ip4_addr_isany_val(*netif_ip4_addr(netif))) { #if LWIP_ARP /* For Ethernet network interfaces, we would like to send a "gratuitous ARP" */ if (netif->flags & (NETIF_FLAG_ETHARP)) { etharp_gratuitous(netif); } #endif /* LWIP_ARP */ #if LWIP_IGMP /* resend IGMP memberships */ if (netif->flags & NETIF_FLAG_IGMP) { igmp_report_groups(netif); } #endif /* LWIP_IGMP */ } #endif /* LWIP_IPV4 */ #if LWIP_IPV6 if (report_type & NETIF_REPORT_TYPE_IPV6) { #if LWIP_IPV6_MLD /* send mld memberships */ mld6_report_groups(netif); #endif /* LWIP_IPV6_MLD */ #if LWIP_IPV6_SEND_ROUTER_SOLICIT /* Send Router Solicitation messages. */ netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; #endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ } #endif /* LWIP_IPV6 */ } /** * @ingroup netif * Bring an interface down, disabling any traffic processing. 
*/ void netif_set_down(struct netif *netif) { if (netif->flags & NETIF_FLAG_UP) { netif->flags &= ~NETIF_FLAG_UP; MIB2_COPY_SYSUPTIME_TO(&netif->ts); #if LWIP_IPV4 && LWIP_ARP if (netif->flags & NETIF_FLAG_ETHARP) { etharp_cleanup_netif(netif); } #endif /* LWIP_IPV4 && LWIP_ARP */ #if LWIP_IPV6 nd6_cleanup_netif(netif); #endif /* LWIP_IPV6 */ NETIF_STATUS_CALLBACK(netif); } } #if LWIP_NETIF_STATUS_CALLBACK /** * @ingroup netif * Set callback to be called when interface is brought up/down or address is changed while up */ void netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback) { if (netif) { netif->status_callback = status_callback; } } #endif /* LWIP_NETIF_STATUS_CALLBACK */ #if LWIP_NETIF_REMOVE_CALLBACK /** * @ingroup netif * Set callback to be called when the interface has been removed */ void netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback) { if (netif) { netif->remove_callback = remove_callback; } } #endif /* LWIP_NETIF_REMOVE_CALLBACK */ /** * @ingroup netif * Called by a driver when its link goes up */ void netif_set_link_up(struct netif *netif) { if (!(netif->flags & NETIF_FLAG_LINK_UP)) { netif->flags |= NETIF_FLAG_LINK_UP; #if LWIP_DHCP dhcp_network_changed(netif); #endif /* LWIP_DHCP */ #if LWIP_AUTOIP autoip_network_changed(netif); #endif /* LWIP_AUTOIP */ if (netif->flags & NETIF_FLAG_UP) { netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4|NETIF_REPORT_TYPE_IPV6); } NETIF_LINK_CALLBACK(netif); } } /** * @ingroup netif * Called by a driver when its link goes down */ void netif_set_link_down(struct netif *netif ) { if (netif->flags & NETIF_FLAG_LINK_UP) { netif->flags &= ~NETIF_FLAG_LINK_UP; NETIF_LINK_CALLBACK(netif); } } #if LWIP_NETIF_LINK_CALLBACK /** * @ingroup netif * Set callback to be called when link is brought up/down */ void netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback) { if (netif) { netif->link_callback = link_callback; } } #endif /* LWIP_NETIF_LINK_CALLBACK */ #if ENABLE_LOOPBACK /** * @ingroup netif * Send an IP packet to be received on the same netif (loopif-like). * The pbuf is simply copied and handed back to netif->input. * In multithreaded mode, this is done directly since netif->input must put * the packet on a queue. * In callback mode, the packet is put on an internal queue and is fed to * netif->input by netif_poll(). * * @param netif the lwip network interface structure * @param p the (IP) packet to 'send' * @return ERR_OK if the packet has been sent * ERR_MEM if the pbuf used to copy the packet couldn't be allocated */ err_t netif_loop_output(struct netif *netif, struct pbuf *p) { struct pbuf *r; err_t err; struct pbuf *last; #if LWIP_LOOPBACK_MAX_PBUFS u16_t clen = 0; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ /* If we have a loopif, SNMP counters are adjusted for it, * if not they are adjusted for 'netif'. 
*/ #if MIB2_STATS #if LWIP_HAVE_LOOPIF struct netif *stats_if = &loop_netif; #else /* LWIP_HAVE_LOOPIF */ struct netif *stats_if = netif; #endif /* LWIP_HAVE_LOOPIF */ #endif /* MIB2_STATS */ SYS_ARCH_DECL_PROTECT(lev); /* Allocate a new pbuf */ r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM); if (r == NULL) { LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); return ERR_MEM; } #if LWIP_LOOPBACK_MAX_PBUFS clen = pbuf_clen(r); /* check for overflow or too many pbuf on queue */ if (((netif->loop_cnt_current + clen) < netif->loop_cnt_current) || ((netif->loop_cnt_current + clen) > LWIP_LOOPBACK_MAX_PBUFS)) { pbuf_free(r); LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); return ERR_MEM; } netif->loop_cnt_current += clen; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ /* Copy the whole pbuf queue p into the single pbuf r */ if ((err = pbuf_copy(r, p)) != ERR_OK) { pbuf_free(r); LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); return err; } /* Put the packet on a linked list which gets emptied through calling netif_poll(). */ /* let last point to the last pbuf in chain r */ for (last = r; last->next != NULL; last = last->next); SYS_ARCH_PROTECT(lev); if (netif->loop_first != NULL) { LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL); netif->loop_last->next = r; netif->loop_last = last; } else { netif->loop_first = r; netif->loop_last = last; } SYS_ARCH_UNPROTECT(lev); LINK_STATS_INC(link.xmit); MIB2_STATS_NETIF_ADD(stats_if, ifoutoctets, p->tot_len); MIB2_STATS_NETIF_INC(stats_if, ifoutucastpkts); #if LWIP_NETIF_LOOPBACK_MULTITHREADING /* For multithreading environment, schedule a call to netif_poll */ tcpip_callback_with_block((tcpip_callback_fn)netif_poll, netif, 0); #endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ return ERR_OK; } #if LWIP_HAVE_LOOPIF #if LWIP_IPV4 static err_t netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t* addr) { LWIP_UNUSED_ARG(addr); return netif_loop_output(netif, p); } #endif /* LWIP_IPV4 */ #if LWIP_IPV6 static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t* addr) { LWIP_UNUSED_ARG(addr); return netif_loop_output(netif, p); } #endif /* LWIP_IPV6 */ #endif /* LWIP_HAVE_LOOPIF */ /** * Call netif_poll() in the main loop of your application. This is to prevent * reentering non-reentrant functions like tcp_input(). Packets passed to * netif_loop_output() are put on a list that is passed to netif->input() by * netif_poll(). */ void netif_poll(struct netif *netif) { struct pbuf *in; /* If we have a loopif, SNMP counters are adjusted for it, * if not they are adjusted for 'netif'. */ #if MIB2_STATS #if LWIP_HAVE_LOOPIF struct netif *stats_if = &loop_netif; #else /* LWIP_HAVE_LOOPIF */ struct netif *stats_if = netif; #endif /* LWIP_HAVE_LOOPIF */ #endif /* MIB2_STATS */ SYS_ARCH_DECL_PROTECT(lev); do { /* Get a packet from the list. 
With SYS_LIGHTWEIGHT_PROT=1, this is protected */ SYS_ARCH_PROTECT(lev); in = netif->loop_first; if (in != NULL) { struct pbuf *in_end = in; #if LWIP_LOOPBACK_MAX_PBUFS u8_t clen = 1; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ while (in_end->len != in_end->tot_len) { LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL); in_end = in_end->next; #if LWIP_LOOPBACK_MAX_PBUFS clen++; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ } #if LWIP_LOOPBACK_MAX_PBUFS /* adjust the number of pbufs on queue */ LWIP_ASSERT("netif->loop_cnt_current underflow", ((netif->loop_cnt_current - clen) < netif->loop_cnt_current)); netif->loop_cnt_current -= clen; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ /* 'in_end' now points to the last pbuf from 'in' */ if (in_end == netif->loop_last) { /* this was the last pbuf in the list */ netif->loop_first = netif->loop_last = NULL; } else { /* pop the pbuf off the list */ netif->loop_first = in_end->next; LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL); } /* De-queue the pbuf from its successors on the 'loop_' list. */ in_end->next = NULL; } SYS_ARCH_UNPROTECT(lev); if (in != NULL) { LINK_STATS_INC(link.recv); MIB2_STATS_NETIF_ADD(stats_if, ifinoctets, in->tot_len); MIB2_STATS_NETIF_INC(stats_if, ifinucastpkts); /* loopback packets are always IP packets! */ if (ip_input(in, netif) != ERR_OK) { pbuf_free(in); } /* Don't reference the packet any more! */ in = NULL; } /* go on while there is a packet on the list */ } while (netif->loop_first != NULL); } #if !LWIP_NETIF_LOOPBACK_MULTITHREADING /** * Calls netif_poll() for every netif on the netif_list. */ void netif_poll_all(void) { struct netif *netif = netif_list; /* loop through netifs */ while (netif != NULL) { netif_poll(netif); /* proceed to next network interface */ netif = netif->next; } } #endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ #endif /* ENABLE_LOOPBACK */ #if LWIP_NUM_NETIF_CLIENT_DATA > 0 /** * @ingroup netif_cd * Allocate an index to store data in client_data member of struct netif. * Returned value is an index in mentioned array. 
 * @see LWIP_NUM_NETIF_CLIENT_DATA */ u8_t netif_alloc_client_data_id(void) { u8_t result = netif_client_id; netif_client_id++; LWIP_ASSERT("Increase LWIP_NUM_NETIF_CLIENT_DATA in lwipopts.h", result < LWIP_NUM_NETIF_CLIENT_DATA); return result + LWIP_NETIF_CLIENT_DATA_INDEX_MAX; } #endif #if LWIP_IPV6 /** * @ingroup netif_ip6 * Change an IPv6 address of a network interface * * @param netif the network interface to change * @param addr_idx index of the IPv6 address * @param addr6 the new IPv6 address * * @note call netif_ip6_addr_set_state() to set the address valid/tentative */ void netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6) { LWIP_ASSERT("addr6 != NULL", addr6 != NULL); netif_ip6_addr_set_parts(netif, addr_idx, addr6->addr[0], addr6->addr[1], addr6->addr[2], addr6->addr[3]); } /* * Change an IPv6 address of a network interface (internal version taking 4 * u32_t) * * @param netif the network interface to change * @param addr_idx index of the IPv6 address * @param i0 word0 of the new IPv6 address * @param i1 word1 of the new IPv6 address * @param i2 word2 of the new IPv6 address * @param i3 word3 of the new IPv6 address */ void netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3) { const ip6_addr_t *old_addr; LWIP_ASSERT("netif != NULL", netif != NULL); LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); old_addr = netif_ip6_addr(netif, addr_idx); /* address is actually being changed? */ if ((old_addr->addr[0] != i0) || (old_addr->addr[1] != i1) || (old_addr->addr[2] != i2) || (old_addr->addr[3] != i3)) { LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set: netif address being changed\n")); if (netif_ip6_addr_state(netif, addr_idx) & IP6_ADDR_VALID) { #if LWIP_TCP || LWIP_UDP ip_addr_t new_ipaddr; IP_ADDR6(&new_ipaddr, i0, i1, i2, i3); #endif /* LWIP_TCP || LWIP_UDP */ #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); #endif /* LWIP_RAW */ } /* @todo: remove/readd mib2 ip6 entries? */ IP6_ADDR(ip_2_ip6(&(netif->ip6_addr[addr_idx])), i0, i1, i2, i3); IP_SET_TYPE_VAL(netif->ip6_addr[addr_idx], IPADDR_TYPE_V6); if (netif_ip6_addr_state(netif, addr_idx) & IP6_ADDR_VALID) { netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); NETIF_STATUS_CALLBACK(netif); } } LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), netif_ip6_addr_state(netif, addr_idx))); } /** * @ingroup netif_ip6 * Change the state of an IPv6 address of a network interface * (INVALID, TENTATIVE, PREFERRED, DEPRECATED, where TENTATIVE * includes the number of checks done, see ip6_addr.h) * * @param netif the network interface to change * @param addr_idx index of the IPv6 address * @param state the new IPv6 address state */ void netif_ip6_addr_set_state(struct netif* netif, s8_t addr_idx, u8_t state) { u8_t old_state; LWIP_ASSERT("netif != NULL", netif != NULL); LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); old_state = netif_ip6_addr_state(netif, addr_idx); /* state is actually being changed? 
*/ if (old_state != state) { u8_t old_valid = old_state & IP6_ADDR_VALID; u8_t new_valid = state & IP6_ADDR_VALID; LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set_state: netif address state being changed\n")); if (old_valid && !new_valid) { /* address about to be removed by setting invalid */ #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); #endif /* LWIP_RAW */ /* @todo: remove mib2 ip6 entries? */ } netif->ip6_addr_state[addr_idx] = state; if (!old_valid && new_valid) { /* address added by setting valid */ /* @todo: add mib2 ip6 entries? */ netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); } if ((old_state & IP6_ADDR_PREFERRED) != (state & IP6_ADDR_PREFERRED)) { /* address state has changed (valid flag changed or switched between preferred and deprecated) -> call the callback function */ NETIF_STATUS_CALLBACK(netif); } } LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), netif_ip6_addr_state(netif, addr_idx))); } /** * Checks if a specific address is assigned to the netif and returns its * index. * * @param netif the netif to check * @param ip6addr the IPv6 address to find * @return >= 0: address found, this is its index * -1: address not found on this netif */ s8_t netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr) { s8_t i; for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_cmp(netif_ip6_addr(netif, i), ip6addr)) { return i; } } return -1; } /** * @ingroup netif_ip6 * Create a link-local IPv6 address on a netif (stored in slot 0) * * @param netif the netif to create the address on * @param from_mac_48bit if != 0, assume hwaddr is a 48-bit MAC address (std conversion) * if == 0, use hwaddr directly as interface ID */ void netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit) { u8_t i, addr_index; /* Link-local prefix. */ ip_2_ip6(&netif->ip6_addr[0])->addr[0] = PP_HTONL(0xfe800000ul); ip_2_ip6(&netif->ip6_addr[0])->addr[1] = 0; /* Generate interface ID. */ if (from_mac_48bit) { /* Assume hwaddr is a 48-bit IEEE 802 MAC. Convert to EUI-64 address. Complement the universal/local bit. */ ip_2_ip6(&netif->ip6_addr[0])->addr[2] = lwip_htonl((((u32_t)(netif->hwaddr[0] ^ 0x02)) << 24) | ((u32_t)(netif->hwaddr[1]) << 16) | ((u32_t)(netif->hwaddr[2]) << 8) | (0xff)); ip_2_ip6(&netif->ip6_addr[0])->addr[3] = lwip_htonl((0xfeul << 24) | ((u32_t)(netif->hwaddr[3]) << 16) | ((u32_t)(netif->hwaddr[4]) << 8) | (netif->hwaddr[5])); } else { /* Use hwaddr directly as interface ID. */ ip_2_ip6(&netif->ip6_addr[0])->addr[2] = 0; ip_2_ip6(&netif->ip6_addr[0])->addr[3] = 0; addr_index = 3; for (i = 0; (i < 8) && (i < netif->hwaddr_len); i++) { if (i == 4) { addr_index--; } ip_2_ip6(&netif->ip6_addr[0])->addr[addr_index] |= ((u32_t)(netif->hwaddr[netif->hwaddr_len - i - 1])) << (8 * (i & 0x03)); } } /* Set address state. */ #if LWIP_IPV6_DUP_DETECT_ATTEMPTS /* Will perform duplicate address detection (DAD). */ netif->ip6_addr_state[0] = IP6_ADDR_TENTATIVE; #else /* Consider address valid. 
*/ netif->ip6_addr_state[0] = IP6_ADDR_PREFERRED; #endif /* LWIP_IPV6_DUP_DETECT_ATTEMPTS */ } /** * @ingroup netif_ip6 * This function allows for the easy addition of a new IPv6 address to an interface. * It takes care of finding an empty slot and then sets the address tentative * (to make sure that all the subsequent processing happens). * * @param netif netif to add the address on * @param ip6addr address to add * @param chosen_idx if != NULL, the chosen IPv6 address index will be stored here */ err_t netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx) { s8_t i; i = netif_get_ip6_addr_match(netif, ip6addr); if (i >= 0) { /* Address already added */ if (chosen_idx != NULL) { *chosen_idx = i; } return ERR_OK; } /* Find a free slot -- mustn't be the first one (reserved for link local) */ for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (!ip6_addr_isvalid(netif->ip6_addr_state[i])) { ip_addr_copy_from_ip6(netif->ip6_addr[i], *ip6addr); netif_ip6_addr_set_state(netif, i, IP6_ADDR_TENTATIVE); if (chosen_idx != NULL) { *chosen_idx = i; } return ERR_OK; } } if (chosen_idx != NULL) { *chosen_idx = -1; } return ERR_VAL; } /** Dummy IPv6 output function for netifs not supporting IPv6 */ static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr) { LWIP_UNUSED_ARG(netif); LWIP_UNUSED_ARG(p); LWIP_UNUSED_ARG(ipaddr); return ERR_IF; } #endif /* LWIP_IPV6 */
chinesebear/rtt-net
rtt-2.1/components/net/lwip-2.0.0/src/core/netif.c
C
mit
37,448
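/* A minimal usage sketch for two of the APIs implemented in the netif.c
 * record above, assuming the lwIP 2.0.0 configuration macros: reserving a
 * client-data slot with netif_alloc_client_data_id(), and draining the
 * loopback queue with netif_poll_all() from a single-threaded main loop.
 * The names my_if_stats, my_module_init and my_mainloop_iteration are
 * invented for illustration; only the netif_* calls come from lwIP. */
#include "lwip/netif.h"

struct my_if_stats {
  u32_t tx_frames; /* arbitrary per-interface application state */
};

static u8_t my_client_id;
static struct my_if_stats my_stats;

void my_module_init(struct netif *netif)
{
  /* Reserve a slot in netif->client_data[]; netif_alloc_client_data_id()
   * asserts if LWIP_NUM_NETIF_CLIENT_DATA is too small. */
  my_client_id = netif_alloc_client_data_id();
  netif_set_client_data(netif, my_client_id, &my_stats);
}

void my_mainloop_iteration(void)
{
#if ENABLE_LOOPBACK && !LWIP_NETIF_LOOPBACK_MULTITHREADING
  /* Packets queued by netif_loop_output() are only delivered once the
   * application calls netif_poll()/netif_poll_all() from its main loop. */
  netif_poll_all();
#endif
}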
/* * This file is part of the MicroPython project, http://micropython.org/ * * The MIT License (MIT) * * Copyright (c) 2014 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <assert.h> #include <string.h> #include "py/nlr.h" #include "py/runtime.h" #if MICROPY_PY_UHASHLIB #include "crypto-algorithms/sha256.h" #if MICROPY_PY_UHASHLIB_SHA1 #include "lib/axtls/crypto/crypto.h" #endif typedef struct _mp_obj_hash_t { mp_obj_base_t base; char state[0]; } mp_obj_hash_t; STATIC mp_obj_t hash_update(mp_obj_t self_in, mp_obj_t arg); STATIC mp_obj_t hash_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) { mp_arg_check_num(n_args, n_kw, 0, 1, false); mp_obj_hash_t *o = m_new_obj_var(mp_obj_hash_t, char, sizeof(CRYAL_SHA256_CTX)); o->base.type = type; sha256_init((CRYAL_SHA256_CTX*)o->state); if (n_args == 1) { hash_update(MP_OBJ_FROM_PTR(o), args[0]); } return MP_OBJ_FROM_PTR(o); } #if MICROPY_PY_UHASHLIB_SHA1 STATIC mp_obj_t sha1_update(mp_obj_t self_in, mp_obj_t arg); STATIC mp_obj_t sha1_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) { mp_arg_check_num(n_args, n_kw, 0, 1, false); mp_obj_hash_t *o = m_new_obj_var(mp_obj_hash_t, char, sizeof(SHA1_CTX)); o->base.type = type; SHA1_Init((SHA1_CTX*)o->state); if (n_args == 1) { sha1_update(MP_OBJ_FROM_PTR(o), args[0]); } return MP_OBJ_FROM_PTR(o); } #endif STATIC mp_obj_t hash_update(mp_obj_t self_in, mp_obj_t arg) { mp_obj_hash_t *self = MP_OBJ_TO_PTR(self_in); mp_buffer_info_t bufinfo; mp_get_buffer_raise(arg, &bufinfo, MP_BUFFER_READ); sha256_update((CRYAL_SHA256_CTX*)self->state, bufinfo.buf, bufinfo.len); return mp_const_none; } MP_DEFINE_CONST_FUN_OBJ_2(hash_update_obj, hash_update); #if MICROPY_PY_UHASHLIB_SHA1 STATIC mp_obj_t sha1_update(mp_obj_t self_in, mp_obj_t arg) { mp_obj_hash_t *self = MP_OBJ_TO_PTR(self_in); mp_buffer_info_t bufinfo; mp_get_buffer_raise(arg, &bufinfo, MP_BUFFER_READ); SHA1_Update((SHA1_CTX*)self->state, bufinfo.buf, bufinfo.len); return mp_const_none; } MP_DEFINE_CONST_FUN_OBJ_2(sha1_update_obj, sha1_update); #endif STATIC mp_obj_t hash_digest(mp_obj_t self_in) { mp_obj_hash_t *self = MP_OBJ_TO_PTR(self_in); vstr_t vstr; vstr_init_len(&vstr, SHA256_BLOCK_SIZE); sha256_final((CRYAL_SHA256_CTX*)self->state, (byte*)vstr.buf); return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr); } MP_DEFINE_CONST_FUN_OBJ_1(hash_digest_obj, hash_digest); #if MICROPY_PY_UHASHLIB_SHA1 STATIC mp_obj_t sha1_digest(mp_obj_t self_in) { mp_obj_hash_t *self = 
MP_OBJ_TO_PTR(self_in); vstr_t vstr; vstr_init_len(&vstr, SHA1_SIZE); SHA1_Final((byte*)vstr.buf, (SHA1_CTX*)self->state); return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr); } MP_DEFINE_CONST_FUN_OBJ_1(sha1_digest_obj, sha1_digest); #endif STATIC const mp_rom_map_elem_t hash_locals_dict_table[] = { { MP_ROM_QSTR(MP_QSTR_update), MP_ROM_PTR(&hash_update_obj) }, { MP_ROM_QSTR(MP_QSTR_digest), MP_ROM_PTR(&hash_digest_obj) }, }; STATIC MP_DEFINE_CONST_DICT(hash_locals_dict, hash_locals_dict_table); STATIC const mp_obj_type_t sha256_type = { { &mp_type_type }, .name = MP_QSTR_sha256, .make_new = hash_make_new, .locals_dict = (void*)&hash_locals_dict, }; #if MICROPY_PY_UHASHLIB_SHA1 STATIC const mp_rom_map_elem_t sha1_locals_dict_table[] = { { MP_ROM_QSTR(MP_QSTR_update), MP_ROM_PTR(&sha1_update_obj) }, { MP_ROM_QSTR(MP_QSTR_digest), MP_ROM_PTR(&sha1_digest_obj) }, }; STATIC MP_DEFINE_CONST_DICT(sha1_locals_dict, sha1_locals_dict_table); STATIC const mp_obj_type_t sha1_type = { { &mp_type_type }, .name = MP_QSTR_sha1, .make_new = sha1_make_new, .locals_dict = (void*)&sha1_locals_dict, }; #endif STATIC const mp_rom_map_elem_t mp_module_hashlib_globals_table[] = { { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_uhashlib) }, { MP_ROM_QSTR(MP_QSTR_sha256), MP_ROM_PTR(&sha256_type) }, #if MICROPY_PY_UHASHLIB_SHA1 { MP_ROM_QSTR(MP_QSTR_sha1), MP_ROM_PTR(&sha1_type) }, #endif }; STATIC MP_DEFINE_CONST_DICT(mp_module_hashlib_globals, mp_module_hashlib_globals_table); const mp_obj_module_t mp_module_uhashlib = { .base = { &mp_type_module }, .globals = (mp_obj_dict_t*)&mp_module_hashlib_globals, }; #include "crypto-algorithms/sha256.c" #endif //MICROPY_PY_UHASHLIB
ignorabimus/micropython-c-api
micropython/extmod/moduhashlib.c
C
mit
5,524
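/* A standalone sketch of the init/update/final sequence that the
 * hash_make_new(), hash_update() and hash_digest() wrappers in the
 * moduhashlib.c record above drive, assuming the vendored
 * crypto-algorithms SHA-256 API the module includes (CRYAL_SHA256_CTX,
 * sha256_init/sha256_update/sha256_final, SHA256_BLOCK_SIZE == 32). */
#include <stdio.h>
#include <string.h>
#include "crypto-algorithms/sha256.h"

int main(void)
{
    CRYAL_SHA256_CTX ctx;
    unsigned char digest[SHA256_BLOCK_SIZE]; /* 32-byte SHA-256 digest */
    const char *msg = "hello";

    sha256_init(&ctx);
    sha256_update(&ctx, (const unsigned char *)msg, strlen(msg));
    sha256_final(&ctx, digest); /* finalizes; ctx must not be reused */

    for (size_t i = 0; i < sizeof(digest); i++)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}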
/* * ath79-mbox.c -- ALSA MBOX DMA management functions * * Copyright (c) 2013 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/dma-mapping.h> #include <linux/types.h> #include <linux/dmapool.h> #include <linux/delay.h> #include <sound/core.h> #include <asm/mach-ath79/ar71xx_regs.h> #include <asm/mach-ath79/ath79.h> #include "ath79-pcm.h" #include "ath79-i2s.h" spinlock_t ath79_pcm_lock; static struct dma_pool *ath79_pcm_cache; void ath79_mbox_reset(void) { u32 t; spin_lock(&ath79_pcm_lock); t = ath79_reset_rr(AR934X_RESET_REG_RESET_MODULE); t |= AR934X_RESET_MBOX; ath79_reset_wr(AR934X_RESET_REG_RESET_MODULE, t); udelay(50); t &= ~(AR934X_RESET_MBOX); ath79_reset_wr(AR934X_RESET_REG_RESET_MODULE, t); spin_unlock(&ath79_pcm_lock); } void ath79_mbox_fifo_reset(u32 mask) { ath79_dma_wr(AR934X_DMA_REG_MBOX_FIFO_RESET, mask); udelay(50); /* Datasheet says we should reset the stereo controller whenever * we reset the MBOX DMA controller */ ath79_stereo_reset(); } void ath79_mbox_interrupt_enable(u32 mask) { u32 t; spin_lock(&ath79_pcm_lock); t = ath79_dma_rr(AR934X_DMA_REG_MBOX_INT_ENABLE); t |= mask; ath79_dma_wr(AR934X_DMA_REG_MBOX_INT_ENABLE, t); spin_unlock(&ath79_pcm_lock); } void ath79_mbox_interrupt_ack(u32 mask) { ath79_dma_wr(AR934X_DMA_REG_MBOX_INT_STATUS, mask); ath79_reset_wr(AR71XX_RESET_REG_MISC_INT_STATUS, ~(MISC_INT_DMA)); /* Flush these two registers */ ath79_dma_rr(AR934X_DMA_REG_MBOX_INT_STATUS); ath79_reset_rr(AR71XX_RESET_REG_MISC_INT_STATUS); } void ath79_mbox_dma_start(struct ath79_pcm_rt_priv *rtpriv) { if (rtpriv->direction == SNDRV_PCM_STREAM_PLAYBACK) { ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_RX_CONTROL, AR934X_DMA_MBOX_DMA_CONTROL_START); ath79_dma_rr(AR934X_DMA_REG_MBOX0_DMA_RX_CONTROL); } else { ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_TX_CONTROL, AR934X_DMA_MBOX_DMA_CONTROL_START); ath79_dma_rr(AR934X_DMA_REG_MBOX0_DMA_TX_CONTROL); } } void ath79_mbox_dma_stop(struct ath79_pcm_rt_priv *rtpriv) { if (rtpriv->direction == SNDRV_PCM_STREAM_PLAYBACK) { ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_RX_CONTROL, AR934X_DMA_MBOX_DMA_CONTROL_STOP); ath79_dma_rr(AR934X_DMA_REG_MBOX0_DMA_RX_CONTROL); } else { ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_TX_CONTROL, AR934X_DMA_MBOX_DMA_CONTROL_STOP); ath79_dma_rr(AR934X_DMA_REG_MBOX0_DMA_TX_CONTROL); } /* Delay for the dynamically calculated max time based on sample size, channel, sample rate + margin to ensure that the DMA engine will be truly idle. 
*/ mdelay(rtpriv->delay_time); } void ath79_mbox_dma_reset(void) { ath79_mbox_reset(); ath79_mbox_fifo_reset(AR934X_DMA_MBOX0_FIFO_RESET_RX | AR934X_DMA_MBOX0_FIFO_RESET_TX); } void ath79_mbox_dma_prepare(struct ath79_pcm_rt_priv *rtpriv) { struct ath79_pcm_desc *desc; u32 t; if (rtpriv->direction == SNDRV_PCM_STREAM_PLAYBACK) { /* Request the DMA channel to the controller */ t = ath79_dma_rr(AR934X_DMA_REG_MBOX_DMA_POLICY); ath79_dma_wr(AR934X_DMA_REG_MBOX_DMA_POLICY, t | AR934X_DMA_MBOX_DMA_POLICY_RX_QUANTUM | (6 << AR934X_DMA_MBOX_DMA_POLICY_TX_FIFO_THRESH_SHIFT)); /* The direction is indicated from the DMA engine perspective * i.e. we'll be using the RX registers for Playback and * the TX registers for capture */ desc = list_first_entry(&rtpriv->dma_head, struct ath79_pcm_desc, list); ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_RX_DESCRIPTOR_BASE, (u32) desc->phys); ath79_mbox_interrupt_enable(AR934X_DMA_MBOX0_INT_RX_COMPLETE); } else { /* Request the DMA channel to the controller */ t = ath79_dma_rr(AR934X_DMA_REG_MBOX_DMA_POLICY); ath79_dma_wr(AR934X_DMA_REG_MBOX_DMA_POLICY, t | AR934X_DMA_MBOX_DMA_POLICY_TX_QUANTUM | (6 << AR934X_DMA_MBOX_DMA_POLICY_TX_FIFO_THRESH_SHIFT)); desc = list_first_entry(&rtpriv->dma_head, struct ath79_pcm_desc, list); ath79_dma_wr(AR934X_DMA_REG_MBOX0_DMA_TX_DESCRIPTOR_BASE, (u32) desc->phys); ath79_mbox_interrupt_enable(AR934X_DMA_MBOX0_INT_TX_COMPLETE); } } int ath79_mbox_dma_map(struct ath79_pcm_rt_priv *rtpriv, dma_addr_t baseaddr, int period_bytes, int bufsize) { struct list_head *head = &rtpriv->dma_head; struct ath79_pcm_desc *desc, *prev; dma_addr_t desc_p; unsigned int offset = 0; spin_lock(&ath79_pcm_lock); rtpriv->elapsed_size = 0; /* We loop until we have enough buffers to map the requested DMA area */ do { /* Allocate a descriptor and insert it into the DMA ring */ desc = dma_pool_alloc(ath79_pcm_cache, GFP_KERNEL, &desc_p); if (!desc) { /* Don't leak the lock on the error path */ spin_unlock(&ath79_pcm_lock); return -ENOMEM; } memset(desc, 0, sizeof(struct ath79_pcm_desc)); desc->phys = desc_p; list_add_tail(&desc->list, head); desc->OWN = 1; desc->rsvd1 = desc->rsvd2 = desc->rsvd3 = desc->EOM = 0; /* buffer size may not be a multiple of period_bytes */ if (bufsize >= offset + period_bytes) { desc->size = period_bytes; } else { desc->size = bufsize - offset; } desc->BufPtr = baseaddr + offset; /* For now, we assume the buffer is always full * -->length == size */ desc->length = desc->size; /* We need to make sure we are not the first descriptor. 
* If we are, prev doesn't point to a struct ath79_pcm_desc */ if (desc->list.prev != head) { prev = list_entry(desc->list.prev, struct ath79_pcm_desc, list); prev->NextPtr = desc->phys; } offset += desc->size; } while (offset < bufsize); /* Once all the descriptors have been created, we can close the loop * by pointing from the last one to the first one */ desc = list_first_entry(head, struct ath79_pcm_desc, list); prev = list_entry(head->prev, struct ath79_pcm_desc, list); prev->NextPtr = desc->phys; spin_unlock(&ath79_pcm_lock); return 0; } void ath79_mbox_dma_unmap(struct ath79_pcm_rt_priv *rtpriv) { struct list_head *head = &rtpriv->dma_head; struct ath79_pcm_desc *desc, *n; spin_lock(&ath79_pcm_lock); list_for_each_entry_safe(desc, n, head, list) { list_del(&desc->list); dma_pool_free(ath79_pcm_cache, desc, desc->phys); } spin_unlock(&ath79_pcm_lock); return; } int ath79_mbox_dma_init(struct device *dev) { int ret = 0; /* Allocate a DMA pool to store the MBOX descriptor */ ath79_pcm_cache = dma_pool_create("ath79_pcm_pool", dev, sizeof(struct ath79_pcm_desc), 4, 0); if (!ath79_pcm_cache) ret = -ENOMEM; return ret; } void ath79_mbox_dma_exit(void) { dma_pool_destroy(ath79_pcm_cache); ath79_pcm_cache = NULL; }
male-puppies/opwrt12
target/linux/ar71xx/files/sound/soc/ath79/ath79-mbox.c
C
gpl-2.0
7,215
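/* A simplified, self-contained model of the circular descriptor ring that
 * ath79_mbox_dma_map() in the record above builds from dma_pool_alloc()'d
 * descriptors: each descriptor covers up to period_bytes of the buffer,
 * the final one may be shorter, and the last NextPtr wraps back to the
 * head. The type demo_desc and function ring_build are hypothetical
 * illustrations, not the driver's hardware descriptor layout. */
#include <stddef.h>

struct demo_desc {
    unsigned int buf_off;   /* offset of this slice into the DMA buffer */
    unsigned int size;      /* bytes covered by this descriptor */
    struct demo_desc *next; /* successor; the last entry wraps to ring[0] */
};

static size_t ring_build(struct demo_desc *ring, size_t max_desc,
                         unsigned int bufsize, unsigned int period_bytes)
{
    size_t n = 0;
    unsigned int offset = 0;

    while (offset < bufsize && n < max_desc) {
        struct demo_desc *d = &ring[n];

        d->buf_off = offset;
        /* The buffer size need not be a multiple of period_bytes, so the
         * final descriptor may cover a short tail. */
        d->size = (bufsize - offset >= period_bytes) ? period_bytes
                                                     : bufsize - offset;
        offset += d->size;
        if (n > 0)
            ring[n - 1].next = d; /* link from the previous descriptor */
        n++;
    }
    if (n > 0)
        ring[n - 1].next = &ring[0]; /* close the loop */
    return n;
}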
/* PR target/95211 target/95256 */ /* { dg-do compile { target { ! ia32 } } } */ /* { dg-options "-O2 -ftree-slp-vectorize -march=skylake-avx512" } */ extern float f[4]; extern long long l[2]; extern long long ul[2]; void fix_128 (void) { l[0] = f[0]; l[1] = f[1]; } void fixuns_128 (void) { ul[0] = f[0]; ul[1] = f[1]; } void float_128 (void) { f[0] = l[0]; f[1] = l[1]; } void floatuns_128 (void) { f[0] = ul[0]; f[1] = ul[1]; } /* { dg-final { scan-assembler-times "vcvttps2qq" 2 } } */ /* { dg-final { scan-assembler-times "vcvtqq2ps" 2 } } */
Gurgel100/gcc
gcc/testsuite/gcc.target/i386/pr95211.c
C
gpl-2.0
570
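/* The testcase above only checks that the vectorizer emits vcvttps2qq and
 * vcvtqq2ps. For reference, a hand-written equivalent using AVX512DQ/VL
 * intrinsics might look like the sketch below (compile with
 * -mavx512dq -mavx512vl); the *_intrin function names are invented. */
#include <immintrin.h>

void fix_128_intrin(long long *dst, const float *src)
{
    /* vcvttps2qq: truncate two floats to two signed 64-bit integers */
    __m128 f = _mm_setr_ps(src[0], src[1], 0.0f, 0.0f);
    _mm_storeu_si128((__m128i *)dst, _mm_cvttps_epi64(f));
}

void float_128_intrin(float *dst, const long long *src)
{
    /* vcvtqq2ps: two signed 64-bit integers back to two floats */
    __m128 f = _mm_cvtepi64_ps(_mm_loadu_si128((const __m128i *)src));
    _mm_storel_pi((__m64 *)dst, f); /* store only the low two floats */
}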
// SPDX-License-Identifier: GPL-2.0 /* * Functions for working with device tree overlays * * Copyright (C) 2012 Pantelis Antoniou <panto@antoniou-consulting.com> * Copyright (C) 2012 Texas Instruments Inc. */ #define pr_fmt(fmt) "OF: overlay: " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_fdt.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/libfdt.h> #include <linux/err.h> #include <linux/idr.h> #include "of_private.h" /** * struct target - info about current target node as recursing through overlay * @np: node where current level of overlay will be applied * @in_livetree: @np is a node in the live devicetree * * Used in the algorithm to create the portion of a changeset that describes * an overlay fragment, which is a devicetree subtree. Initially @np is a node * in the live devicetree where the overlay subtree is targeted to be grafted * into. When recursing to the next level of the overlay subtree, the target * also recurses to the next level of the live devicetree, as long as overlay * subtree node also exists in the live devicetree. When a node in the overlay * subtree does not exist at the same level in the live devicetree, target->np * points to a newly allocated node, and all subsequent targets in the subtree * will be newly allocated nodes. */ struct target { struct device_node *np; bool in_livetree; }; /** * struct fragment - info about fragment nodes in overlay expanded device tree * @target: target of the overlay operation * @overlay: pointer to the __overlay__ node */ struct fragment { struct device_node *overlay; struct device_node *target; }; /** * struct overlay_changeset * @id: changeset identifier * @ovcs_list: list on which we are located * @fdt: FDT that was unflattened to create @overlay_tree * @overlay_tree: expanded device tree that contains the fragment nodes * @count: count of fragment structures * @fragments: fragment nodes in the overlay expanded device tree * @symbols_fragment: last element of @fragments[] is the __symbols__ node * @cset: changeset to apply fragments to live device tree */ struct overlay_changeset { int id; struct list_head ovcs_list; const void *fdt; struct device_node *overlay_tree; int count; struct fragment *fragments; bool symbols_fragment; struct of_changeset cset; }; /* flags are sticky - once set, do not reset */ static int devicetree_state_flags; #define DTSF_APPLY_FAIL 0x01 #define DTSF_REVERT_FAIL 0x02 /* * If a changeset apply or revert encounters an error, an attempt will * be made to undo partial changes, but may fail. If the undo fails * we do not know the state of the devicetree. */ static int devicetree_corrupt(void) { return devicetree_state_flags & (DTSF_APPLY_FAIL | DTSF_REVERT_FAIL); } static int build_changeset_next_level(struct overlay_changeset *ovcs, struct target *target, const struct device_node *overlay_node); /* * of_resolve_phandles() finds the largest phandle in the live tree. * of_overlay_apply() may add a larger phandle to the live tree. 
* Do not allow race between two overlays being applied simultaneously: * mutex_lock(&of_overlay_phandle_mutex) * of_resolve_phandles() * of_overlay_apply() * mutex_unlock(&of_overlay_phandle_mutex) */ static DEFINE_MUTEX(of_overlay_phandle_mutex); void of_overlay_mutex_lock(void) { mutex_lock(&of_overlay_phandle_mutex); } void of_overlay_mutex_unlock(void) { mutex_unlock(&of_overlay_phandle_mutex); } static LIST_HEAD(ovcs_list); static DEFINE_IDR(ovcs_idr); static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain); /** * of_overlay_notifier_register() - Register notifier for overlay operations * @nb: Notifier block to register * * Register for notification on overlay operations on device tree nodes. The * reported actions are defined by @of_reconfig_change. The notifier callback * furthermore receives a pointer to the affected device tree node. * * Note that a notifier callback is not supposed to store pointers to a device * tree node or its content beyond @OF_OVERLAY_POST_REMOVE corresponding to the * respective node it received. */ int of_overlay_notifier_register(struct notifier_block *nb) { return blocking_notifier_chain_register(&overlay_notify_chain, nb); } EXPORT_SYMBOL_GPL(of_overlay_notifier_register); /** * of_overlay_notifier_unregister() - Unregister notifier for overlay operations * @nb: Notifier block to unregister */ int of_overlay_notifier_unregister(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&overlay_notify_chain, nb); } EXPORT_SYMBOL_GPL(of_overlay_notifier_unregister); static char *of_overlay_action_name[] = { "pre-apply", "post-apply", "pre-remove", "post-remove", }; static int overlay_notify(struct overlay_changeset *ovcs, enum of_overlay_notify_action action) { struct of_overlay_notify_data nd; int i, ret; for (i = 0; i < ovcs->count; i++) { struct fragment *fragment = &ovcs->fragments[i]; nd.target = fragment->target; nd.overlay = fragment->overlay; ret = blocking_notifier_call_chain(&overlay_notify_chain, action, &nd); if (ret == NOTIFY_OK || ret == NOTIFY_STOP) return 0; if (ret) { ret = notifier_to_errno(ret); pr_err("overlay changeset %s notifier error %d, target: %pOF\n", of_overlay_action_name[action], ret, nd.target); return ret; } } return 0; } /* * The values of properties in the "/__symbols__" node are paths in * the ovcs->overlay_tree. When duplicating the properties, the paths * need to be adjusted to be the correct path for the live device tree. * * The paths refer to a node in the subtree of a fragment node's "__overlay__" * node, for example "/fragment@0/__overlay__/symbol_path_tail", * where symbol_path_tail can be a single node or it may be a multi-node path. * * The duplicated property value will be modified by replacing the * "/fragment_name/__overlay__/" portion of the value with the target * path from the fragment node. 
*/ static struct property *dup_and_fixup_symbol_prop( struct overlay_changeset *ovcs, const struct property *prop) { struct fragment *fragment; struct property *new_prop; struct device_node *fragment_node; struct device_node *overlay_node; const char *path; const char *path_tail; const char *target_path; int k; int overlay_name_len; int path_len; int path_tail_len; int target_path_len; if (!prop->value) return NULL; if (strnlen(prop->value, prop->length) >= prop->length) return NULL; path = prop->value; path_len = strlen(path); if (path_len < 1) return NULL; fragment_node = __of_find_node_by_path(ovcs->overlay_tree, path + 1); overlay_node = __of_find_node_by_path(fragment_node, "__overlay__/"); of_node_put(fragment_node); of_node_put(overlay_node); for (k = 0; k < ovcs->count; k++) { fragment = &ovcs->fragments[k]; if (fragment->overlay == overlay_node) break; } if (k >= ovcs->count) return NULL; overlay_name_len = snprintf(NULL, 0, "%pOF", fragment->overlay); if (overlay_name_len > path_len) return NULL; path_tail = path + overlay_name_len; path_tail_len = strlen(path_tail); target_path = kasprintf(GFP_KERNEL, "%pOF", fragment->target); if (!target_path) return NULL; target_path_len = strlen(target_path); new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL); if (!new_prop) goto err_free_target_path; new_prop->name = kstrdup(prop->name, GFP_KERNEL); new_prop->length = target_path_len + path_tail_len + 1; new_prop->value = kzalloc(new_prop->length, GFP_KERNEL); if (!new_prop->name || !new_prop->value) goto err_free_new_prop; strcpy(new_prop->value, target_path); strcpy(new_prop->value + target_path_len, path_tail); of_property_set_flag(new_prop, OF_DYNAMIC); return new_prop; err_free_new_prop: kfree(new_prop->name); kfree(new_prop->value); kfree(new_prop); err_free_target_path: kfree(target_path); return NULL; } /** * add_changeset_property() - add @overlay_prop to overlay changeset * @ovcs: overlay changeset * @target: where @overlay_prop will be placed * @overlay_prop: property to add or update, from overlay tree * @is_symbols_prop: 1 if @overlay_prop is from node "/__symbols__" * * If @overlay_prop does not already exist in live devicetree, add changeset * entry to add @overlay_prop in @target, else add changeset entry to update * value of @overlay_prop. * * @target may be either in the live devicetree or in a new subtree that * is contained in the changeset. * * Some special properties are not added or updated (no error returned): * "name", "phandle", "linux,phandle". * * Properties "#address-cells" and "#size-cells" are not updated if they * are already in the live tree, but if present in the live tree, the values * in the overlay must match the values in the live tree. * * Update of property in symbols node is not allowed. * * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if * invalid @overlay. 
*/ static int add_changeset_property(struct overlay_changeset *ovcs, struct target *target, struct property *overlay_prop, bool is_symbols_prop) { struct property *new_prop = NULL, *prop; int ret = 0; if (target->in_livetree) if (!of_prop_cmp(overlay_prop->name, "name") || !of_prop_cmp(overlay_prop->name, "phandle") || !of_prop_cmp(overlay_prop->name, "linux,phandle")) return 0; if (target->in_livetree) prop = of_find_property(target->np, overlay_prop->name, NULL); else prop = NULL; if (prop) { if (!of_prop_cmp(prop->name, "#address-cells")) { if (!of_prop_val_eq(prop, overlay_prop)) { pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n", target->np); ret = -EINVAL; } return ret; } else if (!of_prop_cmp(prop->name, "#size-cells")) { if (!of_prop_val_eq(prop, overlay_prop)) { pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n", target->np); ret = -EINVAL; } return ret; } } if (is_symbols_prop) { if (prop) return -EINVAL; new_prop = dup_and_fixup_symbol_prop(ovcs, overlay_prop); } else { new_prop = __of_prop_dup(overlay_prop, GFP_KERNEL); } if (!new_prop) return -ENOMEM; if (!prop) { if (!target->in_livetree) { new_prop->next = target->np->deadprops; target->np->deadprops = new_prop; } ret = of_changeset_add_property(&ovcs->cset, target->np, new_prop); } else { ret = of_changeset_update_property(&ovcs->cset, target->np, new_prop); } if (!of_node_check_flag(target->np, OF_OVERLAY)) pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n", target->np, new_prop->name); if (ret) { kfree(new_prop->name); kfree(new_prop->value); kfree(new_prop); } return ret; } /** * add_changeset_node() - add @node (and children) to overlay changeset * @ovcs: overlay changeset * @target: where @node will be placed in live tree or changeset * @node: node from within overlay device tree fragment * * If @node does not already exist in @target, add changeset entry * to add @node in @target. * * If @node already exists in @target, and the existing node has * a phandle, the overlay node is not allowed to have a phandle. * * If @node has child nodes, add the children recursively via * build_changeset_next_level(). * * NOTE_1: A live devicetree created from a flattened device tree (FDT) will * not contain the full path in node->full_name. Thus an overlay * created from an FDT also will not contain the full path in * node->full_name. However, a live devicetree created from Open * Firmware may have the full path in node->full_name. * * add_changeset_node() follows the FDT convention and does not include * the full path in node->full_name. Even though it expects the overlay * to not contain the full path, it uses kbasename() to remove the * full path should it exist. It also uses kbasename() in comparisons * to nodes in the live devicetree so that it can apply an overlay to * a live devicetree created from Open Firmware. * * NOTE_2: Multiple mods of created nodes not supported. * * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if * invalid @overlay. 
*/ static int add_changeset_node(struct overlay_changeset *ovcs, struct target *target, struct device_node *node) { const char *node_kbasename; const __be32 *phandle; struct device_node *tchild; struct target target_child; int ret = 0, size; node_kbasename = kbasename(node->full_name); for_each_child_of_node(target->np, tchild) if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name))) break; if (!tchild) { tchild = __of_node_dup(NULL, node_kbasename); if (!tchild) return -ENOMEM; tchild->parent = target->np; tchild->name = __of_get_property(node, "name", NULL); if (!tchild->name) tchild->name = "<NULL>"; /* ignore obsolete "linux,phandle" */ phandle = __of_get_property(node, "phandle", &size); if (phandle && (size == 4)) tchild->phandle = be32_to_cpup(phandle); of_node_set_flag(tchild, OF_OVERLAY); ret = of_changeset_attach_node(&ovcs->cset, tchild); if (ret) return ret; target_child.np = tchild; target_child.in_livetree = false; ret = build_changeset_next_level(ovcs, &target_child, node); of_node_put(tchild); return ret; } if (node->phandle && tchild->phandle) { ret = -EINVAL; } else { target_child.np = tchild; target_child.in_livetree = target->in_livetree; ret = build_changeset_next_level(ovcs, &target_child, node); } of_node_put(tchild); return ret; } /** * build_changeset_next_level() - add level of overlay changeset * @ovcs: overlay changeset * @target: where to place @overlay_node in live tree * @overlay_node: node from within an overlay device tree fragment * * Add the properties (if any) and nodes (if any) from @overlay_node to the * @ovcs->cset changeset. If an added node has child nodes, they will * be added recursively. * * Do not allow symbols node to have any children. * * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if * invalid @overlay_node. */ static int build_changeset_next_level(struct overlay_changeset *ovcs, struct target *target, const struct device_node *overlay_node) { struct device_node *child; struct property *prop; int ret; for_each_property_of_node(overlay_node, prop) { ret = add_changeset_property(ovcs, target, prop, 0); if (ret) { pr_debug("Failed to apply prop @%pOF/%s, err=%d\n", target->np, prop->name, ret); return ret; } } for_each_child_of_node(overlay_node, child) { ret = add_changeset_node(ovcs, target, child); if (ret) { pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n", target->np, child, ret); of_node_put(child); return ret; } } return 0; } /* * Add the properties from __overlay__ node to the @ovcs->cset changeset. 
*/ static int build_changeset_symbols_node(struct overlay_changeset *ovcs, struct target *target, const struct device_node *overlay_symbols_node) { struct property *prop; int ret; for_each_property_of_node(overlay_symbols_node, prop) { ret = add_changeset_property(ovcs, target, prop, 1); if (ret) { pr_debug("Failed to apply symbols prop @%pOF/%s, err=%d\n", target->np, prop->name, ret); return ret; } } return 0; } static int find_dup_cset_node_entry(struct overlay_changeset *ovcs, struct of_changeset_entry *ce_1) { struct of_changeset_entry *ce_2; char *fn_1, *fn_2; int node_path_match; if (ce_1->action != OF_RECONFIG_ATTACH_NODE && ce_1->action != OF_RECONFIG_DETACH_NODE) return 0; ce_2 = ce_1; list_for_each_entry_continue(ce_2, &ovcs->cset.entries, node) { if ((ce_2->action != OF_RECONFIG_ATTACH_NODE && ce_2->action != OF_RECONFIG_DETACH_NODE) || of_node_cmp(ce_1->np->full_name, ce_2->np->full_name)) continue; fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np); fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np); node_path_match = !strcmp(fn_1, fn_2); kfree(fn_1); kfree(fn_2); if (node_path_match) { pr_err("ERROR: multiple fragments add and/or delete node %pOF\n", ce_1->np); return -EINVAL; } } return 0; } static int find_dup_cset_prop(struct overlay_changeset *ovcs, struct of_changeset_entry *ce_1) { struct of_changeset_entry *ce_2; char *fn_1, *fn_2; int node_path_match; if (ce_1->action != OF_RECONFIG_ADD_PROPERTY && ce_1->action != OF_RECONFIG_REMOVE_PROPERTY && ce_1->action != OF_RECONFIG_UPDATE_PROPERTY) return 0; ce_2 = ce_1; list_for_each_entry_continue(ce_2, &ovcs->cset.entries, node) { if ((ce_2->action != OF_RECONFIG_ADD_PROPERTY && ce_2->action != OF_RECONFIG_REMOVE_PROPERTY && ce_2->action != OF_RECONFIG_UPDATE_PROPERTY) || of_node_cmp(ce_1->np->full_name, ce_2->np->full_name)) continue; fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np); fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np); node_path_match = !strcmp(fn_1, fn_2); kfree(fn_1); kfree(fn_2); if (node_path_match && !of_prop_cmp(ce_1->prop->name, ce_2->prop->name)) { pr_err("ERROR: multiple fragments add, update, and/or delete property %pOF/%s\n", ce_1->np, ce_1->prop->name); return -EINVAL; } } return 0; } /** * changeset_dup_entry_check() - check for duplicate entries * @ovcs: Overlay changeset * * Check changeset @ovcs->cset for multiple {add or delete} node entries for * the same node or duplicate {add, delete, or update} properties entries * for the same property. * * Returns 0 on success, or -EINVAL if duplicate changeset entry found. */ static int changeset_dup_entry_check(struct overlay_changeset *ovcs) { struct of_changeset_entry *ce_1; int dup_entry = 0; list_for_each_entry(ce_1, &ovcs->cset.entries, node) { dup_entry |= find_dup_cset_node_entry(ovcs, ce_1); dup_entry |= find_dup_cset_prop(ovcs, ce_1); } return dup_entry ? -EINVAL : 0; } /** * build_changeset() - populate overlay changeset in @ovcs from @ovcs->fragments * @ovcs: Overlay changeset * * Create changeset @ovcs->cset to contain the nodes and properties of the * overlay device tree fragments in @ovcs->fragments[]. If an error occurs, * any portions of the changeset that were successfully created will remain * in @ovcs->cset. * * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if * invalid overlay in @ovcs->fragments[]. 
*/ static int build_changeset(struct overlay_changeset *ovcs) { struct fragment *fragment; struct target target; int fragments_count, i, ret; /* * if there is a symbols fragment in ovcs->fragments[i] it is * the final element in the array */ if (ovcs->symbols_fragment) fragments_count = ovcs->count - 1; else fragments_count = ovcs->count; for (i = 0; i < fragments_count; i++) { fragment = &ovcs->fragments[i]; target.np = fragment->target; target.in_livetree = true; ret = build_changeset_next_level(ovcs, &target, fragment->overlay); if (ret) { pr_debug("fragment apply failed '%pOF'\n", fragment->target); return ret; } } if (ovcs->symbols_fragment) { fragment = &ovcs->fragments[ovcs->count - 1]; target.np = fragment->target; target.in_livetree = true; ret = build_changeset_symbols_node(ovcs, &target, fragment->overlay); if (ret) { pr_debug("symbols fragment apply failed '%pOF'\n", fragment->target); return ret; } } return changeset_dup_entry_check(ovcs); } /* * Find the target node using a number of different strategies * in order of preference: * * 1) "target" property containing the phandle of the target * 2) "target-path" property containing the path of the target */ static struct device_node *find_target(struct device_node *info_node) { struct device_node *node; const char *path; u32 val; int ret; ret = of_property_read_u32(info_node, "target", &val); if (!ret) { node = of_find_node_by_phandle(val); if (!node) pr_err("find target, node: %pOF, phandle 0x%x not found\n", info_node, val); return node; } ret = of_property_read_string(info_node, "target-path", &path); if (!ret) { node = of_find_node_by_path(path); if (!node) pr_err("find target, node: %pOF, path '%s' not found\n", info_node, path); return node; } pr_err("find target, node: %pOF, no target property\n", info_node); return NULL; } /** * init_overlay_changeset() - initialize overlay changeset from overlay tree * @ovcs: Overlay changeset to build * @fdt: the FDT that was unflattened to create @tree * @tree: Contains all the overlay fragments and overlay fixup nodes * * Initialize @ovcs. Populate @ovcs->fragments with node information from * the top level of @tree. The relevant top level nodes are the fragment * nodes and the __symbols__ node. Any other top level node will be ignored. * * Returns 0 on success, -ENOMEM if memory allocation failure, -EINVAL if error * detected in @tree, or -ENOSPC if idr_alloc() error. */ static int init_overlay_changeset(struct overlay_changeset *ovcs, const void *fdt, struct device_node *tree) { struct device_node *node, *overlay_node; struct fragment *fragment; struct fragment *fragments; int cnt, id, ret; /* * Warn for some issues. Can not return -EINVAL for these until * of_unittest_apply_overlay() is fixed to pass these checks. 
*/ if (!of_node_check_flag(tree, OF_DYNAMIC)) pr_debug("%s() tree is not dynamic\n", __func__); if (!of_node_check_flag(tree, OF_DETACHED)) pr_debug("%s() tree is not detached\n", __func__); if (!of_node_is_root(tree)) pr_debug("%s() tree is not root\n", __func__); ovcs->overlay_tree = tree; ovcs->fdt = fdt; INIT_LIST_HEAD(&ovcs->ovcs_list); of_changeset_init(&ovcs->cset); id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL); if (id <= 0) return id; cnt = 0; /* fragment nodes */ for_each_child_of_node(tree, node) { overlay_node = of_get_child_by_name(node, "__overlay__"); if (overlay_node) { cnt++; of_node_put(overlay_node); } } node = of_get_child_by_name(tree, "__symbols__"); if (node) { cnt++; of_node_put(node); } fragments = kcalloc(cnt, sizeof(*fragments), GFP_KERNEL); if (!fragments) { ret = -ENOMEM; goto err_free_idr; } cnt = 0; for_each_child_of_node(tree, node) { overlay_node = of_get_child_by_name(node, "__overlay__"); if (!overlay_node) continue; fragment = &fragments[cnt]; fragment->overlay = overlay_node; fragment->target = find_target(node); if (!fragment->target) { of_node_put(fragment->overlay); ret = -EINVAL; goto err_free_fragments; } cnt++; } /* * if there is a symbols fragment in ovcs->fragments[i] it is * the final element in the array */ node = of_get_child_by_name(tree, "__symbols__"); if (node) { ovcs->symbols_fragment = 1; fragment = &fragments[cnt]; fragment->overlay = node; fragment->target = of_find_node_by_path("/__symbols__"); if (!fragment->target) { pr_err("symbols in overlay, but not in live tree\n"); ret = -EINVAL; goto err_free_fragments; } cnt++; } if (!cnt) { pr_err("no fragments or symbols in overlay\n"); ret = -EINVAL; goto err_free_fragments; } ovcs->id = id; ovcs->count = cnt; ovcs->fragments = fragments; return 0; err_free_fragments: kfree(fragments); err_free_idr: idr_remove(&ovcs_idr, id); pr_err("%s() failed, ret = %d\n", __func__, ret); return ret; } static void free_overlay_changeset(struct overlay_changeset *ovcs) { int i; if (ovcs->cset.entries.next) of_changeset_destroy(&ovcs->cset); if (ovcs->id) idr_remove(&ovcs_idr, ovcs->id); for (i = 0; i < ovcs->count; i++) { of_node_put(ovcs->fragments[i].target); of_node_put(ovcs->fragments[i].overlay); } kfree(ovcs->fragments); /* * There should be no live pointers into ovcs->overlay_tree and * ovcs->fdt due to the policy that overlay notifiers are not allowed * to retain pointers into the overlay devicetree. */ kfree(ovcs->overlay_tree); kfree(ovcs->fdt); kfree(ovcs); } /* * internal documentation * * of_overlay_apply() - Create and apply an overlay changeset * @fdt: the FDT that was unflattened to create @tree * @tree: Expanded overlay device tree * @ovcs_id: Pointer to overlay changeset id * * Creates and applies an overlay changeset. * * If an error occurs in a pre-apply notifier, then no changes are made * to the device tree. * * A non-zero return value will not have created the changeset if error is from: * - parameter checks * - building the changeset * - overlay changeset pre-apply notifier * * If an error is returned by an overlay changeset pre-apply notifier * then no further overlay changeset pre-apply notifier will be called. * * A non-zero return value will have created the changeset if error is from: * - overlay changeset entry notifier * - overlay changeset post-apply notifier * * If an error is returned by an overlay changeset post-apply notifier * then no further overlay changeset post-apply notifier will be called. 
* * If more than one notifier returns an error, then the last notifier * error to occur is returned. * * If an error occurred while applying the overlay changeset, then an * attempt is made to revert any changes that were made to the * device tree. If there were any errors during the revert attempt * then the state of the device tree can not be determined, and any * following attempt to apply or remove an overlay changeset will be * refused. * * Returns 0 on success, or a negative error number. Overlay changeset * id is returned to *ovcs_id. */ static int of_overlay_apply(const void *fdt, struct device_node *tree, int *ovcs_id) { struct overlay_changeset *ovcs; int ret = 0, ret_revert, ret_tmp; /* * As of this point, fdt and tree belong to the overlay changeset. * overlay changeset code is responsible for freeing them. */ if (devicetree_corrupt()) { pr_err("devicetree state suspect, refuse to apply overlay\n"); kfree(fdt); kfree(tree); ret = -EBUSY; goto out; } ovcs = kzalloc(sizeof(*ovcs), GFP_KERNEL); if (!ovcs) { kfree(fdt); kfree(tree); ret = -ENOMEM; goto out; } of_overlay_mutex_lock(); mutex_lock(&of_mutex); ret = of_resolve_phandles(tree); if (ret) goto err_free_tree; ret = init_overlay_changeset(ovcs, fdt, tree); if (ret) goto err_free_tree; /* * after overlay_notify(), ovcs->overlay_tree related pointers may have * leaked to drivers, so can not kfree() tree, aka ovcs->overlay_tree; * and can not free fdt, aka ovcs->fdt */ ret = overlay_notify(ovcs, OF_OVERLAY_PRE_APPLY); if (ret) { pr_err("overlay changeset pre-apply notify error %d\n", ret); goto err_free_overlay_changeset; } ret = build_changeset(ovcs); if (ret) goto err_free_overlay_changeset; ret_revert = 0; ret = __of_changeset_apply_entries(&ovcs->cset, &ret_revert); if (ret) { if (ret_revert) { pr_debug("overlay changeset revert error %d\n", ret_revert); devicetree_state_flags |= DTSF_APPLY_FAIL; } goto err_free_overlay_changeset; } of_populate_phandle_cache(); ret = __of_changeset_apply_notify(&ovcs->cset); if (ret) pr_err("overlay apply changeset entry notify error %d\n", ret); /* notify failure is not fatal, continue */ list_add_tail(&ovcs->ovcs_list, &ovcs_list); *ovcs_id = ovcs->id; ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_APPLY); if (ret_tmp) { pr_err("overlay changeset post-apply notify error %d\n", ret_tmp); if (!ret) ret = ret_tmp; } goto out_unlock; err_free_tree: kfree(fdt); kfree(tree); err_free_overlay_changeset: free_overlay_changeset(ovcs); out_unlock: mutex_unlock(&of_mutex); of_overlay_mutex_unlock(); out: pr_debug("%s() err=%d\n", __func__, ret); return ret; } int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, int *ovcs_id) { const void *new_fdt; int ret; u32 size; struct device_node *overlay_root; *ovcs_id = 0; ret = 0; if (overlay_fdt_size < sizeof(struct fdt_header) || fdt_check_header(overlay_fdt)) { pr_err("Invalid overlay_fdt header\n"); return -EINVAL; } size = fdt_totalsize(overlay_fdt); if (overlay_fdt_size < size) return -EINVAL; /* * Must create permanent copy of FDT because of_fdt_unflatten_tree() * will create pointers to the passed in FDT in the unflattened tree. */ new_fdt = kmemdup(overlay_fdt, size, GFP_KERNEL); if (!new_fdt) return -ENOMEM; of_fdt_unflatten_tree(new_fdt, NULL, &overlay_root); if (!overlay_root) { pr_err("unable to unflatten overlay_fdt\n"); ret = -EINVAL; goto out_free_new_fdt; } ret = of_overlay_apply(new_fdt, overlay_root, ovcs_id); if (ret < 0) { /* * new_fdt and overlay_root now belong to the overlay * changeset. 
* overlay changeset code is responsible for freeing them. */ goto out; } return 0; out_free_new_fdt: kfree(new_fdt); out: return ret; } EXPORT_SYMBOL_GPL(of_overlay_fdt_apply); /* * Find @np in @tree. * * Returns 1 if @np is @tree or is contained in @tree, else 0 */ static int find_node(struct device_node *tree, struct device_node *np) { struct device_node *child; if (tree == np) return 1; for_each_child_of_node(tree, child) { if (find_node(child, np)) { of_node_put(child); return 1; } } return 0; } /* * Is @remove_ce_node a child of, a parent of, or the same as any * node in an overlay changeset applied after @remove_ovcs? * * Returns 1 if found, else 0 */ static int node_overlaps_later_cs(struct overlay_changeset *remove_ovcs, struct device_node *remove_ce_node) { struct overlay_changeset *ovcs; struct of_changeset_entry *ce; list_for_each_entry_reverse(ovcs, &ovcs_list, ovcs_list) { if (ovcs == remove_ovcs) break; list_for_each_entry(ce, &ovcs->cset.entries, node) { if (find_node(ce->np, remove_ce_node)) { pr_err("%s: #%d overlaps with #%d @%pOF\n", __func__, remove_ovcs->id, ovcs->id, remove_ce_node); return 1; } if (find_node(remove_ce_node, ce->np)) { pr_err("%s: #%d overlaps with #%d @%pOF\n", __func__, remove_ovcs->id, ovcs->id, remove_ce_node); return 1; } } } return 0; } /* * We can safely remove the overlay only if it's the topmost one. * Newly applied overlays are inserted at the tail of the overlay list, * so a topmost overlay is the one that is closest to the tail. * * The topmost check is done by exploiting this property. For each * affected device node in the log list we check if this overlay is * the one closest to the tail. If another overlay has affected this * device node and is closest to the tail, then removal is not permitted. */ static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs) { struct of_changeset_entry *remove_ce; list_for_each_entry(remove_ce, &remove_ovcs->cset.entries, node) { if (node_overlaps_later_cs(remove_ovcs, remove_ce->np)) { pr_err("overlay #%d is not topmost\n", remove_ovcs->id); return 0; } } return 1; } /** * of_overlay_remove() - Revert and free an overlay changeset * @ovcs_id: Pointer to overlay changeset id * * Removes an overlay if it is permissible. @ovcs_id was previously returned * by of_overlay_fdt_apply(). * * If an error occurred while attempting to revert the overlay changeset, * then an attempt is made to re-apply any changeset entry that was * reverted. If an error occurs on re-apply then the state of the device * tree can not be determined, and any following attempt to apply or remove * an overlay changeset will be refused. * * A non-zero return value will not revert the changeset if error is from: * - parameter checks * - overlay changeset pre-remove notifier * - overlay changeset entry revert * * If an error is returned by an overlay changeset pre-remove notifier * then no further overlay changeset pre-remove notifier will be called. * * If more than one notifier returns an error, then the last notifier * error to occur is returned. * * A non-zero return value will revert the changeset if error is from: * - overlay changeset entry notifier * - overlay changeset post-remove notifier * * If an error is returned by an overlay changeset post-remove notifier * then no further overlay changeset post-remove notifier will be called. * * Returns 0 on success, or a negative error number. *ovcs_id is set to * zero after reverting the changeset, even if a subsequent error occurs. 
*/ int of_overlay_remove(int *ovcs_id) { struct overlay_changeset *ovcs; int ret, ret_apply, ret_tmp; ret = 0; if (devicetree_corrupt()) { pr_err("suspect devicetree state, refuse to remove overlay\n"); ret = -EBUSY; goto out; } mutex_lock(&of_mutex); ovcs = idr_find(&ovcs_idr, *ovcs_id); if (!ovcs) { ret = -ENODEV; pr_err("remove: Could not find overlay #%d\n", *ovcs_id); goto out_unlock; } if (!overlay_removal_is_ok(ovcs)) { ret = -EBUSY; goto out_unlock; } ret = overlay_notify(ovcs, OF_OVERLAY_PRE_REMOVE); if (ret) { pr_err("overlay changeset pre-remove notify error %d\n", ret); goto out_unlock; } list_del(&ovcs->ovcs_list); /* * Disable phandle cache. Avoids race condition that would arise * from removing cache entry when the associated node is deleted. */ of_free_phandle_cache(); ret_apply = 0; ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply); of_populate_phandle_cache(); if (ret) { if (ret_apply) devicetree_state_flags |= DTSF_REVERT_FAIL; goto out_unlock; } ret = __of_changeset_revert_notify(&ovcs->cset); if (ret) pr_err("overlay remove changeset entry notify error %d\n", ret); /* notify failure is not fatal, continue */ *ovcs_id = 0; ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_REMOVE); if (ret_tmp) { pr_err("overlay changeset post-remove notify error %d\n", ret_tmp); if (!ret) ret = ret_tmp; } free_overlay_changeset(ovcs); out_unlock: mutex_unlock(&of_mutex); out: pr_debug("%s() err=%d\n", __func__, ret); return ret; } EXPORT_SYMBOL_GPL(of_overlay_remove); /** * of_overlay_remove_all() - Reverts and frees all overlay changesets * * Removes all overlays from the system in the correct order. * * Returns 0 on success, or a negative error number */ int of_overlay_remove_all(void) { struct overlay_changeset *ovcs, *ovcs_n; int ret; /* the tail of list is guaranteed to be safe to remove */ list_for_each_entry_safe_reverse(ovcs, ovcs_n, &ovcs_list, ovcs_list) { ret = of_overlay_remove(&ovcs->id); if (ret) return ret; } return 0; } EXPORT_SYMBOL_GPL(of_overlay_remove_all);
Pingmin/linux
drivers/of/overlay.c
C
gpl-2.0
34,958
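/* A minimal sketch of driving the exported overlay API from the record
 * above out of another kernel module. The FDT blob (my_fdt, my_fdt_size)
 * is assumed to come from elsewhere, e.g. request_firmware(); the demo_*
 * names are invented. As the source notes, of_overlay_fdt_apply()
 * kmemdup()s the blob, so the caller's copy may be freed after it
 * returns. */
#include <linux/of.h>
#include <linux/printk.h>

static int demo_ovcs_id;

static int demo_apply(const void *my_fdt, u32 my_fdt_size)
{
	/* On success a changeset id is stored in demo_ovcs_id. */
	int ret = of_overlay_fdt_apply(my_fdt, my_fdt_size, &demo_ovcs_id);

	if (ret)
		pr_err("demo: overlay apply failed: %d\n", ret);
	return ret;
}

static void demo_remove(void)
{
	/* Succeeds only while this changeset is still topmost. */
	int ret = of_overlay_remove(&demo_ovcs_id);

	if (ret)
		pr_err("demo: overlay remove failed: %d\n", ret);
}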
/* * Freescale SSI ALSA SoC Digital Audio Interface (DAI) driver * * Author: Timur Tabi <timur@freescale.com> * * Copyright 2007-2015 Freescale Semiconductor, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * * * Some notes why imx-pcm-fiq is used instead of DMA on some boards: * * The i.MX SSI core has some nasty limitations in AC97 mode. While most * sane processor vendors have a FIFO per AC97 slot, the i.MX has only * one FIFO which combines all valid receive slots. We cannot even select * which slots we want to receive. The WM9712 with which this driver * was developed always sends GPIO status data in slot 12 which * we receive in our (PCM-) data stream. The only chance we have is to * manually skip this data in the FIQ handler. With sampling rates different * from 48000Hz not every frame has valid receive data, so the ratio * between pcm data and GPIO status data changes. Our FIQ handler is not * able to handle this, hence this driver only works with 48000Hz sampling * rate. * Reading and writing AC97 registers is another challenge. The core * provides us status bits when the read register is updated with *another* * value. When we read the same register two times (and the register still * contains the same value) these status bits are not set. We work * around this by not polling these bits but only waiting a fixed delay. */ #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/pm_runtime.h> #include <linux/busfreq-imx.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/soc.h> #include <sound/dmaengine_pcm.h> #include "fsl_ssi.h" #include "imx-pcm.h" /** * FSLSSI_I2S_RATES: sample rates supported by the I2S * * This driver currently only supports the SSI running in I2S slave mode, * which means the codec determines the sample rate. Therefore, we tell * ALSA that we support all rates and let the codec driver decide what rates * are really supported. */ #define FSLSSI_I2S_RATES SNDRV_PCM_RATE_CONTINUOUS /** * FSLSSI_I2S_FORMATS: audio formats supported by the SSI * * This driver currently only supports the SSI running in I2S slave mode. * * The SSI has a limitation in that the samples must be in the same byte * order as the host CPU. This is because when multiple bytes are written * to the STX register, the bytes and bits must be written in the same * order. The STX is a shift register, so all the bits need to be aligned * (bit-endianness must match byte-endianness). Processors typically write * the bits within a byte in the same order that the bytes of a word are * written in. So if the host CPU is big-endian, then only big-endian * samples will be written to STX properly. 
*/ #ifdef __BIG_ENDIAN #define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \ SNDRV_PCM_FMTBIT_S18_3BE | SNDRV_PCM_FMTBIT_S20_3BE | \ SNDRV_PCM_FMTBIT_S24_3BE | SNDRV_PCM_FMTBIT_S24_BE) #else #define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S20_3LE | \ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE) #endif #define FSLSSI_SIER_DBG_RX_FLAGS (CCSR_SSI_SIER_RFF0_EN | \ CCSR_SSI_SIER_RLS_EN | CCSR_SSI_SIER_RFS_EN | \ CCSR_SSI_SIER_ROE0_EN | CCSR_SSI_SIER_RFRC_EN) #define FSLSSI_SIER_DBG_TX_FLAGS (CCSR_SSI_SIER_TFE0_EN | \ CCSR_SSI_SIER_TLS_EN | CCSR_SSI_SIER_TFS_EN | \ CCSR_SSI_SIER_TUE0_EN | CCSR_SSI_SIER_TFRC_EN) enum fsl_ssi_type { FSL_SSI_MCP8610, FSL_SSI_MX21, FSL_SSI_MX35, FSL_SSI_MX51, }; struct fsl_ssi_reg_val { u32 sier; u32 srcr; u32 stcr; u32 scr; }; struct fsl_ssi_rxtx_reg_val { struct fsl_ssi_reg_val rx; struct fsl_ssi_reg_val tx; }; static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg) { switch (reg) { case CCSR_SSI_STX0: case CCSR_SSI_STX1: case CCSR_SSI_SRX0: case CCSR_SSI_SRX1: case CCSR_SSI_SCR: case CCSR_SSI_SISR: case CCSR_SSI_SIER: case CCSR_SSI_STCR: case CCSR_SSI_SRCR: case CCSR_SSI_STCCR: case CCSR_SSI_SRCCR: case CCSR_SSI_SFCSR: case CCSR_SSI_STR: case CCSR_SSI_SOR: case CCSR_SSI_SACNT: case CCSR_SSI_SACADD: case CCSR_SSI_SACDAT: case CCSR_SSI_SATAG: case CCSR_SSI_STMSK: case CCSR_SSI_SRMSK: case CCSR_SSI_SACCST: case CCSR_SSI_SACCEN: case CCSR_SSI_SACCDIS: return true; default: return false; } } static bool fsl_ssi_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { case CCSR_SSI_STX0: case CCSR_SSI_STX1: case CCSR_SSI_SRX0: case CCSR_SSI_SRX1: case CCSR_SSI_SISR: case CCSR_SSI_SFCSR: case CCSR_SSI_SACADD: case CCSR_SSI_SACDAT: case CCSR_SSI_SATAG: case CCSR_SSI_SACCST: return true; default: return false; } } static bool fsl_ssi_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { case CCSR_SSI_STX0: case CCSR_SSI_STX1: case CCSR_SSI_SCR: case CCSR_SSI_SISR: case CCSR_SSI_SIER: case CCSR_SSI_STCR: case CCSR_SSI_SRCR: case CCSR_SSI_STCCR: case CCSR_SSI_SRCCR: case CCSR_SSI_SFCSR: case CCSR_SSI_STR: case CCSR_SSI_SOR: case CCSR_SSI_SACNT: case CCSR_SSI_SACADD: case CCSR_SSI_SACDAT: case CCSR_SSI_SATAG: case CCSR_SSI_STMSK: case CCSR_SSI_SRMSK: case CCSR_SSI_SACCEN: case CCSR_SSI_SACCDIS: return true; default: return false; } } static const struct regmap_config fsl_ssi_regconfig = { .max_register = CCSR_SSI_SACCDIS, .reg_bits = 32, .val_bits = 32, .reg_stride = 4, .val_format_endian = REGMAP_ENDIAN_NATIVE, .readable_reg = fsl_ssi_readable_reg, .volatile_reg = fsl_ssi_volatile_reg, .writeable_reg = fsl_ssi_writeable_reg, .cache_type = REGCACHE_RBTREE, }; struct fsl_ssi_soc_data { bool imx; bool offline_config; u32 sisr_write_mask; }; /** * fsl_ssi_private: per-SSI private data * * @reg: Pointer to the regmap registers * @irq: IRQ of this SSI * @cpu_dai_drv: CPU DAI driver for this device * * @dai_fmt: DAI configuration this device is currently used with * @i2s_mode: i2s and network mode configuration of the device. 
Is used to * switch between normal and i2s/network mode * depending on the number of channels * @use_dma: DMA is used or FIQ with stream filter * @use_dual_fifo: DMA with support for both FIFOs used * @fifo_depth: Depth of the SSI FIFOs * @rxtx_reg_val: Specific register settings for receive/transmit configuration * * @clk: SSI clock * @baudclk: SSI baud clock for master mode * @baudclk_streams: Active streams that are using baudclk * @bitclk_freq: bitclock frequency set by .set_dai_sysclk * * @dma_params_tx: DMA transmit parameters * @dma_params_rx: DMA receive parameters * @ssi_phys: physical address of the SSI registers * * @fiq_params: FIQ stream filtering parameters * * @pdev: Pointer to pdev used for deprecated fsl-ssi sound card * * @dbg_stats: Debugging statistics * * @soc: SoC specific data */ struct fsl_ssi_private { struct regmap *regs; unsigned int irq; struct snd_soc_dai_driver cpu_dai_drv; unsigned int dai_fmt; u8 i2s_mode; bool use_dma; bool use_dual_fifo; bool has_ipg_clk_name; unsigned int fifo_depth; struct fsl_ssi_rxtx_reg_val rxtx_reg_val; struct clk *clk; struct clk *baudclk; unsigned int baudclk_streams; unsigned int bitclk_freq; /* regcache for SFCSR */ u32 regcache_sfcsr; /* DMA params */ struct snd_dmaengine_dai_dma_data dma_params_tx; struct snd_dmaengine_dai_dma_data dma_params_rx; dma_addr_t ssi_phys; /* params for non-dma FIQ stream filtered mode */ struct imx_pcm_fiq_params fiq_params; /* Used when using fsl-ssi as sound-card. This is only used by ppc and * should be replaced with simple-sound-card. */ struct platform_device *pdev; struct fsl_ssi_dbg dbg_stats; const struct fsl_ssi_soc_data *soc; }; /* * imx51 and later SoCs have a slightly different IP that allows the * SSI configuration while the SSI unit is running. * * More importantly, it is necessary on those SoCs to configure the * separate TX/RX DMA bits just before starting the stream * (fsl_ssi_trigger). The SDMA unit has to be configured before fsl_ssi * sends any DMA requests to the SDMA unit, otherwise it is not defined * how the SDMA unit handles the DMA request. * * SDMA units are present on devices starting at imx35 but the imx35 * reference manual states that the DMA bits should not be changed * while the SSI unit is running (SSIEN). So we support the necessary * online configuration of fsl-ssi starting at imx51. 
*/ static struct fsl_ssi_soc_data fsl_ssi_mpc8610 = { .imx = false, .offline_config = true, .sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC | CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 | CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1, }; static struct fsl_ssi_soc_data fsl_ssi_imx21 = { .imx = true, .offline_config = true, .sisr_write_mask = 0, }; static struct fsl_ssi_soc_data fsl_ssi_imx35 = { .imx = true, .offline_config = true, .sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC | CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 | CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1, }; static struct fsl_ssi_soc_data fsl_ssi_imx51 = { .imx = true, .offline_config = false, .sisr_write_mask = CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 | CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1, }; static const struct of_device_id fsl_ssi_ids[] = { { .compatible = "fsl,mpc8610-ssi", .data = &fsl_ssi_mpc8610 }, { .compatible = "fsl,imx51-ssi", .data = &fsl_ssi_imx51 }, { .compatible = "fsl,imx35-ssi", .data = &fsl_ssi_imx35 }, { .compatible = "fsl,imx21-ssi", .data = &fsl_ssi_imx21 }, {} }; MODULE_DEVICE_TABLE(of, fsl_ssi_ids); static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private) { return !!(ssi_private->dai_fmt & SND_SOC_DAIFMT_AC97); } static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private) { return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBS_CFS; } /** * fsl_ssi_isr: SSI interrupt handler * * Although it's possible to use the interrupt handler to send and receive * data to/from the SSI, we use the DMA instead. Programming is more * complicated, but the performance is much better. * * This interrupt handler is used only to gather statistics. * * @irq: IRQ of the SSI device * @dev_id: pointer to the ssi_private structure for this SSI device */ static irqreturn_t fsl_ssi_isr(int irq, void *dev_id) { struct fsl_ssi_private *ssi_private = dev_id; struct regmap *regs = ssi_private->regs; __be32 sisr; __be32 sisr2; /* We got an interrupt, so read the status register to see what we were interrupted for. We mask it with the Interrupt Enable register so that we only check for events that we're interested in. */ regmap_read(regs, CCSR_SSI_SISR, &sisr); sisr2 = sisr & ssi_private->soc->sisr_write_mask; /* Clear the bits that we set */ if (sisr2) regmap_write(regs, CCSR_SSI_SISR, sisr2); fsl_ssi_dbg_isr(&ssi_private->dbg_stats, sisr); return IRQ_HANDLED; } /* * Enable/Disable all rx/tx config flags at once. */ static void fsl_ssi_rxtx_config(struct fsl_ssi_private *ssi_private, bool enable) { struct regmap *regs = ssi_private->regs; struct fsl_ssi_rxtx_reg_val *vals = &ssi_private->rxtx_reg_val; if (enable) { regmap_update_bits(regs, CCSR_SSI_SIER, vals->rx.sier | vals->tx.sier, vals->rx.sier | vals->tx.sier); regmap_update_bits(regs, CCSR_SSI_SRCR, vals->rx.srcr | vals->tx.srcr, vals->rx.srcr | vals->tx.srcr); regmap_update_bits(regs, CCSR_SSI_STCR, vals->rx.stcr | vals->tx.stcr, vals->rx.stcr | vals->tx.stcr); } else { regmap_update_bits(regs, CCSR_SSI_SRCR, vals->rx.srcr | vals->tx.srcr, 0); regmap_update_bits(regs, CCSR_SSI_STCR, vals->rx.stcr | vals->tx.stcr, 0); regmap_update_bits(regs, CCSR_SSI_SIER, vals->rx.sier | vals->tx.sier, 0); } } /* * Calculate the bits that have to be disabled for the current stream that is * getting disabled. This keeps the bits enabled that are necessary for the * second stream to work if 'stream_active' is true. * * Detailed calculation: * These are the values that need to be active after disabling. 
For non-active * second stream, this is 0: * vals_stream * !!stream_active * * The following computes the overall differences between the setup for the * to-disable stream and the active stream, a simple XOR: * vals_disable ^ (vals_stream * !!(stream_active)) * * The full expression adds a mask on all values we care about */ #define fsl_ssi_disable_val(vals_disable, vals_stream, stream_active) \ ((vals_disable) & \ ((vals_disable) ^ ((vals_stream) * (u32)!!(stream_active)))) /* * Enable/Disable a ssi configuration. You have to pass either * ssi_private->rxtx_reg_val.rx or tx as vals parameter. */ static void fsl_ssi_config(struct fsl_ssi_private *ssi_private, bool enable, struct fsl_ssi_reg_val *vals) { struct regmap *regs = ssi_private->regs; struct fsl_ssi_reg_val *avals; int nr_active_streams; u32 scr_val; int keep_active; regmap_read(regs, CCSR_SSI_SCR, &scr_val); nr_active_streams = !!(scr_val & CCSR_SSI_SCR_TE) + !!(scr_val & CCSR_SSI_SCR_RE); if (nr_active_streams - 1 > 0) keep_active = 1; else keep_active = 0; /* Find the other direction values rx or tx which we do not want to * modify */ if (&ssi_private->rxtx_reg_val.rx == vals) avals = &ssi_private->rxtx_reg_val.tx; else avals = &ssi_private->rxtx_reg_val.rx; /* If vals should be disabled, start with disabling the unit */ if (!enable) { u32 scr = fsl_ssi_disable_val(vals->scr, avals->scr, keep_active); regmap_update_bits(regs, CCSR_SSI_SCR, scr, 0); } /* * We are running on a SoC which does not support online SSI * reconfiguration, so we have to enable all necessary flags at once * even if we do not use them later (capture and playback configuration) */ if (ssi_private->soc->offline_config) { if ((enable && !nr_active_streams) || (!enable && !keep_active)) fsl_ssi_rxtx_config(ssi_private, enable); goto config_done; } /* * Configure single direction units while the SSI unit is running * (online configuration) */ if (enable) { regmap_update_bits(regs, CCSR_SSI_SIER, vals->sier, vals->sier); regmap_update_bits(regs, CCSR_SSI_SRCR, vals->srcr, vals->srcr); regmap_update_bits(regs, CCSR_SSI_STCR, vals->stcr, vals->stcr); } else { u32 sier; u32 srcr; u32 stcr; /* * Disabling the necessary flags for one of rx/tx while the * other stream is active is a little bit more difficult. We * have to disable only those flags that differ between both * streams (rx XOR tx) and that are set in the stream that is * disabled now. Otherwise we could alter flags of the other * stream */ /* These assignments are simply vals without bits set in avals*/ sier = fsl_ssi_disable_val(vals->sier, avals->sier, keep_active); srcr = fsl_ssi_disable_val(vals->srcr, avals->srcr, keep_active); stcr = fsl_ssi_disable_val(vals->stcr, avals->stcr, keep_active); regmap_update_bits(regs, CCSR_SSI_SRCR, srcr, 0); regmap_update_bits(regs, CCSR_SSI_STCR, stcr, 0); regmap_update_bits(regs, CCSR_SSI_SIER, sier, 0); } config_done: /* Enabling of subunits is done after configuration */ if (enable) regmap_update_bits(regs, CCSR_SSI_SCR, vals->scr, vals->scr); } static void fsl_ssi_rx_config(struct fsl_ssi_private *ssi_private, bool enable) { fsl_ssi_config(ssi_private, enable, &ssi_private->rxtx_reg_val.rx); } static void fsl_ssi_tx_config(struct fsl_ssi_private *ssi_private, bool enable) { fsl_ssi_config(ssi_private, enable, &ssi_private->rxtx_reg_val.tx); } /* * Setup rx/tx register values used to enable/disable the streams. These will * be used later in fsl_ssi_config to setup the streams without the need to * check for all different SSI modes. 
*/ static void fsl_ssi_setup_reg_vals(struct fsl_ssi_private *ssi_private) { struct fsl_ssi_rxtx_reg_val *reg = &ssi_private->rxtx_reg_val; reg->rx.sier = CCSR_SSI_SIER_RFF0_EN; reg->rx.srcr = CCSR_SSI_SRCR_RFEN0; reg->rx.scr = 0; reg->tx.sier = CCSR_SSI_SIER_TFE0_EN; reg->tx.stcr = CCSR_SSI_STCR_TFEN0; reg->tx.scr = 0; if (!fsl_ssi_is_ac97(ssi_private)) { reg->rx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_RE; reg->rx.sier |= CCSR_SSI_SIER_RFF0_EN; reg->tx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE; reg->tx.sier |= CCSR_SSI_SIER_TFE0_EN; } if (ssi_private->use_dma) { reg->rx.sier |= CCSR_SSI_SIER_RDMAE; reg->tx.sier |= CCSR_SSI_SIER_TDMAE; } else { reg->rx.sier |= CCSR_SSI_SIER_RIE; reg->tx.sier |= CCSR_SSI_SIER_TIE; } reg->rx.sier |= FSLSSI_SIER_DBG_RX_FLAGS; reg->tx.sier |= FSLSSI_SIER_DBG_TX_FLAGS; } static void fsl_ssi_setup_ac97(struct fsl_ssi_private *ssi_private) { struct regmap *regs = ssi_private->regs; /* * Setup the clock control register */ regmap_write(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13)); regmap_write(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13)); /* * Enable AC97 mode and startup the SSI */ regmap_write(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV); regmap_write(regs, CCSR_SSI_SACCDIS, 0xff); regmap_write(regs, CCSR_SSI_SACCEN, 0x300); /* * Enable SSI, Transmit and Receive. AC97 has to communicate with the * codec before a stream is started. */ regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE, CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE); regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_WAIT(3)); } /** * fsl_ssi_startup: create a new substream * * This is the first function called when a stream is opened. * * If this is the first stream open, then grab the IRQ and program most of * the SSI registers. */ static int fsl_ssi_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai); int ret; ret = clk_prepare_enable(ssi_private->clk); if (ret) return ret; pm_runtime_get_sync(dai->dev); /* When using dual fifo mode, it is safer to ensure an even period * size. With an odd period size, the DMA engine, which always starts * its task from fifo0, would neglect fifo1 at the end of each period, * while the SSI would still access fifo1 and read invalid data. */ if (ssi_private->use_dual_fifo) snd_pcm_hw_constraint_step(substream->runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2); return 0; } /** * fsl_ssi_shutdown: shutdown the SSI * */ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai); pm_runtime_put_sync(dai->dev); clk_disable_unprepare(ssi_private->clk); } /** * fsl_ssi_set_bclk - configure Digital Audio Interface bit clock * * Note: This function can only be called when using the SSI as DAI master * * Quick instruction for parameters: * freq: Output BCLK frequency = samplerate * 32 (fixed) * channels * dir: SND_SOC_CLOCK_OUT -> TxBCLK, SND_SOC_CLOCK_IN -> RxBCLK. 
*/ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai, struct snd_pcm_hw_params *hw_params) { struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai); struct regmap *regs = ssi_private->regs; int synchronous = ssi_private->cpu_dai_drv.symmetric_rates, ret; u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i; unsigned long clkrate, baudrate, tmprate; u64 sub, savesub = 100000; unsigned int freq; bool baudclk_is_used; /* Prefer the explicitly set bitclock frequency */ if (ssi_private->bitclk_freq) freq = ssi_private->bitclk_freq; else freq = params_channels(hw_params) * 32 * params_rate(hw_params); /* Don't apply it to any non-baudclk circumstance */ if (IS_ERR(ssi_private->baudclk)) return -EINVAL; baudclk_is_used = ssi_private->baudclk_streams & ~(BIT(substream->stream)); /* It should already be enough to divide the clock by setting pm alone */ psr = 0; div2 = 0; factor = (div2 + 1) * (7 * psr + 1) * 2; for (i = 0; i < 255; i++) { /* The bclk rate must be smaller than 1/5 sysclk rate */ if (factor * (i + 1) < 5) continue; tmprate = freq * factor * (i + 2); if (baudclk_is_used) clkrate = clk_get_rate(ssi_private->baudclk); else clkrate = clk_round_rate(ssi_private->baudclk, tmprate); clkrate /= factor; afreq = clkrate / (i + 1); if (freq == afreq) sub = 0; else if (freq / afreq == 1) sub = freq - afreq; else if (afreq / freq == 1) sub = afreq - freq; else continue; /* Calculate the fraction */ sub *= 100000; do_div(sub, freq); if (sub < savesub) { baudrate = tmprate; savesub = sub; pm = i; } /* We are lucky */ if (savesub == 0) break; } /* No proper pm was found if it still remains at the initial value */ if (pm == 999) { dev_err(cpu_dai->dev, "failed to handle the required sysclk\n"); return -EINVAL; } stccr = CCSR_SSI_SxCCR_PM(pm + 1) | (div2 ? CCSR_SSI_SxCCR_DIV2 : 0) | (psr ? CCSR_SSI_SxCCR_PSR : 0); mask = CCSR_SSI_SxCCR_PM_MASK | CCSR_SSI_SxCCR_DIV2 | CCSR_SSI_SxCCR_PSR; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || synchronous) regmap_update_bits(regs, CCSR_SSI_STCCR, mask, stccr); else regmap_update_bits(regs, CCSR_SSI_SRCCR, mask, stccr); if (!baudclk_is_used) { ret = clk_set_rate(ssi_private->baudclk, baudrate); if (ret) { dev_err(cpu_dai->dev, "failed to set baudclk rate\n"); return -EINVAL; } } return 0; } static int fsl_ssi_set_dai_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai); ssi_private->bitclk_freq = freq; return 0; } /** * fsl_ssi_hw_params - program the sample size * * Most of the SSI registers have been programmed in the startup function, * but the word length must be programmed here. Unfortunately, programming * the SxCCR.WL bits requires the SSI to be temporarily disabled. This can * cause a problem with supporting simultaneous playback and capture. If * the SSI is already playing a stream, then that stream may be temporarily * stopped when you start capture. * * Note: The SxCCR.DC and SxCCR.PM bits are only used if the SSI is the * clock master. 
*/ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *cpu_dai) { struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai); struct regmap *regs = ssi_private->regs; unsigned int channels = params_channels(hw_params); unsigned int sample_size = snd_pcm_format_width(params_format(hw_params)); u32 wl = CCSR_SSI_SxCCR_WL(sample_size); int ret; u32 scr_val; int enabled; regmap_read(regs, CCSR_SSI_SCR, &scr_val); enabled = scr_val & CCSR_SSI_SCR_SSIEN; /* * If we're in synchronous mode, and the SSI is already enabled, * then STCCR is already set properly. */ if (enabled && ssi_private->cpu_dai_drv.symmetric_rates) return 0; if (fsl_ssi_is_i2s_master(ssi_private)) { ret = fsl_ssi_set_bclk(substream, cpu_dai, hw_params); if (ret) return ret; /* Do not enable the clock if it is already enabled */ if (!(ssi_private->baudclk_streams & BIT(substream->stream))) { ret = clk_prepare_enable(ssi_private->baudclk); if (ret) return ret; ssi_private->baudclk_streams |= BIT(substream->stream); } } /* * FIXME: The documentation says that SxCCR[WL] should not be * modified while the SSI is enabled. The only time this can * happen is if we're trying to do simultaneous playback and * capture in asynchronous mode. Unfortunately, I have been unable * to get that to work at all on the P1022DS. Therefore, we don't * bother to disable/enable the SSI when setting SxCCR[WL], because * the SSI will stop anyway. Maybe one day, this will get fixed. */ /* In synchronous mode, the SSI uses STCCR for capture */ if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) || ssi_private->cpu_dai_drv.symmetric_rates) regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_WL_MASK, wl); else regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_WL_MASK, wl); if (!fsl_ssi_is_ac97(ssi_private)) regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_NET | CCSR_SSI_SCR_I2S_MODE_MASK, channels == 1 ? 
0 : ssi_private->i2s_mode); return 0; } static int fsl_ssi_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai); if (fsl_ssi_is_i2s_master(ssi_private) && ssi_private->baudclk_streams & BIT(substream->stream)) { clk_disable_unprepare(ssi_private->baudclk); ssi_private->baudclk_streams &= ~BIT(substream->stream); } return 0; } static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private, unsigned int fmt) { struct regmap *regs = ssi_private->regs; u32 strcr = 0, stcr, srcr, scr, mask; u8 wm; ssi_private->dai_fmt = fmt; if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) { dev_err(&ssi_private->pdev->dev, "baudclk is missing which is necessary for master mode\n"); return -EINVAL; } fsl_ssi_setup_reg_vals(ssi_private); regmap_read(regs, CCSR_SSI_SCR, &scr); scr &= ~(CCSR_SSI_SCR_SYN | CCSR_SSI_SCR_I2S_MODE_MASK); scr |= CCSR_SSI_SCR_SYNC_TX_FS; mask = CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TFDIR | CCSR_SSI_STCR_TXDIR | CCSR_SSI_STCR_TSCKP | CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TEFS; regmap_read(regs, CCSR_SSI_STCR, &stcr); regmap_read(regs, CCSR_SSI_SRCR, &srcr); stcr &= ~mask; srcr &= ~mask; ssi_private->i2s_mode = CCSR_SSI_SCR_NET; switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER; regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_DC_MASK, CCSR_SSI_SxCCR_DC(2)); regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_DC_MASK, CCSR_SSI_SxCCR_DC(2)); break; case SND_SOC_DAIFMT_CBM_CFM: ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE; break; default: return -EINVAL; } /* Data on rising edge of bclk, frame low, 1clk before data */ strcr |= CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TSCKP | CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TEFS; break; case SND_SOC_DAIFMT_LEFT_J: /* Data on rising edge of bclk, frame high */ strcr |= CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TSCKP; break; case SND_SOC_DAIFMT_DSP_A: /* Data on rising edge of bclk, frame high, 1clk before data */ strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP | CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TEFS; break; case SND_SOC_DAIFMT_DSP_B: /* Data on rising edge of bclk, frame high */ strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP | CCSR_SSI_STCR_TXBIT0; break; case SND_SOC_DAIFMT_AC97: ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_NORMAL; break; default: return -EINVAL; } scr |= ssi_private->i2s_mode; /* DAI clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: /* Nothing to do for both normal cases */ break; case SND_SOC_DAIFMT_IB_NF: /* Invert bit clock */ strcr ^= CCSR_SSI_STCR_TSCKP; break; case SND_SOC_DAIFMT_NB_IF: /* Invert frame clock */ strcr ^= CCSR_SSI_STCR_TFSI; break; case SND_SOC_DAIFMT_IB_IF: /* Invert both clocks */ strcr ^= CCSR_SSI_STCR_TSCKP; strcr ^= CCSR_SSI_STCR_TFSI; break; default: return -EINVAL; } /* DAI clock master masks */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: strcr |= CCSR_SSI_STCR_TFDIR | CCSR_SSI_STCR_TXDIR; scr |= CCSR_SSI_SCR_SYS_CLK_EN; break; case SND_SOC_DAIFMT_CBM_CFM: scr &= ~CCSR_SSI_SCR_SYS_CLK_EN; break; default: return -EINVAL; } stcr |= strcr; srcr |= strcr; if (ssi_private->cpu_dai_drv.symmetric_rates) { /* Need to clear RXDIR when using SYNC mode */ srcr &= ~CCSR_SSI_SRCR_RXDIR; scr |= 
CCSR_SSI_SCR_SYN; } regmap_write(regs, CCSR_SSI_STCR, stcr); regmap_write(regs, CCSR_SSI_SRCR, srcr); regmap_write(regs, CCSR_SSI_SCR, scr); /* * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't * use FIFO 1. We program the transmit watermark to signal a DMA transfer * if there are only two (or fewer) elements left in the FIFO. Two * elements equals one frame (left channel, right channel). This value, * however, depends on the depth of the transmit buffer. * * We set the watermark on the same level as the DMA burstsize. For * fiq it is probably better to use the biggest possible watermark * size. */ if (ssi_private->use_dma) wm = ssi_private->fifo_depth - 2; else wm = ssi_private->fifo_depth; regmap_write(regs, CCSR_SSI_SFCSR, CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) | CCSR_SSI_SFCSR_TFWM1(wm) | CCSR_SSI_SFCSR_RFWM1(wm)); if (ssi_private->use_dual_fifo) { regmap_update_bits(regs, CCSR_SSI_SRCR, CCSR_SSI_SRCR_RFEN1, CCSR_SSI_SRCR_RFEN1); regmap_update_bits(regs, CCSR_SSI_STCR, CCSR_SSI_STCR_TFEN1, CCSR_SSI_STCR_TFEN1); regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_TCH_EN, CCSR_SSI_SCR_TCH_EN); } if (fmt & SND_SOC_DAIFMT_AC97) fsl_ssi_setup_ac97(ssi_private); return 0; } /** * fsl_ssi_set_dai_fmt - configure Digital Audio Interface Format. */ static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai); return _fsl_ssi_set_dai_fmt(ssi_private, fmt); } /** * fsl_ssi_set_dai_tdm_slot - set TDM slot number * * Note: This function can only be called when using the SSI as DAI master */ static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask, u32 rx_mask, int slots, int slot_width) { struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai); struct regmap *regs = ssi_private->regs; u32 val; /* The slot number should be >= 2 if using Network mode or I2S mode */ regmap_read(regs, CCSR_SSI_SCR, &val); val &= CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_NET; if (val && slots < 2) { dev_err(cpu_dai->dev, "slot number should be >= 2 in I2S or NET\n"); return -EINVAL; } regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_DC_MASK, CCSR_SSI_SxCCR_DC(slots)); regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_DC_MASK, CCSR_SSI_SxCCR_DC(slots)); /* The SxMSK registers need the SSI to provide an essential clock due * to the hardware design, so we temporarily enable the SSI here to * set them. */ regmap_read(regs, CCSR_SSI_SCR, &val); val &= CCSR_SSI_SCR_SSIEN; regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN, CCSR_SSI_SCR_SSIEN); regmap_write(regs, CCSR_SSI_STMSK, tx_mask); regmap_write(regs, CCSR_SSI_SRMSK, rx_mask); regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN, val); return 0; } /** * fsl_ssi_trigger: start and stop the DMA transfer. * * This function is called by ALSA to start, stop, pause, and resume the DMA * transfer of data. * * The DMA channel is in external master start and pause mode, which * means the SSI completely controls the flow of data. 
*/ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct regmap *regs = ssi_private->regs; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) fsl_ssi_tx_config(ssi_private, true); else fsl_ssi_rx_config(ssi_private, true); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) fsl_ssi_tx_config(ssi_private, false); else fsl_ssi_rx_config(ssi_private, false); break; default: return -EINVAL; } if (fsl_ssi_is_ac97(ssi_private)) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_TX_CLR); else regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_RX_CLR); } return 0; } static int fsl_ssi_dai_probe(struct snd_soc_dai *dai) { struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(dai); if (ssi_private->soc->imx && ssi_private->use_dma) { dai->playback_dma_data = &ssi_private->dma_params_tx; dai->capture_dma_data = &ssi_private->dma_params_rx; } return 0; } static const struct snd_soc_dai_ops fsl_ssi_dai_ops = { .startup = fsl_ssi_startup, .shutdown = fsl_ssi_shutdown, .hw_params = fsl_ssi_hw_params, .hw_free = fsl_ssi_hw_free, .set_fmt = fsl_ssi_set_dai_fmt, .set_sysclk = fsl_ssi_set_dai_sysclk, .set_tdm_slot = fsl_ssi_set_dai_tdm_slot, .trigger = fsl_ssi_trigger, }; /* Template for the CPU dai driver structure */ static struct snd_soc_dai_driver fsl_ssi_dai_template = { .probe = fsl_ssi_dai_probe, .playback = { .stream_name = "CPU-Playback", .channels_min = 1, .channels_max = 2, .rates = FSLSSI_I2S_RATES, .formats = FSLSSI_I2S_FORMATS, }, .capture = { .stream_name = "CPU-Capture", .channels_min = 1, .channels_max = 2, .rates = FSLSSI_I2S_RATES, .formats = FSLSSI_I2S_FORMATS, }, .ops = &fsl_ssi_dai_ops, }; static const struct snd_soc_component_driver fsl_ssi_component = { .name = "fsl-ssi", }; static struct snd_soc_dai_driver fsl_ssi_ac97_dai = { .ac97_control = 1, .playback = { .stream_name = "AC97 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .stream_name = "AC97 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .ops = &fsl_ssi_dai_ops, }; static struct fsl_ssi_private *fsl_ac97_data; static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct regmap *regs = fsl_ac97_data->regs; unsigned int lreg; unsigned int lval; if (reg > 0x7f) return; lreg = reg << 12; regmap_write(regs, CCSR_SSI_SACADD, lreg); lval = val << 4; regmap_write(regs, CCSR_SSI_SACDAT, lval); regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK, CCSR_SSI_SACNT_WR); udelay(100); } static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { struct regmap *regs = fsl_ac97_data->regs; unsigned short val = -1; u32 reg_val; unsigned int lreg; lreg = (reg & 0x7f) << 12; regmap_write(regs, CCSR_SSI_SACADD, lreg); regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK, CCSR_SSI_SACNT_RD); udelay(100); regmap_read(regs, CCSR_SSI_SACDAT, &reg_val); val = (reg_val >> 4) & 0xffff; return val; } static struct snd_ac97_bus_ops fsl_ssi_ac97_ops = { .read 
= fsl_ssi_ac97_read, .write = fsl_ssi_ac97_write, }; /** * Make every character in a string lower-case */ static void make_lowercase(char *s) { char *p = s; char c; while ((c = *p)) { if ((c >= 'A') && (c <= 'Z')) *p = c + ('a' - 'A'); p++; } } static int fsl_ssi_imx_probe(struct platform_device *pdev, struct fsl_ssi_private *ssi_private, void __iomem *iomem) { struct device_node *np = pdev->dev.of_node; u32 dmas[4]; int ret; u32 buffer_size; if (ssi_private->has_ipg_clk_name) ssi_private->clk = devm_clk_get(&pdev->dev, "ipg"); else ssi_private->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(ssi_private->clk)) { ret = PTR_ERR(ssi_private->clk); dev_err(&pdev->dev, "could not get clock: %d\n", ret); return ret; } if (!ssi_private->has_ipg_clk_name) { ret = clk_prepare_enable(ssi_private->clk); if (ret) { dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret); return ret; } } /* For those SLAVE implementations, we ignore non-baudclk cases * and, instead, abandon MASTER mode, which needs the baud clock. */ ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud"); if (IS_ERR(ssi_private->baudclk)) dev_dbg(&pdev->dev, "could not get baud clock: %ld\n", PTR_ERR(ssi_private->baudclk)); /* * We set the burst size to "fifo_depth - 2" to match the SSI * watermark setting in fsl_ssi_startup(). */ ssi_private->dma_params_tx.maxburst = ssi_private->fifo_depth - 2; ssi_private->dma_params_rx.maxburst = ssi_private->fifo_depth - 2; ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0; ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0; ret = of_property_read_u32_array(np, "dmas", dmas, 4); if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) { ssi_private->use_dual_fifo = true; /* When using dual fifo mode, we need to keep the watermark * an even number due to a DMA script limitation. */ ssi_private->dma_params_tx.maxburst &= ~0x1; ssi_private->dma_params_rx.maxburst &= ~0x1; } if (of_property_read_u32(np, "fsl,dma-buffer-size", &buffer_size)) buffer_size = IMX_SSI_DMABUF_SIZE; if (!ssi_private->use_dma) { /* * Some boards use an incompatible codec. To get it * working, we are using imx-fiq-pcm-audio, which * can handle those codecs. DMA is not possible in this * situation. */ ssi_private->fiq_params.irq = ssi_private->irq; ssi_private->fiq_params.base = iomem; ssi_private->fiq_params.dma_params_rx = &ssi_private->dma_params_rx; ssi_private->fiq_params.dma_params_tx = &ssi_private->dma_params_tx; ret = imx_pcm_fiq_init(pdev, &ssi_private->fiq_params); if (ret) goto error_pcm; } else { ret = imx_pcm_dma_init(pdev, buffer_size); if (ret) goto error_pcm; } return 0; error_pcm: if (!ssi_private->has_ipg_clk_name) clk_disable_unprepare(ssi_private->clk); return ret; } static void fsl_ssi_imx_clean(struct platform_device *pdev, struct fsl_ssi_private *ssi_private) { if (!ssi_private->use_dma) imx_pcm_fiq_exit(pdev); if (!ssi_private->has_ipg_clk_name) clk_disable_unprepare(ssi_private->clk); } static int fsl_ssi_probe(struct platform_device *pdev) { struct fsl_ssi_private *ssi_private; int ret = 0; struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_id; const char *p, *sprop; const uint32_t *iprop; struct resource res; void __iomem *iomem; char name[64]; /* SSIs that are not connected on the board should have a * status = "disabled" * property in their device tree nodes. 
*/ if (!of_device_is_available(np)) return -ENODEV; of_id = of_match_device(fsl_ssi_ids, &pdev->dev); if (!of_id || !of_id->data) return -EINVAL; ssi_private = devm_kzalloc(&pdev->dev, sizeof(*ssi_private), GFP_KERNEL); if (!ssi_private) { dev_err(&pdev->dev, "could not allocate DAI object\n"); return -ENOMEM; } ssi_private->soc = of_id->data; sprop = of_get_property(np, "fsl,mode", NULL); if (sprop) { if (!strcmp(sprop, "ac97-slave")) ssi_private->dai_fmt = SND_SOC_DAIFMT_AC97; else if (!strcmp(sprop, "i2s-slave")) ssi_private->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM; } ssi_private->use_dma = !of_property_read_bool(np, "fsl,fiq-stream-filter"); if (fsl_ssi_is_ac97(ssi_private)) { memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_ac97_dai, sizeof(fsl_ssi_ac97_dai)); fsl_ac97_data = ssi_private; snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev); } else { /* Initialize this copy of the CPU DAI driver structure */ memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template, sizeof(fsl_ssi_dai_template)); } ssi_private->cpu_dai_drv.name = dev_name(&pdev->dev); /* Get the addresses and IRQ */ ret = of_address_to_resource(np, 0, &res); if (ret) { dev_err(&pdev->dev, "could not determine device resources\n"); return ret; } ssi_private->ssi_phys = res.start; iomem = devm_ioremap(&pdev->dev, res.start, resource_size(&res)); if (!iomem) { dev_err(&pdev->dev, "could not map device resources\n"); return -ENOMEM; } ret = of_property_match_string(np, "clock-names", "ipg"); if (ret < 0) { ssi_private->has_ipg_clk_name = false; ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem, &fsl_ssi_regconfig); } else { ssi_private->has_ipg_clk_name = true; ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev, "ipg", iomem, &fsl_ssi_regconfig); } if (IS_ERR(ssi_private->regs)) { dev_err(&pdev->dev, "Failed to init register map\n"); return PTR_ERR(ssi_private->regs); } ssi_private->irq = irq_of_parse_and_map(np, 0); if (!ssi_private->irq) { dev_err(&pdev->dev, "no irq for node %s\n", np->full_name); return -ENXIO; } /* Are the RX and the TX clocks locked? */ if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) { ssi_private->cpu_dai_drv.symmetric_rates = 1; ssi_private->cpu_dai_drv.symmetric_channels = 1; ssi_private->cpu_dai_drv.symmetric_samplebits = 1; } /* Determine the FIFO depth. */ iprop = of_get_property(np, "fsl,fifo-depth", NULL); if (iprop) ssi_private->fifo_depth = be32_to_cpup(iprop); else /* Older 8610 DTs didn't have the fifo-depth property */ ssi_private->fifo_depth = 8; pm_runtime_enable(&pdev->dev); dev_set_drvdata(&pdev->dev, ssi_private); if (ssi_private->soc->imx) { ret = fsl_ssi_imx_probe(pdev, ssi_private, iomem); if (ret) goto error_irqmap; } ret = snd_soc_register_component(&pdev->dev, &fsl_ssi_component, &ssi_private->cpu_dai_drv, 1); if (ret) { dev_err(&pdev->dev, "failed to register DAI: %d\n", ret); goto error_asoc_register; } if (ssi_private->use_dma) { ret = devm_request_irq(&pdev->dev, ssi_private->irq, fsl_ssi_isr, 0, dev_name(&pdev->dev), ssi_private); if (ret < 0) { dev_err(&pdev->dev, "could not claim irq %u\n", ssi_private->irq); goto error_irq; } } ret = fsl_ssi_debugfs_create(&ssi_private->dbg_stats, &pdev->dev); if (ret) goto error_asoc_register; /* * If codec-handle property is missing from SSI node, we assume * that the machine driver uses new binding which does not require * SSI driver to trigger machine driver's probe. */ if (!of_get_property(np, "codec-handle", NULL)) goto done; /* Trigger the machine driver's probe function. 
The platform driver * name of the machine driver is taken from /compatible property of the * device tree. We also pass the address of the CPU DAI driver * structure. */ sprop = of_get_property(of_find_node_by_path("/"), "compatible", NULL); /* Sometimes the compatible name has a "fsl," prefix, so we strip it. */ p = strrchr(sprop, ','); if (p) sprop = p + 1; snprintf(name, sizeof(name), "snd-soc-%s", sprop); make_lowercase(name); ssi_private->pdev = platform_device_register_data(&pdev->dev, name, 0, NULL, 0); if (IS_ERR(ssi_private->pdev)) { ret = PTR_ERR(ssi_private->pdev); dev_err(&pdev->dev, "failed to register platform: %d\n", ret); goto error_sound_card; } done: if (ssi_private->dai_fmt) _fsl_ssi_set_dai_fmt(ssi_private, ssi_private->dai_fmt); return 0; error_sound_card: fsl_ssi_debugfs_remove(&ssi_private->dbg_stats); error_irq: snd_soc_unregister_component(&pdev->dev); error_asoc_register: if (ssi_private->soc->imx) fsl_ssi_imx_clean(pdev, ssi_private); error_irqmap: if (ssi_private->use_dma) irq_dispose_mapping(ssi_private->irq); return ret; } static int fsl_ssi_remove(struct platform_device *pdev) { struct fsl_ssi_private *ssi_private = dev_get_drvdata(&pdev->dev); fsl_ssi_debugfs_remove(&ssi_private->dbg_stats); if (ssi_private->pdev) platform_device_unregister(ssi_private->pdev); snd_soc_unregister_component(&pdev->dev); if (ssi_private->soc->imx) fsl_ssi_imx_clean(pdev, ssi_private); if (ssi_private->use_dma) irq_dispose_mapping(ssi_private->irq); return 0; } #ifdef CONFIG_PM_RUNTIME static int fsl_ssi_runtime_resume(struct device *dev) { request_bus_freq(BUS_FREQ_AUDIO); return 0; } static int fsl_ssi_runtime_suspend(struct device *dev) { release_bus_freq(BUS_FREQ_AUDIO); return 0; } #endif #ifdef CONFIG_PM_SLEEP static int fsl_ssi_suspend(struct device *dev) { struct fsl_ssi_private *ssi_private = dev_get_drvdata(dev); struct regmap *regs = ssi_private->regs; regmap_read(regs, CCSR_SSI_SFCSR, &ssi_private->regcache_sfcsr); regcache_cache_only(regs, true); regcache_mark_dirty(regs); return 0; } static int fsl_ssi_resume(struct device *dev) { struct fsl_ssi_private *ssi_private = dev_get_drvdata(dev); struct regmap *regs = ssi_private->regs; regcache_cache_only(regs, false); regmap_update_bits(regs, CCSR_SSI_SFCSR, CCSR_SSI_SFCSR_RFWM1_MASK | CCSR_SSI_SFCSR_TFWM1_MASK | CCSR_SSI_SFCSR_RFWM0_MASK | CCSR_SSI_SFCSR_TFWM0_MASK, ssi_private->regcache_sfcsr); return regcache_sync(regs); } #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops fsl_ssi_pm = { SET_RUNTIME_PM_OPS(fsl_ssi_runtime_suspend, fsl_ssi_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(fsl_ssi_suspend, fsl_ssi_resume) }; static struct platform_driver fsl_ssi_driver = { .driver = { .name = "fsl-ssi-dai", .owner = THIS_MODULE, .of_match_table = fsl_ssi_ids, .pm = &fsl_ssi_pm, }, .probe = fsl_ssi_probe, .remove = fsl_ssi_remove, }; module_platform_driver(fsl_ssi_driver); MODULE_ALIAS("platform:fsl-ssi-dai"); MODULE_AUTHOR("Timur Tabi <timur@freescale.com>"); MODULE_DESCRIPTION("Freescale Synchronous Serial Interface (SSI) ASoC Driver"); MODULE_LICENSE("GPL v2");
FEDEVEL/openrex-linux-3.14
sound/soc/fsl/fsl_ssi.c
C
gpl-2.0
46,256
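The fsl_ssi_disable_val() macro in the driver above is easy to get wrong, so here is a small, self-contained sketch that evaluates it for a pair of made-up bit masks (only the macro itself is copied from fsl_ssi.c; the mask values are invented for illustration). It shows that while the other stream is still active only the bits unique to the stopping stream are cleared, and shared bits survive.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Copied verbatim from fsl_ssi.c: bits to clear for the stream being
 * disabled, minus any bits the still-active stream also needs. */
#define fsl_ssi_disable_val(vals_disable, vals_stream, stream_active) \
	((vals_disable) & \
	 ((vals_disable) ^ ((vals_stream) * (u32)!!(stream_active))))

int main(void)
{
	/* Hypothetical SIER masks: TX (being stopped) and RX (other stream). */
	u32 tx = 0xb;	/* 1011: two TX-only bits plus one shared bit (0x2) */
	u32 rx = 0x6;	/* 0110: one RX-only bit plus the same shared bit */

	/* RX still running: only the TX-only bits (1001) may be cleared. */
	printf("clear with rx active:   0x%x\n", fsl_ssi_disable_val(tx, rx, 1));

	/* RX idle: every TX bit (1011) can be cleared. */
	printf("clear with rx inactive: 0x%x\n", fsl_ssi_disable_val(tx, rx, 0));
	return 0;
}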
/* * Copyright (c) 2003, 2007-14 Matteo Frigo * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * */ /* This file was automatically generated --- DO NOT EDIT */ /* Generated on Tue Mar 4 13:50:34 EST 2014 */ #include "codelet-rdft.h" #ifdef HAVE_FMA /* Generated by: ../../../genfft/gen_r2cb.native -fma -reorder-insns -schedule-for-pipeline -compact -variables 4 -pipeline-latency 4 -sign 1 -n 16 -name r2cbIII_16 -dft-III -include r2cbIII.h */ /* * This function contains 66 FP additions, 36 FP multiplications, * (or, 46 additions, 16 multiplications, 20 fused multiply/add), * 55 stack variables, 9 constants, and 32 memory accesses */ #include "r2cbIII.h" static void r2cbIII_16(R *R0, R *R1, R *Cr, R *Ci, stride rs, stride csr, stride csi, INT v, INT ivs, INT ovs) { DK(KP668178637, +0.668178637919298919997757686523080761552472251); DK(KP1_662939224, +1.662939224605090474157576755235811513477121624); DK(KP198912367, +0.198912367379658006911597622644676228597850501); DK(KP1_961570560, +1.961570560806460898252364472268478073947867462); DK(KP707106781, +0.707106781186547524400844362104849039284835938); DK(KP1_414213562, +1.414213562373095048801688724209698078569671875); DK(KP414213562, +0.414213562373095048801688724209698078569671875); DK(KP1_847759065, +1.847759065022573512256366378793576573644833252); DK(KP2_000000000, +2.000000000000000000000000000000000000000000000); { INT i; for (i = v; i > 0; i = i - 1, R0 = R0 + ovs, R1 = R1 + ovs, Cr = Cr + ivs, Ci = Ci + ivs, MAKE_VOLATILE_STRIDE(64, rs), MAKE_VOLATILE_STRIDE(64, csr), MAKE_VOLATILE_STRIDE(64, csi)) { E TA, TD, Tv, TG, TE, TF; { E TK, TP, T7, T13, TW, TH, Tj, TC, To, Te, TX, TS, T12, Tt, TB; { E T4, Tf, T3, TU, Tz, T5, Tg, Th; { E T1, T2, Tx, Ty; T1 = Cr[0]; T2 = Cr[WS(csr, 7)]; Tx = Ci[0]; Ty = Ci[WS(csi, 7)]; T4 = Cr[WS(csr, 4)]; Tf = T1 - T2; T3 = T1 + T2; TU = Ty - Tx; Tz = Tx + Ty; T5 = Cr[WS(csr, 3)]; Tg = Ci[WS(csi, 4)]; Th = Ci[WS(csi, 3)]; } { E Tb, Tk, Ta, TR, Tn, Tc, Tq, Tr; { E T8, T9, Tl, Tm; T8 = Cr[WS(csr, 2)]; { E Tw, T6, TV, Ti; Tw = T4 - T5; T6 = T4 + T5; TV = Th - Tg; Ti = Tg + Th; TK = Tw - Tz; TA = Tw + Tz; TP = T3 - T6; T7 = T3 + T6; T13 = TV + TU; TW = TU - TV; TH = Tf + Ti; Tj = Tf - Ti; T9 = Cr[WS(csr, 5)]; } Tl = Ci[WS(csi, 2)]; Tm = Ci[WS(csi, 5)]; Tb = Cr[WS(csr, 1)]; Tk = T8 - T9; Ta = T8 + T9; TR = Tl - Tm; Tn = Tl + Tm; Tc = Cr[WS(csr, 6)]; Tq = Ci[WS(csi, 1)]; Tr = Ci[WS(csi, 6)]; } TC = Tk + Tn; To = Tk - Tn; { E Tp, Td, TQ, Ts; Tp = Tb - Tc; Td = Tb + Tc; TQ = Tr - Tq; Ts = Tq + Tr; Te = Ta + Td; TX = Ta - Td; TS = TQ - TR; T12 = TR + TQ; Tt = Tp - Ts; TB = Tp + Ts; } } } { E T10, TT, TY, TZ; R0[0] = KP2_000000000 * (T7 + Te); R0[WS(rs, 4)] = KP2_000000000 * (T13 - T12); T10 = TP - TS; TT = TP + TS; TY = TW - TX; TZ = TX + TW; { E T11, T14, TI, TL, Tu; T11 = T7 - Te; T14 = T12 + T13; R0[WS(rs, 
5)] = KP1_847759065 * (FNMS(KP414213562, TT, TY)); R0[WS(rs, 1)] = KP1_847759065 * (FMA(KP414213562, TY, TT)); R0[WS(rs, 6)] = KP1_414213562 * (T14 - T11); R0[WS(rs, 2)] = KP1_414213562 * (T11 + T14); TD = TB - TC; TI = TC + TB; TL = To - Tt; Tu = To + Tt; { E TO, TJ, TN, TM; R0[WS(rs, 7)] = -(KP1_847759065 * (FNMS(KP414213562, TZ, T10))); R0[WS(rs, 3)] = KP1_847759065 * (FMA(KP414213562, T10, TZ)); TO = FMA(KP707106781, TI, TH); TJ = FNMS(KP707106781, TI, TH); TN = FMA(KP707106781, TL, TK); TM = FNMS(KP707106781, TL, TK); Tv = FMA(KP707106781, Tu, Tj); TG = FNMS(KP707106781, Tu, Tj); R1[WS(rs, 3)] = KP1_961570560 * (FMA(KP198912367, TO, TN)); R1[WS(rs, 7)] = -(KP1_961570560 * (FNMS(KP198912367, TN, TO))); R1[WS(rs, 5)] = KP1_662939224 * (FNMS(KP668178637, TJ, TM)); R1[WS(rs, 1)] = KP1_662939224 * (FMA(KP668178637, TM, TJ)); } } } } TE = FNMS(KP707106781, TD, TA); TF = FMA(KP707106781, TD, TA); R1[WS(rs, 2)] = -(KP1_662939224 * (FNMS(KP668178637, TG, TF))); R1[WS(rs, 6)] = -(KP1_662939224 * (FMA(KP668178637, TF, TG))); R1[WS(rs, 4)] = -(KP1_961570560 * (FMA(KP198912367, Tv, TE))); R1[0] = KP1_961570560 * (FNMS(KP198912367, TE, Tv)); } } } static const kr2c_desc desc = { 16, "r2cbIII_16", {46, 16, 20, 0}, &GENUS }; void X(codelet_r2cbIII_16) (planner *p) { X(kr2c_register) (p, r2cbIII_16, &desc); } #else /* HAVE_FMA */ /* Generated by: ../../../genfft/gen_r2cb.native -compact -variables 4 -pipeline-latency 4 -sign 1 -n 16 -name r2cbIII_16 -dft-III -include r2cbIII.h */ /* * This function contains 66 FP additions, 32 FP multiplications, * (or, 54 additions, 20 multiplications, 12 fused multiply/add), * 40 stack variables, 9 constants, and 32 memory accesses */ #include "r2cbIII.h" static void r2cbIII_16(R *R0, R *R1, R *Cr, R *Ci, stride rs, stride csr, stride csi, INT v, INT ivs, INT ovs) { DK(KP1_961570560, +1.961570560806460898252364472268478073947867462); DK(KP390180644, +0.390180644032256535696569736954044481855383236); DK(KP1_111140466, +1.111140466039204449485661627897065748749874382); DK(KP1_662939224, +1.662939224605090474157576755235811513477121624); DK(KP707106781, +0.707106781186547524400844362104849039284835938); DK(KP1_414213562, +1.414213562373095048801688724209698078569671875); DK(KP765366864, +0.765366864730179543456919968060797733522689125); DK(KP1_847759065, +1.847759065022573512256366378793576573644833252); DK(KP2_000000000, +2.000000000000000000000000000000000000000000000); { INT i; for (i = v; i > 0; i = i - 1, R0 = R0 + ovs, R1 = R1 + ovs, Cr = Cr + ivs, Ci = Ci + ivs, MAKE_VOLATILE_STRIDE(64, rs), MAKE_VOLATILE_STRIDE(64, csr), MAKE_VOLATILE_STRIDE(64, csi)) { E T7, TW, T13, Tj, TD, TK, TP, TH, Te, TX, T12, To, Tt, Tx, TS; E Tw, TT, TY; { E T3, Tf, TC, TV, T6, Tz, Ti, TU; { E T1, T2, TA, TB; T1 = Cr[0]; T2 = Cr[WS(csr, 7)]; T3 = T1 + T2; Tf = T1 - T2; TA = Ci[0]; TB = Ci[WS(csi, 7)]; TC = TA + TB; TV = TB - TA; } { E T4, T5, Tg, Th; T4 = Cr[WS(csr, 4)]; T5 = Cr[WS(csr, 3)]; T6 = T4 + T5; Tz = T4 - T5; Tg = Ci[WS(csi, 4)]; Th = Ci[WS(csi, 3)]; Ti = Tg + Th; TU = Tg - Th; } T7 = T3 + T6; TW = TU + TV; T13 = TV - TU; Tj = Tf - Ti; TD = Tz + TC; TK = Tz - TC; TP = T3 - T6; TH = Tf + Ti; } { E Ta, Tk, Tn, TR, Td, Tp, Ts, TQ; { E T8, T9, Tl, Tm; T8 = Cr[WS(csr, 2)]; T9 = Cr[WS(csr, 5)]; Ta = T8 + T9; Tk = T8 - T9; Tl = Ci[WS(csi, 2)]; Tm = Ci[WS(csi, 5)]; Tn = Tl + Tm; TR = Tl - Tm; } { E Tb, Tc, Tq, Tr; Tb = Cr[WS(csr, 1)]; Tc = Cr[WS(csr, 6)]; Td = Tb + Tc; Tp = Tb - Tc; Tq = Ci[WS(csi, 1)]; Tr = Ci[WS(csi, 6)]; Ts = Tq + Tr; TQ = Tr - Tq; } Te = Ta + Td; TX = Ta - Td; 
T12 = TR + TQ; To = Tk - Tn; Tt = Tp - Ts; Tx = Tp + Ts; TS = TQ - TR; Tw = Tk + Tn; } R0[0] = KP2_000000000 * (T7 + Te); R0[WS(rs, 4)] = KP2_000000000 * (T13 - T12); TT = TP + TS; TY = TW - TX; R0[WS(rs, 1)] = FMA(KP1_847759065, TT, KP765366864 * TY); R0[WS(rs, 5)] = FNMS(KP765366864, TT, KP1_847759065 * TY); { E T11, T14, TZ, T10; T11 = T7 - Te; T14 = T12 + T13; R0[WS(rs, 2)] = KP1_414213562 * (T11 + T14); R0[WS(rs, 6)] = KP1_414213562 * (T14 - T11); TZ = TP - TS; T10 = TX + TW; R0[WS(rs, 3)] = FMA(KP765366864, TZ, KP1_847759065 * T10); R0[WS(rs, 7)] = FNMS(KP1_847759065, TZ, KP765366864 * T10); } { E TJ, TN, TM, TO, TI, TL; TI = KP707106781 * (Tw + Tx); TJ = TH - TI; TN = TH + TI; TL = KP707106781 * (To - Tt); TM = TK - TL; TO = TL + TK; R1[WS(rs, 1)] = FMA(KP1_662939224, TJ, KP1_111140466 * TM); R1[WS(rs, 7)] = FNMS(KP1_961570560, TN, KP390180644 * TO); R1[WS(rs, 5)] = FNMS(KP1_111140466, TJ, KP1_662939224 * TM); R1[WS(rs, 3)] = FMA(KP390180644, TN, KP1_961570560 * TO); } { E Tv, TF, TE, TG, Tu, Ty; Tu = KP707106781 * (To + Tt); Tv = Tj + Tu; TF = Tj - Tu; Ty = KP707106781 * (Tw - Tx); TE = Ty + TD; TG = Ty - TD; R1[0] = FNMS(KP390180644, TE, KP1_961570560 * Tv); R1[WS(rs, 6)] = FNMS(KP1_662939224, TF, KP1_111140466 * TG); R1[WS(rs, 4)] = -(FMA(KP390180644, Tv, KP1_961570560 * TE)); R1[WS(rs, 2)] = FMA(KP1_111140466, TF, KP1_662939224 * TG); } } } } static const kr2c_desc desc = { 16, "r2cbIII_16", {54, 20, 12, 0}, &GENUS }; void X(codelet_r2cbIII_16) (planner *p) { X(kr2c_register) (p, r2cbIII_16, &desc); } #endif /* HAVE_FMA */
Starlink/fftw
rdft/scalar/r2cb/r2cbIII_16.c
C
gpl-2.0
10,274
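The two generated codelet variants above must produce identical outputs; only the instruction mix differs. For instance, the FMA variant computes R0[WS(rs, 1)] as KP1_847759065 * FMA(KP414213562, TY, TT), while the plain variant computes FMA(KP1_847759065, TT, KP765366864 * TY); these agree because KP765366864 = KP1_847759065 * KP414213562 (i.e. 2*sin(pi/8) = 2*cos(pi/8) * tan(pi/8)). The sketch below assumes FFTW's usual scalar definition FMA(a, b, c) = a*b + c (an assumption; the real macro lives in FFTW's headers) and checks the identity numerically with arbitrary sample inputs.

#include <math.h>
#include <stdio.h>

/* Assumed scalar form of FFTW's FMA macro: a * b + c. */
#define FMA(a, b, c) (((a) * (b)) + (c))

int main(void)
{
	/* Constants copied from the codelet above. */
	const double KP1_847759065 = 1.847759065022573512256366378793576573644833252;
	const double KP414213562  = 0.414213562373095048801688724209698078569671875;
	const double KP765366864  = 0.765366864730179543456919968060797733522689125;

	/* Arbitrary sample inputs standing in for the TT/TY temporaries. */
	double TT = 0.123456789, TY = -0.987654321;

	double fma_variant   = KP1_847759065 * FMA(KP414213562, TY, TT);
	double plain_variant = FMA(KP1_847759065, TT, KP765366864 * TY);

	printf("fma variant:   %.17g\n", fma_variant);
	printf("plain variant: %.17g\n", plain_variant);
	printf("difference:    %.3g\n", fabs(fma_variant - plain_variant));
	return 0;
}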
/* testlib.c for c++ - test expectlib */ #include <stdio.h> #include <stdlib.h> #include "expect.h" extern "C" { extern int write(...); extern int strlen(...); } void timedout() { fprintf(stderr,"timed out\n"); exit(-1); } char move[100]; void read_first_move(int fd) { if (EXP_TIMEOUT == exp_expectl(fd,exp_glob,"first\r\n1.*\r\n",0,exp_end)) { timedout(); } sscanf(exp_match,"%*s 1. %s",move); } /* moves and counter-moves are printed out in different formats, sigh... */ void read_counter_move(int fd) { switch (exp_expectl(fd,exp_glob,"*...*\r\n",0,exp_end)) { case EXP_TIMEOUT: timedout(); case EXP_EOF: exit(-1); } sscanf(exp_match,"%*s %*s %*s %*s ... %s",move); } void read_move(int fd) { switch (exp_expectl(fd,exp_glob,"*...*\r\n*.*\r\n",0,exp_end)) { case EXP_TIMEOUT: timedout(); case EXP_EOF: exit(-1); } sscanf(exp_match,"%*s %*s ... %*s %*s %s",move); } void send_move(int fd) { write(fd,move,strlen(move)); } int main() { int fd1, fd2; exp_loguser = 1; exp_timeout = 3600; if (-1 == (fd1 = exp_spawnl("chess","chess",(char *)0))) { perror("chess"); exit(-1); } if (-1 == exp_expectl(fd1,exp_glob,"Chess\r\n",0,exp_end)) exit(-1); if (-1 == write(fd1,"first\r",6)) exit(-1); read_first_move(fd1); fd2 = exp_spawnl("chess","chess",(char *)0); if (-1 == exp_expectl(fd2,exp_glob,"Chess\r\n",0,exp_end)) exit(-1); for (;;) { send_move(fd2); read_counter_move(fd2); send_move(fd1); read_move(fd1); } }
atmark-techno/atmark-dist
user/expect/expect-5.43.0/example/chesslib++.c
C
gpl-2.0
1,443
/* * refclock_ulink - clock driver for Ultralink WWVB receiver */ /*********************************************************************** * * * Copyright (c) David L. Mills 1992-1998 * * * * Permission to use, copy, modify, and distribute this software and * * its documentation for any purpose and without fee is hereby * * granted, provided that the above copyright notice appears in all * * copies and that both the copyright notice and this permission * * notice appear in supporting documentation, and that the name * * University of Delaware not be used in advertising or publicity * * pertaining to distribution of the software without specific, * * written prior permission. The University of Delaware makes no * * representations about the suitability of this software for any * * purpose. It is provided "as is" without express or implied * * warranty. * **********************************************************************/ #ifdef HAVE_CONFIG_H #include <config.h> #endif #if defined(REFCLOCK) && defined(CLOCK_ULINK) #include <stdio.h> #include <ctype.h> #include "ntpd.h" #include "ntp_io.h" #include "ntp_refclock.h" #include "ntp_stdlib.h" /* This driver supports Ultralink Model 320,325,330,331,332 WWVB radios * * this driver was based on the refclock_wwvb.c driver * in the ntp distribution. * * Fudge Factors * * fudge flag1 0 don't poll clock * 1 send poll character * * revision history: * 99/9/09 j.c.lang original edits * 99/9/11 j.c.lang changed timecode parse to * match what the radio actually * sends. * 99/10/11 j.c.lang added support for continuous * time code mode (dipsw2) * 99/11/26 j.c.lang added support for 320 decoder * (taken from Dave Strout's * Model 320 driver) * 99/11/29 j.c.lang added fudge flag 1 to control * clock polling * 99/12/15 j.c.lang fixed 320 quality flag * 01/02/21 s.l.smith fixed 33x quality flag * added more debugging stuff * updated 33x time code explanation * 04/01/23 frank migge added support for 325 decoder * (tested with ULM325.F) * * Questions, bugs, ideas send to: * Joseph C. 
Lang * tcnojl1@earthlink.net * * Dave Strout * dstrout@linuxfoundry.com * * Frank Migge * frank.migge@oracle.com * * * on the Ultralink model 33X decoder Dip switch 2 controls * polled or continuous timecode * set fudge flag1 if using polled (needed for model 320 and 325) * don't set fudge flag1 if dip switch 2 is set on model 33x decoder */ /* * Interface definitions */ #define DEVICE "/dev/wwvb%d" /* device name and unit */ #define SPEED232 B9600 /* uart speed (9600 baud) */ #define PRECISION (-10) /* precision assumed (about 10 ms) */ #define REFID "WWVB" /* reference ID */ #define DESCRIPTION "Ultralink WWVB Receiver" /* WRU */ #define LEN33X 32 /* timecode length Model 33X and 325 */ #define LEN320 24 /* timecode length Model 320 */ #define SIGLCHAR33x 'S' /* signal strength identifier char 33x */ #define SIGLCHAR325 'R' /* signal strength identifier char 325 */ /* * unit control structure */ struct ulinkunit { u_char tcswitch; /* timecode switch */ l_fp laststamp; /* last receive timestamp */ }; /* * Function prototypes */ static int ulink_start P((int, struct peer *)); static void ulink_shutdown P((int, struct peer *)); static void ulink_receive P((struct recvbuf *)); static void ulink_poll P((int, struct peer *)); /* * Transfer vector */ struct refclock refclock_ulink = { ulink_start, /* start up driver */ ulink_shutdown, /* shut down driver */ ulink_poll, /* transmit poll message */ noentry, /* not used */ noentry, /* not used */ noentry, /* not used */ NOFLAGS }; /* * ulink_start - open the devices and initialize data for processing */ static int ulink_start( int unit, struct peer *peer ) { register struct ulinkunit *up; struct refclockproc *pp; int fd; char device[20]; /* * Open serial port. Use CLK line discipline, if available. */ (void)sprintf(device, DEVICE, unit); if (!(fd = refclock_open(device, SPEED232, LDISC_CLK))) return (0); /* * Allocate and initialize unit structure */ if (!(up = (struct ulinkunit *) emalloc(sizeof(struct ulinkunit)))) { (void) close(fd); return (0); } memset((char *)up, 0, sizeof(struct ulinkunit)); pp = peer->procptr; pp->unitptr = (caddr_t)up; pp->io.clock_recv = ulink_receive; pp->io.srcclock = (caddr_t)peer; pp->io.datalen = 0; pp->io.fd = fd; if (!io_addclock(&pp->io)) { (void) close(fd); free(up); return (0); } /* * Initialize miscellaneous variables */ peer->precision = PRECISION; peer->burst = NSTAGE; pp->clockdesc = DESCRIPTION; memcpy((char *)&pp->refid, REFID, 4); return (1); } /* * ulink_shutdown - shut down the clock */ static void ulink_shutdown( int unit, struct peer *peer ) { register struct ulinkunit *up; struct refclockproc *pp; pp = peer->procptr; up = (struct ulinkunit *)pp->unitptr; io_closeclock(&pp->io); free(up); } /* * ulink_receive - receive data from the serial interface */ static void ulink_receive( struct recvbuf *rbufp ) { struct ulinkunit *up; struct refclockproc *pp; struct peer *peer; l_fp trtmp; /* arrival timestamp */ int quality; /* quality indicator */ int temp; /* int temp */ char syncchar; /* synchronization indicator */ char leapchar; /* leap indicator */ char modechar; /* model 320 mode flag */ char siglchar; /* model difference between 33x/325 */ char char_quality[2]; /* temp quality flag */ /* * Initialize pointers and read the timecode and timestamp */ peer = (struct peer *)rbufp->recv_srcclock; pp = peer->procptr; up = (struct ulinkunit *)pp->unitptr; temp = refclock_gtlin(rbufp, pp->a_lastcode, BMAX, &trtmp); /* * Note we get a buffer and timestamp for both a <cr> and <lf>, * but only the <cr> timestamp 
is retained. */ if (temp == 0) { if (up->tcswitch == 0) { up->tcswitch = 1; up->laststamp = trtmp; } else up->tcswitch = 0; return; } pp->lencode = temp; pp->lastrec = up->laststamp; up->laststamp = trtmp; up->tcswitch = 1; #ifdef DEBUG if (debug) printf("ulink: timecode %d %s\n", pp->lencode, pp->a_lastcode); #endif /* * We get down to business, check the timecode format and decode * its contents. If the timecode has invalid length or is not in * proper format, we declare bad format and exit. */ syncchar = leapchar = modechar = siglchar = ' '; switch (pp->lencode) { case LEN33X: /* * First we check if the format is 33x or 325: * <CR><LF>S9+D 00 YYYY+DDDUTCS HH:MM:SSL+5 (33x) * <CR><LF>R5_1C00LYYYY+DDDUTCS HH:MM:SSL+5 (325) * simply by comparing if the signal level is 'S' or 'R' */ if (sscanf(pp->a_lastcode, "%c%*31c", &siglchar) == 1) { if(siglchar == SIGLCHAR325) { /* * decode for a Model 325 decoder. * Timecode format from January 23, 2004 datasheet is: * * <CR><LF>R5_1C00LYYYY+DDDUTCS HH:MM:SSL+5 * * R WWVB decoder signal readability R1 - R5 * 5 R1 is unreadable, R5 is best * space a space (0x20) * 1 Data bit 0, 1, M (pos mark), or ? (unknown). * C Reception from either (C)olorado or (H)awaii * 00 Hours since last good WWVB frame sync. Will * be 00-99 * space Space char (0x20) or (0xa5) if locked to wwvb * YYYY Current year, 2000-2099 * + Leap year indicator. '+' if a leap year, * a space (0x20) if not. * DDD Day of year, 000 - 365. * UTC Timezone (always 'UTC'). * S Daylight savings indicator * S - standard time (STD) in effect * O - during STD to DST day 0000-2400 * D - daylight savings time (DST) in effect * I - during DST to STD day 0000-2400 * space Space character (0x20) * HH Hours 00-23 * : This is the REAL in sync indicator (: = insync) * MM Minutes 00-59 * : : = in sync ? = NOT in sync * SS Seconds 00-59 * L Leap second flag. Changes from space (0x20) * to 'I' or 'D' during month preceding leap * second adjustment. (I)nsert or (D)elete * +5 UT1 correction (sign + digit )) */ if (sscanf(pp->a_lastcode, "%*2c %*2c%2c%*c%4d%*c%3d%*4c %2d%c%2d:%2d%c%*2c", char_quality, &pp->year, &pp->day, &pp->hour, &syncchar, &pp->minute, &pp->second, &leapchar) == 8) { if (char_quality[0] == '0' && char_quality[1] == '0') { quality = 0; } else if (char_quality[0] == '0') { quality = (char_quality[1] & 0x0f); } else { quality = 99; } if (leapchar == 'I' ) leapchar = '+'; if (leapchar == 'D' ) leapchar = '-'; /* #ifdef DEBUG if (debug) { printf("ulink: char_quality %c %c\n", char_quality[0], char_quality[1]); printf("ulink: quality %d\n", quality); printf("ulink: syncchar %x\n", syncchar); printf("ulink: leapchar %x\n", leapchar); } #endif */ } } if(siglchar == SIGLCHAR33x) { /* * We got a Model 33X decoder. * Timecode format from January 29, 2001 datasheet is: * <CR><LF>S9+D 00 YYYY+DDDUTCS HH:MM:SSL+5 * S WWVB decoder sync indicator. S for in-sync(?) * or N for noisy signal. * 9+ RF signal level in S-units, 0-9 followed by * a space (0x20). The space turns to '+' if the * level is over 9. * D Data bit 0, 1, 2 (position mark), or * 3 (unknown). * space Space character (0x20) * 00 Hours since last good WWVB frame sync. Will * be 00-23 hrs, or '1d' to '7d'. Will be 'Lk' * if currently in sync. * space Space character (0x20) * YYYY Current year, 1990-2089 * + Leap year indicator. '+' if a leap year, * a space (0x20) if not. * DDD Day of year, 001 - 366. * UTC Timezone (always 'UTC'). 
 *	S	Daylight savings indicator
 *		S - standard time (STD) in effect
 *		O - during STD to DST day 0000-2400
 *		D - daylight savings time (DST) in effect
 *		I - during DST to STD day 0000-2400
 *	space	Space character (0x20)
 *	HH	Hours 00-23
 *	:	This is the REAL in sync indicator (: = insync)
 *	MM	Minutes 00-59
 *	:	: = in sync ? = NOT in sync
 *	SS	Seconds 00-59
 *	L	Leap second flag. Changes from space (0x20)
 *		to '+' or '-' during month preceding leap
 *		second adjustment.
 *	+5	UT1 correction (sign + digit)
 */
				if (sscanf(pp->a_lastcode, "%*4c %2c %4d%*c%3d%*4c %2d%c%2d:%2d%c%*2c",
				           char_quality, &pp->year, &pp->day,
				           &pp->hour, &syncchar, &pp->minute,
				           &pp->second, &leapchar) == 8) {
					if (char_quality[0] == 'L') {
						quality = 0;
					}
					else if (char_quality[0] == '0') {
						quality = (char_quality[1] & 0x0f);
					}
					else {
						quality = 99;
					}

/*
#ifdef DEBUG
		if (debug) {
			printf("ulink: char_quality %c %c\n", char_quality[0], char_quality[1]);
			printf("ulink: quality %d\n", quality);
			printf("ulink: syncchar %x\n", syncchar);
			printf("ulink: leapchar %x\n", leapchar);
		}
#endif
*/
				}
			}
			break;
		}

	case LEN320:
		/*
		 * Model 320 Decoder
		 * The timecode format is:
		 *
		 * <cr><lf>SQRYYYYDDD+HH:MM:SS.mmLT<cr>
		 *
		 * where:
		 *
		 * S = 'S' -- sync'd in last hour,
		 *     '0'-'9' - hours x 10 since last update,
		 *     '?' -- not in sync
		 * Q = Number of correlating time-frames, from 0 to 5
		 * R = 'R' -- reception in progress,
		 *     'N' -- Noisy reception,
		 *     ' ' -- standby mode
		 * YYYY = year from 1990 to 2089
		 * DDD = current day from 1 to 366
		 * + = '+' if current year is a leap year, else ' '
		 * HH = UTC hour 0 to 23
		 * MM = Minutes of current hour from 0 to 59
		 * SS = Seconds of current minute from 0 to 59
		 * mm = 10's milliseconds of the current second from 00 to 99
		 * L = Leap second pending at end of month
		 *     'I' = insert, 'D'= delete
		 * T = DST <-> STD transition indicators
		 *
		 */
		if (sscanf(pp->a_lastcode, "%c%1d%c%4d%3d%*c%2d:%2d:%2d.%2ld%c",
		           &syncchar, &quality, &modechar, &pp->year, &pp->day,
		           &pp->hour, &pp->minute, &pp->second, &pp->nsec,
		           &leapchar) == 10) {
			pp->nsec *= 10000000; /* M320 returns 10's of msecs */
			if (leapchar == 'I' ) leapchar = '+';
			if (leapchar == 'D' ) leapchar = '-';
			if (syncchar != '?' ) syncchar = ':';

			break;
		}

	default:
		refclock_report(peer, CEVNT_BADREPLY);
		return;
	}

	/*
	 * Decode quality indicator
	 * For the 325 & 33x series, the lower the number the "better"
	 * the time is. I used the dispersion as the measure of time
	 * quality. The quality indicator in the 320 is the number of
	 * correlating time frames (the more the better)
	 */

	/*
	 * The spec sheet for the 325 & 33x series states the clock will
	 * maintain +/-0.002 seconds accuracy when locked to WWVB. This
	 * is indicated by 'Lk' in the quality portion of the incoming
	 * string. When not in lock, a drift of +/-0.015 seconds should
	 * be allowed for.
	 * With the quality indicator decoding scheme above, the 'Lk'
	 * condition will produce a quality value of 0. If the quality
	 * indicator starts with '0' then the second character is the
	 * number of hours since we were last locked. If the first
	 * character is anything other than 'L' or '0' then we have been
	 * out of lock for more than 9 hours so we assume the worst and
	 * force a quality value that selects the 'default' maximum
	 * dispersion. The dispersion values below are what came with the
	 * driver. They're not unreasonable so they've not been changed.
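	 *
	 * For example, a 33x reporting 'Lk' decodes to quality 0 and a
	 * dispersion of 2 ms, while one reporting '03' (three hours since
	 * lock) decodes to quality 3 and a dispersion of 80 ms.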
	 */
	if (pp->lencode == LEN33X) {
		switch (quality) {
		case 0 :
			pp->disp=.002;
			break;
		case 1 :
			pp->disp=.02;
			break;
		case 2 :
			pp->disp=.04;
			break;
		case 3 :
			pp->disp=.08;
			break;
		default:
			pp->disp=MAXDISPERSE;
			break;
		}
	} else {
		switch (quality) {
		case 5 :
			pp->disp=.002;
			break;
		case 4 :
			pp->disp=.02;
			break;
		case 3 :
			pp->disp=.04;
			break;
		case 2 :
			pp->disp=.08;
			break;
		case 1 :
			pp->disp=.16;
			break;
		default:
			pp->disp=MAXDISPERSE;
			break;
		}
	}

	/*
	 * Decode synchronization and leap characters. If
	 * unsynchronized, set the leap bits accordingly and exit.
	 * Otherwise, set the leap bits according to the leap character.
	 */
	if (syncchar != ':')
		pp->leap = LEAP_NOTINSYNC;
	else if (leapchar == '+')
		pp->leap = LEAP_ADDSECOND;
	else if (leapchar == '-')
		pp->leap = LEAP_DELSECOND;
	else
		pp->leap = LEAP_NOWARNING;

	/*
	 * Process the new sample in the median filter and determine the
	 * timecode timestamp.
	 */
	if (!refclock_process(pp)) {
		refclock_report(peer, CEVNT_BADTIME);
	}
}

/*
 * ulink_poll - called by the transmit procedure
 */
static void
ulink_poll(
	int unit,
	struct peer *peer
	)
{
	struct refclockproc *pp;
	char pollchar;

	pp = peer->procptr;
	pollchar = 'T';
	if (pp->sloppyclockflag & CLK_FLAG1) {
		if (write(pp->io.fd, &pollchar, 1) != 1)
			refclock_report(peer, CEVNT_FAULT);
		else
			pp->polls++;
	}
	else
		pp->polls++;

	if (peer->burst > 0)
		return;
	if (pp->coderecv == pp->codeproc) {
		refclock_report(peer, CEVNT_TIMEOUT);
		return;
	}
	pp->lastref = pp->lastrec;
	refclock_receive(peer);
	record_clock_stats(&peer->srcadr, pp->a_lastcode);
	peer->burst = NSTAGE;
}

#else
int refclock_ulink_bs;
#endif /* REFCLOCK */
rhuitl/uClinux
user/ntp/ntpd/refclock_ulink.c
C
gpl-2.0
17,372
/* * Driver O/S-independent utility routines * * $Copyright Open Broadcom Corporation$ * $Id: bcmutils.c 496061 2014-08-11 06:14:48Z $ */ #include <bcm_cfg.h> #include <typedefs.h> #include <bcmdefs.h> #include <stdarg.h> #ifdef BCMDRIVER #include <osl.h> #include <bcmutils.h> #else /* !BCMDRIVER */ #include <stdio.h> #include <string.h> #include <bcmutils.h> #if defined(BCMEXTSUP) #include <bcm_osl.h> #endif #ifndef ASSERT #define ASSERT(exp) #endif #endif /* !BCMDRIVER */ #include <bcmendian.h> #include <bcmdevs.h> #include <proto/ethernet.h> #include <proto/vlan.h> #include <proto/bcmip.h> #include <proto/802.1d.h> #include <proto/802.11.h> void *_bcmutils_dummy_fn = NULL; #ifdef CUSTOM_DSCP_TO_PRIO_MAPPING #define CUST_IPV4_TOS_PREC_MASK 0x3F #define DCSP_MAX_VALUE 64 /* 0:BE,1:BK,2:RESV(BK):,3:EE,:4:CL,5:VI,6:VO,7:NC */ int dscp2priomap[DCSP_MAX_VALUE]= { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* BK->BE */ 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0 }; #endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */ #ifdef BCMDRIVER /* copy a pkt buffer chain into a buffer */ uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf) { uint n, ret = 0; if (len < 0) len = 4096; /* "infinite" */ /* skip 'offset' bytes */ for (; p && offset; p = PKTNEXT(osh, p)) { if (offset < (uint)PKTLEN(osh, p)) break; offset -= PKTLEN(osh, p); } if (!p) return 0; /* copy the data */ for (; p && len; p = PKTNEXT(osh, p)) { n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); bcopy(PKTDATA(osh, p) + offset, buf, n); buf += n; len -= n; ret += n; offset = 0; } return ret; } /* copy a buffer into a pkt buffer chain */ uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf) { uint n, ret = 0; /* skip 'offset' bytes */ for (; p && offset; p = PKTNEXT(osh, p)) { if (offset < (uint)PKTLEN(osh, p)) break; offset -= PKTLEN(osh, p); } if (!p) return 0; /* copy the data */ for (; p && len; p = PKTNEXT(osh, p)) { n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); bcopy(buf, PKTDATA(osh, p) + offset, n); buf += n; len -= n; ret += n; offset = 0; } return ret; } /* return total length of buffer chain */ uint BCMFASTPATH pkttotlen(osl_t *osh, void *p) { uint total; int len; total = 0; for (; p; p = PKTNEXT(osh, p)) { len = PKTLEN(osh, p); total += len; #ifdef BCMLFRAG if (BCMLFRAG_ENAB()) { if (PKTISFRAG(osh, p)) { total += PKTFRAGTOTLEN(osh, p); } } #endif } return (total); } /* return the last buffer of chained pkt */ void * pktlast(osl_t *osh, void *p) { for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p)) ; return (p); } /* count segments of a chained packet */ uint BCMFASTPATH pktsegcnt(osl_t *osh, void *p) { uint cnt; for (cnt = 0; p; p = PKTNEXT(osh, p)) { cnt++; #ifdef BCMLFRAG if (BCMLFRAG_ENAB()) { if (PKTISFRAG(osh, p)) { cnt += PKTFRAGTOTNUM(osh, p); } } #endif } return cnt; } /* count segments of a chained packet */ uint BCMFASTPATH pktsegcnt_war(osl_t *osh, void *p) { uint cnt; uint8 *pktdata; uint len, remain, align64; for (cnt = 0; p; p = PKTNEXT(osh, p)) { cnt++; len = PKTLEN(osh, p); if (len > 128) { pktdata = (uint8 *)PKTDATA(osh, p); /* starting address of data */ /* Check for page boundary straddle (2048B) */ if (((uintptr)pktdata & ~0x7ff) != ((uintptr)(pktdata+len) & ~0x7ff)) cnt++; align64 = (uint)((uintptr)pktdata & 0x3f); /* aligned to 64B */ align64 = (64 - align64) & 0x3f; len -= align64; /* bytes from aligned 64B to end */ /* if aligned to 128B, check for MOD 128 between 1 to 4B */ remain = 
len % 128; if (remain > 0 && remain <= 4) cnt++; /* add extra seg */ } } return cnt; } uint8 * BCMFASTPATH pktdataoffset(osl_t *osh, void *p, uint offset) { uint total = pkttotlen(osh, p); uint pkt_off = 0, len = 0; uint8 *pdata = (uint8 *) PKTDATA(osh, p); if (offset > total) return NULL; for (; p; p = PKTNEXT(osh, p)) { pdata = (uint8 *) PKTDATA(osh, p); pkt_off = offset - len; len += PKTLEN(osh, p); if (len > offset) break; } return (uint8*) (pdata+pkt_off); } /* given a offset in pdata, find the pkt seg hdr */ void * pktoffset(osl_t *osh, void *p, uint offset) { uint total = pkttotlen(osh, p); uint len = 0; if (offset > total) return NULL; for (; p; p = PKTNEXT(osh, p)) { len += PKTLEN(osh, p); if (len > offset) break; } return p; } #endif /* BCMDRIVER */ #if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS) const unsigned char bcm_ctype[] = { _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */ _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C, _BCM_C, /* 8-15 */ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */ _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */ _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */ _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */ _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */ _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U, /* 64-71 */ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */ _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */ _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L, /* 96-103 */ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */ _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */ _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */ }; ulong bcm_strtoul(const char *cp, char **endp, uint base) { ulong result, last_result = 0, value; bool minus; minus = FALSE; while (bcm_isspace(*cp)) cp++; if (cp[0] == '+') cp++; else if (cp[0] == '-') { minus = TRUE; cp++; } if (base == 0) { if (cp[0] == '0') { if ((cp[1] == 'x') || (cp[1] == 'X')) { base = 16; cp = &cp[2]; } else { base = 8; cp = &cp[1]; } } else base = 10; } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 
'X'))) {
		cp = &cp[2];
	}

	result = 0;

	while (bcm_isxdigit(*cp) &&
	       (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
		result = result*base + value;
		/* Detected overflow */
		if (result < last_result && !minus)
			return (ulong)-1;
		last_result = result;
		cp++;
	}

	if (minus)
		result = (ulong)(-(long)result);

	if (endp)
		*endp = DISCARD_QUAL(cp, char);

	return (result);
}

int
bcm_atoi(const char *s)
{
	return (int)bcm_strtoul(s, NULL, 10);
}

/* return pointer to location of substring 'needle' in 'haystack' */
char *
bcmstrstr(const char *haystack, const char *needle)
{
	int len, nlen;
	int i;

	if ((haystack == NULL) || (needle == NULL))
		return DISCARD_QUAL(haystack, char);

	nlen = (int)strlen(needle);
	len = (int)strlen(haystack) - nlen + 1;

	for (i = 0; i < len; i++)
		if (memcmp(needle, &haystack[i], nlen) == 0)
			return DISCARD_QUAL(&haystack[i], char);
	return (NULL);
}

char *
bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len)
{
	for (; s_len >= substr_len; s++, s_len--)
		if (strncmp(s, substr, substr_len) == 0)
			return DISCARD_QUAL(s, char);

	return NULL;
}

char *
bcmstrcat(char *dest, const char *src)
{
	char *p;

	p = dest + strlen(dest);

	while ((*p++ = *src++) != '\0')
		;

	return (dest);
}

char *
bcmstrncat(char *dest, const char *src, uint size)
{
	char *endp;
	char *p;

	p = dest + strlen(dest);
	endp = p + size;

	while (p != endp && (*p++ = *src++) != '\0')
		;

	return (dest);
}

/****************************************************************************
* Function:   bcmstrtok
*
* Purpose:
*  Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
*  but allows strToken() to be used by different strings or callers at the same
*  time. Each call modifies '*string' by substituting a NULL character for the
*  first delimiter that is encountered, and updates 'string' to point to the char
*  after the delimiter. Leading delimiters are skipped.
*
* Parameters:
*  string      (mod) Ptr to string ptr, updated by token.
*  delimiters  (in)  Set of delimiter characters.
*  tokdelim    (out) Character that delimits the returned token. (May
*                    be set to NULL if token delimiter is not required).
*
* Returns:  Pointer to the next token found. NULL when no more tokens are found.
*****************************************************************************
*/
char *
bcmstrtok(char **string, const char *delimiters, char *tokdelim)
{
	unsigned char *str;
	unsigned long map[8];
	int count;
	char *nextoken;

	if (tokdelim != NULL) {
		/* Prime the token delimiter */
		*tokdelim = '\0';
	}

	/* Clear control map */
	for (count = 0; count < 8; count++) {
		map[count] = 0;
	}

	/* Set bits in delimiter table */
	do {
		map[*delimiters >> 5] |= (1 << (*delimiters & 31));
	}
	while (*delimiters++);

	str = (unsigned char*)*string;

	/* Find beginning of token (skip over leading delimiters). Note that
	 * there is no token iff this loop sets str to point to the terminal
	 * null (*str == '\0')
	 */
	while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
		str++;
	}

	nextoken = (char*)str;

	/* Find the end of the token. If it is not the end of the string,
	 * put a null there.
	 */
	for (; *str; str++) {
		if (map[*str >> 5] & (1 << (*str & 31))) {
			if (tokdelim != NULL) {
				*tokdelim = *str;
			}

			*str++ = '\0';
			break;
		}
	}

	*string = (char*)str;

	/* Determine if a token has been found. */
	if (nextoken == (char *) str) {
		return NULL;
	}
	else {
		return nextoken;
	}
}

#define xToLower(C) \
	((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)

/****************************************************************************
* Function:   bcmstricmp
*
* Purpose:    Compare two strings case insensitively.
*
* Parameters: s1 (in) First string to compare.
*             s2 (in) Second string to compare.
*
* Returns:    Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if
*             t1 > t2, when ignoring case sensitivity.
*****************************************************************************
*/
int
bcmstricmp(const char *s1, const char *s2)
{
	char dc, sc;

	while (*s2 && *s1) {
		dc = xToLower(*s1);
		sc = xToLower(*s2);
		if (dc < sc) return -1;
		if (dc > sc) return 1;
		s1++;
		s2++;
	}

	if (*s1 && !*s2) return 1;
	if (!*s1 && *s2) return -1;
	return 0;
}

/****************************************************************************
* Function:   bcmstrnicmp
*
* Purpose:    Compare two strings case insensitively, up to a max of 'cnt'
*             characters.
*
* Parameters: s1  (in) First string to compare.
*             s2  (in) Second string to compare.
*             cnt (in) Max characters to compare.
*
* Returns:    Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if
*             t1 > t2, when ignoring case sensitivity.
*****************************************************************************
*/
int
bcmstrnicmp(const char* s1, const char* s2, int cnt)
{
	char dc, sc;

	while (*s2 && *s1 && cnt) {
		dc = xToLower(*s1);
		sc = xToLower(*s2);
		if (dc < sc) return -1;
		if (dc > sc) return 1;
		s1++;
		s2++;
		cnt--;
	}

	if (!cnt) return 0;
	if (*s1 && !*s2) return 1;
	if (!*s1 && *s2) return -1;
	return 0;
}

/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
int
bcm_ether_atoe(const char *p, struct ether_addr *ea)
{
	int i = 0;
	char *ep;

	for (;;) {
		ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
		p = ep;
		if (!*p++ || i == 6)
			break;
	}

	return (i == 6);
}

int
bcm_atoipv4(const char *p, struct ipv4_addr *ip)
{
	int i = 0;
	char *c;

	for (;;) {
		ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0);
		if (*c++ != '.' || i == IPV4_ADDR_LEN)
			break;
		p = c;
	}
	return (i == IPV4_ADDR_LEN);
}
#endif	/* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */

#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
/* registry routine buffer preparation utility functions:
 * parameter order is like strncpy, but returns count
Minimum bytes copied is null char(1)/wchar(2) */ ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen) { ulong copyct = 1; ushort i; if (abuflen == 0) return 0; /* wbuflen is in bytes */ wbuflen /= sizeof(ushort); for (i = 0; i < wbuflen; ++i) { if (--abuflen == 0) break; *abuf++ = (char) *wbuf++; ++copyct; } *abuf = '\0'; return copyct; } #endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */ char * bcm_ether_ntoa(const struct ether_addr *ea, char *buf) { static const char hex[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; const uint8 *octet = ea->octet; char *p = buf; int i; for (i = 0; i < 6; i++, octet++) { *p++ = hex[(*octet >> 4) & 0xf]; *p++ = hex[*octet & 0xf]; *p++ = ':'; } *(p-1) = '\0'; return (buf); } char * bcm_ip_ntoa(struct ipv4_addr *ia, char *buf) { snprintf(buf, 16, "%d.%d.%d.%d", ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]); return (buf); } char * bcm_ipv6_ntoa(void *ipv6, char *buf) { /* Implementing RFC 5952 Sections 4 + 5 */ /* Not thoroughly tested */ uint16 tmp[8]; uint16 *a = &tmp[0]; char *p = buf; int i, i_max = -1, cnt = 0, cnt_max = 1; uint8 *a4 = NULL; memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN); for (i = 0; i < IPV6_ADDR_LEN/2; i++) { if (a[i]) { if (cnt > cnt_max) { cnt_max = cnt; i_max = i - cnt; } cnt = 0; } else cnt++; } if (cnt > cnt_max) { cnt_max = cnt; i_max = i - cnt; } if (i_max == 0 && /* IPv4-translated: ::ffff:0:a.b.c.d */ ((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) || /* IPv4-mapped: ::ffff:a.b.c.d */ (cnt_max == 5 && a[5] == 0xffff))) a4 = (uint8*) (a + 6); for (i = 0; i < IPV6_ADDR_LEN/2; i++) { if ((uint8*) (a + i) == a4) { snprintf(p, 16, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]); break; } else if (i == i_max) { *p++ = ':'; i += cnt_max - 1; p[0] = ':'; p[1] = '\0'; } else { if (i) *p++ = ':'; p += snprintf(p, 8, "%x", ntoh16(a[i])); } } return buf; } #ifdef BCMDRIVER void bcm_mdelay(uint ms) { uint i; for (i = 0; i < ms; i++) { OSL_DELAY(1000); } } #if defined(DHD_DEBUG) /* pretty hex print a pkt buffer chain */ void prpkt(const char *msg, osl_t *osh, void *p0) { void *p; if (msg && (msg[0] != '\0')) printf("%s:\n", msg); for (p = p0; p; p = PKTNEXT(osh, p)) prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); } #endif /* Takes an Ethernet frame and sets out-of-bound PKTPRIO. * Also updates the inplace vlan tag if requested. * For debugging, it returns an indication of what it did. */ uint BCMFASTPATH pktsetprio(void *pkt, bool update_vtag) { struct ether_header *eh; struct ethervlan_header *evh; uint8 *pktdata; int priority = 0; int rc = 0; pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt); ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16))); eh = (struct ether_header *) pktdata; if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) { uint16 vlan_tag; int vlan_prio, dscp_prio = 0; evh = (struct ethervlan_header *)eh; vlan_tag = ntoh16(evh->vlan_tag); vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK; if ((evh->ether_type == hton16(ETHER_TYPE_IP)) || (evh->ether_type == hton16(ETHER_TYPE_IPV6))) { uint8 *ip_body = pktdata + sizeof(struct ethervlan_header); uint8 tos_tc = IP_TOS46(ip_body); dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); } /* DSCP priority gets precedence over 802.1P (vlan tag) */ if (dscp_prio != 0) { priority = dscp_prio; rc |= PKTPRIO_VDSCP; } else { priority = vlan_prio; rc |= PKTPRIO_VLAN; } /* * If the DSCP priority is not the same as the VLAN priority, * then overwrite the priority field in the vlan tag, with the * DSCP priority value. 
This is required for Linux APs because * the VLAN driver on Linux, overwrites the skb->priority field * with the priority value in the vlan tag */ if (update_vtag && (priority != vlan_prio)) { vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT; evh->vlan_tag = hton16(vlan_tag); rc |= PKTPRIO_UPD; } } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) || (eh->ether_type == hton16(ETHER_TYPE_IPV6))) { uint8 *ip_body = pktdata + sizeof(struct ether_header); uint8 tos_tc = IP_TOS46(ip_body); uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT; switch (dscp) { case DSCP_EF: priority = PRIO_8021D_VO; break; case DSCP_AF31: case DSCP_AF32: case DSCP_AF33: priority = PRIO_8021D_CL; break; case DSCP_AF21: case DSCP_AF22: case DSCP_AF23: case DSCP_AF11: case DSCP_AF12: case DSCP_AF13: priority = PRIO_8021D_EE; break; default: #ifndef CUSTOM_DSCP_TO_PRIO_MAPPING priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); #else priority = (int)dscp2priomap[((tos_tc >> IPV4_TOS_DSCP_SHIFT) & CUST_IPV4_TOS_PREC_MASK)]; #endif break; } rc |= PKTPRIO_DSCP; } ASSERT(priority >= 0 && priority <= MAXPRIO); PKTSETPRIO(pkt, priority); return (rc | priority); } /* Returns TRUE and DSCP if IP header found, FALSE otherwise. */ bool BCMFASTPATH pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp) { struct ether_header *eh; struct ethervlan_header *evh; uint8 *ip_body; bool rc = FALSE; /* minimum length is ether header and IP header */ if (pktlen < sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN) return FALSE; eh = (struct ether_header *) pktdata; if (eh->ether_type == HTON16(ETHER_TYPE_IP)) { ip_body = pktdata + sizeof(struct ether_header); *dscp = IP_DSCP46(ip_body); rc = TRUE; } else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) { evh = (struct ethervlan_header *)eh; /* minimum length is ethervlan header and IP header */ if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN && evh->ether_type == HTON16(ETHER_TYPE_IP)) { ip_body = pktdata + sizeof(struct ethervlan_header); *dscp = IP_DSCP46(ip_body); rc = TRUE; } } return rc; } /* The 0.5KB string table is not removed by compiler even though it's unused */ static char bcm_undeferrstr[32]; static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE; /* Convert the error codes into related error strings */ const char * bcmerrorstr(int bcmerror) { /* check if someone added a bcmerror code but forgot to add errorstring */ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1)); if (bcmerror > 0 || bcmerror < BCME_LAST) { snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror); return bcm_undeferrstr; } ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN); return bcmerrorstrtable[-bcmerror]; } /* iovar table lookup */ /* could mandate sorted tables and do a binary search */ const bcm_iovar_t* bcm_iovar_lookup(const bcm_iovar_t *table, const char *name) { const bcm_iovar_t *vi; const char *lookup_name; /* skip any ':' delimited option prefixes */ lookup_name = strrchr(name, ':'); if (lookup_name != NULL) lookup_name++; else lookup_name = name; ASSERT(table != NULL); for (vi = table; vi->name; vi++) { if (!strcmp(vi->name, lookup_name)) return vi; } /* ran to end of table */ return NULL; /* var name not found */ } int bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set) { int bcmerror = 0; /* length check on io buf */ switch (vi->type) { case IOVT_BOOL: case IOVT_INT8: case IOVT_INT16: case IOVT_INT32: case IOVT_UINT8: case IOVT_UINT16: case IOVT_UINT32: /* all integers are 
int32 sized args at the ioctl interface */
		if (len < (int)sizeof(int)) {
			bcmerror = BCME_BUFTOOSHORT;
		}
		break;

	case IOVT_BUFFER:
		/* buffer must meet minimum length requirement */
		if (len < vi->minlen) {
			bcmerror = BCME_BUFTOOSHORT;
		}
		break;

	case IOVT_VOID:
		if (!set) {
			/* Cannot return nil... */
			bcmerror = BCME_UNSUPPORTED;
		} else if (len) {
			/* Set is an action w/o parameters */
			bcmerror = BCME_BUFTOOLONG;
		}
		break;

	default:
		/* unknown type for length check in iovar info */
		ASSERT(0);
		bcmerror = BCME_UNSUPPORTED;
	}

	return bcmerror;
}

#endif	/* BCMDRIVER */

uint8 *
bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst)
{
	uint8 *new_dst = dst;
	bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst;

	/* dst buffer should always be valid */
	ASSERT(dst);

	/* data len must be within valid range */
	ASSERT((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE));

	/* source data buffer pointer should be valid, unless datalen is 0
	 * meaning no data with this TLV
	 */
	ASSERT((data != NULL) || (datalen == 0));

	/* only do work if the inputs are valid
	 * - must have a dst to write to AND
	 * - datalen must be within range AND
	 * - the source data pointer must be non-NULL if datalen is non-zero
	 * (this last condition detects datalen > 0 with a NULL data pointer)
	 */
	if ((dst != NULL) &&
	    ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) &&
	    ((data != NULL) || (datalen == 0))) {

		/* write type, len fields */
		dst_tlv->id = (uint8)type;
		dst_tlv->len = (uint8)datalen;

		/* if data is present, copy to the output buffer and update
		 * pointer to output buffer
		 */
		if (datalen > 0) {
			memcpy(dst_tlv->data, data, datalen);
		}

		/* update the output destination pointer to point past
		 * the TLV written
		 */
		new_dst = dst + BCM_TLV_HDR_SIZE + datalen;
	}

	return (new_dst);
}

uint8 *
bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_maxlen)
{
	uint8 *new_dst = dst;

	if ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) {

		/* if len + tlv hdr len is more than destlen, don't do anything
		 * just return the buffer untouched
		 */
		if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) {

			new_dst = bcm_write_tlv(type, data, datalen, dst);
		}
	}

	return (new_dst);
}

uint8 *
bcm_copy_tlv(const void *src, uint8 *dst)
{
	uint8 *new_dst = dst;
	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
	uint totlen;

	ASSERT(dst && src);
	if (dst && src) {

		totlen = BCM_TLV_HDR_SIZE + src_tlv->len;

		memcpy(dst, src_tlv, totlen);
		new_dst = dst + totlen;
	}

	return (new_dst);
}

uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen)
{
	uint8 *new_dst = dst;
	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;

	ASSERT(src);
	if (src) {
		if (bcm_valid_tlv(src_tlv, dst_maxlen)) {
			new_dst = bcm_copy_tlv(src, dst);
		}
	}

	return (new_dst);
}

#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
/*******************************************************************************
 * crc8
 *
 * Computes a crc8 over the input data using the polynomial:
 *
 *       x^8 + x^7 +x^6 + x^4 + x^2 + 1
 *
 * The caller provides the initial value (either CRC8_INIT_VALUE
 * or the previous returned value) to allow for processing of
 * discontiguous blocks of data.  When generating the CRC the
 * caller is responsible for complementing the final return value
 * and inserting it into the byte stream.  When checking, a final
 * return value of CRC8_GOOD_VALUE indicates a valid CRC.
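 *
 * A minimal usage sketch (illustrative only; 'buf' and 'nbytes' are
 * placeholder names): to generate, complement the final value and
 * append it to the stream; to check, run the CRC over the message
 * plus the appended byte and compare against the expected residue:
 *
 *      crc = hndcrc8(buf, nbytes, CRC8_INIT_VALUE);
 *      buf[nbytes] = ~crc;
 *      ok = (hndcrc8(buf, nbytes + 1, CRC8_INIT_VALUE) == CRC8_GOOD_VALUE);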
* * Reference: Dallas Semiconductor Application Note 27 * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt * * **************************************************************************** */ static const uint8 crc8_table[256] = { 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F }; #define CRC_INNER_LOOP(n, c, x) \ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff] uint8 hndcrc8( uint8 *pdata, /* pointer to array of data to process */ uint nbytes, /* number of input data bytes to process */ uint8 crc /* either CRC8_INIT_VALUE or previous return value */ ) { /* hard code the crc loop instead of using CRC_INNER_LOOP macro * to avoid the undefined and unnecessary (uint8 >> 8) operation. */ while (nbytes-- > 0) crc = crc8_table[(crc ^ *pdata++) & 0xff]; return crc; } /******************************************************************************* * crc16 * * Computes a crc16 over the input data using the polynomial: * * x^16 + x^12 +x^5 + 1 * * The caller provides the initial value (either CRC16_INIT_VALUE * or the previous returned value) to allow for processing of * discontiguous blocks of data. When generating the CRC the * caller is responsible for complementing the final return value * and inserting it into the byte stream. When checking, a final * return value of CRC16_GOOD_VALUE indicates a valid CRC. 
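 *
 * Because the previous return value can be passed back in, the CRC
 * may be accumulated over discontiguous pieces, e.g. (sketch with
 * placeholder buffer names):
 *
 *      crc = CRC16_INIT_VALUE;
 *      crc = hndcrc16(hdr, hdr_len, crc);
 *      crc = hndcrc16(body, body_len, crc);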
* * Reference: Dallas Semiconductor Application Note 27 * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt * * **************************************************************************** */ static const uint16 crc16_table[256] = { 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF, 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7, 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E, 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876, 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD, 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5, 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C, 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974, 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB, 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3, 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A, 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72, 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9, 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1, 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738, 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70, 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7, 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF, 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036, 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E, 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5, 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD, 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134, 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C, 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3, 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB, 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232, 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A, 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1, 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9, 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330, 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78 }; uint16 hndcrc16( uint8 *pdata, /* pointer to array of data to process */ uint nbytes, /* number of input data bytes to process */ uint16 crc /* either CRC16_INIT_VALUE or previous return value */ ) { while (nbytes-- > 0) CRC_INNER_LOOP(16, crc, *pdata++); return crc; } static const uint32 crc32_table[256] = { 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 
0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; /* * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if * accumulating over multiple pieces. */ uint32 hndcrc32(uint8 *pdata, uint nbytes, uint32 crc) { uint8 *pend; pend = pdata + nbytes; while (pdata < pend) CRC_INNER_LOOP(32, crc, *pdata++); return crc; } #ifdef notdef #define CLEN 1499 /* CRC Length */ #define CBUFSIZ (CLEN+4) #define CNBUFS 5 /* # of bufs */ void testcrc32(void) { uint j, k, l; uint8 *buf; uint len[CNBUFS]; uint32 crcr; uint32 crc32tv[CNBUFS] = {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110}; ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL); /* step through all possible alignments */ for (l = 0; l <= 4; l++) { for (j = 0; j < CNBUFS; j++) { len[j] = CLEN; for (k = 0; k < len[j]; k++) *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff; } for (j = 0; j < CNBUFS; j++) { crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE); ASSERT(crcr == crc32tv[j]); } } MFREE(buf, CBUFSIZ*CNBUFS); return; } #endif /* notdef */ /* * Advance from the current 1-byte tag/1-byte length/variable-length value * triple, to the next, returning a pointer to the next. * If the current or next TLV is invalid (does not fit in given buffer length), * NULL is returned. 
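 *
 * A typical traversal (illustrative sketch; 'buf' and 'buflen' are
 * placeholder names) validates the first element, then advances:
 *
 *      bcm_tlv_t *elt = (bcm_tlv_t *)buf;
 *      int remaining = buflen;
 *      if (!bcm_valid_tlv(elt, remaining))
 *              elt = NULL;
 *      while (elt != NULL) {
 *              ... use elt->id, elt->len, elt->data ...
 *              elt = bcm_next_tlv(elt, &remaining);
 *      }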
* *buflen is not modified if the TLV elt parameter is invalid, or is decremented * by the TLV parameter's length if it is valid. */ bcm_tlv_t * bcm_next_tlv(bcm_tlv_t *elt, int *buflen) { int len; /* validate current elt */ if (!bcm_valid_tlv(elt, *buflen)) { return NULL; } /* advance to next elt */ len = elt->len; elt = (bcm_tlv_t*)(elt->data + len); *buflen -= (TLV_HDR_LEN + len); /* validate next elt */ if (!bcm_valid_tlv(elt, *buflen)) { return NULL; } return elt; } /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer to the substring whose first element * matches tag */ bcm_tlv_t * bcm_parse_tlvs(void *buf, int buflen, uint key) { bcm_tlv_t *elt; int totlen; elt = (bcm_tlv_t*)buf; totlen = buflen; /* find tagged parameter */ while (totlen >= TLV_HDR_LEN) { int len = elt->len; /* validate remaining totlen */ if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) { return (elt); } elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN)); totlen -= (len + TLV_HDR_LEN); } return NULL; } /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer to the substring whose first element * matches tag * return NULL if not found or length field < min_varlen */ bcm_tlv_t * bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen) { bcm_tlv_t * ret = bcm_parse_tlvs(buf, buflen, key); if (ret == NULL || ret->len < min_bodylen) { return NULL; } return ret; } /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer to the substring whose first element * matches tag. Stop parsing when we see an element whose ID is greater * than the target key. */ bcm_tlv_t * bcm_parse_ordered_tlvs(void *buf, int buflen, uint key) { bcm_tlv_t *elt; int totlen; elt = (bcm_tlv_t*)buf; totlen = buflen; /* find tagged parameter */ while (totlen >= TLV_HDR_LEN) { uint id = elt->id; int len = elt->len; /* Punt if we start seeing IDs > than target key */ if (id > key) { return (NULL); } /* validate remaining totlen */ if ((id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) { return (elt); } elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN)); totlen -= (len + TLV_HDR_LEN); } return NULL; } #endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */ #if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \ defined(DHD_DEBUG) int bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, int len) { int i, slen = 0; uint32 bit, mask; const char *name; mask = bd->mask; if (len < 2 || !buf) return 0; buf[0] = '\0'; for (i = 0; (name = bd->bitfield[i].name) != NULL; i++) { bit = bd->bitfield[i].bit; if ((flags & mask) == bit) { if (len > (int)strlen(name)) { slen = strlen(name); strncpy(buf, name, slen+1); } break; } } return slen; } int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) { int i; char* p = buf; char hexstr[16]; int slen = 0, nlen = 0; uint32 bit; const char* name; if (len < 2 || !buf) return 0; buf[0] = '\0'; for (i = 0; flags != 0; i++) { bit = bd[i].bit; name = bd[i].name; if (bit == 0 && flags != 0) { /* print any unnamed bits */ snprintf(hexstr, 16, "0x%X", flags); name = hexstr; flags = 0; /* exit loop */ } else if ((flags & bit) == 0) continue; flags &= ~bit; nlen = strlen(name); slen += nlen; /* count btwn flag space */ if (flags != 0) slen += 1; /* need NULL char as well */ if (len <= slen) break; /* copy NULL char but don't count it */ strncpy(p, name, nlen + 1); p += nlen; /* copy btwn flag 
space and NULL char */ if (flags != 0) p += snprintf(p, 2, " "); } /* indicate the str was too short */ if (flags != 0) { if (len < 2) p -= 2 - len; /* overwrite last char */ p += snprintf(p, 2, ">"); } return (int)(p - buf); } #endif /* print bytes formatted as hex to a string. return the resulting string length */ int bcm_format_hex(char *str, const void *bytes, int len) { int i; char *p = str; const uint8 *src = (const uint8*)bytes; for (i = 0; i < len; i++) { p += snprintf(p, 3, "%02X", *src); src++; } return (int)(p - str); } /* pretty hex print a contiguous buffer */ void prhex(const char *msg, uchar *buf, uint nbytes) { char line[128], *p; int len = sizeof(line); int nchar; uint i; if (msg && (msg[0] != '\0')) printf("%s:\n", msg); p = line; for (i = 0; i < nbytes; i++) { if (i % 16 == 0) { nchar = snprintf(p, len, " %04d: ", i); /* line prefix */ p += nchar; len -= nchar; } if (len > 0) { nchar = snprintf(p, len, "%02x ", buf[i]); p += nchar; len -= nchar; } if (i % 16 == 15) { printf("%s\n", line); /* flush line */ p = line; len = sizeof(line); } } /* flush last partial line */ if (p != line) printf("%s\n", line); } static const char *crypto_algo_names[] = { "NONE", "WEP1", "TKIP", "WEP128", "AES_CCM", "AES_OCB_MSDU", "AES_OCB_MPDU", #ifdef BCMCCX "CKIP", "CKIP_MMH", "WEP_MMH", "NALG", #else "NALG", "UNDEF", "UNDEF", "UNDEF", #endif /* BCMCCX */ "WAPI", "PMK", "BIP", "AES_GCM", "AES_CCM256", "AES_GCM256", "BIP_CMAC256", "BIP_GMAC", "BIP_GMAC256", "UNDEF" }; const char * bcm_crypto_algo_name(uint algo) { return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR"; } char * bcm_chipname(uint chipid, char *buf, uint len) { const char *fmt; fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x"; snprintf(buf, len, fmt, chipid); return buf; } /* Produce a human-readable string for boardrev */ char * bcm_brev_str(uint32 brev, char *buf) { if (brev < 0x100) snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf); else snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 
'P' : 'A', brev & 0xfff); return (buf); } #define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */ /* dump large strings to console */ void printbig(char *buf) { uint len, max_len; char c; len = (uint)strlen(buf); max_len = BUFSIZE_TODUMP_ATONCE; while (len > max_len) { c = buf[max_len]; buf[max_len] = '\0'; printf("%s", buf); buf[max_len] = c; buf += max_len; len -= max_len; } /* print the remaining string */ printf("%s\n", buf); return; } /* routine to dump fields in a fileddesc structure */ uint bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array, char *buf, uint32 bufsize) { uint filled_len; int len; struct fielddesc *cur_ptr; filled_len = 0; cur_ptr = fielddesc_array; while (bufsize > 1) { if (cur_ptr->nameandfmt == NULL) break; len = snprintf(buf, bufsize, cur_ptr->nameandfmt, read_rtn(arg0, arg1, cur_ptr->offset)); /* check for snprintf overflow or error */ if (len < 0 || (uint32)len >= bufsize) len = bufsize - 1; buf += len; bufsize -= len; filled_len += len; cur_ptr++; } return filled_len; } uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen) { uint len; len = (uint)strlen(name) + 1; if ((len + datalen) > buflen) return 0; strncpy(buf, name, buflen); /* append data onto the end of the name string */ memcpy(&buf[len], data, datalen); len += datalen; return len; } /* Quarter dBm units to mW * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 * Table is offset so the last entry is largest mW value that fits in * a uint16. */ #define QDBM_OFFSET 153 /* Offset for first entry */ #define QDBM_TABLE_LEN 40 /* Table size */ /* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 */ #define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ /* Largest mW value that will round down to the last table entry, * QDBM_OFFSET + QDBM_TABLE_LEN-1. * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. */ #define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { /* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ /* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, /* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, /* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, /* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, /* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 }; uint16 bcm_qdbm_to_mw(uint8 qdbm) { uint factor = 1; int idx = qdbm - QDBM_OFFSET; if (idx >= QDBM_TABLE_LEN) { /* clamp to max uint16 mW value */ return 0xFFFF; } /* scale the qdBm index up to the range of the table 0-40 * where an offset of 40 qdBm equals a factor of 10 mW. */ while (idx < 0) { idx += 40; factor *= 10; } /* return the mW value scaled down to the correct factor of 10, * adding in factor/2 to get proper rounding. 
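 *
 * Worked example: qdbm = 113 (28.25 dBm) gives idx = 113 - 153 = -40,
 * which the loop rescales to idx = 0 with factor = 10, so the result
 * is (6683 + 5) / 10 = 668 mW (10^2.825 is roughly 668.3).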
*/ return ((nqdBm_to_mW_map[idx] + factor/2) / factor); } uint8 bcm_mw_to_qdbm(uint16 mw) { uint8 qdbm; int offset; uint mw_uint = mw; uint boundary; /* handle boundary case */ if (mw_uint <= 1) return 0; offset = QDBM_OFFSET; /* move mw into the range of the table */ while (mw_uint < QDBM_TABLE_LOW_BOUND) { mw_uint *= 10; offset -= 40; } for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) { boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] - nqdBm_to_mW_map[qdbm])/2; if (mw_uint < boundary) break; } qdbm += (uint8)offset; return (qdbm); } uint bcm_bitcount(uint8 *bitmap, uint length) { uint bitcount = 0, i; uint8 tmp; for (i = 0; i < length; i++) { tmp = bitmap[i]; while (tmp) { bitcount++; tmp &= (tmp - 1); } } return bitcount; } #ifdef BCMDRIVER /* Initialization of bcmstrbuf structure */ void bcm_binit(struct bcmstrbuf *b, char *buf, uint size) { b->origsize = b->size = size; b->origbuf = b->buf = buf; } /* Buffer sprintf wrapper to guard against buffer overflow */ int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) { va_list ap; int r; va_start(ap, fmt); r = vsnprintf(b->buf, b->size, fmt, ap); /* Non Ansi C99 compliant returns -1, * Ansi compliant return r >= b->size, * bcmstdlib returns 0, handle all */ /* r == 0 is also the case when strlen(fmt) is zero. * typically the case when "" is passed as argument. */ if ((r == -1) || (r >= (int)b->size)) { b->size = 0; } else { b->size -= r; b->buf += r; } va_end(ap); return r; } void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len) { int i; if (msg != NULL && msg[0] != '\0') bcm_bprintf(b, "%s", msg); for (i = 0; i < len; i ++) bcm_bprintf(b, "%02X", buf[i]); if (newline) bcm_bprintf(b, "\n"); } void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount) { int i; for (i = 0; i < num_bytes; i++) { num[i] += amount; if (num[i] >= amount) break; amount = 1; } } int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes) { int i; for (i = nbytes - 1; i >= 0; i--) { if (arg1[i] != arg2[i]) return (arg1[i] - arg2[i]); } return 0; } void bcm_print_bytes(const char *name, const uchar *data, int len) { int i; int per_line = 0; printf("%s: %d \n", name ? 
name : "", len); for (i = 0; i < len; i++) { printf("%02x ", *data++); per_line++; if (per_line == 16) { per_line = 0; printf("\n"); } } printf("\n"); } /* Look for vendor-specific IE with specified OUI and optional type */ bcm_tlv_t * bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, int type_len) { bcm_tlv_t *ie; uint8 ie_len; ie = (bcm_tlv_t*)tlvs; /* make sure we are looking at a valid IE */ if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) { return NULL; } /* Walk through the IEs looking for an OUI match */ do { ie_len = ie->len; if ((ie->id == DOT11_MNG_PROPR_ID) && (ie_len >= (DOT11_OUI_LEN + type_len)) && !bcmp(ie->data, voui, DOT11_OUI_LEN)) { /* compare optional type */ if (type_len == 0 || !bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) { return (ie); /* a match */ } } } while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL); return NULL; } #if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) #define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1) int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len) { uint i, c; char *p = buf; char *endp = buf + SSID_FMT_BUF_LEN; if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN; for (i = 0; i < ssid_len; i++) { c = (uint)ssid[i]; if (c == '\\') { *p++ = '\\'; *p++ = '\\'; } else if (bcm_isprint((uchar)c)) { *p++ = (char)c; } else { p += snprintf(p, (endp - p), "\\x%02X", c); } } *p = '\0'; ASSERT(p < endp); return (int)(p - buf); } #endif #endif /* BCMDRIVER */ /* * ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL. * also accepts nvram files which are already in the format of <var1>=<value>\0\<var2>=<value2>\0 * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs. * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs. */ unsigned int process_nvram_vars(char *varbuf, unsigned int len) { char *dp; bool findNewline; int column; unsigned int buf_len, n; unsigned int pad = 0; dp = varbuf; findNewline = FALSE; column = 0; // terence 20130914: print out NVRAM version if (varbuf[0] == '#') { printf("NVRAM version: "); for (n=1; n<len; n++) { if (varbuf[n] == '\n') break; printf("%c", varbuf[n]); } printf("\n"); } for (n = 0; n < len; n++) { if (varbuf[n] == '\r') continue; if (findNewline && varbuf[n] != '\n') continue; findNewline = FALSE; if (varbuf[n] == '#') { findNewline = TRUE; continue; } if (varbuf[n] == '\n') { if (column == 0) continue; *dp++ = 0; column = 0; continue; } *dp++ = varbuf[n]; column++; } buf_len = (unsigned int)(dp - varbuf); if (buf_len % 4) { pad = 4 - buf_len % 4; if (pad && (buf_len + pad <= len)) { buf_len += pad; } } while (dp < varbuf + n) *dp++ = 0; return buf_len; } /* calculate a * b + c */ void bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c) { #define FORMALIZE(var) {cc += (var & 0x80000000) ? 1 : 0; var &= 0x7fffffff;} uint32 r1, r0; uint32 a1, a0, b1, b0, t, cc = 0; a1 = a >> 16; a0 = a & 0xffff; b1 = b >> 16; b0 = b & 0xffff; r0 = a0 * b0; FORMALIZE(r0); t = (a1 * b0) << 16; FORMALIZE(t); r0 += t; FORMALIZE(r0); t = (a0 * b1) << 16; FORMALIZE(t); r0 += t; FORMALIZE(r0); FORMALIZE(c); r0 += c; FORMALIZE(r0); r0 |= (cc % 2) ? 
0x80000000 : 0;
	r1 = a1 * b1 + ((a1 * b0) >> 16) + ((b1 * a0) >> 16) + (cc / 2);

	*r_high = r1;
	*r_low = r0;
}

/* calculate a / b */
void
bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
{
	uint32 a1 = a_high, a0 = a_low, r0 = 0;

	if (b < 2)
		return;

	while (a1 != 0) {
		r0 += (0xffffffff / b) * a1;
		bcm_uint64_multiple_add(&a1, &a0, ((0xffffffff % b) + 1) % b, a1, a0);
	}

	r0 += a0 / b;
	*r = r0;
}

#ifndef setbit /* As in the header file */
#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
/* Set bit in byte array. */
void
setbit(void *array, uint bit)
{
	((uint8 *)array)[bit / NBBY] |= 1 << (bit % NBBY);
}

/* Clear bit in byte array. */
void
clrbit(void *array, uint bit)
{
	((uint8 *)array)[bit / NBBY] &= ~(1 << (bit % NBBY));
}

/* Test if bit is set in byte array. */
bool
isset(const void *array, uint bit)
{
	return (((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY)));
}

/* Test if bit is clear in byte array. */
bool
isclr(const void *array, uint bit)
{
	return ((((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))) == 0);
}
#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */
#endif /* setbit */

void
set_bitrange(void *array, uint start, uint end, uint maxbit)
{
	uint startbyte = start/NBBY;
	uint endbyte = end/NBBY;
	uint i, startbytelastbit, endbytestartbit;

	if (end >= start) {
		if (endbyte - startbyte > 1) {
			startbytelastbit = (startbyte+1)*NBBY - 1;
			endbytestartbit = endbyte*NBBY;
			for (i = startbyte+1; i < endbyte; i++)
				((uint8 *)array)[i] = 0xFF;
			for (i = start; i <= startbytelastbit; i++)
				setbit(array, i);
			for (i = endbytestartbit; i <= end; i++)
				setbit(array, i);
		} else {
			for (i = start; i <= end; i++)
				setbit(array, i);
		}
	} else {
		set_bitrange(array, start, maxbit, maxbit);
		set_bitrange(array, 0, end, maxbit);
	}
}

void
bcm_bitprint32(const uint32 u32)
{
	int i;
	for (i = NBITS(uint32) - 1; i >= 0; i--) {
		isbitset(u32, i) ? printf("1") : printf("0");
		if ((i % NBBY) == 0)
			printf(" ");
	}
	printf("\n");
}

/* calculate checksum for ip header, tcp / udp header / data */
uint16
bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum)
{
	while (len > 1) {
		sum += (buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}

	if (len > 0) {
		sum += (*buf) << 8;
	}

	while (sum >> 16) {
		sum = (sum & 0xffff) + (sum >> 16);
	}

	return ((uint16)~sum);
}

#ifdef BCMDRIVER
/*
 * Hierarchical Multiword bitmap based small id allocator.
 *
 * Multilevel hierarchy bitmap. (maximum 2 levels)
 * First hierarchy uses a multiword bitmap to identify 32bit words in the
 * second hierarchy that have at least a single bit set. Each bit in a word of
 * the second hierarchy represents a unique ID that may be allocated.
 *
 * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed.
 * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word
 * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs.
 * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap words identifying the first
 *                       non-zero bitmap word carrying at least one free ID.
 * BCM_MWBMAP_SHIFT_OP:  Used in MOD, DIV and MUL operations.
 * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID
 *
 * Design Notes:
 * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory. When defined, the number
 * of set bits is computed on each allocation and deallocation, requiring 4
 * array indexed accesses and 3 arithmetic operations. When not defined, a
 * running count of the set bits state is maintained. Up to 32 Bytes per 1024
 * IDs is needed. In a 4K max ID allocator, up to 128Bytes are hence used per
 * instantiation. In a memory limited system e.g.
dongle builds, a CPU for memory tradeoff may * be used by defining BCM_MWBMAP_USE_CNTSETBITS. * * Note: wd_bitmap[] is statically declared and is not ROM friendly ... array * size is fixed. No intention to support larger than 4K indice allocation. ID * allocators for ranges smaller than 4K will have a wastage of only 12Bytes * with savings in not having to use an indirect access, had it been dynamically * allocated. */ #define BCM_MWBMAP_ITEMS_MAX (4 * 1024) /* May increase to 16K */ #define BCM_MWBMAP_BITS_WORD (NBITS(uint32)) #define BCM_MWBMAP_WORDS_MAX (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD) #define BCM_MWBMAP_WDMAP_MAX (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD) #define BCM_MWBMAP_SHIFT_OP (5) #define BCM_MWBMAP_MODOP(ix) ((ix) & (BCM_MWBMAP_BITS_WORD - 1)) #define BCM_MWBMAP_DIVOP(ix) ((ix) >> BCM_MWBMAP_SHIFT_OP) #define BCM_MWBMAP_MULOP(ix) ((ix) << BCM_MWBMAP_SHIFT_OP) /* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */ #define BCM_MWBMAP_PTR(hdl) ((struct bcm_mwbmap *)(hdl)) #define BCM_MWBMAP_HDL(ptr) ((void *)(ptr)) #if defined(BCM_MWBMAP_DEBUG) #define BCM_MWBMAP_AUDIT(mwb) \ do { \ ASSERT((mwb != NULL) && \ (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \ bcm_mwbmap_audit(mwb); \ } while (0) #define MWBMAP_ASSERT(exp) ASSERT(exp) #define MWBMAP_DBG(x) printf x #else /* !BCM_MWBMAP_DEBUG */ #define BCM_MWBMAP_AUDIT(mwb) do {} while (0) #define MWBMAP_ASSERT(exp) do {} while (0) #define MWBMAP_DBG(x) #endif /* !BCM_MWBMAP_DEBUG */ typedef struct bcm_mwbmap { /* Hierarchical multiword bitmap allocator */ uint16 wmaps; /* Total number of words in free wd bitmap */ uint16 imaps; /* Total number of words in free id bitmap */ int16 ifree; /* Count of free indices. Used only in audits */ uint16 total; /* Total indices managed by multiword bitmap */ void * magic; /* Audit handle parameter from user */ uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap of */ #if !defined(BCM_MWBMAP_USE_CNTSETBITS) int8 wd_count[BCM_MWBMAP_WORDS_MAX]; /* free id running count, 1st lvl */ #endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ uint32 id_bitmap[0]; /* Second level bitmap */ } bcm_mwbmap_t; /* Incarnate a hierarchical multiword bitmap based small index allocator. */ struct bcm_mwbmap * bcm_mwbmap_init(osl_t *osh, uint32 items_max) { struct bcm_mwbmap * mwbmap_p; uint32 wordix, size, words, extra; /* Implementation Constraint: Uses 32bit word bitmap */ MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U); MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U); MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX)); MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U); ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX); /* Determine the number of words needed in the multiword bitmap */ extra = BCM_MWBMAP_MODOP(items_max); words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 
1U : 0U); /* Allocate runtime state of multiword bitmap */ /* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */ size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words); mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size); if (mwbmap_p == (bcm_mwbmap_t *)NULL) { ASSERT(0); goto error1; } memset(mwbmap_p, 0, size); /* Initialize runtime multiword bitmap state */ mwbmap_p->imaps = (uint16)words; mwbmap_p->ifree = (int16)items_max; mwbmap_p->total = (uint16)items_max; /* Setup magic, for use in audit of handle */ mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p); /* Setup the second level bitmap of free indices */ /* Mark all indices as available */ for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) { mwbmap_p->id_bitmap[wordix] = (uint32)(~0U); #if !defined(BCM_MWBMAP_USE_CNTSETBITS) mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD; #endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ } /* Ensure that extra indices are tagged as un-available */ if (extra) { /* fixup the free ids in last bitmap and wd_count */ uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1]; *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */ #if !defined(BCM_MWBMAP_USE_CNTSETBITS) mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */ #endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ } /* Setup the first level bitmap hierarchy */ extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps); words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U); mwbmap_p->wmaps = (uint16)words; for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++) mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U); if (extra) { uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1]; *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */ } return mwbmap_p; error1: return BCM_MWBMAP_INVALID_HDL; } /* Release resources used by multiword bitmap based small index allocator. */ void bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl) { bcm_mwbmap_t * mwbmap_p; BCM_MWBMAP_AUDIT(mwbmap_hdl); mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap) + (sizeof(uint32) * mwbmap_p->imaps)); return; } /* Allocate a unique small index using a multiword bitmap index allocator. */ uint32 BCMFASTPATH bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl) { bcm_mwbmap_t * mwbmap_p; uint32 wordix, bitmap; BCM_MWBMAP_AUDIT(mwbmap_hdl); mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); /* Start with the first hierarchy */ for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) { bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */ if (bitmap != 0U) { uint32 count, bitix, *bitmap_p; bitmap_p = &mwbmap_p->wd_bitmap[wordix]; /* clear all except trailing 1 */ bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap)))); MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) == bcm_count_leading_zeros(bitmap)); bitix = (BCM_MWBMAP_BITS_WORD - 1) - bcm_count_leading_zeros(bitmap); /* use asm clz */ wordix = BCM_MWBMAP_MULOP(wordix) + bitix; /* Clear bit if wd count is 0, without conditional branch */ #if defined(BCM_MWBMAP_USE_CNTSETBITS) count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1; #else /* ! BCM_MWBMAP_USE_CNTSETBITS */ mwbmap_p->wd_count[wordix]--; count = mwbmap_p->wd_count[wordix]; MWBMAP_ASSERT(count == (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1)); #endif /* ! 
BCM_MWBMAP_USE_CNTSETBITS */ MWBMAP_ASSERT(count >= 0); /* clear wd_bitmap bit if id_map count is 0 */ bitmap = (count == 0) << bitix; MWBMAP_DBG(( "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d", bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count)); *bitmap_p ^= bitmap; /* Use bitix in the second hierarchy */ bitmap_p = &mwbmap_p->id_bitmap[wordix]; bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */ MWBMAP_ASSERT(bitmap != 0U); /* clear all except trailing 1 */ bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap)))); MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) == bcm_count_leading_zeros(bitmap)); bitix = BCM_MWBMAP_MULOP(wordix) + (BCM_MWBMAP_BITS_WORD - 1) - bcm_count_leading_zeros(bitmap); /* use asm clz */ mwbmap_p->ifree--; /* decrement system wide free count */ MWBMAP_ASSERT(mwbmap_p->ifree >= 0); MWBMAP_DBG(( "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d", bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, mwbmap_p->ifree)); *bitmap_p ^= bitmap; /* mark as allocated = 1b0 */ return bitix; } } ASSERT(mwbmap_p->ifree == 0); return BCM_MWBMAP_INVALID_IDX; } /* Force an index at a specified position to be in use */ void bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix) { bcm_mwbmap_t * mwbmap_p; uint32 count, wordix, bitmap, *bitmap_p; BCM_MWBMAP_AUDIT(mwbmap_hdl); mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); ASSERT(bitix < mwbmap_p->total); /* Start with second hierarchy */ wordix = BCM_MWBMAP_DIVOP(bitix); bitmap = (uint32)(1U << BCM_MWBMAP_MODOP(bitix)); bitmap_p = &mwbmap_p->id_bitmap[wordix]; ASSERT((*bitmap_p & bitmap) == bitmap); mwbmap_p->ifree--; /* update free count */ ASSERT(mwbmap_p->ifree >= 0); MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d", bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, mwbmap_p->ifree)); *bitmap_p ^= bitmap; /* mark as in use */ /* Update first hierarchy */ bitix = wordix; wordix = BCM_MWBMAP_DIVOP(bitix); bitmap_p = &mwbmap_p->wd_bitmap[wordix]; #if defined(BCM_MWBMAP_USE_CNTSETBITS) count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]); #else /* ! BCM_MWBMAP_USE_CNTSETBITS */ mwbmap_p->wd_count[bitix]--; count = mwbmap_p->wd_count[bitix]; MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix])); #endif /* ! 
BCM_MWBMAP_USE_CNTSETBITS */
	MWBMAP_ASSERT(count >= 0);

	bitmap = (count == 0) << BCM_MWBMAP_MODOP(bitix);

	MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
	            BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
	            (*bitmap_p) ^ bitmap, count));

	*bitmap_p ^= bitmap; /* mark as in use */

	return;
}

/* Free a previously allocated index back into the multiword bitmap allocator */
void BCMFASTPATH
bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
{
	bcm_mwbmap_t * mwbmap_p;
	uint32 wordix, bitmap, *bitmap_p;

	BCM_MWBMAP_AUDIT(mwbmap_hdl);
	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	ASSERT(bitix < mwbmap_p->total);

	/* Start with second level hierarchy */
	wordix   = BCM_MWBMAP_DIVOP(bitix);
	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
	bitmap_p = &mwbmap_p->id_bitmap[wordix];

	ASSERT((*bitmap_p & bitmap) == 0U); /* ASSERT not a double free */

	mwbmap_p->ifree++; /* update free count */
	ASSERT(mwbmap_p->ifree <= mwbmap_p->total);

	MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
	            bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
	            mwbmap_p->ifree));

	*bitmap_p |= bitmap; /* mark as available */

	/* Now update first level hierarchy */
	bitix    = wordix;
	wordix   = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */
	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
	bitmap_p = &mwbmap_p->wd_bitmap[wordix];

#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
	mwbmap_p->wd_count[bitix]++;
#endif

#if defined(BCM_MWBMAP_DEBUG)
	{
		uint32 count;
#if defined(BCM_MWBMAP_USE_CNTSETBITS)
		count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
		count = mwbmap_p->wd_count[bitix];
		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */

		MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);

		MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
		            bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
		            count));
	}
#endif /* BCM_MWBMAP_DEBUG */

	*bitmap_p |= bitmap;

	return;
}

/* Fetch the total number of free indices in the multiword bitmap allocator */
uint32
bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
{
	bcm_mwbmap_t * mwbmap_p;

	BCM_MWBMAP_AUDIT(mwbmap_hdl);
	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	ASSERT(mwbmap_p->ifree >= 0);

	return mwbmap_p->ifree;
}

/* Determine whether an index is in use or free */
bool
bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
{
	bcm_mwbmap_t * mwbmap_p;
	uint32 wordix, bitmap;

	BCM_MWBMAP_AUDIT(mwbmap_hdl);
	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	ASSERT(bitix < mwbmap_p->total);

	wordix = BCM_MWBMAP_DIVOP(bitix);
	bitmap = (1U << BCM_MWBMAP_MODOP(bitix));

	return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
}

/* Debug dump a multiword bitmap allocator */
void
bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
{
	uint32 ix, count;
	bcm_mwbmap_t * mwbmap_p;

	BCM_MWBMAP_AUDIT(mwbmap_hdl);
	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n", mwbmap_p,
	       mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);

	for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
		printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);
		bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
		printf("\n");
	}

	for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
#if defined(BCM_MWBMAP_USE_CNTSETBITS)
		count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
		count = mwbmap_p->wd_count[ix];
		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
		printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
		bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
		printf("\n");
	}

	return;
}

/* Audit a hierarchical multiword bitmap */
void
bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
{
	bcm_mwbmap_t * mwbmap_p;
	uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;

	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {
		bitmap_p = &mwbmap_p->wd_bitmap[wordix];

		for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
			if ((*bitmap_p) & (1 << bitix)) {
				idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
#if defined(BCM_MWBMAP_USE_CNTSETBITS)
				count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
				count = mwbmap_p->wd_count[idmap_ix];
				ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
				ASSERT(count != 0U);
				free_cnt += count;
			}
		}
	}

	ASSERT((int)free_cnt == mwbmap_p->ifree);
}
/* END : Multiword bitmap based 64bit to Unique 32bit Id allocator. */
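/* Usage sketch (illustrative only) for the multiword bitmap allocator above.
 * BCMUTILS_SELFTEST is a hypothetical guard, not part of the original build.
 */
#ifdef BCMUTILS_SELFTEST
static void
bcm_mwbmap_example(osl_t *osh)
{
	struct bcm_mwbmap *map;
	uint32 id;

	map = bcm_mwbmap_init(osh, 256U);	/* manage IDs 0 .. 255 */
	if (map == BCM_MWBMAP_INVALID_HDL)
		return;

	id = bcm_mwbmap_alloc(map);		/* lowest available free ID */
	if (id != BCM_MWBMAP_INVALID_IDX) {
		ASSERT(!bcm_mwbmap_isfree(map, id));
		bcm_mwbmap_free(map, id);	/* return it to the pool */
	}

	bcm_mwbmap_fini(osh, map);
}
#endif /* BCMUTILS_SELFTEST */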
/* Simple 16bit Id allocator using a stack implementation. */
typedef struct id16_map {
	uint16  total;     /* total number of ids managed by allocator */
	uint16  start;     /* start value of 16bit ids to be managed */
	uint32  failures;  /* count of failures */
	void    *dbg;      /* debug placeholder */
	int     stack_idx; /* index into stack of available ids */
	uint16  stack[0];  /* stack of 16 bit ids */
} id16_map_t;

#define ID16_MAP_SZ(items)      (sizeof(id16_map_t) + \
	                         (sizeof(uint16) * (items)))

#if defined(BCM_DBG)

/* Uncomment BCM_DBG_ID16 to debug double free */
/* #define BCM_DBG_ID16 */

typedef struct id16_map_dbg {
	uint16  total;
	bool    avail[0];
} id16_map_dbg_t;
#define ID16_MAP_DBG_SZ(items)  (sizeof(id16_map_dbg_t) + \
	                         (sizeof(bool) * (items)))
#define ID16_MAP_MSG(x)         printf x
#else
#define ID16_MAP_MSG(x)
#endif /* BCM_DBG */
void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */
id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16)
{
	uint16 idx, val16;
	id16_map_t * id16_map;

	ASSERT(total_ids > 0);
	ASSERT((start_val16 + total_ids) < ID16_INVALID);

	id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids));
	if (id16_map == NULL) {
		return NULL;
	}

	id16_map->total = total_ids;
	id16_map->start = start_val16;
	id16_map->failures = 0;
	id16_map->dbg = NULL;

	/* Populate stack with 16bit id values, commencing with start_val16 */
	id16_map->stack_idx = 0;
	val16 = start_val16;

	for (idx = 0; idx < total_ids; idx++, val16++) {
		id16_map->stack_idx = idx;
		id16_map->stack[id16_map->stack_idx] = val16;
	}

#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
	id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids));

	if (id16_map->dbg) {
		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;

		id16_map_dbg->total = total_ids;
		for (idx = 0; idx < total_ids; idx++) {
			id16_map_dbg->avail[idx] = TRUE;
		}
	}
#endif /* BCM_DBG && BCM_DBG_ID16 */

	return (void *)id16_map;
}

void * /* Destruct an id16 allocator instance */
id16_map_fini(osl_t *osh, void * id16_map_hndl)
{
	uint16 total_ids;
	id16_map_t * id16_map;

	if (id16_map_hndl == NULL)
		return NULL;

	id16_map = (id16_map_t *)id16_map_hndl;

	total_ids = id16_map->total;
	ASSERT(total_ids > 0);

#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
	if (id16_map->dbg) {
		MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids));
		id16_map->dbg = NULL;
	}
#endif /* BCM_DBG && BCM_DBG_ID16 */

	id16_map->total = 0;
	MFREE(osh, id16_map, ID16_MAP_SZ(total_ids));

	return NULL;
}

void
id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16)
{
	uint16 idx, val16;
	id16_map_t * id16_map;

	ASSERT(total_ids > 0);
	ASSERT((start_val16 + total_ids) < ID16_INVALID);

	id16_map = (id16_map_t *)id16_map_hndl;
	if (id16_map == NULL) {
		return;
	}

	id16_map->total = total_ids;
	id16_map->start = start_val16;
	id16_map->failures = 0;

	/* Populate stack with 16bit id values, commencing with start_val16 */
	id16_map->stack_idx = 0;
	val16 = start_val16;

	for (idx = 0; idx < total_ids; idx++, val16++) {
		id16_map->stack_idx = idx;
		id16_map->stack[id16_map->stack_idx] = val16;
	}

#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
	if (id16_map->dbg) {
		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;

		id16_map_dbg->total = total_ids;
		for (idx = 0; idx < total_ids; idx++) {
			id16_map_dbg->avail[idx] = TRUE;
		}
	}
#endif /* BCM_DBG && BCM_DBG_ID16 */
}

uint16 BCMFASTPATH /* Allocate a unique 16bit id */
id16_map_alloc(void * id16_map_hndl)
{
	uint16 val16;
	id16_map_t * id16_map;

	ASSERT(id16_map_hndl != NULL);

	id16_map = (id16_map_t *)id16_map_hndl;

	ASSERT(id16_map->total > 0);

	if (id16_map->stack_idx < 0) {
		id16_map->failures++;
		return ID16_INVALID;
	}

	val16 = id16_map->stack[id16_map->stack_idx];
	id16_map->stack_idx--;

#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
	ASSERT(val16 < (id16_map->start + id16_map->total));

	if (id16_map->dbg) { /* Validate val16 */
		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;

		ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == TRUE);
		id16_map_dbg->avail[val16 - id16_map->start] = FALSE;
	}
#endif /* BCM_DBG && BCM_DBG_ID16 */

	return val16;
}

void BCMFASTPATH /* Free a 16bit id value into the id16 allocator */
id16_map_free(void * id16_map_hndl, uint16 val16)
{
	id16_map_t * id16_map;

	ASSERT(id16_map_hndl != NULL);

	id16_map = (id16_map_t *)id16_map_hndl;

#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
	ASSERT(val16 < (id16_map->start + id16_map->total));

	if (id16_map->dbg) { /* Validate val16 */
		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;

		ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == FALSE);
		id16_map_dbg->avail[val16 - id16_map->start] = TRUE;
	}
#endif /* BCM_DBG && BCM_DBG_ID16 */

	id16_map->stack_idx++;
	id16_map->stack[id16_map->stack_idx] = val16;
}

uint32 /* Returns number of failures to allocate an unique id16 */
id16_map_failures(void * id16_map_hndl)
{
	ASSERT(id16_map_hndl != NULL);
	return ((id16_map_t *)id16_map_hndl)->failures;
}

bool
id16_map_audit(void * id16_map_hndl)
{
	int idx;
	int insane = 0;
	id16_map_t * id16_map;

	ASSERT(id16_map_hndl != NULL);

	id16_map = (id16_map_t *)id16_map_hndl;

	/* stack may range from empty (-1) through full (total - 1) */
	ASSERT((id16_map->stack_idx >= -1) &&
	       (id16_map->stack_idx < (int)id16_map->total));

	for (idx = 0; idx <= id16_map->stack_idx; idx++) {
		ASSERT(id16_map->stack[idx] >= id16_map->start);
		ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total));

#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
		if (id16_map->dbg) {
			uint16 val16 = id16_map->stack[idx];
			if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16 - id16_map->start]
			    != TRUE) {
				insane |= 1;
				ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n",
				              id16_map_hndl, idx, val16));
			}
		}
#endif /* BCM_DBG && BCM_DBG_ID16 */
	}

#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
	if (id16_map->dbg) {
		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
		uint16 avail = 0; /* Audit available ids counts */

		for (idx = 0; idx < id16_map_dbg->total; idx++) {
			if (id16_map_dbg->avail[idx] == TRUE)
				avail++;
		}
		if (avail && (avail != (id16_map->stack_idx + 1))) {
			insane |= 1;
			ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n",
			              id16_map_hndl, avail, id16_map->stack_idx));
		}
	}
#endif /* BCM_DBG && BCM_DBG_ID16 */

	return (!!insane);
}
/* END: Simple id16 allocator */
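/* Usage sketch (illustrative only) for the id16 stack allocator above.
 * BCMUTILS_SELFTEST is a hypothetical guard, not part of the original build.
 */
#ifdef BCMUTILS_SELFTEST
static void
id16_map_example(osl_t *osh)
{
	void *map;
	uint16 id;

	/* manage 32 ids with values 0x100 .. 0x11f */
	map = id16_map_init(osh, 32, 0x100);
	if (map == NULL)
		return;

	id = id16_map_alloc(map);
	if (id != ID16_INVALID)
		id16_map_free(map, id);	/* push it back on the free stack */

	map = id16_map_fini(osh, map);	/* always returns NULL */
}
#endif /* BCMUTILS_SELFTEST */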
#endif /* BCMDRIVER */

/* calculate a >> b; and returns only lower 32 bits */
void
bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
{
	uint32 a1 = a_high, a0 = a_low, r0 = 0;

	if (b == 0) {
		r0 = a_low;
		*r = r0;
		return;
	}

	if (b < 32) {
		a0 = a0 >> b;
		a1 = a1 & ((1 << b) - 1);
		a1 = a1 << (32 - b);
		r0 = a0 | a1;
		*r = r0;
		return;
	} else {
		r0 = a1 >> (b - 32);
		*r = r0;
		return;
	}
}

/* calculate a + b where a is a 64 bit number and b is a 32 bit number */
void
bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset)
{
	uint32 r1_lo = *r_lo;
	(*r_lo) += offset;
	if (*r_lo < r1_lo)
		(*r_hi)++;
}

/* calculate a - b where a is a 64 bit number and b is a 32 bit number */
void
bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset)
{
	uint32 r1_lo = *r_lo;
	(*r_lo) -= offset;
	if (*r_lo > r1_lo)
		(*r_hi)--;
}

#ifdef DEBUG_COUNTER
#if (OSL_SYSUPTIME_SUPPORT == TRUE)
void counter_printlog(counter_tbl_t *ctr_tbl)
{
	uint32 now;

	if (!ctr_tbl->enabled)
		return;

	now = OSL_SYSUPTIME();

	if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) {
		uint8 i = 0;
		printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print);

		for (i = 0; i < ctr_tbl->needed_cnt; i++) {
			printf(" %u", ctr_tbl->cnt[i]);
		}
		printf("\n");

		ctr_tbl->prev_log_print = now;
		bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint));
	}
}
#else
/* OSL_SYSUPTIME is not supported so no way to get time */
#define counter_printlog(a) do {} while (0)
#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */
#endif /* DEBUG_COUNTER */

#ifdef BCMDRIVER
void
dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
{
	uint32 mem_size;
	mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
	if (pool)
		MFREE(osh, pool, mem_size);
}

dll_pool_t *
dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size)
{
	uint32 mem_size, i;
	dll_pool_t * dll_pool_p;
	dll_t *
elem_p; ASSERT(elem_size > sizeof(dll_t)); mem_size = sizeof(dll_pool_t) + (elems_max * elem_size); if ((dll_pool_p = (dll_pool_t *)MALLOC(osh, mem_size)) == NULL) { printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n", elems_max, elem_size); ASSERT(0); return dll_pool_p; } bzero(dll_pool_p, mem_size); dll_init(&dll_pool_p->free_list); dll_pool_p->elems_max = elems_max; dll_pool_p->elem_size = elem_size; elem_p = dll_pool_p->elements; for (i = 0; i < elems_max; i++) { dll_append(&dll_pool_p->free_list, elem_p); elem_p = (dll_t *)((uintptr)elem_p + elem_size); } dll_pool_p->free_count = elems_max; return dll_pool_p; } void * dll_pool_alloc(dll_pool_t * dll_pool_p) { dll_t * elem_p; if (dll_pool_p->free_count == 0) { ASSERT(dll_empty(&dll_pool_p->free_list)); return NULL; } elem_p = dll_head_p(&dll_pool_p->free_list); dll_delete(elem_p); dll_pool_p->free_count -= 1; return (void *)elem_p; } void dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p) { dll_t * node_p = (dll_t *)elem_p; dll_prepend(&dll_pool_p->free_list, node_p); dll_pool_p->free_count += 1; } void dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p) { dll_t * node_p = (dll_t *)elem_p; dll_append(&dll_pool_p->free_list, node_p); dll_pool_p->free_count += 1; } #endif /* BCMDRIVER */
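/* Usage sketch (illustrative only) for the dll pool above, showing the
 * intended element layout: pool elements are linked in place through an
 * embedded dll_t that must sit at the start of the element. Compiled only
 * when both BCMDRIVER and the hypothetical BCMUTILS_SELFTEST are defined.
 */
#if defined(BCMDRIVER) && defined(BCMUTILS_SELFTEST)
typedef struct my_elem {
	dll_t node;	/* must be first: free elements are linked in place */
	uint32 data;
} my_elem_t;

static void
dll_pool_example(void * osh)
{
	dll_pool_t * pool;
	my_elem_t * elem;

	pool = dll_pool_init(osh, 16, sizeof(my_elem_t));
	if (pool == NULL)
		return;

	elem = (my_elem_t *)dll_pool_alloc(pool);
	if (elem != NULL) {
		elem->data = 0xcafe;
		dll_pool_free(pool, elem);	/* back to head of free list */
	}

	dll_pool_detach(osh, pool, 16, sizeof(my_elem_t));
}
#endif /* BCMDRIVER && BCMUTILS_SELFTEST */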
s20121035/rk3288_android5.1_repo
kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmutils.c
C
gpl-3.0
77,626
/* lzw.c -- compress files in LZW format. * This is a dummy version avoiding patent problems. */ #include <config.h> #include "tailor.h" #include "gzip.h" #include "lzw.h" static int msg_done = 0; /* Compress in to out with lzw method. */ int lzw(in, out) int in, out; { if (msg_done) return ERROR; msg_done = 1; fprintf(stderr,"output in compress .Z format not supported\n"); if (in != out) { /* avoid warnings on unused variables */ exit_code = ERROR; } return ERROR; }
infoburp/gzip
lzw.c
C
gpl-3.0
513