repo_name
string
path
string
copies
string
size
string
content
string
license
string
advx9600/kernel-4.4-RuiEr
security/selinux/ss/hashtab.c
12560
2990
/* * Implementation of the hash table type. * * Author : Stephen Smalley, <sds@epoch.ncsc.mil> */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include "hashtab.h" struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key), int (*keycmp)(struct hashtab *h, const void *key1, const void *key2), u32 size) { struct hashtab *p; u32 i; p = kzalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) return p; p->size = size; p->nel = 0; p->hash_value = hash_value; p->keycmp = keycmp; p->htable = kmalloc(sizeof(*(p->htable)) * size, GFP_KERNEL); if (p->htable == NULL) { kfree(p); return NULL; } for (i = 0; i < size; i++) p->htable[i] = NULL; return p; } int hashtab_insert(struct hashtab *h, void *key, void *datum) { u32 hvalue; struct hashtab_node *prev, *cur, *newnode; if (!h || h->nel == HASHTAB_MAX_NODES) return -EINVAL; hvalue = h->hash_value(h, key); prev = NULL; cur = h->htable[hvalue]; while (cur && h->keycmp(h, key, cur->key) > 0) { prev = cur; cur = cur->next; } if (cur && (h->keycmp(h, key, cur->key) == 0)) return -EEXIST; newnode = kzalloc(sizeof(*newnode), GFP_KERNEL); if (newnode == NULL) return -ENOMEM; newnode->key = key; newnode->datum = datum; if (prev) { newnode->next = prev->next; prev->next = newnode; } else { newnode->next = h->htable[hvalue]; h->htable[hvalue] = newnode; } h->nel++; return 0; } void *hashtab_search(struct hashtab *h, const void *key) { u32 hvalue; struct hashtab_node *cur; if (!h) return NULL; hvalue = h->hash_value(h, key); cur = h->htable[hvalue]; while (cur && h->keycmp(h, key, cur->key) > 0) cur = cur->next; if (cur == NULL || (h->keycmp(h, key, cur->key) != 0)) return NULL; return cur->datum; } void hashtab_destroy(struct hashtab *h) { u32 i; struct hashtab_node *cur, *temp; if (!h) return; for (i = 0; i < h->size; i++) { cur = h->htable[i]; while (cur) { temp = cur; cur = cur->next; kfree(temp); } h->htable[i] = NULL; } kfree(h->htable); h->htable = NULL; kfree(h); } int 
hashtab_map(struct hashtab *h, int (*apply)(void *k, void *d, void *args), void *args) { u32 i; int ret; struct hashtab_node *cur; if (!h) return 0; for (i = 0; i < h->size; i++) { cur = h->htable[i]; while (cur) { ret = apply(cur->key, cur->datum, args); if (ret) return ret; cur = cur->next; } } return 0; } void hashtab_stat(struct hashtab *h, struct hashtab_info *info) { u32 i, chain_len, slots_used, max_chain_len; struct hashtab_node *cur; slots_used = 0; max_chain_len = 0; for (slots_used = max_chain_len = i = 0; i < h->size; i++) { cur = h->htable[i]; if (cur) { slots_used++; chain_len = 0; while (cur) { chain_len++; cur = cur->next; } if (chain_len > max_chain_len) max_chain_len = chain_len; } } info->slots_used = slots_used; info->max_chain_len = max_chain_len; }
gpl-2.0
pierdebeer/AudaxPlus_M_Kernel
drivers/net/wireless/bcmdhd4359/dhd_pcie_linux.c
17
40449
/* * Linux DHD Bus Module for PCIE * * Copyright (C) 1999-2015, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * * <<Broadcom-WL-IPTag/Open:>> * * $Id: dhd_pcie_linux.c 606676 2015-12-16 09:49:30Z $ */ /* include files */ #include <typedefs.h> #include <bcmutils.h> #include <bcmdevs.h> #include <siutils.h> #include <hndsoc.h> #include <hndpmu.h> #include <sbchipc.h> #if defined(DHD_DEBUG) #include <hnd_armtrap.h> #include <hnd_cons.h> #endif /* defined(DHD_DEBUG) */ #include <dngl_stats.h> #include <pcie_core.h> #include <dhd.h> #include <dhd_bus.h> #include <dhd_proto.h> #include <dhd_dbg.h> #include <dhdioctl.h> #include <bcmmsgbuf.h> #include <pcicfg.h> #include <dhd_pcie.h> #include <dhd_linux.h> #ifdef CONFIG_ARCH_MSM #ifdef CONFIG_PCI_MSM #include <linux/msm_pcie.h> #else #include <mach/msm_pcie.h> #endif /* CONFIG_PCI_MSM */ #endif /* CONFIG_ARCH_MSM */ #define PCI_CFG_RETRY 10 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */ #define OSL_PKTTAG_CLEAR(p) \ do { \ struct sk_buff *s = (struct sk_buff *)(p); \ ASSERT(OSL_PKTTAG_SZ == 32); \ *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \ *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \ *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \ *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ } while (0) /* user defined data structures */ typedef struct dhd_pc_res { uint32 bar0_size; void* bar0_addr; uint32 bar1_size; void* bar1_addr; } pci_config_res, *pPci_config_res; typedef bool (*dhdpcie_cb_fn_t)(void *); typedef struct dhdpcie_info { dhd_bus_t *bus; osl_t *osh; struct pci_dev *dev; /* pci device handle */ volatile char *regs; /* pci device memory va */ volatile char *tcm; /* pci device memory va */ uint32 tcm_size; /* pci device memory size */ struct pcos_info *pcos_info; uint16 last_intrstatus; /* to cache intrstatus */ int irq; char pciname[32]; struct pci_saved_state* default_state; struct pci_saved_state* state; #ifdef BCMPCIE_OOB_HOST_WAKE void *os_cxt; /* Pointer to per-OS private data */ #endif /* 
BCMPCIE_OOB_HOST_WAKE */ } dhdpcie_info_t; struct pcos_info { dhdpcie_info_t *pc; spinlock_t lock; wait_queue_head_t intr_wait_queue; struct timer_list tuning_timer; int tuning_timer_exp; atomic_t timer_enab; struct tasklet_struct tuning_tasklet; }; #ifdef BCMPCIE_OOB_HOST_WAKE typedef struct dhdpcie_os_info { int oob_irq_num; /* valid when hardware or software oob in use */ unsigned long oob_irq_flags; /* valid when hardware or software oob in use */ bool oob_irq_registered; bool oob_irq_enabled; bool oob_irq_wake_enabled; spinlock_t oob_irq_spinlock; void *dev; /* handle to the underlying device */ } dhdpcie_os_info_t; #endif /* BCMPCIE_OOB_HOST_WAKE */ /* function declarations */ static int __devinit dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void __devexit dhdpcie_pci_remove(struct pci_dev *pdev); static int dhdpcie_init(struct pci_dev *pdev); static irqreturn_t dhdpcie_isr(int irq, void *arg); /* OS Routine functions for PCI suspend/resume */ static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state); static int dhdpcie_resume_host_dev(dhd_bus_t *bus); static int dhdpcie_suspend_host_dev(dhd_bus_t *bus); static int dhdpcie_resume_dev(struct pci_dev *dev); static int dhdpcie_suspend_dev(struct pci_dev *dev); #ifdef DHD_PCIE_RUNTIMEPM static int dhdpcie_pm_suspend(struct device *dev); static int dhdpcie_pm_prepare(struct device *dev); static int dhdpcie_pm_resume(struct device *dev); static void dhdpcie_pm_complete(struct device *dev); #else static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state); static int dhdpcie_pci_resume(struct pci_dev *dev); #endif /* DHD_PCIE_RUNTIMEPM */ static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = { { vendor: 0x14e4, device: PCI_ANY_ID, subvendor: PCI_ANY_ID, subdevice: PCI_ANY_ID, class: PCI_CLASS_NETWORK_OTHER << 8, class_mask: 0xffff00, driver_data: 0, }, { 0, } }; MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid); /* Power Management Hooks */ #ifdef 
DHD_PCIE_RUNTIMEPM static const struct dev_pm_ops dhd_pcie_pm_ops = { .prepare = dhdpcie_pm_prepare, .suspend = dhdpcie_pm_suspend, .resume = dhdpcie_pm_resume, .complete = dhdpcie_pm_complete, }; #endif /* DHD_PCIE_RUNTIMEPM */ static struct pci_driver dhdpcie_driver = { node: {}, name: "pcieh", id_table: dhdpcie_pci_devid, probe: dhdpcie_pci_probe, remove: dhdpcie_pci_remove, #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) save_state: NULL, #endif #ifdef DHD_PCIE_RUNTIMEPM .driver.pm = &dhd_pcie_pm_ops, #else suspend: dhdpcie_pci_suspend, resume: dhdpcie_pci_resume, #endif /* DHD_PCIE_RUNTIMEPM */ }; int dhdpcie_init_succeeded = FALSE; #ifdef DHD_PCIE_RUNTIMEPM static int dhdpcie_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); return dhdpcie_set_suspend_resume(pdev, TRUE); } static int dhdpcie_pm_prepare(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); dhdpcie_info_t *pch = pci_get_drvdata(pdev); dhd_bus_t *bus = NULL; if (pch) { bus = pch->bus; DHD_DISABLE_RUNTIME_PM(bus->dhd); } return 0; } static int dhdpcie_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); return dhdpcie_set_suspend_resume(pdev, FALSE); } static void dhdpcie_pm_complete(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); dhdpcie_info_t *pch = pci_get_drvdata(pdev); dhd_bus_t *bus = NULL; if (pch) { bus = pch->bus; DHD_ENABLE_RUNTIME_PM(bus->dhd); } return; } #else static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state) { BCM_REFERENCE(state); return dhdpcie_set_suspend_resume(pdev, TRUE); } static int dhdpcie_pci_resume(struct pci_dev *pdev) { return dhdpcie_set_suspend_resume(pdev, FALSE); } #endif /* DHD_PCIE_RUNTIMEPM */ static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state) { int ret = 0; dhdpcie_info_t *pch = pci_get_drvdata(pdev); dhd_bus_t *bus = NULL; if (pch) { bus = pch->bus; } #ifdef DHD_PCIE_RUNTIMEPM if (bus && !bus->dhd->dongle_reset) { /* if wakelock is held during 
suspend, return failed */ if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) { return -EBUSY; } mutex_lock(&bus->pm_lock); } #endif /* DHD_PCIE_RUNTIMEPM */ /* When firmware is not loaded do the PCI bus */ /* suspend/resume only */ if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) && !bus->dhd->dongle_reset) { ret = dhdpcie_pci_suspend_resume(bus, state); #ifdef DHD_PCIE_RUNTIMEPM mutex_unlock(&bus->pm_lock); #endif /* DHD_PCIE_RUNTIMEPM */ return ret; } if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)|| (bus->dhd->busstate == DHD_BUS_DATA)) && (bus->suspended != state)) { ret = dhdpcie_bus_suspend(bus, state); } #ifdef DHD_PCIE_RUNTIMEPM if (bus && !bus->dhd->dongle_reset) { mutex_unlock(&bus->pm_lock); } #endif /* DHD_PCIE_RUNTIMEPM */ return ret; } static int dhdpcie_suspend_dev(struct pci_dev *dev) { int ret; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) dhdpcie_info_t *pch = pci_get_drvdata(dev); dhd_bus_t *bus = pch->bus; if (bus->is_linkdown) { DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); return BCME_ERROR; } #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); pci_save_state(dev); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) pch->state = pci_store_saved_state(dev); #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ pci_enable_wake(dev, PCI_D0, TRUE); if (pci_is_enabled(dev)) { pci_disable_device(dev); } ret = pci_set_power_state(dev, PCI_D3hot); if (ret) { DHD_ERROR(("%s: pci_set_power_state error %d\n", __FUNCTION__, ret)); } disable_irq(dev->irq); return ret; } static int dhdpcie_resume_dev(struct pci_dev *dev) { int err = 0; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) dhdpcie_info_t *pch = pci_get_drvdata(dev); pci_load_and_free_saved_state(dev, &pch->state); #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); pci_restore_state(dev); err = 
pci_enable_device(dev); if (err) { printf("%s:pci_enable_device error %d \n", __FUNCTION__, err); goto out; } pci_set_master(dev); err = pci_set_power_state(dev, PCI_D0); if (err) { printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err); goto out; } out: enable_irq(dev->irq); return err; } static int dhdpcie_resume_host_dev(dhd_bus_t *bus) { int bcmerror = 0; #ifdef USE_EXYNOS_PCIE_RC_PMPATCH bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM); #endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ #ifdef CONFIG_ARCH_MSM bcmerror = dhdpcie_start_host_pcieclock(bus); #endif /* CONFIG_ARCH_MSM */ if (bcmerror < 0) { DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n", __FUNCTION__, bcmerror)); bus->is_linkdown = 1; #ifdef CONFIG_ARCH_MSM bus->no_cfg_restore = 1; #endif /* CONFIG_ARCH_MSM */ } return bcmerror; } static int dhdpcie_suspend_host_dev(dhd_bus_t *bus) { int bcmerror = 0; #ifdef USE_EXYNOS_PCIE_RC_PMPATCH struct pci_dev *rc_pci_dev; rc_pci_dev = pci_get_device(0x144d, SAMSUNG_PCIE_DEVICE_ID, NULL); if (rc_pci_dev) { pci_save_state(rc_pci_dev); } exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM); #endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ #ifdef CONFIG_ARCH_MSM bcmerror = dhdpcie_stop_host_pcieclock(bus); #endif /* CONFIG_ARCH_MSM */ return bcmerror; } int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state) { int rc; struct pci_dev *dev = bus->dev; if (state) { if (bus->is_linkdown) { DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); return BCME_ERROR; } #ifndef BCMPCIE_OOB_HOST_WAKE dhdpcie_pme_active(bus->osh, state); #endif /* !BCMPCIE_OOB_HOST_WAKE */ rc = dhdpcie_suspend_dev(dev); if (!rc) { dhdpcie_suspend_host_dev(bus); } } else { dhdpcie_resume_host_dev(bus); rc = dhdpcie_resume_dev(dev); #ifndef BCMPCIE_OOB_HOST_WAKE dhdpcie_pme_active(bus->osh, state); #endif /* !BCMPCIE_OOB_HOST_WAKE */ if (bus->is_linkdown) { bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL; dhd_os_send_hang_message(bus->dhd); } } return rc; } #if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(2, 6, 0)) static int dhdpcie_device_scan(struct device *dev, void *data) { struct pci_dev *pcidev; int *cnt = data; pcidev = container_of(dev, struct pci_dev, dev); if (pcidev->vendor != 0x14e4) return 0; DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device)); *cnt += 1; if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name)) DHD_ERROR(("Broadcom PCI Device 0x%04x has allocated with driver %s\n", pcidev->device, pcidev->driver->name)); return 0; } #endif /* LINUX_VERSION >= 2.6.0 */ int dhdpcie_bus_register(void) { int error = 0; #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) if (!(error = pci_module_init(&dhdpcie_driver))) return 0; DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error)); #else if (!(error = pci_register_driver(&dhdpcie_driver))) { bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan); if (!error) { DHD_ERROR(("No Broadcom PCI device enumerated!\n")); } else if (!dhdpcie_init_succeeded) { DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__)); } else { return 0; } pci_unregister_driver(&dhdpcie_driver); error = BCME_ERROR; } #endif /* LINUX_VERSION < 2.6.0 */ return error; } void dhdpcie_bus_unregister(void) { pci_unregister_driver(&dhdpcie_driver); } int __devinit dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) { DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__)); return -ENODEV; } printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X" "(good PCI location)\n", pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device); if (dhdpcie_init (pdev)) { DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__)); return -ENODEV; } #ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND /* disable async suspend */ device_disable_async_suspend(&pdev->dev); #endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */ DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__)); return 0; } int 
dhdpcie_detach(dhdpcie_info_t *pch) { if (pch) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) if (!dhd_download_fw_on_driverload) { pci_load_and_free_saved_state(pch->dev, &pch->default_state); } #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ MFREE(pch->osh, pch, sizeof(dhdpcie_info_t)); } return 0; } void __devexit dhdpcie_pci_remove(struct pci_dev *pdev) { osl_t *osh = NULL; dhdpcie_info_t *pch = NULL; dhd_bus_t *bus = NULL; DHD_TRACE(("%s Enter\n", __FUNCTION__)); pch = pci_get_drvdata(pdev); bus = pch->bus; osh = pch->osh; #ifdef SUPPORT_LINKDOWN_RECOVERY if (bus) { #ifdef CONFIG_ARCH_MSM msm_pcie_deregister_event(&bus->pcie_event); #endif /* CONFIG_ARCH_MSM */ #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY #ifdef CONFIG_SOC_EXYNOS8890 exynos_pcie_deregister_event(&bus->pcie_event); #endif /* CONFIG_SOC_EXYNOS8890 */ #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ } #endif /* SUPPORT_LINKDOWN_RECOVERY */ dhdpcie_bus_release(bus); pci_disable_device(pdev); #ifdef BCMPCIE_OOB_HOST_WAKE /* pcie os info detach */ MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t)); #endif /* BCMPCIE_OOB_HOST_WAKE */ /* pcie info detach */ dhdpcie_detach(pch); /* osl detach */ osl_detach(osh); dhdpcie_init_succeeded = FALSE; DHD_TRACE(("%s Exit\n", __FUNCTION__)); return; } /* Free Linux irq */ int dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info) { dhd_bus_t *bus = dhdpcie_info->bus; struct pci_dev *pdev = dhdpcie_info->bus->dev; if (!bus->irq_registered) { snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname), "dhdpcie:%s", pci_name(pdev)); if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED, dhdpcie_info->pciname, bus) < 0) { DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__)); return -1; } else { bus->irq_registered = TRUE; } } else { DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__)); } DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname)); return 0; /* SUCCESS */ } #ifdef CONFIG_PHYS_ADDR_T_64BIT #define PRINTF_RESOURCE 
"0x%016llx" #else #define PRINTF_RESOURCE "0x%08x" #endif /* Name: osl_pci_get_resource Parametrs: 1: struct pci_dev *pdev -- pci device structure 2: pci_res -- structure containing pci configuration space values Return value: int - Status (TRUE or FALSE) Description: Access PCI configuration space, retrieve PCI allocated resources , updates in resource structure. */ int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info) { phys_addr_t bar0_addr, bar1_addr; ulong bar1_size; struct pci_dev *pdev = NULL; pdev = dhdpcie_info->dev; do { if (pci_enable_device(pdev)) { printf("%s: Cannot enable PCI device\n", __FUNCTION__); break; } pci_set_master(pdev); bar0_addr = pci_resource_start(pdev, 0); /* Bar-0 mapped address */ bar1_addr = pci_resource_start(pdev, 2); /* Bar-1 mapped address */ /* read Bar-1 mapped memory range */ bar1_size = pci_resource_len(pdev, 2); if ((bar1_size == 0) || (bar1_addr == 0)) { printf("%s: BAR1 Not enabled for this device size(%ld)," " addr(0x"PRINTF_RESOURCE")\n", __FUNCTION__, bar1_size, bar1_addr); goto err; } dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE); dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE); dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE; if (!dhdpcie_info->regs || !dhdpcie_info->tcm) { DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__)); break; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) if (!dhd_download_fw_on_driverload) { /* Backup PCIe configuration so as to use Wi-Fi on/off process * in case of built in driver */ pci_save_state(pdev); dhdpcie_info->default_state = pci_store_saved_state(pdev); if (dhdpcie_info->default_state == NULL) { DHD_ERROR(("%s pci_store_saved_state returns NULL\n", __FUNCTION__)); REG_UNMAP(dhdpcie_info->regs); REG_UNMAP(dhdpcie_info->tcm); pci_disable_device(pdev); break; } } #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", __FUNCTION__, 
dhdpcie_info->regs, bar0_addr)); DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", __FUNCTION__, dhdpcie_info->tcm, bar1_addr)); return 0; /* SUCCESS */ } while (0); err: return -1; /* FAILURE */ } int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info) { DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); do { /* define it here only!! */ if (dhdpcie_get_resource (dhdpcie_info)) { DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__)); break; } DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__)); return 0; /* SUCCESS */ } while (0); DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__)); return -1; /* FAILURE */ } #ifdef SUPPORT_LINKDOWN_RECOVERY #if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \ defined(CONFIG_SOC_EXYNOS8890)) void dhdpcie_linkdown_cb(struct_pcie_notify *noti) { struct pci_dev *pdev = (struct pci_dev *)noti->user; dhdpcie_info_t *pch = NULL; if (pdev) { pch = pci_get_drvdata(pdev); if (pch) { dhd_bus_t *bus = pch->bus; if (bus) { dhd_pub_t *dhd = bus->dhd; if (dhd) { DHD_ERROR(("%s: Event HANG send up " "due to PCIe linkdown\n", __FUNCTION__)); #ifdef CONFIG_ARCH_MSM bus->no_cfg_restore = 1; #endif /* CONFIG_ARCH_MSM */ bus->is_linkdown = 1; DHD_OS_WAKE_LOCK(dhd); dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN; dhd_os_send_hang_message(dhd); } } } } } #endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && CONFIG_SOC_EXYNOS8890) */ #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ int dhdpcie_init(struct pci_dev *pdev) { osl_t *osh = NULL; dhd_bus_t *bus = NULL; dhdpcie_info_t *dhdpcie_info = NULL; wifi_adapter_info_t *adapter = NULL; #ifdef BCMPCIE_OOB_HOST_WAKE dhdpcie_os_info_t *dhdpcie_osinfo = NULL; #endif /* BCMPCIE_OOB_HOST_WAKE */ do { /* osl attach */ if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) { DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__)); break; } /* initialize static buffer */ adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number, PCI_SLOT(pdev->devfn)); if (adapter != 
NULL) DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name)); else DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__)); osl_static_mem_init(osh, adapter); /* Set ACP coherence flag */ if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT()) osl_flag_set(osh, OSL_ACP_COHERENCE); /* allocate linux spcific pcie structure here */ if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) { DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); break; } bzero(dhdpcie_info, sizeof(dhdpcie_info_t)); dhdpcie_info->osh = osh; dhdpcie_info->dev = pdev; #ifdef BCMPCIE_OOB_HOST_WAKE /* allocate OS speicific structure */ dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t)); if (dhdpcie_osinfo == NULL) { DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n", __FUNCTION__)); break; } bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t)); dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo; /* Initialize host wake IRQ */ spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock); /* Get customer specific host wake IRQ parametres: IRQ number as IRQ type */ dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter, &dhdpcie_osinfo->oob_irq_flags); if (dhdpcie_osinfo->oob_irq_num < 0) { DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__)); } #endif /* BCMPCIE_OOB_HOST_WAKE */ /* Find the PCI resources, verify the */ /* vendor and device ID, map BAR regions and irq, update in structures */ if (dhdpcie_scan_resource(dhdpcie_info)) { DHD_ERROR(("%s: dhd_Scan_PCI_Res failed\n", __FUNCTION__)); break; } /* Bus initialization */ bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm, pdev); if (!bus) { DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__)); break; } dhdpcie_info->bus = bus; bus->is_linkdown = 0; #ifdef DONGLE_ENABLE_ISOLATION bus->dhd->dongle_isolation = TRUE; #endif /* DONGLE_ENABLE_ISOLATION */ #ifdef SUPPORT_LINKDOWN_RECOVERY #ifdef CONFIG_ARCH_MSM bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN; 
bus->pcie_event.user = pdev; bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK; bus->pcie_event.callback = dhdpcie_linkdown_cb; bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY; msm_pcie_register_event(&bus->pcie_event); bus->no_cfg_restore = 0; #endif /* CONFIG_ARCH_MSM */ #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY #ifdef CONFIG_SOC_EXYNOS8890 bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN; bus->pcie_event.user = pdev; bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK; bus->pcie_event.callback = dhdpcie_linkdown_cb; exynos_pcie_register_event(&bus->pcie_event); #endif /* CONFIG_SOC_EXYNOS8890 */ #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ #endif /* SUPPORT_LINKDOWN_RECOVERY */ if (bus->intr) { /* Register interrupt callback, but mask it (not operational yet). */ DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__)); dhdpcie_bus_intr_disable(bus); if (dhdpcie_request_irq(dhdpcie_info)) { DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__)); break; } } else { bus->pollrate = 1; DHD_INFO(("%s: PCIe interrupt function is NOT registered " "due to polling mode\n", __FUNCTION__)); } #if defined(BCM_REQUEST_FW) if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) { DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__)); } bus->nv_path = NULL; bus->fw_path = NULL; #endif /* BCM_REQUEST_FW */ /* set private data for pci_dev */ pci_set_drvdata(pdev, dhdpcie_info); if (dhd_download_fw_on_driverload) { if (dhd_bus_start(bus->dhd)) { DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__)); if (!allow_delay_fwdl) break; } } else { /* Set ramdom MAC address during boot time */ get_random_bytes(&bus->dhd->mac.octet[3], 3); /* Adding BRCM OUI */ bus->dhd->mac.octet[0] = 0; bus->dhd->mac.octet[1] = 0x90; bus->dhd->mac.octet[2] = 0x4C; } /* Attach to the OS network interface */ DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__)); if (dhd_register_if(bus->dhd, 0, TRUE)) { DHD_ERROR(("%s(): ERROR.. 
dhd_register_if() failed\n", __FUNCTION__)); break; } dhdpcie_init_succeeded = TRUE; DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__)); return 0; /* return SUCCESS */ } while (0); /* reverse the initialization in order in case of error */ if (bus) dhdpcie_bus_release(bus); #ifdef BCMPCIE_OOB_HOST_WAKE if (dhdpcie_osinfo) { MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t)); } #endif /* BCMPCIE_OOB_HOST_WAKE */ if (dhdpcie_info) dhdpcie_detach(dhdpcie_info); pci_disable_device(pdev); if (osh) osl_detach(osh); dhdpcie_init_succeeded = FALSE; DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__)); return -1; /* return FAILURE */ } /* Free Linux irq */ void dhdpcie_free_irq(dhd_bus_t *bus) { struct pci_dev *pdev = NULL; DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__)); if (!bus) { return; } if (bus->irq_registered) { pdev = bus->dev; free_irq(pdev->irq, bus); bus->irq_registered = FALSE; } else { DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__)); } DHD_TRACE(("%s: Exit\n", __FUNCTION__)); return; } /* Name: dhdpcie_isr Parametrs: 1: IN int irq -- interrupt vector 2: IN void *arg -- handle to private data structure Return value: Status (TRUE or FALSE) Description: Interrupt Service routine checks for the status register, disable interrupt and queue DPC if mail box interrupts are raised. 
*/ irqreturn_t dhdpcie_isr(int irq, void *arg) { dhd_bus_t *bus = (dhd_bus_t*)arg; if (dhdpcie_bus_isr(bus)) return TRUE; else return FALSE; } int dhdpcie_start_host_pcieclock(dhd_bus_t *bus) { int ret = 0; #ifdef CONFIG_ARCH_MSM #ifdef SUPPORT_LINKDOWN_RECOVERY int options = 0; #endif /* SUPPORT_LINKDOWN_RECOVERY */ #endif /* CONFIG_ARCH_MSM */ DHD_TRACE(("%s Enter:\n", __FUNCTION__)); if (bus == NULL) { return BCME_ERROR; } if (bus->dev == NULL) { return BCME_ERROR; } #ifdef CONFIG_ARCH_MSM #ifdef SUPPORT_LINKDOWN_RECOVERY if (bus->no_cfg_restore) { options = MSM_PCIE_CONFIG_NO_CFG_RESTORE; } ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, bus->dev, NULL, options); if (bus->no_cfg_restore && !ret) { msm_pcie_recover_config(bus->dev); bus->no_cfg_restore = 0; } #else ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, bus->dev, NULL, 0); #endif /* SUPPORT_LINKDOWN_RECOVERY */ if (ret) { DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__)); goto done; } done: #endif /* CONFIG_ARCH_MSM */ DHD_TRACE(("%s Exit:\n", __FUNCTION__)); return ret; } int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus) { int ret = 0; #ifdef CONFIG_ARCH_MSM #ifdef SUPPORT_LINKDOWN_RECOVERY int options = 0; #endif /* SUPPORT_LINKDOWN_RECOVERY */ #endif /* CONFIG_ARCH_MSM */ DHD_TRACE(("%s Enter:\n", __FUNCTION__)); if (bus == NULL) { return BCME_ERROR; } if (bus->dev == NULL) { return BCME_ERROR; } #ifdef CONFIG_ARCH_MSM #ifdef SUPPORT_LINKDOWN_RECOVERY if (bus->no_cfg_restore) { options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN; } ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, bus->dev, NULL, options); #else ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, bus->dev, NULL, 0); #endif /* SUPPORT_LINKDOWN_RECOVERY */ if (ret) { DHD_ERROR(("Failed to stop PCIe link\n")); goto done; } done: #endif /* CONFIG_ARCH_MSM */ DHD_TRACE(("%s Exit:\n", __FUNCTION__)); return ret; } int 
dhdpcie_disable_device(dhd_bus_t *bus) { DHD_TRACE(("%s Enter:\n", __FUNCTION__)); if (bus == NULL) { return BCME_ERROR; } if (bus->dev == NULL) { return BCME_ERROR; } pci_disable_device(bus->dev); return 0; } int dhdpcie_enable_device(dhd_bus_t *bus) { int ret = BCME_ERROR; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) dhdpcie_info_t *pch; #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ DHD_TRACE(("%s Enter:\n", __FUNCTION__)); if (bus == NULL) { return BCME_ERROR; } if (bus->dev == NULL) { return BCME_ERROR; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) pch = pci_get_drvdata(bus->dev); if (pch == NULL) { return BCME_ERROR; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !defined(CONFIG_SOC_EXYNOS8890) /* Updated with pci_load_and_free_saved_state to compatible * with kernel 3.14 or higher */ pci_load_and_free_saved_state(bus->dev, &pch->default_state); pch->default_state = pci_store_saved_state(bus->dev); #else pci_load_saved_state(bus->dev, pch->default_state); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !CONFIG_SOC_EXYNOS8890 */ pci_restore_state(bus->dev); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */ ret = pci_enable_device(bus->dev); if (ret) { pci_disable_device(bus->dev); } else { pci_set_master(bus->dev); } return ret; } int dhdpcie_alloc_resource(dhd_bus_t *bus) { dhdpcie_info_t *dhdpcie_info; phys_addr_t bar0_addr, bar1_addr; ulong bar1_size; do { if (bus == NULL) { DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); break; } if (bus->dev == NULL) { DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); break; } dhdpcie_info = pci_get_drvdata(bus->dev); if (dhdpcie_info == NULL) { DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); break; } bar0_addr = pci_resource_start(bus->dev, 0); /* Bar-0 mapped address */ bar1_addr = pci_resource_start(bus->dev, 2); /* Bar-1 mapped address */ /* read Bar-1 mapped memory range */ bar1_size = pci_resource_len(bus->dev, 2); if ((bar1_size == 0) || 
(bar1_addr == 0)) { printf("%s: BAR1 Not enabled for this device size(%ld)," " addr(0x"PRINTF_RESOURCE")\n", __FUNCTION__, bar1_size, bar1_addr); break; } dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE); if (!dhdpcie_info->regs) { DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); break; } bus->regs = dhdpcie_info->regs; dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE); dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE; if (!dhdpcie_info->tcm) { DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); REG_UNMAP(dhdpcie_info->regs); bus->regs = NULL; break; } bus->tcm = dhdpcie_info->tcm; DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", __FUNCTION__, dhdpcie_info->regs, bar0_addr)); DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", __FUNCTION__, dhdpcie_info->tcm, bar1_addr)); return 0; } while (0); return BCME_ERROR; } void dhdpcie_free_resource(dhd_bus_t *bus) { dhdpcie_info_t *dhdpcie_info; if (bus == NULL) { DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); return; } if (bus->dev == NULL) { DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); return; } dhdpcie_info = pci_get_drvdata(bus->dev); if (dhdpcie_info == NULL) { DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); return; } if (bus->regs) { REG_UNMAP(dhdpcie_info->regs); bus->regs = NULL; } if (bus->tcm) { REG_UNMAP(dhdpcie_info->tcm); bus->tcm = NULL; } } int dhdpcie_bus_request_irq(struct dhd_bus *bus) { dhdpcie_info_t *dhdpcie_info; int ret = 0; if (bus == NULL) { DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); return BCME_ERROR; } if (bus->dev == NULL) { DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); return BCME_ERROR; } dhdpcie_info = pci_get_drvdata(bus->dev); if (dhdpcie_info == NULL) { DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); return BCME_ERROR; } if (bus->intr) { /* Register interrupt callback, but mask it (not operational yet). 
*/ DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__)); dhdpcie_bus_intr_disable(bus); ret = dhdpcie_request_irq(dhdpcie_info); if (ret) { DHD_ERROR(("%s: request_irq() failed, ret=%d\n", __FUNCTION__, ret)); return ret; } } return ret; } #ifdef BCMPCIE_OOB_HOST_WAKE void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable) { unsigned long flags; dhdpcie_info_t *pch; dhdpcie_os_info_t *dhdpcie_osinfo; if (bus == NULL) { DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); return; } if (bus->dev == NULL) { DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); return; } pch = pci_get_drvdata(bus->dev); if (pch == NULL) { DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); return; } dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags); if ((dhdpcie_osinfo->oob_irq_enabled != enable) && (dhdpcie_osinfo->oob_irq_num > 0)) { if (enable) { enable_irq(dhdpcie_osinfo->oob_irq_num); } else { disable_irq_nosync(dhdpcie_osinfo->oob_irq_num); } dhdpcie_osinfo->oob_irq_enabled = enable; } spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags); } static irqreturn_t wlan_oob_irq(int irq, void *data) { dhd_bus_t *bus; DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__)); bus = (dhd_bus_t *)data; dhdpcie_oob_intr_set(bus, FALSE); #ifdef DHD_PCIE_RUNTIMEPM dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq); #endif /* DHD_PCIE_RUNTIMPM */ if (bus->dhd->up && bus->suspended) { DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT); } return IRQ_HANDLED; } int dhdpcie_oob_intr_register(dhd_bus_t *bus) { int err = 0; dhdpcie_info_t *pch; dhdpcie_os_info_t *dhdpcie_osinfo; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); if (bus == NULL) { DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); return -EINVAL; } if (bus->dev == NULL) { DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); return -EINVAL; } pch = pci_get_drvdata(bus->dev); if (pch == NULL) { DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); return -EINVAL; } 
dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; if (dhdpcie_osinfo->oob_irq_registered) { DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__)); return -EBUSY; } if (dhdpcie_osinfo->oob_irq_num > 0) { DHD_INFO_HW4(("%s OOB irq=%d flags=%X \n", __FUNCTION__, (int)dhdpcie_osinfo->oob_irq_num, (int)dhdpcie_osinfo->oob_irq_flags)); err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq, dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake", bus); if (err) { DHD_ERROR(("%s: request_irq failed with %d\n", __FUNCTION__, err)); return err; } err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num); if (!err) { dhdpcie_osinfo->oob_irq_wake_enabled = TRUE; } dhdpcie_osinfo->oob_irq_enabled = TRUE; } dhdpcie_osinfo->oob_irq_registered = TRUE; return err; } void dhdpcie_oob_intr_unregister(dhd_bus_t *bus) { int err = 0; dhdpcie_info_t *pch; dhdpcie_os_info_t *dhdpcie_osinfo; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); if (bus == NULL) { DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); return; } if (bus->dev == NULL) { DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); return; } pch = pci_get_drvdata(bus->dev); if (pch == NULL) { DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); return; } dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; if (!dhdpcie_osinfo->oob_irq_registered) { DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__)); return; } if (dhdpcie_osinfo->oob_irq_num > 0) { if (dhdpcie_osinfo->oob_irq_wake_enabled) { err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num); if (!err) { dhdpcie_osinfo->oob_irq_wake_enabled = FALSE; } } if (dhdpcie_osinfo->oob_irq_enabled) { disable_irq(dhdpcie_osinfo->oob_irq_num); dhdpcie_osinfo->oob_irq_enabled = FALSE; } free_irq(dhdpcie_osinfo->oob_irq_num, bus); } dhdpcie_osinfo->oob_irq_registered = FALSE; } #endif /* BCMPCIE_OOB_HOST_WAKE */ #ifdef DHD_PCIE_RUNTIMEPM bool dhd_runtimepm_state(dhd_pub_t *dhd) { dhd_bus_t *bus; unsigned long flags; bus = dhd->bus; DHD_GENERAL_LOCK(dhd, flags); if (bus->suspended == TRUE) { 
DHD_GENERAL_UNLOCK(dhd, flags); DHD_INFO(("Bus is already suspended system PM: %d\n", bus->suspended)); return FALSE; } bus->idlecount++; DHD_TRACE(("%s : Enter \n", __FUNCTION__)); if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) { bus->idlecount = 0; if (dhd->dhd_bus_busy_state == 0 && dhd->busstate != DHD_BUS_DOWN && dhd->busstate != DHD_BUS_DOWN_IN_PROGRESS) { bus->bus_wake = 0; dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; bus->runtime_resume_done = FALSE; /* stop all interface network queue. */ dhd_bus_stop_queue(bus); DHD_GENERAL_UNLOCK(dhd, flags); DHD_ERROR(("%s: DHD Idle state!! - idletime :%d, wdtick :%d \n", __FUNCTION__, bus->idletime, dhd_runtimepm_ms)); /* RPM suspend is failed, return FALSE then re-trying */ if (dhdpcie_set_suspend_resume(bus->dev, TRUE)) { DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__)); DHD_GENERAL_LOCK(dhd, flags); dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; bus->runtime_resume_done = TRUE; /* It can make stuck NET TX Queue without below */ dhd_bus_start_queue(bus); DHD_GENERAL_UNLOCK(dhd, flags); smp_wmb(); wake_up_interruptible(&bus->rpm_queue); return FALSE; } DHD_GENERAL_LOCK(dhd, flags); dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE; /* For making sure NET TX Queue active */ dhd_bus_start_queue(bus); DHD_GENERAL_UNLOCK(dhd, flags); wait_event_interruptible(bus->rpm_queue, bus->bus_wake); DHD_GENERAL_LOCK(dhd, flags); dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE; dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS; DHD_GENERAL_UNLOCK(dhd, flags); dhdpcie_set_suspend_resume(bus->dev, FALSE); DHD_GENERAL_LOCK(dhd, flags); dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS; /* Inform the wake up context that Resume is over */ bus->runtime_resume_done = TRUE; /* For making sure NET TX Queue active */ dhd_bus_start_queue(bus); DHD_GENERAL_UNLOCK(dhd, flags); 
smp_wmb(); wake_up_interruptible(&bus->rpm_queue); DHD_ERROR(("%s : runtime resume ended\n", __FUNCTION__)); return TRUE; } else { DHD_GENERAL_UNLOCK(dhd, flags); /* Since one of the contexts are busy (TX, IOVAR or RX) * we should not suspend */ DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n", __FUNCTION__, dhd->dhd_bus_busy_state)); return FALSE; } } DHD_GENERAL_UNLOCK(dhd, flags); return FALSE; } /* dhd_runtimepm_state */ /* * dhd_runtime_bus_wake * TRUE - related with runtime pm context * FALSE - It isn't invloved in runtime pm context */ bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr) { unsigned long flags; bus->idlecount = 0; DHD_TRACE(("%s : enter\n", __FUNCTION__)); if (bus->dhd->up == FALSE) { DHD_INFO(("%s : dhd is not up\n", __FUNCTION__)); return FALSE; } DHD_GENERAL_LOCK(bus->dhd, flags); if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL) { /* Wake up RPM state thread if it is suspend in progress or suspended */ if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS || bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) { bus->bus_wake = 1; DHD_GENERAL_UNLOCK(bus->dhd, flags); DHD_ERROR(("Runtime Resume is called in %pf\n", func_addr)); smp_wmb(); wake_up_interruptible(&bus->rpm_queue); /* No need to wake up the RPM state thread */ } else if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) { DHD_GENERAL_UNLOCK(bus->dhd, flags); } /* If wait is TRUE, function with wait = TRUE will be wait in here */ if (wait) { wait_event_interruptible(bus->rpm_queue, bus->runtime_resume_done); } else { DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__)); } /* If it is called from RPM context, it returns TRUE */ return TRUE; } DHD_GENERAL_UNLOCK(bus->dhd, flags); return FALSE; } bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void* func_addr) { dhd_bus_t *bus = dhdp->bus; return dhd_runtime_bus_wake(bus, wait, func_addr); } bool 
dhdpcie_is_resume_done(dhd_pub_t *dhdp) { dhd_bus_t *bus = dhdp->bus; return bus->runtime_resume_done; } #endif /* DHD_PCIE_RUNTIMEPM */
gpl-2.0
OtherCrashOverride/linux
block/elevator.c
17
25136
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"

/* Registry of all loaded elevator types; guarded by elv_list_lock. */
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 *
 * Requests are hashed by the sector immediately following them
 * (rq_hash_key below) into 1 << elv_hash_shift = 64 buckets, so a bio
 * that starts where a request ends can find its back-merge candidate
 * without scanning the queue.
 */
static const int elv_hash_shift = 6;
/* Drop the low 3 bits of the sector before hashing (groups nearby sectors). */
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
/* Key is the first sector *past* the request: start position + length. */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 *
 * Returns non-zero to allow the merge.  If the active elevator does not
 * implement elevator_allow_merge_fn, the merge is allowed by default.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
*/ int elv_rq_merge_ok(struct request *rq, struct bio *bio) { if (!rq_mergeable(rq)) return 0; /* * Don't merge file system requests and discard requests */ if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD)) return 0; /* * Don't merge discard requests and secure discard requests */ if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE)) return 0; /* * different data direction or already started, don't merge */ if (bio_data_dir(bio) != rq_data_dir(rq)) return 0; /* * must be same device and not a special request */ if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special) return 0; /* * only merge integrity protected bio into ditto rq */ if (bio_integrity(bio) != blk_integrity_rq(rq)) return 0; if (!elv_iosched_allow_merge(rq, bio)) return 0; return 1; } EXPORT_SYMBOL(elv_rq_merge_ok); int elv_try_merge(struct request *__rq, struct bio *bio) { int ret = ELEVATOR_NO_MERGE; /* * we can merge and sequence is ok, check if it's possible */ if (elv_rq_merge_ok(__rq, bio)) { if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector) ret = ELEVATOR_BACK_MERGE; else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector) ret = ELEVATOR_FRONT_MERGE; } return ret; } static struct elevator_type *elevator_find(const char *name) { struct elevator_type *e; list_for_each_entry(e, &elv_list, list) { if (!strcmp(e->elevator_name, name)) return e; } return NULL; } static void elevator_put(struct elevator_type *e) { module_put(e->elevator_owner); } static struct elevator_type *elevator_get(const char *name) { struct elevator_type *e; spin_lock(&elv_list_lock); e = elevator_find(name); if (!e) { spin_unlock(&elv_list_lock); request_module("%s-iosched", name); spin_lock(&elv_list_lock); e = elevator_find(name); } if (e && !try_module_get(e->elevator_owner)) e = NULL; spin_unlock(&elv_list_lock); return e; } static void *elevator_init_queue(struct request_queue *q, struct elevator_queue *eq) { return eq->ops->elevator_init_fn(q); } static void 
elevator_attach(struct request_queue *q, struct elevator_queue *eq, void *data) { q->elevator = eq; eq->elevator_data = data; } static char chosen_elevator[16]; static int __init elevator_setup(char *str) { /* * Be backwards-compatible with previous kernels, so users * won't get the wrong elevator. */ strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1); return 1; } __setup("elevator=", elevator_setup); static struct kobj_type elv_ktype; static struct elevator_queue *elevator_alloc(struct request_queue *q, struct elevator_type *e) { struct elevator_queue *eq; int i; eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node); if (unlikely(!eq)) goto err; eq->ops = &e->ops; eq->elevator_type = e; kobject_init(&eq->kobj, &elv_ktype); mutex_init(&eq->sysfs_lock); eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, GFP_KERNEL, q->node); if (!eq->hash) goto err; for (i = 0; i < ELV_HASH_ENTRIES; i++) INIT_HLIST_HEAD(&eq->hash[i]); return eq; err: kfree(eq); elevator_put(e); return NULL; } static void elevator_release(struct kobject *kobj) { struct elevator_queue *e; e = container_of(kobj, struct elevator_queue, kobj); elevator_put(e->elevator_type); kfree(e->hash); kfree(e); } int elevator_init(struct request_queue *q, char *name) { struct elevator_type *e = NULL; struct elevator_queue *eq; void *data; if (unlikely(q->elevator)) return 0; INIT_LIST_HEAD(&q->queue_head); q->last_merge = NULL; q->end_sector = 0; q->boundary_rq = NULL; if (name) { e = elevator_get(name); if (!e) return -EINVAL; } if (!e && *chosen_elevator) { e = elevator_get(chosen_elevator); if (!e) printk(KERN_ERR "I/O scheduler %s not found\n", chosen_elevator); } if (!e) { e = elevator_get(CONFIG_DEFAULT_IOSCHED); if (!e) { printk(KERN_ERR "Default I/O scheduler not found. 
" \ "Using noop.\n"); e = elevator_get("noop"); } } eq = elevator_alloc(q, e); if (!eq) return -ENOMEM; data = elevator_init_queue(q, eq); if (!data) { kobject_put(&eq->kobj); return -ENOMEM; } elevator_attach(q, eq, data); return 0; } EXPORT_SYMBOL(elevator_init); void elevator_exit(struct elevator_queue *e) { mutex_lock(&e->sysfs_lock); if (e->ops->elevator_exit_fn) e->ops->elevator_exit_fn(e); e->ops = NULL; mutex_unlock(&e->sysfs_lock); kobject_put(&e->kobj); } EXPORT_SYMBOL(elevator_exit); static inline void __elv_rqhash_del(struct request *rq) { hlist_del_init(&rq->hash); } static void elv_rqhash_del(struct request_queue *q, struct request *rq) { if (ELV_ON_HASH(rq)) __elv_rqhash_del(rq); } static void elv_rqhash_add(struct request_queue *q, struct request *rq) { struct elevator_queue *e = q->elevator; BUG_ON(ELV_ON_HASH(rq)); hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]); } static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) { __elv_rqhash_del(rq); elv_rqhash_add(q, rq); } static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) { struct elevator_queue *e = q->elevator; struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; struct hlist_node *entry, *next; struct request *rq; hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) { BUG_ON(!ELV_ON_HASH(rq)); if (unlikely(!rq_mergeable(rq))) { __elv_rqhash_del(rq); continue; } if (rq_hash_key(rq) == offset) return rq; } return NULL; } /* * RB-tree support functions for inserting/lookup/removal of requests * in a sorted RB tree. 
 */

/*
 * Insert rq into the rb tree, ordered by request start sector
 * (blk_rq_pos).  Requests with equal start sectors are placed to the
 * right, so insertion order is preserved among equals.  Callers hold
 * the queue lock.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

/*
 * Remove rq from the rb tree.  BUGs if the node is not currently
 * linked; the node is cleared afterwards so RB_EMPTY_NODE() holds.
 */
void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

/*
 * Look up the request whose start sector is exactly 'sector', or NULL
 * if no such request is in the tree.
 */
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sort instead into the dispatch queue. To be used by
 * specific elevators.
 *
 * Scans the dispatch list backwards for the insertion point, stopping
 * early when it hits a request it must not pass: a different
 * discard/data direction, or one marked SOFTBARRIER/STARTED.  The
 * boundary (q->end_sector) handling keeps requests sorted in elevator
 * order around the current head position.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.
Queue lock must be held on * entry. rq is added to the back of the dispatch queue. To be used by * specific elevators. */ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq) { if (q->last_merge == rq) q->last_merge = NULL; elv_rqhash_del(q, rq); q->nr_sorted--; q->end_sector = rq_end_sector(rq); q->boundary_rq = rq; list_add_tail(&rq->queuelist, &q->queue_head); } EXPORT_SYMBOL(elv_dispatch_add_tail); int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) { struct elevator_queue *e = q->elevator; struct request *__rq; int ret; /* * Levels of merges: * nomerges: No merges at all attempted * noxmerges: Only simple one-hit cache try * merges: All merge tries attempted */ if (blk_queue_nomerges(q)) return ELEVATOR_NO_MERGE; /* * First try one-hit cache. */ if (q->last_merge) { ret = elv_try_merge(q->last_merge, bio); if (ret != ELEVATOR_NO_MERGE) { *req = q->last_merge; return ret; } } if (blk_queue_noxmerges(q)) return ELEVATOR_NO_MERGE; /* * See if our hash lookup can find a potential backmerge. */ __rq = elv_rqhash_find(q, bio->bi_sector); if (__rq && elv_rq_merge_ok(__rq, bio)) { *req = __rq; return ELEVATOR_BACK_MERGE; } if (e->ops->elevator_merge_fn) return e->ops->elevator_merge_fn(q, req, bio); return ELEVATOR_NO_MERGE; } /* * Attempt to do an insertion back merge. Only check for the case where * we can append 'rq' to an existing request, so we can throw 'rq' away * afterwards. * * Returns true if we merged, false otherwise */ static bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq) { struct request *__rq; bool ret; if (blk_queue_nomerges(q)) return false; /* * First try one-hit cache. */ if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) return true; if (blk_queue_noxmerges(q)) return false; ret = false; /* * See if our hash lookup can find a potential backmerge. 
*/ while (1) { __rq = elv_rqhash_find(q, blk_rq_pos(rq)); if (!__rq || !blk_attempt_req_merge(q, __rq, rq)) break; /* The merged request could be merged with others, try again */ ret = true; rq = __rq; } return ret; } void elv_merged_request(struct request_queue *q, struct request *rq, int type) { struct elevator_queue *e = q->elevator; if (e->ops->elevator_merged_fn) e->ops->elevator_merged_fn(q, rq, type); if (type == ELEVATOR_BACK_MERGE) elv_rqhash_reposition(q, rq); q->last_merge = rq; } void elv_merge_requests(struct request_queue *q, struct request *rq, struct request *next) { struct elevator_queue *e = q->elevator; const int next_sorted = next->cmd_flags & REQ_SORTED; if (next_sorted && e->ops->elevator_merge_req_fn) e->ops->elevator_merge_req_fn(q, rq, next); elv_rqhash_reposition(q, rq); if (next_sorted) { elv_rqhash_del(q, next); q->nr_sorted--; } q->last_merge = rq; } void elv_bio_merged(struct request_queue *q, struct request *rq, struct bio *bio) { struct elevator_queue *e = q->elevator; if (e->ops->elevator_bio_merged_fn) e->ops->elevator_bio_merged_fn(q, rq, bio); } void elv_requeue_request(struct request_queue *q, struct request *rq) { /* * it already went through dequeue, we need to decrement the * in_flight count again */ if (blk_account_rq(rq)) { q->in_flight[rq_is_sync(rq)]--; if (rq->cmd_flags & REQ_SORTED) elv_deactivate_rq(q, rq); } rq->cmd_flags &= ~REQ_STARTED; __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE); } void elv_drain_elevator(struct request_queue *q) { static int printed; while (q->elevator->ops->elevator_dispatch_fn(q, 1)) ; if (q->nr_sorted == 0) return; if (printed++ < 10) { printk(KERN_ERR "%s: forced dispatching is broken " "(nr_sorted=%u), please report this\n", q->elevator->elevator_type->elevator_name, q->nr_sorted); } } /* * Call with queue lock held, interrupts disabled */ void elv_quiesce_start(struct request_queue *q) { if (!q->elevator) return; queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); /* * make sure we don't have any 
requests in flight */ elv_drain_elevator(q); while (q->rq.elvpriv) { __blk_run_queue(q); spin_unlock_irq(q->queue_lock); msleep(10); spin_lock_irq(q->queue_lock); elv_drain_elevator(q); } } void elv_quiesce_end(struct request_queue *q) { queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); } void __elv_add_request(struct request_queue *q, struct request *rq, int where) { trace_block_rq_insert(q, rq); rq->q = q; if (rq->cmd_flags & REQ_SOFTBARRIER) { /* barriers are scheduling boundary, update end_sector */ if (rq->cmd_type == REQ_TYPE_FS || (rq->cmd_flags & REQ_DISCARD)) { q->end_sector = rq_end_sector(rq); q->boundary_rq = rq; } } else if (!(rq->cmd_flags & REQ_ELVPRIV) && (where == ELEVATOR_INSERT_SORT || where == ELEVATOR_INSERT_SORT_MERGE)) where = ELEVATOR_INSERT_BACK; switch (where) { case ELEVATOR_INSERT_REQUEUE: case ELEVATOR_INSERT_FRONT: rq->cmd_flags |= REQ_SOFTBARRIER; list_add(&rq->queuelist, &q->queue_head); break; case ELEVATOR_INSERT_BACK: rq->cmd_flags |= REQ_SOFTBARRIER; elv_drain_elevator(q); list_add_tail(&rq->queuelist, &q->queue_head); /* * We kick the queue here for the following reasons. * - The elevator might have returned NULL previously * to delay requests and returned them now. As the * queue wasn't empty before this request, ll_rw_blk * won't run the queue on return, resulting in hang. * - Usually, back inserted requests won't be merged * with anything. There's no point in delaying queue * processing. */ __blk_run_queue(q); break; case ELEVATOR_INSERT_SORT_MERGE: /* * If we succeed in merging this request with one in the * queue already, we are done - rq has now been freed, * so no need to do anything further. 
*/ if (elv_attempt_insert_merge(q, rq)) break; case ELEVATOR_INSERT_SORT: BUG_ON(rq->cmd_type != REQ_TYPE_FS && !(rq->cmd_flags & REQ_DISCARD)); rq->cmd_flags |= REQ_SORTED; q->nr_sorted++; if (rq_mergeable(rq)) { elv_rqhash_add(q, rq); if (!q->last_merge) q->last_merge = rq; } /* * Some ioscheds (cfq) run q->request_fn directly, so * rq cannot be accessed after calling * elevator_add_req_fn. */ q->elevator->ops->elevator_add_req_fn(q, rq); break; case ELEVATOR_INSERT_FLUSH: rq->cmd_flags |= REQ_SOFTBARRIER; blk_insert_flush(rq); break; default: printk(KERN_ERR "%s: bad insertion point %d\n", __func__, where); BUG(); } } EXPORT_SYMBOL(__elv_add_request); void elv_add_request(struct request_queue *q, struct request *rq, int where) { unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); __elv_add_request(q, rq, where); spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL(elv_add_request); struct request *elv_latter_request(struct request_queue *q, struct request *rq) { struct elevator_queue *e = q->elevator; if (e->ops->elevator_latter_req_fn) return e->ops->elevator_latter_req_fn(q, rq); return NULL; } struct request *elv_former_request(struct request_queue *q, struct request *rq) { struct elevator_queue *e = q->elevator; if (e->ops->elevator_former_req_fn) return e->ops->elevator_former_req_fn(q, rq); return NULL; } int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) { struct elevator_queue *e = q->elevator; if (e->ops->elevator_set_req_fn) return e->ops->elevator_set_req_fn(q, rq, gfp_mask); rq->elevator_private[0] = NULL; return 0; } void elv_put_request(struct request_queue *q, struct request *rq) { struct elevator_queue *e = q->elevator; if (e->ops->elevator_put_req_fn) e->ops->elevator_put_req_fn(rq); } int elv_may_queue(struct request_queue *q, int rw) { struct elevator_queue *e = q->elevator; if (e->ops->elevator_may_queue_fn) return e->ops->elevator_may_queue_fn(q, rw); return ELV_MQUEUE_MAY; } void 
elv_abort_queue(struct request_queue *q) { struct request *rq; blk_abort_flushes(q); while (!list_empty(&q->queue_head)) { rq = list_entry_rq(q->queue_head.next); rq->cmd_flags |= REQ_QUIET; trace_block_rq_abort(q, rq); /* * Mark this request as started so we don't trigger * any debug logic in the end I/O path. */ blk_start_request(rq); __blk_end_request_all(rq, -EIO); } } EXPORT_SYMBOL(elv_abort_queue); void elv_completed_request(struct request_queue *q, struct request *rq) { struct elevator_queue *e = q->elevator; /* * request is released from the driver, io must be done */ if (blk_account_rq(rq)) { q->in_flight[rq_is_sync(rq)]--; if ((rq->cmd_flags & REQ_SORTED) && e->ops->elevator_completed_req_fn) e->ops->elevator_completed_req_fn(q, rq); } } #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr) static ssize_t elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct elv_fs_entry *entry = to_elv(attr); struct elevator_queue *e; ssize_t error; if (!entry->show) return -EIO; e = container_of(kobj, struct elevator_queue, kobj); mutex_lock(&e->sysfs_lock); error = e->ops ? entry->show(e, page) : -ENOENT; mutex_unlock(&e->sysfs_lock); return error; } static ssize_t elv_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { struct elv_fs_entry *entry = to_elv(attr); struct elevator_queue *e; ssize_t error; if (!entry->store) return -EIO; e = container_of(kobj, struct elevator_queue, kobj); mutex_lock(&e->sysfs_lock); error = e->ops ? 
entry->store(e, page, length) : -ENOENT; mutex_unlock(&e->sysfs_lock); return error; } static const struct sysfs_ops elv_sysfs_ops = { .show = elv_attr_show, .store = elv_attr_store, }; static struct kobj_type elv_ktype = { .sysfs_ops = &elv_sysfs_ops, .release = elevator_release, }; int elv_register_queue(struct request_queue *q) { struct elevator_queue *e = q->elevator; int error; error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); if (!error) { struct elv_fs_entry *attr = e->elevator_type->elevator_attrs; if (attr) { while (attr->attr.name) { if (sysfs_create_file(&e->kobj, &attr->attr)) break; attr++; } } kobject_uevent(&e->kobj, KOBJ_ADD); e->registered = 1; } return error; } EXPORT_SYMBOL(elv_register_queue); static void __elv_unregister_queue(struct elevator_queue *e) { kobject_uevent(&e->kobj, KOBJ_REMOVE); kobject_del(&e->kobj); e->registered = 0; } void elv_unregister_queue(struct request_queue *q) { if (q) __elv_unregister_queue(q->elevator); } EXPORT_SYMBOL(elv_unregister_queue); void elv_register(struct elevator_type *e) { char *def = ""; spin_lock(&elv_list_lock); BUG_ON(elevator_find(e->elevator_name)); list_add_tail(&e->list, &elv_list); spin_unlock(&elv_list_lock); if (!strcmp(e->elevator_name, chosen_elevator) || (!*chosen_elevator && !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) def = " (default)"; printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def); } EXPORT_SYMBOL_GPL(elv_register); void elv_unregister(struct elevator_type *e) { struct task_struct *g, *p; /* * Iterate every thread in the process to remove the io contexts. */ if (e->ops.trim) { read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); if (p->io_context) e->ops.trim(p->io_context); task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); } spin_lock(&elv_list_lock); list_del_init(&e->list); spin_unlock(&elv_list_lock); } EXPORT_SYMBOL_GPL(elv_unregister); /* * switch to new_e io scheduler. 
 * be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the
 * elv_register_queue() error if re-registering sysfs fails (in which
 * case the old elevator is restored).
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;
	int err;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return -ENOMEM;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return -ENOMEM;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	elv_quiesce_start(q);

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	/* Swap the sysfs directory only if the old one was registered. */
	if (old_elevator->registered) {
		__elv_unregister_queue(old_elevator);

		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	elv_quiesce_end(q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 0;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
/*
 * Switch q to the io scheduler named 'name'.  The name is copied and
 * whitespace-stripped first (sysfs writes arrive with a trailing
 * newline).  Switching to the already-active scheduler is a no-op.
 *
 * Returns 0 on success, -ENXIO if the queue has no elevator, -EINVAL
 * if no scheduler of that name can be found/loaded, or the
 * elevator_switch() error.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		/* already using this scheduler; drop the extra reference */
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);

/*
 * sysfs store handler for /sys/block/<dev>/queue/scheduler.  Returns
 * 'count' (the whole write consumed) on success or when the queue has
 * no elevator, otherwise the elevator_change() error.
 */
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

/*
 * sysfs show handler: list every registered scheduler, with the active
 * one in "[brackets]", e.g. "noop [cfq] deadline".  Prints "none" for
 * queues without an elevator or non-stackable queues.
 */
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->elevator_type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	/* note: 'len+name' is just 'name+len' (pointer arithmetic commutes) */
	len += sprintf(len+name, "\n");
	return len;
}

/*
 * Return the request preceding rq in the elevator's sorted rb tree,
 * or NULL if rq is the first.  'q' is unused but part of the
 * elevator_former_req_fn signature.
 */
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

/*
 * Return the request following rq in the elevator's sorted rb tree,
 * or NULL if rq is the last.
 */
struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);
gpl-2.0
jomeister15/SGH-I727-kernel
drivers/usb/function/msm_hsusb.c
17
94688
/* drivers/usb/function/msm_hsusb.c * * Driver for HighSpeed USB Client Controller in MSM7K * * Copyright (C) 2007 Google, Inc. * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/workqueue.h> #include <linux/clk.h> #include <linux/spinlock.h> #include <linux/switch.h> #include <linux/usb/ch9.h> #include <linux/io.h> #include <asm/mach-types.h> #include <mach/vreg.h> #include <mach/board.h> #include <mach/msm_hsusb.h> #include <mach/rpc_hsusb.h> #include <mach/rpc_pmapp.h> #include <mach/gpio.h> #include <mach/msm_hsusb_hw.h> #include <mach/msm_otg.h> #include <linux/wakelock.h> #include <linux/pm_qos_params.h> #include <mach/clk.h> #define MSM_USB_BASE ((unsigned) ui->addr) #include "usb_function.h" #define EPT_FLAG_IN 0x0001 #define USB_DIR_MASK USB_DIR_IN #define SETUP_BUF_SIZE 4096 /* IDs for string descriptors */ #define STRING_LANGUAGE_ID 0 #define STRING_SERIAL 1 #define STRING_PRODUCT 2 #define STRING_MANUFACTURER 3 #define LANGUAGE_ID 0x0409 /* en-US */ #define SOC_ROC_2_0 0x10002 /* ROC 2.0 */ #define TRUE 1 #define FALSE 0 #define USB_LINK_RESET_TIMEOUT (msecs_to_jiffies(10)) #define USB_CHG_DET_DELAY msecs_to_jiffies(1000) #define is_phy_45nm() 
(PHY_MODEL(ui->phy_info) == USB_PHY_MODEL_45NM) #define is_phy_external() (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) static int pid = 0x9018; struct usb_fi_ept { struct usb_endpoint *ept; struct usb_endpoint_descriptor desc; }; struct usb_function_info { struct list_head list; unsigned enabled; struct usb_function *func; }; struct msm_request { struct usb_request req; struct usb_info *ui; struct msm_request *next; unsigned busy:1; unsigned live:1; unsigned alloced:1; unsigned dead:1; dma_addr_t dma; struct ept_queue_item *item; dma_addr_t item_dma; }; static unsigned char str_lang_desc[] = {4, USB_DT_STRING, (unsigned char)LANGUAGE_ID, (unsigned char)(LANGUAGE_ID >> 8)}; #define to_msm_request(r) container_of(r, struct msm_request, req) static int usb_hw_reset(struct usb_info *ui); static void usb_vbus_online(struct usb_info *); static void usb_vbus_offline(struct usb_info *ui); static void usb_lpm_exit(struct usb_info *ui); static void usb_lpm_wakeup_phy(struct work_struct *); static void usb_exit(void); static int usb_is_online(struct usb_info *ui); static void usb_do_work(struct work_struct *w); static int usb_lpm_enter(struct usb_info *ui); int (*usb_lpm_config_gpio)(int); static void usb_enable_pullup(struct usb_info *ui); static void usb_disable_pullup(struct usb_info *ui); static struct workqueue_struct *usb_work; static void usb_chg_stop(struct work_struct *w); #define USB_STATE_IDLE 0 #define USB_STATE_ONLINE 1 #define USB_STATE_OFFLINE 2 #define USB_FLAG_START 0x0001 #define USB_FLAG_VBUS_ONLINE 0x0002 #define USB_FLAG_VBUS_OFFLINE 0x0004 #define USB_FLAG_RESET 0x0008 #define USB_FLAG_SUSPEND 0x0010 #define USB_FLAG_CONFIGURE 0x0020 #define USB_FLAG_RESUME 0x0040 #define USB_FLAG_REG_OTG 0x0080 #define USB_MSC_ONLY_FUNC_MAP 0x10 #define DRIVER_NAME "msm_hsusb_peripheral" struct lpm_info { struct work_struct wakeup_phy; }; enum charger_type { USB_CHG_TYPE__SDP, USB_CHG_TYPE__CARKIT, USB_CHG_TYPE__WALLCHARGER, USB_CHG_TYPE__INVALID }; struct usb_info { /* 
lock for register/queue/device state changes */ spinlock_t lock; /* single request used for handling setup transactions */ struct usb_request *setup_req; struct usb_request *ep0out_req; struct platform_device *pdev; struct msm_hsusb_platform_data *pdata; int irq; int gpio_irq[2]; void *addr; unsigned state; unsigned flags; unsigned online; unsigned running; unsigned bound; struct dma_pool *pool; /* dma page to back the queue heads and items */ unsigned char *buf; dma_addr_t dma; struct ept_queue_head *head; /* used for allocation */ unsigned next_item; unsigned next_ifc_num; unsigned stopped:1; unsigned remote_wakeup:1; unsigned configured:1; unsigned selfpowered:1; unsigned iad:1; unsigned char maxpower; enum usb_device_speed speed; unsigned phy_info; /* endpoints are ordered based on their status bits, ** so they are OUT0, OUT1, ... OUT15, IN0, IN1, ... IN15 */ struct usb_endpoint ept[32]; struct delayed_work work; struct delayed_work chg_legacy_det; unsigned phy_status; unsigned phy_fail_count; struct usb_composition *composition; struct usb_function_info **func; unsigned num_funcs; struct usb_function_map *functions_map; #define MAX_INTERFACE_NUM 15 struct usb_function *func2ifc_map[MAX_INTERFACE_NUM]; #define ep0out ept[0] #define ep0in ept[16] struct clk *clk; struct clk *pclk; struct clk *cclk; unsigned int clk_enabled; struct vreg *vreg; unsigned int vreg_enabled; unsigned in_lpm; struct lpm_info li; enum charger_type chg_type; struct work_struct chg_stop; #define MAX_STRDESC_NUM 100 char **strdesc; int strdesc_index; u16 test_mode; struct wake_lock wlock; struct msm_otg_transceiver *xceiv; int active; enum usb_device_state usb_state; int vbus_sn_notif; struct switch_dev sdev; }; static struct usb_info *the_usb_info; static unsigned short usb_validate_product_id(unsigned short pid); static unsigned short usb_get_product_id(unsigned long enabled_functions); static void usb_switch_composition(unsigned short pid); static unsigned short 
usb_set_composition(unsigned short pid); static void usb_configure_device_descriptor(struct usb_info *ui); static void usb_uninit(struct usb_info *ui); static unsigned ulpi_read(struct usb_info *ui, unsigned reg); static int ulpi_write(struct usb_info *ui, unsigned val, unsigned reg); struct usb_device_descriptor desc_device = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = 0x0200, .bDeviceClass = 0, .bDeviceSubClass = 0, .bDeviceProtocol = 0, .bMaxPacketSize0 = 64, /* the following fields are filled in by usb_probe */ .idVendor = 0, .idProduct = 0, .bcdDevice = 0, .iManufacturer = 0, .iProduct = 0, .iSerialNumber = 0, .bNumConfigurations = 1, }; static void flush_endpoint(struct usb_endpoint *ept); static void msm_hsusb_suspend_locks_acquire(struct usb_info *, int); static ssize_t print_switch_name(struct switch_dev *sdev, char *buf) { return sprintf(buf, "%s\n", DRIVER_NAME); } static ssize_t print_switch_state(struct switch_dev *sdev, char *buf) { struct usb_info *ui = the_usb_info; return sprintf(buf, "%s\n", (ui->online ? "online" : "offline")); } #define USB_WALLCHARGER_CHG_CURRENT 1800 static int usb_get_max_power(struct usb_info *ui) { unsigned long flags; enum charger_type temp; int suspended; int configured; spin_lock_irqsave(&ui->lock, flags); temp = ui->chg_type; suspended = ui->usb_state == USB_STATE_SUSPENDED ? 
1 : 0; configured = ui->configured; spin_unlock_irqrestore(&ui->lock, flags); if (temp == USB_CHG_TYPE__INVALID) return -ENODEV; if (temp == USB_CHG_TYPE__WALLCHARGER) return USB_WALLCHARGER_CHG_CURRENT; if (suspended || !configured) return 0; return ui->maxpower * 2; } static void usb_chg_legacy_detect(struct work_struct *w) { struct usb_info *ui = the_usb_info; unsigned long flags; enum charger_type temp = USB_CHG_TYPE__INVALID; int maxpower; int ret = 0; spin_lock_irqsave(&ui->lock, flags); if (ui->usb_state == USB_STATE_NOTATTACHED) { ret = -ENODEV; goto chg_legacy_det_out; } if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS) { ui->chg_type = temp = USB_CHG_TYPE__WALLCHARGER; goto chg_legacy_det_out; } ui->chg_type = temp = USB_CHG_TYPE__SDP; chg_legacy_det_out: spin_unlock_irqrestore(&ui->lock, flags); if (ret) return; msm_chg_usb_charger_connected(temp); maxpower = usb_get_max_power(ui); if (maxpower > 0) msm_chg_usb_i_is_available(maxpower); /* USB driver prevents idle and suspend power collapse(pc) * while usb cable is connected. But when dedicated charger is * connected, driver can vote for idle and suspend pc. In order * to allow pc, driver has to initiate low power mode which it * cannot do as phy cannot be accessed when dedicated charger * is connected due to phy lockup issues. Just to allow idle & * suspend pc when dedicated charger is connected, release the * wakelock, set driver latency to default and act as if we are * in low power mode so that, driver will re-acquire wakelocks * for any sub-sequent usb interrupts. 
*/ if (temp == USB_CHG_TYPE__WALLCHARGER) { pr_info("\n%s: WALL-CHARGER\n", __func__); spin_lock_irqsave(&ui->lock, flags); if (ui->usb_state == USB_STATE_NOTATTACHED) { spin_unlock_irqrestore(&ui->lock, flags); return; } ui->in_lpm = 1; spin_unlock_irqrestore(&ui->lock, flags); msm_hsusb_suspend_locks_acquire(ui, 0); } else pr_info("\n%s: Standard Downstream Port\n", __func__); } int usb_msm_get_next_strdesc_id(char *str) { struct usb_info *ui = the_usb_info; unsigned id; unsigned long flags; int len; len = strlen(str); if (!len) { printk(KERN_ERR "usb next_strdesc_id(); null string\n"); return -EPERM; } /* for null character */ len = len + 1; spin_lock_irqsave(&ui->lock, flags); id = ui->strdesc_index; if (id >= MAX_STRDESC_NUM) { id = -EPERM; printk(KERN_ERR "reached max strdesc number\n"); goto get_strd_id_exit; } ui->strdesc[id] = kmalloc(len, GFP_ATOMIC); if (ui->strdesc[id]) { memcpy(ui->strdesc[id], str, len); ui->strdesc_index++; } else { id = -EPERM; printk(KERN_ERR "usb next_strdesc_id(); Out of memory:(%s)\n", str); } get_strd_id_exit: spin_unlock_irqrestore(&ui->lock, flags); return id; } EXPORT_SYMBOL(usb_msm_get_next_strdesc_id); inline int usb_msm_is_iad(void) { return the_usb_info->iad; } EXPORT_SYMBOL(usb_msm_is_iad); inline void usb_msm_enable_iad(void) { the_usb_info->iad = 1; } EXPORT_SYMBOL(usb_msm_enable_iad); int usb_msm_get_speed() { return the_usb_info->speed; } EXPORT_SYMBOL(usb_msm_get_speed); int usb_msm_get_next_ifc_number(struct usb_function *driver) { struct usb_info *ui = the_usb_info; int ifc_num = -1; unsigned long flags; int i; spin_lock_irqsave(&ui->lock, flags); for (i = 0; i < ui->pdata->num_functions; i++) { if (strcmp(ui->functions_map[i].name, driver->name)) continue; if (!(ui->composition->functions & (1 << i))) continue; ifc_num = ui->next_ifc_num++; ui->func2ifc_map[ifc_num] = driver; break; } spin_unlock_irqrestore(&ui->lock, flags); return ifc_num; } EXPORT_SYMBOL(usb_msm_get_next_ifc_number); static inline int 
usb_msm_get_selfpowered(void) { struct usb_info *ui = the_usb_info; return ui->selfpowered; } static inline int usb_msm_get_remotewakeup(void) { struct usb_info *ui = the_usb_info; return ui->remote_wakeup; } static void usb_clk_enable(struct usb_info *ui) { if (!ui->clk_enabled) { clk_enable(ui->pclk); if (ui->cclk) clk_enable(ui->cclk); ui->clk_enabled = 1; } } static void usb_clk_disable(struct usb_info *ui) { if (ui->clk_enabled) { clk_disable(ui->pclk); if (ui->cclk) clk_disable(ui->cclk); ui->clk_enabled = 0; } } static void usb_vreg_enable(struct usb_info *ui) { if (ui->vreg && !IS_ERR(ui->vreg) && !ui->vreg_enabled) { vreg_enable(ui->vreg); ui->vreg_enabled = 1; } } static void usb_vreg_disable(struct usb_info *ui) { if (ui->vreg && !IS_ERR(ui->vreg) && ui->vreg_enabled) { vreg_disable(ui->vreg); ui->vreg_enabled = 0; } } static unsigned ulpi_read(struct usb_info *ui, unsigned reg) { unsigned timeout = 100000; /* initiate read operation */ writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg), USB_ULPI_VIEWPORT); /* wait for completion */ while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) ; if (timeout == 0) { printk(KERN_ERR "ulpi_read: timeout %08x\n", readl(USB_ULPI_VIEWPORT)); return 0xffffffff; } return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT)); } static int ulpi_write(struct usb_info *ui, unsigned val, unsigned reg) { unsigned timeout = 10000; /* initiate write operation */ writel(ULPI_RUN | ULPI_WRITE | ULPI_ADDR(reg) | ULPI_DATA(val), USB_ULPI_VIEWPORT); /* wait for completion */ while((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) ; if (timeout == 0) { printk(KERN_ERR "ulpi_write: timeout\n"); return -1; } return 0; } static void msm_hsusb_suspend_locks_acquire(struct usb_info *ui, int acquire) { if (acquire) { wake_lock(&ui->wlock); pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, DRIVER_NAME, ui->pdata->swfi_latency); /* targets like 7x30 have introduced core clock * to remove the dependency on max axi frequency */ if (!ui->cclk) 
pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, DRIVER_NAME, MSM_AXI_MAX_FREQ); } else { wake_lock_timeout(&ui->wlock, HZ / 2); pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, DRIVER_NAME, PM_QOS_DEFAULT_VALUE); if (!ui->cclk) pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, DRIVER_NAME, PM_QOS_DEFAULT_VALUE); } } static void msm_hsusb_suspend_locks_init(struct usb_info *ui, int init) { if (init) { wake_lock_init(&ui->wlock, WAKE_LOCK_SUSPEND, "usb_bus_active"); pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, DRIVER_NAME, PM_QOS_DEFAULT_VALUE); pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ, DRIVER_NAME, PM_QOS_DEFAULT_VALUE); } else { wake_lock_destroy(&ui->wlock); pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, DRIVER_NAME); pm_qos_remove_requirement(PM_QOS_SYSTEM_BUS_FREQ, DRIVER_NAME); } } static void init_endpoints(struct usb_info *ui) { unsigned n; for (n = 0; n < 32; n++) { struct usb_endpoint *ept = ui->ept + n; ept->ui = ui; ept->bit = n; ept->num = n & 15; ept->alloced = 0; if (ept->bit > 15) { /* IN endpoint */ ept->head = ui->head + (ept->num << 1) + 1; ept->flags = EPT_FLAG_IN; } else { /* OUT endpoint */ ept->head = ui->head + (ept->num << 1); ept->flags = 0; } } } void usb_configure_endpoint(struct usb_endpoint *ep, struct usb_endpoint_descriptor *ep_desc) { unsigned cfg = 0; unsigned long flags; struct usb_info *ui = ep->ui; if (!ui) return; spin_lock_irqsave(&ui->lock, flags); if (ep_desc) { ep->max_pkt = ep_desc->wMaxPacketSize; ep->ep_descriptor = ep_desc; } if (!ep->max_pkt) { printk(KERN_ERR "cannot configure zero length max pkt\n"); goto cfg_ept_end; } cfg = CONFIG_MAX_PKT(ep->max_pkt) | CONFIG_ZLT; /* ep0 out needs interrupt-on-setup */ if (ep->bit == 0) cfg |= CONFIG_IOS; ep->head->config = cfg; ep->head->next = TERMINATE; pr_debug("ept #%d %s max:%d head:%p bit:%d\n", ep->num, (ep->flags & EPT_FLAG_IN) ? 
"in" : "out", ep->max_pkt, ep->head, ep->bit); cfg_ept_end: spin_unlock_irqrestore(&ui->lock, flags); } EXPORT_SYMBOL(usb_configure_endpoint); #define NUM_EPTS 15 /* number of in or out non-ctrl endpoints */ struct usb_endpoint *usb_alloc_endpoint(unsigned direction) { struct usb_info *ui = the_usb_info; struct usb_endpoint *ept = NULL; int i; unsigned long flags; spin_lock_irqsave(&ui->lock, flags); if (direction & USB_DIR_IN) ept = (&ui->ep0in); else ept = (&ui->ep0out); for (i = 0; i < NUM_EPTS; i++) { ept++; if (!ept->alloced) { ept->alloced = 1; ept->ui = ui; spin_unlock_irqrestore(&ui->lock, flags); return ept; } } spin_unlock_irqrestore(&ui->lock, flags); return NULL; } EXPORT_SYMBOL(usb_alloc_endpoint); int usb_free_endpoint(struct usb_endpoint *ept) { struct usb_info *ui = the_usb_info; unsigned long flags; if (!ept) return -EINVAL; spin_lock_irqsave(&ui->lock, flags); ept->alloced = 0; ept->ui = 0; spin_unlock_irqrestore(&ui->lock, flags); return 0; } EXPORT_SYMBOL(usb_free_endpoint); struct usb_request *usb_ept_alloc_req(struct usb_endpoint *ept, unsigned bufsize) { struct usb_info *ui = ept->ui; struct msm_request *req; if (!ui) return NULL; req = kzalloc(sizeof(*req), GFP_ATOMIC); if (!req) goto fail1; req->item = dma_pool_alloc(ui->pool, GFP_ATOMIC, &req->item_dma); if (!req->item) goto fail2; if (bufsize) { req->req.buf = kmalloc(bufsize, GFP_ATOMIC); if (!req->req.buf) goto fail3; req->alloced = 1; } return &req->req; fail3: dma_pool_free(ui->pool, req->item, req->item_dma); fail2: kfree(req); fail1: return NULL; } EXPORT_SYMBOL(usb_ept_alloc_req); static void do_free_req(struct usb_info *ui, struct msm_request *req) { if (req->alloced) kfree(req->req.buf); dma_pool_free(ui->pool, req->item, req->item_dma); kfree(req); } void usb_ept_free_req(struct usb_endpoint *ept, struct usb_request *_req) { struct msm_request *req, *temp_req, *prev_req; struct usb_info *ui; unsigned long flags; int dead = 0; if (!ept || !_req) return; ui = ept->ui; if (!ui) 
return; req = to_msm_request(_req); spin_lock_irqsave(&ui->lock, flags); /* defer freeing resources if request is still busy */ if (req->busy) dead = req->dead = 1; spin_unlock_irqrestore(&ui->lock, flags); /* if req->dead, then we will clean up when the request finishes */ if (!dead) { temp_req = ept->req; prev_req = temp_req; while (temp_req != NULL) { if (req == temp_req && ept->req != temp_req) prev_req->next = temp_req->next; prev_req = temp_req; temp_req = temp_req->next; } if (ept->req == req) ept->req = req->next; req->req.complete = NULL; do_free_req(ui, req); } else pr_err("%s: req is busy, can't free req\n", __func__); } EXPORT_SYMBOL(usb_ept_free_req); void usb_ept_enable(struct usb_endpoint *ept, int yes) { struct usb_info *ui; int in; unsigned n; unsigned char xfer; if (!ept || !ept->ui) return; ui = ept->ui; in = ept->flags & EPT_FLAG_IN; if (!ept->ep_descriptor) return; if (ui->in_lpm) { pr_err("%s: controller is in lpm, cannot proceed\n", __func__); return; } xfer = ept->ep_descriptor->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; n = readl(USB_ENDPTCTRL(ept->num)); if (in) { if (xfer == USB_ENDPOINT_XFER_BULK) n = (n & (~CTRL_TXT_MASK)) | CTRL_TXT_BULK; else if (xfer == USB_ENDPOINT_XFER_INT) n = (n & (~CTRL_TXT_MASK)) | CTRL_TXT_INT; if (yes) n |= CTRL_TXE | CTRL_TXR; else n &= (~CTRL_TXE); } else { if (xfer == USB_ENDPOINT_XFER_BULK) n = (n & (~CTRL_RXT_MASK)) | CTRL_RXT_BULK; else if (xfer == USB_ENDPOINT_XFER_INT) n = (n & (~CTRL_RXT_MASK)) | CTRL_RXT_INT; if (yes) n |= CTRL_RXE | CTRL_RXR; else n &= ~(CTRL_RXE); } /* complete all the updates to ept->head before enabling endpoint*/ dma_coherent_pre_ops(); writel(n, USB_ENDPTCTRL(ept->num)); } EXPORT_SYMBOL(usb_ept_enable); static void usb_ept_start(struct usb_endpoint *ept) { struct usb_info *ui = ept->ui; struct msm_request *req = ept->req; BUG_ON(req->live); /* link the hw queue head to the request's transaction item */ ept->head->next = req->item_dma; ept->head->info = 0; /* memory barrier to 
flush the data before priming endpoint*/ dma_coherent_pre_ops(); /* start the endpoint */ writel(1 << ept->bit, USB_ENDPTPRIME); /* mark this chain of requests as live */ while (req) { req->live = 1; if (req->item->next == TERMINATE) break; req = req->next; } } int usb_ept_queue_xfer(struct usb_endpoint *ept, struct usb_request *_req) { unsigned long flags; struct msm_request *req = to_msm_request(_req); struct msm_request *last; struct usb_info *ui = ept->ui; struct ept_queue_item *item = req->item; unsigned length = req->req.length; if (length > 0x4000) return -EMSGSIZE; if (ui->in_lpm) { req->req.status = usb_remote_wakeup(); if (req->req.status) { pr_debug("%s:RWakeup generation failed, EP = %x\n", __func__, ept->bit); return req->req.status; } } spin_lock_irqsave(&ui->lock, flags); if (req->busy) { req->req.status = -EBUSY; spin_unlock_irqrestore(&ui->lock, flags); printk(KERN_INFO "usb_ept_queue_xfer() tried to queue busy request\n"); return -EBUSY; } if (!ui->online && (ept->num != 0)) { req->req.status = -ENODEV; spin_unlock_irqrestore(&ui->lock, flags); printk(KERN_INFO "usb_ept_queue_xfer() tried to queue request" "while offline; ept->bit: %x\n", ept->bit); return -ENODEV; } req->busy = 1; req->live = 0; req->next = 0; req->req.status = -EBUSY; req->dma = dma_map_single(NULL, req->req.buf, length, (ept->flags & EPT_FLAG_IN) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); /* prepare the transaction descriptor item for the hardware */ item->next = TERMINATE; item->info = INFO_BYTES(length) | INFO_IOC | INFO_ACTIVE; item->page0 = req->dma; item->page1 = (req->dma + 0x1000) & 0xfffff000; item->page2 = (req->dma + 0x2000) & 0xfffff000; item->page3 = (req->dma + 0x3000) & 0xfffff000; /* Add the new request to the end of the queue */ last = ept->last; if (last) { /* Already requests in the queue. 
add us to the * end, but let the completion interrupt actually * start things going, to avoid hw issues */ last->next = req; /* only modify the hw transaction next pointer if * that request is not live */ if (!last->live) last->item->next = req->item_dma; } else { /* queue was empty -- kick the hardware */ ept->req = req; usb_ept_start(ept); } ept->last = req; spin_unlock_irqrestore(&ui->lock, flags); return 0; } EXPORT_SYMBOL(usb_ept_queue_xfer); int usb_ept_flush(struct usb_endpoint *ept) { printk("usb_ept_flush \n"); flush_endpoint(ept); return 0; } int usb_ept_get_max_packet(struct usb_endpoint *ept) { return ept->max_pkt; } EXPORT_SYMBOL(usb_ept_get_max_packet); int usb_remote_wakeup(void) { struct usb_info *ui = the_usb_info; unsigned long flags; spin_lock_irqsave(&ui->lock, flags); if (!ui->remote_wakeup) { spin_unlock_irqrestore(&ui->lock, flags); pr_err("%s: remote wakeup not supported\n", __func__); return -ENOTSUPP; } if (!ui->online) { spin_unlock_irqrestore(&ui->lock, flags); pr_err("%s: device is not configured\n", __func__); return -ENODEV; } if (ui->in_lpm) usb_lpm_exit(ui); spin_unlock_irqrestore(&ui->lock, flags); /* if usb_lpm_exit is unable to set PHCD, * it would initiate workthread to set the PHCD */ if (cancel_work_sync(&ui->li.wakeup_phy)) usb_lpm_wakeup_phy(NULL); spin_lock_irqsave(&ui->lock, flags); if (ui->in_lpm) { spin_unlock_irqrestore(&ui->lock, flags); pr_err("%s: cannot bring controller out of lpm\n", __func__); return -ENODEV; } if (!usb_is_online(ui)) { pr_debug("%s: enabling force resume\n", __func__); writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC); } else pr_debug("%s: controller seems to be out of suspend already\n", __func__); spin_unlock_irqrestore(&ui->lock, flags); return 0; } EXPORT_SYMBOL(usb_remote_wakeup); /* --- endpoint 0 handling --- */ static void set_configuration(struct usb_info *ui, int yes) { unsigned i; ui->online = !!yes; for (i = 0; i < ui->num_funcs; i++) { struct usb_function_info *fi = ui->func[i]; if 
(!fi || !(ui->composition->functions & (1 << i))) continue; if (fi->func->configure) fi->func->configure(yes, fi->func->context); } } static void ep0out_complete(struct usb_endpoint *ept, struct usb_request *req) { req->complete = 0; } static void ep0in_complete(struct usb_endpoint *ept, struct usb_request *req) { /* queue up the receive of the ACK response from the host */ if (req->status == 0) { struct usb_info *ui = ept->ui; req->length = 0; req->complete = ep0out_complete; usb_ept_queue_xfer(&ui->ep0out, req); } } static void ep0in_complete_sendzero( struct usb_endpoint *ept, struct usb_request *req) { if (req->status == 0) { struct usb_info *ui = ept->ui; req->length = 0; req->complete = ep0in_complete; usb_ept_queue_xfer(&ui->ep0in, req); } } static void ep0_status_complete( struct usb_endpoint *ept, struct usb_request *req) { struct usb_info *ui = ept->ui; unsigned int i; if (!ui->test_mode) return; switch (ui->test_mode) { case J_TEST: pr_info("usb electrical test mode: (J)\n"); i = readl(USB_PORTSC) & (~PORTSC_PTC); writel(i | PORTSC_PTC_J_STATE, USB_PORTSC); break; case K_TEST: pr_info("usb electrical test mode: (K)\n"); i = readl(USB_PORTSC) & (~PORTSC_PTC); writel(i | PORTSC_PTC_K_STATE, USB_PORTSC); break; case SE0_NAK_TEST: pr_info("usb electrical test mode: (SE0-NAK)\n"); i = readl(USB_PORTSC) & (~PORTSC_PTC); writel(i | PORTSC_PTC_SE0_NAK, USB_PORTSC); break; case TST_PKT_TEST: pr_info("usb electrical test mode: (TEST_PKT)\n"); i = readl(USB_PORTSC) & (~PORTSC_PTC); writel(i | PORTSC_PTC_TST_PKT, USB_PORTSC); break; default: pr_err("usb:%s: undefined test mode: (%x)\n", __func__, ui->test_mode); } } static void ep0_setup_ack(struct usb_info *ui) { struct usb_request *req = ui->setup_req; req->length = 0; req->complete = ep0_status_complete; usb_ept_queue_xfer(&ui->ep0in, req); } static void ep0_setup_stall(struct usb_info *ui) { writel((1<<16) | (1<<0), USB_ENDPTCTRL(0)); } static void ep0_setup_receive(struct usb_info *ui, unsigned len) { 
/* Tail of the ep0 OUT receive path (function header is above this chunk):
 * queue the request on ep0out for the data stage. */
	ui->ep0out_req->length = len;
	usb_ept_queue_xfer(&ui->ep0out, ui->ep0out_req);
}

/* Send the data (IN) stage of an ep0 control transfer, capped at the
 * host-requested length (wlen). */
static void ep0_setup_send(struct usb_info *ui, unsigned wlen)
{
	struct usb_request *req = ui->setup_req;
	struct usb_endpoint *ept = &ui->ep0in;

	/* never send more data than the host requested */
	if (req->length > wlen)
		req->length = wlen;

	/* if we are sending a short response that ends on
	 * a packet boundary, we'll need to send a zero length
	 * packet as well.
	 */
	if ((req->length != wlen) && ((req->length & 63) == 0)) {
		req->complete = ep0in_complete_sendzero;
	} else {
		req->complete = ep0in_complete;
	}

	usb_ept_queue_xfer(ept, req);
}

static int usb_find_descriptor(struct usb_info *ui,
			       struct usb_ctrlrequest *ctl,
			       struct usb_request *req);

/* Decode and service the SETUP packet latched on endpoint zero.
 * Standard requests are handled inline; class/vendor requests are
 * delegated to the function driver bound to the target interface. */
static void handle_setup(struct usb_info *ui)
{
	struct usb_ctrlrequest ctl;

	memcpy(&ctl, ui->ep0out.head->setup_data, sizeof(ctl));
	/* ack the setup-pending bit for ep0 */
	writel(EPT_RX(0), USB_ENDPTSETUPSTAT);

	/* any pending ep0 transactions must be canceled */
	flush_endpoint(&ui->ep0out);
	flush_endpoint(&ui->ep0in);

	/* let functions handle vendor and class requests */
	if ((ctl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
		struct usb_function *func;

		/* Send stall if received interface number is invalid */
		if (ctl.wIndex >= ui->next_ifc_num)
			goto stall;

		func = ui->func2ifc_map[ctl.wIndex];
		if (func && func->setup) {
			if (ctl.bRequestType & USB_DIR_IN) {
				struct usb_request *req = ui->setup_req;
				int ret = func->setup(&ctl, req->buf,
						SETUP_BUF_SIZE,
						func->context);
				if (ret >= 0) {
					req->length = ret;
					ep0_setup_send(ui, ctl.wLength);
					return;
				}
			} else {
				int ret = func->setup(&ctl, NULL, 0,
						func->context);
				if (ret == 0) {
					ep0_setup_ack(ui);
					return;
				} else if (ret > 0) {
					ep0_setup_receive(ui, ret);
					return;
				}
			}
		}
		goto stall;
		return; /* NOTE(review): unreachable after the goto above */
	}

	switch (ctl.bRequest) {
	case USB_REQ_GET_STATUS:
	{
		struct usb_request *req = ui->setup_req;

		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_IN))
			break;
		if (ctl.wLength != 2)
			break;
		req->length = 2;

		switch (ctl.bRequestType & USB_RECIP_MASK) {
		case USB_RECIP_ENDPOINT:
		{
			unsigned num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
			struct usb_endpoint *ept;

			if (num == 0)
				break;
			/* IN endpoints live in the upper half of ui->ept —
			 * presumably 16 OUT + 16 IN slots; confirm layout */
			if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
				num += 16;
			ept = ui->ept + num;
			memcpy(req->buf, &ept->ept_halted, 2);
			break;
		}
		case USB_RECIP_DEVICE:
		{
			unsigned short temp = 0;

			if (usb_msm_get_selfpowered())
				temp = 1 << USB_DEVICE_SELF_POWERED;
			if (usb_msm_get_remotewakeup())
				temp |= 1 << USB_DEVICE_REMOTE_WAKEUP;
			memcpy(req->buf, &temp, 2);
			break;
		}
		case USB_RECIP_INTERFACE:
			memset(req->buf, 0, 2);
			break;
		default:
			/* NOTE(review): misspelling is in the original
			 * runtime string; left untouched */
			printk(KERN_ERR "Unreconginized recipient\n");
			break;
		}

		ep0_setup_send(ui, 2);
		return;
	}
	case USB_REQ_GET_DESCRIPTOR:
	{
		struct usb_request *req;

		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_IN))
			break;

		req = ui->setup_req;
		if (!usb_find_descriptor(ui, &ctl, req)) {
			if (req->length > ctl.wLength)
				req->length = ctl.wLength;
			ep0_setup_send(ui, ctl.wLength);
			return;
		}
		break;
	}
	case USB_REQ_SET_FEATURE:
		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_OUT))
			break;
		if (ctl.wLength != 0)
			break;
		switch (ctl.bRequestType & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			if (ctl.wValue == USB_DEVICE_REMOTE_WAKEUP) {
				ui->remote_wakeup = 1;
				ep0_setup_ack(ui);
				return;
			} else if (ctl.wValue == USB_DEVICE_TEST_MODE) {
				/* test selector sits in the high byte of
				 * wIndex; low byte must be zero */
				if (ctl.wIndex & 0x0f)
					break;
				ui->test_mode = ctl.wIndex;
				ep0_setup_ack(ui);
				return;
			}
			break;
		case USB_RECIP_ENDPOINT:
		{
			unsigned num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;

			if ((num == 0) || (ctl.wValue != 0))
				break;
			if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
				num += 16;
			usb_ept_set_halt(ui->ept + num);
			ep0_setup_ack(ui);
			return;
		}
		default:
			pr_err("usb: %s: set_feature: unrecognized recipient\n",
			       __func__);
			break;
		}
		break;
	case USB_REQ_CLEAR_FEATURE:
	{
		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_OUT))
			break;
		if (ctl.wLength != 0)
			break;

		switch (ctl.bRequestType & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			if (ctl.wValue != USB_DEVICE_REMOTE_WAKEUP)
				break;
			ui->remote_wakeup = 0;
			ep0_setup_ack(ui);
			return;
		case USB_RECIP_ENDPOINT:
		{
			unsigned num;

			if (ctl.wValue != USB_ENDPOINT_HALT)
				break;
			num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
			if (num != 0) {
				if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
					num += 16;
				usb_ept_clear_halt(ui->ept + num);
			}
			ep0_setup_ack(ui);
			return;
		}
		default:
			pr_info("unsupported clear feature command\n");
			pr_info("Request-type:(%08x) wValue:(%08x) "
				"wIndex:(%08x) wLength:(%08x)\n",
				ctl.bRequestType, ctl.wValue, ctl.wIndex,
				ctl.wLength);
			break;
		}
		break;
	}
	case USB_REQ_SET_INTERFACE:
		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK)) !=
		    (USB_DIR_OUT | USB_RECIP_INTERFACE))
			break;
		/* NOTE(review): wIndex is not range-checked here, unlike the
		 * class/vendor path above — confirm callers cannot overrun
		 * func2ifc_map */
		if (ui->func2ifc_map[ctl.wIndex]->set_interface) {
			ui->func2ifc_map[ctl.wIndex]->set_interface(ctl.wIndex,
				ctl.wValue,
				ui->func2ifc_map[ctl.wIndex]->context);
			ep0_setup_ack(ui);
			return;
		}
		break;
	case USB_REQ_GET_INTERFACE:
	{
		struct usb_function *f;
		struct usb_request *req = ui->setup_req;
		int ifc_num = ctl.wIndex;
		int ret = 0;

		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK)) !=
		    (USB_DIR_IN | USB_RECIP_INTERFACE))
			break;

		f = ui->func2ifc_map[ifc_num];
		if (!f->get_interface)
			break;
		ret = f->get_interface(ifc_num,
				       ui->func2ifc_map[ifc_num]->context);
		if (ret < 0)
			break;
		req->length = ctl.wLength;
		memcpy(req->buf, &ret, req->length);
		ep0_setup_send(ui, ctl.wLength);
		return;
	}
	case USB_REQ_SET_CONFIGURATION:
		if ((ctl.bRequestType & USB_DIR_MASK) != USB_DIR_OUT)
			break;
		ui->configured = ctl.wValue;
		pr_info("hsusb set_configuration wValue = %d usbcmd = %x\n",
			ctl.wValue, readl(USB_USBCMD));
		set_configuration(ui, ctl.wValue);
		ep0_setup_ack(ui);
		ui->flags = USB_FLAG_CONFIGURE;
		if (ui->configured)
			ui->usb_state = USB_STATE_CONFIGURED;
		queue_delayed_work(usb_work, &ui->work, 0);
		return;
	case USB_REQ_GET_CONFIGURATION:
	{
		unsigned conf;
		struct usb_request *req = ui->setup_req;

		req->length = 1;
		conf = ui->configured;
		/* copies the low byte of 'conf' on little-endian targets —
		 * presumably always LE here; confirm */
		memcpy(req->buf, &conf, req->length);
		ep0_setup_send(ui, ctl.wLength);
		return;
	}
	case USB_REQ_SET_ADDRESS:
		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK)) !=
		    (USB_DIR_OUT |
USB_RECIP_DEVICE))
			break;
		ui->usb_state = USB_STATE_ADDRESS;
		/* bit 24 presumably defers the address change until after
		 * the status stage (controller "ADRA"-style bit) — confirm
		 * against the controller datasheet */
		writel((ctl.wValue << 25) | (1 << 24), USB_DEVICEADDR);
		ep0_setup_ack(ui);
		return;
	}

stall:
	ep0_setup_stall(ui);
	return;
}

/* Retire finished transfers on the endpoint selected by 'bit' and
 * restart the hardware on the next queued, not-yet-live request. */
static void handle_endpoint(struct usb_info *ui, unsigned bit)
{
	struct usb_endpoint *ept = ui->ept + bit;
	struct msm_request *req;
	unsigned long flags;
	unsigned info;

#if 0
	printk(KERN_INFO "handle_endpoint() %d %s req=%p(%08x)\n",
	       ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out",
	       ept->req, ept->req ? ept->req->item_dma : 0);
#endif

	/* NOTE(review): 'ept' is ui->ept + bit and cannot be NULL here;
	 * this check looks vestigial */
	if (!ept) {
		pr_err("%s: ept is null: ep bit = %d\n", __func__, bit);
		return;
	}

	/* expire all requests that are no longer active */
	spin_lock_irqsave(&ui->lock, flags);
	while ((req = ept->req)) {
		/* clean speculative fetches on req->item->info */
		dma_coherent_post_ops();
		info = req->item->info;

		/* if we've processed all live requests, time to
		 * restart the hardware on the next non-live request
		 */
		if (!req->live) {
			usb_ept_start(ept);
			break;
		}

		/* if the transaction is still in-flight, stop here */
		if (info & INFO_ACTIVE)
			break;

		/* advance ept queue to the next request */
		ept->req = req->next;
		if (ept->req == 0)
			ept->last = 0;

		dma_unmap_single(NULL, req->dma, req->req.length,
				 (ept->flags & EPT_FLAG_IN) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE);

		if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) {
			/* XXX pass on more specific error code */
			req->req.status = -EIO;
			req->req.actual = 0;
			printk(KERN_INFO
			       "hsusb: ept %d %s error. info=%08x\n",
			       ept->num,
			       (ept->flags & EPT_FLAG_IN) ?
			       "in" : "out",
			       info);
		} else {
			req->req.status = 0;
			/* the 15-bit field at bits 30:16 of the dTD token
			 * looks like the bytes-remaining count — confirm */
			req->req.actual =
				req->req.length - ((info >> 16) & 0x7FFF);
		}
		req->busy = 0;
		req->live = 0;
		if (req->dead)
			do_free_req(ui, req);

		if (req->req.complete) {
			/* drop the lock around the completion callback */
			spin_unlock_irqrestore(&ui->lock, flags);
			req->req.complete(ept, &req->req);
			spin_lock_irqsave(&ui->lock, flags);
		}
	}
	spin_unlock_irqrestore(&ui->lock, flags);
}

static void flush_endpoint_hw(struct usb_info *ui, unsigned bits)
{
	/* flush endpoint, canceling transactions
	** - this can take a "large amount of time" (per databook)
	** - the flush can fail in some cases, thus we check STAT
	**   and repeat if we're still operating
	**   (does the fact that this doesn't use the tripwire matter?!)
	*/
	if (ui->in_lpm) {
		pr_err("%s: controller is in lpm, cannot proceed\n",
		       __func__);
		return;
	}
	do {
		writel(bits, USB_ENDPTFLUSH);
		while (readl(USB_ENDPTFLUSH) & bits)
			udelay(100);
	} while (readl(USB_ENDPTSTAT) & bits);
}

/* Software-side flush: complete every queued request on 'ept' with
 * -ENODEV and reset the queue head. */
static void flush_endpoint_sw(struct usb_endpoint *ept)
{
	struct usb_info *ui = ept->ui;
	struct msm_request *req, *next;
	unsigned long flags;

	/* inactive endpoints have nothing to do here */
	if (!ui || !ept->alloced || !ept->max_pkt)
		return;

	/* put the queue head in a sane state */
	ept->head->info = 0;
	ept->head->next = TERMINATE;

	/* cancel any pending requests */
	spin_lock_irqsave(&ui->lock, flags);
	req = ept->req;
	ept->req = 0;
	ept->last = 0;
	while (req != 0) {
		next = req->next;
		req->busy = 0;
		req->live = 0;
		req->req.status = -ENODEV;
		req->req.actual = 0;
		if (req->req.complete) {
			spin_unlock_irqrestore(&ui->lock, flags);
			req->req.complete(ept, &req->req);
			spin_lock_irqsave(&ui->lock, flags);
		}
		if (req->dead)
			do_free_req(ui, req);
		/* NOTE(review): this reads 'req->next' after do_free_req()
		 * may have freed 'req' (use-after-free); the 'next' saved
		 * above is never used — 'req = next' looks intended.
		 * Cannot be fixed in this annotation-only pass; confirm
		 * and fix separately. */
		req = req->next;
	}
	spin_unlock_irqrestore(&ui->lock, flags);
}

/* Flush one endpoint in hardware, then fail its software queue. */
static void flush_endpoint(struct usb_endpoint *ept)
{
	if (!ept->ui)
		return;

	flush_endpoint_hw(ept->ui, (1 << ept->bit));
	flush_endpoint_sw(ept);
}

/* Flush every endpoint (hardware first, then software queues). */
static void flush_all_endpoints(struct usb_info *ui)
{
	unsigned n;

	flush_endpoint_hw(ui, 0xffffffff);
	for (n = 0; n < 32; n++)
		flush_endpoint_sw(ui->ept + n);
}

#define HW_DELAY_FOR_LPM msecs_to_jiffies(1000)
#define DELAY_FOR_USB_VBUS_STABILIZE msecs_to_jiffies(500)

/* Device-controller interrupt handler: port change (resume/speed),
 * bus reset, suspend, transfer completion and VBUS session events. */
static irqreturn_t usb_interrupt(int irq, void *data)
{
	struct usb_info *ui = data;
	unsigned n;
	unsigned speed;

	if (!ui->active)
		return IRQ_HANDLED;

	if (ui->in_lpm) {
		usb_lpm_exit(ui);
		return IRQ_HANDLED;
	}

	/* read and ack all pending status bits */
	n = readl(USB_USBSTS);
	writel(n, USB_USBSTS);

	/* somehow we got an IRQ while in the reset sequence: ignore it */
	if (ui->running == 0) {
		pr_err("%s: ui->running is zero\n", __func__);
		return IRQ_HANDLED;
	}

	if (n & STS_PCI) {
		if (!(readl(USB_PORTSC) & PORTSC_PORT_RESET)) {
			speed = (readl(USB_PORTSC) & PORTSC_PORT_SPEED_MASK);
			switch (speed) {
			case PORTSC_PORT_SPEED_HIGH:
				pr_info("hsusb resume: speed = HIGH\n");
				ui->speed = USB_SPEED_HIGH;
				break;
			case PORTSC_PORT_SPEED_FULL:
				pr_info("hsusb resume: speed = FULL\n");
				ui->speed = USB_SPEED_FULL;
				break;
			default:
				pr_err("hsusb resume: Unknown Speed\n");
				ui->speed = USB_SPEED_UNKNOWN;
				break;
			}
		}

		/* pci interrutpt would also be generated when resuming
		 * from bus suspend, following check would avoid kick
		 * starting usb main thread in case of pci interrupts
		 * during enumeration
		 */
		if (ui->configured && ui->chg_type == USB_CHG_TYPE__SDP) {
			ui->usb_state = USB_STATE_CONFIGURED;
			ui->flags = USB_FLAG_RESUME;
			queue_delayed_work(usb_work, &ui->work, 0);
		}
	}

	if (n & STS_URI) {
		pr_info("hsusb reset interrupt\n");
		ui->usb_state = USB_STATE_DEFAULT;
		ui->configured = 0;
		schedule_work(&ui->chg_stop);

		/* write-to-clear setup/complete status, flush everything */
		writel(readl(USB_ENDPTSETUPSTAT), USB_ENDPTSETUPSTAT);
		writel(readl(USB_ENDPTCOMPLETE), USB_ENDPTCOMPLETE);
		writel(0xffffffff, USB_ENDPTFLUSH);
		writel(0, USB_ENDPTCTRL(1));

		if (ui->online != 0) {
			/* marking us offline will cause ept queue attempts
			 * to fail */
			ui->online = 0;

			flush_all_endpoints(ui);

			/* XXX: we can't seem to detect going offline, so
			 * deconfigure on reset for the time being */
			set_configuration(ui, 0);
		}
	}

	if (n & STS_SLI) {
		pr_info("hsusb suspend interrupt\n");
		ui->usb_state = USB_STATE_SUSPENDED;

		/* stop usb charging */
		schedule_work(&ui->chg_stop);
	}

	if (n & STS_UI) {
		n = readl(USB_ENDPTSETUPSTAT);
		if (n & EPT_RX(0))
			handle_setup(ui);

		n = readl(USB_ENDPTCOMPLETE);
		writel(n, USB_ENDPTCOMPLETE);
		while (n) {
			unsigned bit = __ffs(n);
			handle_endpoint(ui, bit);
			n = n & (~(1 << bit));
		}
	}

	/* read and ack OTG status */
	n = readl(USB_OTGSC);
	writel(n, USB_OTGSC);

	if (n & OTGSC_BSVIS) {
		/*Verify B Session Valid Bit to verify vbus status*/
		if (B_SESSION_VALID & n) {
			pr_info("usb cable connected\n");
			ui->usb_state = USB_STATE_POWERED;
			ui->flags = USB_FLAG_VBUS_ONLINE;
			/* Wait for 100ms to stabilize VBUS before
			 * initializing USB and detecting charger type */
			queue_delayed_work(usb_work, &ui->work, 0);
		} else {
			int i;

			usb_disable_pullup(ui);

			printk(KERN_INFO "usb cable disconnected\n");
			ui->usb_state = USB_STATE_NOTATTACHED;
			ui->configured = 0;

			/* notify every enabled function driver */
			for (i = 0; i < ui->num_funcs; i++) {
				struct usb_function_info *fi = ui->func[i];

				if (!fi ||
				    !(ui->composition->functions & (1 << i)))
					continue;
				if (fi->func->disconnect)
					fi->func->disconnect
						(fi->func->context);
			}
			ui->flags = USB_FLAG_VBUS_OFFLINE;
			queue_delayed_work(usb_work, &ui->work, 0);
		}
	}

	return IRQ_HANDLED;
}

/* Soft (re)initialization of queue heads, ep0 endpoints, setup
 * requests and the driver's work items. */
static void usb_prepare(struct usb_info *ui)
{
	memset(ui->buf, 0, 4096);
	ui->head = (void *) (ui->buf + 0);

	/* only important for reset/reinit */
	memset(ui->ept, 0, sizeof(ui->ept));
	ui->next_item = 0;
	ui->speed = USB_SPEED_UNKNOWN;

	init_endpoints(ui);

	ui->ep0in.max_pkt = 64;
	ui->ep0in.ui = ui;
	ui->ep0in.alloced = 1;
	ui->ep0out.max_pkt = 64;
	ui->ep0out.ui = ui;
	ui->ep0out.alloced = 1;

	ui->setup_req = usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE);
	ui->ep0out_req = usb_ept_alloc_req(&ui->ep0out, ui->ep0out.max_pkt);

	INIT_WORK(&ui->chg_stop, usb_chg_stop);
	INIT_WORK(&ui->li.wakeup_phy, usb_lpm_wakeup_phy);
	INIT_DELAYED_WORK(&ui->work, usb_do_work);
	INIT_DELAYED_WORK(&ui->chg_legacy_det, usb_chg_legacy_detect);
}

/* Returns non-zero (-1) when the bus is active; 0 when suspended,
 * disconnected or stopped (i.e. safe to enter LPM). */
static int usb_is_online(struct usb_info *ui)
{
	/* continue lpm if bus is suspended or disconnected or stopped*/
	if
(((readl(USB_PORTSC) & PORTSC_SUSP) == PORTSC_SUSP) ||
	    ((readl(USB_PORTSC) & PORTSC_CCS) == 0) ||
	    ((readl(USB_USBCMD) & USBCMD_RS) == 0))
		return 0;

	pr_debug("usb is online\n");
	pr_debug("usbcmd:(%08x) usbsts:(%08x) portsc:(%08x)\n",
		 readl(USB_USBCMD),
		 readl(USB_USBSTS),
		 readl(USB_PORTSC));
	/* NOTE(review): returns -1 (truthy) for "online"; callers treat
	 * the result as a boolean */
	return -1;
}

/* Bring the PHY out of low-power mode by clearing PORTSC.PHCD; some
 * PHY circuits need the write repeated. Returns 0 on success, -1 if
 * the bit will not clear. */
static int usb_wakeup_phy(struct usb_info *ui)
{
	int i;

	writel(readl(USB_USBCMD) & ~ULPI_STP_CTRL, USB_USBCMD);

	/* some circuits automatically clear PHCD bit */
	for (i = 0; i < 5 && (readl(USB_PORTSC) & PORTSC_PHCD); i++) {
		writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
		msleep(1);
	}

	if ((readl(USB_PORTSC) & PORTSC_PHCD)) {
		pr_err("%s: cannot clear phcd bit\n", __func__);
		return -1;
	}

	return 0;
}

/* Put the PHY into low-power mode (set PORTSC.PHCD). Returns 0 on
 * success, -1 if the bus is still active or the bit refuses to stick
 * (in which case the link is hardware-reset to recover). */
static int usb_suspend_phy(struct usb_info *ui)
{
	int i;
	unsigned long flags;

	if (usb_is_online(ui))
		return -1;

	/* spec talks about following bits in LPM for external phy.
	 * But they are ignored because
	 * 1. disabling interface protection circuit: by disabling
	 *    interface protection curcuit we cannot come out
	 *    of lpm as async interrupts would be disabled
	 * 2. setting the suspendM bit: this bit would be set by usb
	 *    controller once we set phcd bit.
	 */
	switch (PHY_TYPE(ui->phy_info)) {
	case USB_PHY_INTEGRATED:
		if (!is_phy_45nm())
			ulpi_read(ui, 0x14);

		/* turn on/off otg comparators */
		if (ui->vbus_sn_notif &&
		    ui->usb_state == USB_STATE_NOTATTACHED)
			ulpi_write(ui, 0x00, 0x30);
		else
			ulpi_write(ui, 0x01, 0x30);

		if (!is_phy_45nm())
			ulpi_write(ui, 0x08, 0x09);
		break;
	case USB_PHY_UNDEFINED:
		pr_err("%s: undefined phy type\n", __func__);
		return -1;
	}

	/* loop for large amount of time */
	for (i = 0; i < 500; i++) {
		spin_lock_irqsave(&ui->lock, flags);
		if (usb_is_online(ui)) {
			spin_unlock_irqrestore(&ui->lock, flags);
			return -1;
		}
		/* set phy to be in lpm */
		writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
		spin_unlock_irqrestore(&ui->lock, flags);

		msleep(1);
		if (readl(USB_PORTSC) & PORTSC_PHCD)
			goto blk_stp_sig;
	}

	if (!(readl(USB_PORTSC) & PORTSC_PHCD)) {
		pr_err("unable to set phcd of portsc reg\n");
		pr_err("Reset HW link and phy to recover from phcd error\n");
		usb_hw_reset(ui);
		return -1;
	}

	/* we have to set this bit again to work-around h/w bug */
	writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);

blk_stp_sig:
	/* block the stop signal */
	writel(readl(USB_USBCMD) | ULPI_STP_CTRL, USB_USBCMD);

	return 0;
}

/* SW workarounds
 * Issue#2       - Integrated PHY Calibration
 * Symptom       - Electrical compliance failure in eye-diagram tests
 * SW workaround - Try to raise amplitude to 400mV
 *
 * Issue#3       - AHB Posted Writes
 * Symptom       - USB stability
 * SW workaround - This programs xtor ON, BURST disabled and
 *                 unspecified length of INCR burst enabled
 */
/* Full link + PHY hardware reset, applying the SW workarounds above.
 * Always returns 0 (reset timeout is only logged). */
static int usb_hw_reset(struct usb_info *ui)
{
	unsigned i;
	struct msm_hsusb_platform_data *pdata;
	unsigned long timeout;
	unsigned val = 0;

	pdata = ui->pdev->dev.platform_data;

	clk_enable(ui->clk);
	/* reset the phy before resetting link */
	if (readl(USB_PORTSC) & PORTSC_PHCD)
		usb_wakeup_phy(ui);

	/* rpc call for phy_reset */
	if (ui->pdata->phy_reset)
		ui->pdata->phy_reset(ui->addr);
	else
		msm_hsusb_phy_reset();

	/* Give some delay to settle phy after reset */
	msleep(100);

	/* RESET */
	writel(USBCMD_RESET, USB_USBCMD);
	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
	while (readl(USB_USBCMD) & USBCMD_RESET) {
		if (time_after(jiffies, timeout)) {
			dev_err(&ui->pdev->dev, "usb link reset timeout\n");
			break;
		}
		msleep(1);
	}

	/* select DEVICE mode with SDIS active */
	writel((USBMODE_SDIS | USBMODE_DEVICE), USB_USBMODE);
	msleep(1);

	/* select ULPI phy */
	i = (readl(USB_PORTSC) & ~PORTSC_PTS);
	writel(i | PORTSC_PTS_ULPI, USB_PORTSC);

	/* set usb controller interrupt latency to zero*/
	writel((readl(USB_USBCMD) & ~USBCMD_ITC_MASK) | USBCMD_ITC(0),
	       USB_USBCMD);

	/* If the target is 7x01 and roc version is > 1.2, set
	 * the AHB mode to 2 for maximum performance, else set
	 * it to 1, to bypass the AHB transactor for stability. */
	if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) {
		if (pdata->soc_version >= SOC_ROC_2_0)
			writel(0x02, USB_ROC_AHB_MODE);
		else
			writel(0x01, USB_ROC_AHB_MODE);
	} else {
		unsigned cfg_val;

		/* Raise amplitude to 400mV
		 * SW workaround, Issue#2
		 */
		cfg_val = ulpi_read(ui, ULPI_CONFIG_REG);
		cfg_val |= ULPI_AMPLITUDE_MAX;
		ulpi_write(ui, cfg_val, ULPI_CONFIG_REG);

		/* SW workaround, Issue#3 */
		writel(0x0, USB_AHB_BURST);
		writel(0x00, USB_AHB_MODE);
	}

	/* TBD: do we have to add DpRise, ChargerRise and
	 * IdFloatRise for 45nm
	 */
	/* Disable VbusValid and SessionEnd comparators */
	val = ULPI_VBUS_VALID | ULPI_SESS_END;

	/* enable id interrupt only when transceiver is available */
	if (ui->xceiv)
		writel(readl(USB_OTGSC) | OTGSC_BSVIE | OTGSC_IDIE,
		       USB_OTGSC);
	else {
		writel((readl(USB_OTGSC) | OTGSC_BSVIE) & ~OTGSC_IDPU,
		       USB_OTGSC);
		ulpi_write(ui, ULPI_IDPU, ULPI_OTG_CTRL_CLR);
		val |= ULPI_HOST_DISCONNECT | ULPI_ID_GND;
	}
	ulpi_write(ui, val, ULPI_INT_RISE_CLR);
	ulpi_write(ui, val, ULPI_INT_FALL_CLR);

	/* we are just setting the pointer in the hwblock.
Since the
	 * endpoint isnt enabled the hw block doenst read the contents
	 * of ui->dma - so we dont need a barrier here
	 */
	writel(ui->dma, USB_ENDPOINTLISTADDR);

	clk_disable(ui->clk);

	return 0;
}

/* Stop the controller, hardware-reset it and re-arm ep0; leaves the
 * device deconfigured and marked running again at the end. */
static void usb_reset(struct usb_info *ui)
{
	unsigned long flags;

	spin_lock_irqsave(&ui->lock, flags);
	ui->running = 0;
	spin_unlock_irqrestore(&ui->lock, flags);

#if 0
	/* we should flush and shutdown cleanly if already running */
	writel(0xffffffff, USB_ENDPTFLUSH);
	msleep(2);
#endif

	if (usb_hw_reset(ui)) {
		pr_info("%s: h/w reset failed\n", __func__);
		return;
	}

	usb_configure_endpoint(&ui->ep0in, NULL);
	usb_configure_endpoint(&ui->ep0out, NULL);

	/* marking us offline will cause ept queue attempts to fail */
	ui->online = 0;

	/* terminate any pending transactions */
	flush_all_endpoints(ui);

	set_configuration(ui, 0);

	spin_lock_irqsave(&ui->lock, flags);
	ui->running = 1;
	spin_unlock_irqrestore(&ui->lock, flags);
}

/* OTG-framework callback: enable or disable the device controller. */
static void usb_enable(void *handle, int enable)
{
	struct usb_info *ui = handle;
	unsigned long flags;

	spin_lock_irqsave(&ui->lock, flags);
	if (enable) {
		ui->flags |= USB_FLAG_RESET;
		ui->active = 1;
		spin_unlock_irqrestore(&ui->lock, flags);
		/* run the state machine synchronously */
		usb_do_work(&ui->work.work);
	} else {
		ui->active = 0;
		spin_unlock_irqrestore(&ui->lock, flags);
		usb_clk_disable(ui);
		msm_hsusb_suspend_locks_acquire(ui, 0);
	}
}

static struct msm_otg_ops dcd_ops = {
	.request = usb_enable,
};

/* Bind all enabled function drivers and kick off the state machine,
 * either via the OTG transceiver or the PMIC VBUS-notification RPC. */
void usb_start(struct usb_info *ui)
{
	int i, ret;

	for (i = 0; i < ui->num_funcs; i++) {
		struct usb_function_info *fi = ui->func[i];

		if (!fi || !(ui->composition->functions & (1<<i)))
			continue;
		if (fi->enabled) {
			pr_info("usb_bind_func() (%s)\n", fi->func->name);
			fi->func->bind(fi->func->context);
		}
	}

	ui->clk_enabled = 0;
	ui->vreg_enabled = 0;

	ui->xceiv = msm_otg_get_transceiver();
	if (ui->xceiv) {
		ui->flags = USB_FLAG_REG_OTG;
		queue_delayed_work(usb_work, &ui->work, 0);
	} else {
		/*Initialize pm app RPC */
		ret = msm_pm_app_rpc_init();
		if (ret) {
			pr_err("%s: pm_app_rpc connect failed\n", __func__);
			goto out;
		}
		pr_info("%s: pm_app_rpc connect success\n", __func__);

		ret = msm_pm_app_register_vbus_sn(&msm_hsusb_set_vbus_state);
		if (ret) {
			pr_err("%s:PMIC VBUS SN notif not supported\n",
			       __func__);
			msm_pm_app_rpc_deinit();
			goto out;
		}
		pr_info("%s:PMIC VBUS SN notif supported\n",
			__func__);

		ret = msm_pm_app_enable_usb_ldo(1);
		if (ret) {
			pr_err("%s: unable to turn on internal LDO",
			       __func__);
			msm_pm_app_unregister_vbus_sn(
				&msm_hsusb_set_vbus_state);
			msm_pm_app_rpc_deinit();
			goto out;
		}
		ui->vbus_sn_notif = 1;
out:
		/* start even if the PMIC RPC path failed */
		ui->active = 1;
		ui->flags |= (USB_FLAG_START | USB_FLAG_RESET);
		queue_delayed_work(usb_work, &ui->work, 0);
	}
}

static LIST_HEAD(usb_function_list);
static DEFINE_MUTEX(usb_function_list_lock);

/* Look up a registered function driver by name. */
static struct usb_function_info *usb_find_function(const char *name)
{
	struct list_head *entry;

	list_for_each(entry, &usb_function_list) {
		struct usb_function_info *fi =
			list_entry(entry, struct usb_function_info, list);
		if (fi) {
			if (!strcmp(name, fi->func->name))
				return fi;
		}
	}

	return NULL;
}

/* If every function of the selected composition has registered,
 * finish device-descriptor setup and start the controller. */
static void usb_try_to_bind(void)
{
	struct usb_info *ui = the_usb_info;
	unsigned long enabled_functions = 0;
	int i;

	if (!ui || ui->bound || !ui->pdev || !ui->composition)
		return;

	for (i = 0; i < ui->num_funcs; i++) {
		if (ui->func[i])
			enabled_functions |= (1 << i);
	}
	if ((enabled_functions & ui->composition->functions) !=
	    ui->composition->functions)
		return;

	usb_set_composition(ui->composition->product_id);
	usb_configure_device_descriptor(ui);

	/* we have found all the needed functions */
	ui->bound = 1;
	printk(KERN_INFO "msm_hsusb: functions bound. starting.\n");
	usb_start(ui);
}

/* Map a function-driver name to its index in the platform's function
 * table; returns -1 when the name is unknown. */
static int usb_get_function_index(const char *name)
{
	struct usb_info *ui = the_usb_info;
	int i;

	for (i = 0; i < ui->num_funcs; i++) {
		if (!strcmp(name, ui->functions_map[i].name))
			return i;
	}
	return -1;
}

/* Register a function driver with the DCD; triggers binding once all
 * functions of the active composition are present. */
int usb_function_register(struct usb_function *driver)
{
	struct usb_info *ui = the_usb_info;
	struct usb_function_info *fi;
	int ret = 0;
	int index;

	mutex_lock(&usb_function_list_lock);

	index = usb_get_function_index(driver->name);
	if (index < 0) {
		pr_err("%s: unsupported function = %s\n",
		       __func__, driver->name);
		ret = -EINVAL;
		goto fail;
	}

	fi = kzalloc(sizeof(*fi), GFP_KERNEL);
	if (!fi) {
		ret = -ENOMEM;
		goto fail;
	}

	fi->func = driver;
	list_add(&fi->list, &usb_function_list);
	ui->func[index] = fi;
	fi->func->ep0_out_req = ui->ep0out_req;
	fi->func->ep0_in_req = ui->setup_req;
	fi->func->ep0_out = &ui->ep0out;
	fi->func->ep0_in = &ui->ep0in;

	pr_info("%s: name = '%s', map = %d\n", __func__,
		driver->name, index);

	usb_try_to_bind();
fail:
	mutex_unlock(&usb_function_list_lock);
	return ret;
}
EXPORT_SYMBOL(usb_function_register);

/* Validate that 'pid' names a supported composition whose functions
 * have all registered; returns the pid, or 0 on failure.
 * NOTE(review): returns -1 truncated to unsigned short when the driver
 * state is missing, but 0 on lookup failure — callers appear to test
 * for 0 only. */
static unsigned short usb_validate_product_id(unsigned short pid)
{
	struct usb_info *ui = the_usb_info;
	int i;

	if (!ui || !ui->pdata)
		return -1;

	/* set idProduct based on which functions are enabled */
	for (i = 0; i < ui->pdata->num_compositions; i++) {
		if (ui->pdata->compositions[i].product_id == pid)
			break;
	}

	if (i < ui->pdata->num_compositions) {
		struct usb_composition *comp = &ui->pdata->compositions[i];

		for (i = 0; i < ui->num_funcs; i++) {
			if (comp->functions & (1 << i)) {
				if (!ui->func[i]) {
					pr_err("%s: func(%d) not available\n",
					       __func__, i);
					return 0;
				}
			}
		}
		return comp->product_id;
	} else
		pr_err("%s: Product id (%x) is not supported\n",
		       __func__, pid);
	return 0;
}

/* Translate a bitmask of enabled functions into a product id;
 * 0 when no composition matches. */
static unsigned short usb_get_product_id(unsigned long enabled_functions)
{
	struct usb_info *ui = the_usb_info;
	int i;

	if (!(ui && ui->pdata))
		return -1;

	/* set idProduct based on which functions are enabled */
	for (i = 0; i <
ui->pdata->num_compositions; i++) {
		if (ui->pdata->compositions[i].functions == enabled_functions)
			return ui->pdata->compositions[i].product_id;
	}
	return 0;
}

/* Free allocated string descriptors and reset interface numbering. */
static void usb_uninit(struct usb_info *ui)
{
	int i;

	for (i = 0; i < ui->strdesc_index; i++)
		kfree(ui->strdesc[i]);
	ui->strdesc_index = 1;
	ui->next_ifc_num = 0;
}

/* Select the composition matching 'pid' and flag each function driver
 * enabled/disabled accordingly. Returns the pid, or 0 if unsupported. */
static unsigned short usb_set_composition(unsigned short pid)
{
	struct usb_info *ui = the_usb_info;
	int i;

	if (!(ui && ui->pdata))
		return 0;

	/* Retrieve product id on enabled functions */
	for (i = 0; i < ui->pdata->num_compositions; i++) {
		if (ui->pdata->compositions[i].product_id == pid) {
			ui->composition = &ui->pdata->compositions[i];
			/* NOTE(review): loop index 'i' is reused by the
			 * inner loop; safe only because we return before
			 * the outer loop would continue */
			for (i = 0; i < ui->num_funcs; i++) {
				struct usb_function_info *fi = ui->func[i];

				if (ui->func && fi && fi->func) {
					fi->enabled = (ui->composition->
							functions >> i) & 1;
				}
			}
			pr_info("%s: composition set to product id = %x\n",
				__func__, ui->composition->product_id);
			return ui->composition->product_id;
		}
	}
	pr_err("%s: product id (%x) not supported\n", __func__, pid);
	return 0;
}

/* Tear down the current composition (soft-disconnect, unbind all
 * functions) and bring the device back up with composition 'pid'. */
static void usb_switch_composition(unsigned short pid)
{
	struct usb_info *ui = the_usb_info;
	int i;
	unsigned long flags;

	if (!ui->active)
		return;
	if (!usb_validate_product_id(pid))
		return;

	disable_irq(ui->irq);
	if (cancel_delayed_work_sync(&ui->work))
		pr_info("%s: Removed work successfully\n", __func__);
	if (ui->running) {
		spin_lock_irqsave(&ui->lock, flags);
		ui->running = 0;
		ui->online = 0;
		ui->bound = 0;
		spin_unlock_irqrestore(&ui->lock, flags);

		/* we should come out of lpm to access registers */
		if (ui->in_lpm) {
			if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) {
				disable_irq(ui->gpio_irq[0]);
				disable_irq(ui->gpio_irq[1]);
			}
			if (ui->usb_state == USB_STATE_NOTATTACHED
			    && ui->vbus_sn_notif)
				msm_pm_app_enable_usb_ldo(1);

			usb_lpm_exit(ui);
			if (cancel_work_sync(&ui->li.wakeup_phy))
				usb_lpm_wakeup_phy(NULL);
			ui->in_lpm = 0;
		}

		/* disable usb and session valid interrupts */
		writel(0, USB_USBINTR);
		writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);

		/* stop the controller */
		usb_disable_pullup(ui);
		ui->usb_state = USB_STATE_NOTATTACHED;
		switch_set_state(&ui->sdev, 0);
		/* Before starting again, wait for 300ms
		 * to make sure host detects soft disconnection
		 **/
		msleep(300);
	}

	/* deconfigure and unbind every currently-enabled function */
	for (i = 0; i < ui->num_funcs; i++) {
		struct usb_function_info *fi = ui->func[i];

		if (!fi || !fi->func || !fi->enabled)
			continue;
		if (fi->func->configure)
			fi->func->configure(0, fi->func->context);
		if (fi->func->unbind)
			fi->func->unbind(fi->func->context);
	}

	usb_uninit(ui);
	usb_set_composition(pid);
	usb_configure_device_descriptor(ui);

	/* initialize functions */
	for (i = 0; i < ui->num_funcs; i++) {
		struct usb_function_info *fi = ui->func[i];

		if (!fi || !(ui->composition->functions & (1 << i)))
			continue;
		if (fi->enabled) {
			if (fi->func->bind)
				fi->func->bind(fi->func->context);
		}
	}

	ui->bound = 1;
	ui->flags = USB_FLAG_RESET;
	queue_delayed_work(usb_work, &ui->work, 0);
	enable_irq(ui->irq);
}

/* Enable or disable one function by name, then switch to the
 * composition matching the resulting set of enabled functions.
 * Rolls the flag back if no composition matches. */
void usb_function_enable(const char *function, int enable)
{
	struct usb_function_info *fi;
	struct usb_info *ui = the_usb_info;
	unsigned long functions_mask;
	int curr_enable;
	unsigned short pid;
	int i;

	if (!ui)
		return;

	pr_info("%s: name = %s, enable = %d\n", __func__, function, enable);

	fi = usb_find_function(function);
	if (!fi) {
		pr_err("%s: function (%s) not registered with DCD\n",
		       __func__, function);
		return;
	}
	if (fi->enabled == enable) {
		pr_err("%s: function (%s) state is same\n",
		       __func__, function);
		return;
	}
	functions_mask = 0;
	curr_enable = fi->enabled;
	fi->enabled = enable;
	for (i = 0; i < ui->num_funcs; i++) {
		/* NOTE(review): this inner 'fi' shadows the outer one */
		struct usb_function_info *fi = ui->func[i];

		if (fi && fi->enabled)
			functions_mask |= (1 << i);
	}

	pid = usb_get_product_id(functions_mask);
	if (!pid) {
		/* no matching composition: roll back the flag change */
		fi->enabled = curr_enable;
		pr_err("%s: mask (%lx) not matching with any products\n",
		       __func__, functions_mask);
		pr_err("%s: continuing with current composition\n", __func__);
		return;
	}
	usb_switch_composition(pid);
}
EXPORT_SYMBOL(usb_function_enable);

static int usb_free(struct usb_info *ui, int
ret)
{
	/* Release every resource the probe path acquired and pass
	 * 'ret' straight through (probe-failure convenience). */
	disable_irq_wake(ui->irq);
	free_irq(ui->irq, ui);
	if (ui->gpio_irq[0])
		free_irq(ui->gpio_irq[0], NULL);
	if (ui->gpio_irq[1])
		free_irq(ui->gpio_irq[1], NULL);

	dma_pool_destroy(ui->pool);
	dma_free_coherent(&ui->pdev->dev, 4096, ui->buf, ui->dma);
	kfree(ui->func);
	kfree(ui->strdesc);
	iounmap(ui->addr);
	clk_put(ui->clk);
	clk_put(ui->pclk);
	clk_put(ui->cclk);
	msm_hsusb_suspend_locks_init(ui, 0);
	kfree(ui);

	return ret;
}

/* Sample the ULPI session-valid status with its edge interrupts
 * briefly masked; returns 1 if VBUS is present, 0 otherwise. */
static int usb_vbus_is_on(struct usb_info *ui)
{
	unsigned tmp;

	/* disable session valid raising and falling interrupts */
	ulpi_write(ui, ULPI_SESSION_VALID_RAISE,
		   ULPI_USBINTR_ENABLE_RASING_C);
	ulpi_write(ui, ULPI_SESSION_VALID_FALL,
		   ULPI_USBINTR_ENABLE_FALLING_C);

	tmp = ulpi_read(ui, ULPI_USBINTR_STATUS);

	/* enable session valid raising and falling interrupts */
	ulpi_write(ui, ULPI_SESSION_VALID_RAISE,
		   ULPI_USBINTR_ENABLE_RASING_S);
	ulpi_write(ui, ULPI_SESSION_VALID_FALL,
		   ULPI_USBINTR_ENABLE_FALLING_S);

	/* bit 2 presumably is the session-valid status bit — confirm
	 * against the ULPI register map */
	if (tmp & (1 << 2))
		return 1;
	return 0;
}

/* Driver state machine, run from the usb_work workqueue. Consumes the
 * flag bits posted by the IRQ handler and moves the device between
 * IDLE, ONLINE and OFFLINE; unknown flags force a reset to IDLE. */
static void usb_do_work(struct work_struct *w)
{
	struct usb_info *ui = container_of(w, struct usb_info, work.work);
	unsigned long iflags;
	unsigned long flags, ret;

	for (;;) {
		/* atomically take and clear the posted flags */
		spin_lock_irqsave(&ui->lock, iflags);
		flags = ui->flags;
		ui->flags = 0;
		spin_unlock_irqrestore(&ui->lock, iflags);

		/* give up if we have nothing to do */
		if (flags == 0)
			break;

		switch (ui->state) {
		case USB_STATE_IDLE:
			if (flags & USB_FLAG_REG_OTG) {
				dcd_ops.handle = (void *) ui;
				ret = ui->xceiv->set_peripheral(ui->xceiv,
								&dcd_ops);
				/* NOTE(review): the two adjacent string
				 * literals below concatenate without a
				 * space ("peripheraldriver"); runtime text
				 * left untouched in this pass */
				if (ret)
					pr_err("%s: Can't register peripheral"
					       "driver with OTG", __func__);
				break;
			}
			if ((flags & USB_FLAG_START) ||
			    (flags & USB_FLAG_RESET)) {
				disable_irq(ui->irq);
				if (ui->vbus_sn_notif)
					msm_pm_app_enable_usb_ldo(1);
				usb_clk_enable(ui);
				usb_vreg_enable(ui);
				usb_vbus_online(ui);

				/* if VBUS is present move to ONLINE state
				 * otherwise move to OFFLINE state
				 */
				if (usb_vbus_is_on(ui)) {
					ui->usb_state = USB_STATE_POWERED;
					msm_hsusb_suspend_locks_acquire(ui, 1);
					ui->state = USB_STATE_ONLINE;
					usb_enable_pullup(ui);
					schedule_delayed_work(
						&ui->chg_legacy_det,
						USB_CHG_DET_DELAY);
					pr_info("hsusb: IDLE -> ONLINE\n");
				} else {
					ui->usb_state = USB_STATE_NOTATTACHED;
					ui->state = USB_STATE_OFFLINE;

					msleep(500);
					usb_lpm_enter(ui);
					pr_info("hsusb: IDLE -> OFFLINE\n");
					if (ui->vbus_sn_notif)
						msm_pm_app_enable_usb_ldo(0);
				}
				enable_irq(ui->irq);
				break;
			}
			goto reset;

		case USB_STATE_ONLINE:
			/* If at any point when we were online, we received
			 * the signal to go offline, we must honor it
			 */
			if (flags & USB_FLAG_VBUS_OFFLINE) {
				enum charger_type temp;
				unsigned long f;

				cancel_delayed_work_sync(&ui->chg_legacy_det);

				spin_lock_irqsave(&ui->lock, f);
				temp = ui->chg_type;
				ui->chg_type = USB_CHG_TYPE__INVALID;
				spin_unlock_irqrestore(&ui->lock, f);

				if (temp != USB_CHG_TYPE__INVALID) {
					/* re-acquire wakelock and restore axi
					 * freq if they have been reduced by
					 * charger work item */
					msm_hsusb_suspend_locks_acquire(ui, 1);

					msm_chg_usb_i_is_not_available();
					msm_chg_usb_charger_disconnected();
				}

				/* reset usb core and usb phy */
				disable_irq(ui->irq);
				if (ui->in_lpm)
					usb_lpm_exit(ui);
				usb_vbus_offline(ui);
				usb_lpm_enter(ui);
				if ((ui->vbus_sn_notif) &&
				    (ui->usb_state == USB_STATE_NOTATTACHED))
					msm_pm_app_enable_usb_ldo(0);
				ui->state = USB_STATE_OFFLINE;
				enable_irq(ui->irq);
				switch_set_state(&ui->sdev, 0);
				pr_info("hsusb: ONLINE -> OFFLINE\n");
				break;
			}
			if (flags & USB_FLAG_SUSPEND) {
				ui->usb_state = USB_STATE_SUSPENDED;
				usb_lpm_enter(ui);
				msm_hsusb_suspend_locks_acquire(ui, 1);
				break;
			}
			if ((flags & USB_FLAG_RESUME) ||
			    (flags & USB_FLAG_CONFIGURE)) {
				int maxpower = usb_get_max_power(ui);

				if (maxpower > 0)
					msm_chg_usb_i_is_available(maxpower);

				if (flags & USB_FLAG_CONFIGURE)
					switch_set_state(&ui->sdev, 1);

				break;
			}
			goto reset;

		case USB_STATE_OFFLINE:
			/* If we were signaled to go online and vbus is still
			 * present when we received the signal, go online.
*/
			if ((flags & USB_FLAG_VBUS_ONLINE)) {
				msm_hsusb_suspend_locks_acquire(ui, 1);
				disable_irq(ui->irq);
				ui->state = USB_STATE_ONLINE;
				if (ui->in_lpm)
					usb_lpm_exit(ui);
				usb_vbus_online(ui);
				/* VBUS dropped again while we were coming
				 * up: re-arm the BSV interrupt and reset */
				if (!(B_SESSION_VALID & readl(USB_OTGSC))) {
					writel(((readl(USB_OTGSC) &
						 ~OTGSC_INTR_STS_MASK) |
						OTGSC_BSVIS), USB_OTGSC);
					enable_irq(ui->irq);
					goto reset;
				}
				usb_enable_pullup(ui);
				schedule_delayed_work(
					&ui->chg_legacy_det,
					USB_CHG_DET_DELAY);
				pr_info("hsusb: OFFLINE -> ONLINE\n");
				enable_irq(ui->irq);
				break;
			}
			if (flags & USB_FLAG_SUSPEND) {
				usb_lpm_enter(ui);
				wake_unlock(&ui->wlock);
				break;
			}
			/* deliberate fallthrough into default/reset */
		default:
reset:
			/* For RESET or any unknown flag in a particular state
			 * go to IDLE state and reset HW to bring to known
			 * state */
			ui->flags = USB_FLAG_RESET;
			ui->state = USB_STATE_IDLE;
		}
	}
}

/* PMIC VBUS session-valid callback: wake the controller and turn the
 * PHY comparators back on when a cable appears. */
void msm_hsusb_set_vbus_state(int online)
{
	struct usb_info *ui = the_usb_info;

	if (ui && online) {
		msm_pm_app_enable_usb_ldo(1);
		usb_lpm_exit(ui);
		/* Turn on PHY comparators */
		if (!(ulpi_read(ui, 0x30) & 0x01))
			ulpi_write(ui, 0x01, 0x30);
	}
}

/* Wakeup-GPIO interrupt: one-shot; re-enabled when LPM is entered. */
static irqreturn_t usb_lpm_gpio_isr(int irq, void *data)
{
	disable_irq(irq);
	return IRQ_HANDLED;
}

/* Leave low-power mode. If the PHY is still in PHCD the actual wakeup
 * is deferred to the wakeup_phy work item (with the IRQ masked). */
static void usb_lpm_exit(struct usb_info *ui)
{
	if (ui->in_lpm == 0)
		return;

	if (usb_lpm_config_gpio)
		usb_lpm_config_gpio(0);

	wake_lock(&ui->wlock);
	usb_clk_enable(ui);
	usb_vreg_enable(ui);

	writel(readl(USB_USBCMD) & ~ASYNC_INTR_CTRL, USB_USBCMD);
	writel(readl(USB_USBCMD) & ~ULPI_STP_CTRL, USB_USBCMD);

	if (readl(USB_PORTSC) & PORTSC_PHCD) {
		disable_irq(ui->irq);
		schedule_work(&ui->li.wakeup_phy);
	} else {
		ui->in_lpm = 0;
		if (ui->xceiv)
			ui->xceiv->set_suspend(ui->xceiv, 0);
	}

	pr_info("%s(): USB exited from low power mode\n", __func__);
}

/* Enter low-power mode: suspend the PHY, gate clocks/regulators and
 * arm the wakeup GPIOs. Returns 0 on success, -1 if aborted (bus
 * active, PHY refused to suspend, or VBUS appeared meanwhile). */
static int usb_lpm_enter(struct usb_info *ui)
{
	unsigned long flags;
	unsigned connected;

	spin_lock_irqsave(&ui->lock, flags);
	if (ui->in_lpm) {
		spin_unlock_irqrestore(&ui->lock, flags);
		pr_debug("already in lpm, nothing to do\n");
		return 0;
	}

	if (usb_is_online(ui)) {
		spin_unlock_irqrestore(&ui->lock, flags);
		pr_info("%s: lpm procedure aborted\n", __func__);
		return -1;
	}

	ui->in_lpm = 1;
	if (ui->xceiv)
		ui->xceiv->set_suspend(ui->xceiv, 1);
	disable_irq(ui->irq);
	spin_unlock_irqrestore(&ui->lock, flags);

	if (usb_suspend_phy(ui)) {
		ui->in_lpm = 0;
		ui->flags = USB_FLAG_RESET;
		enable_irq(ui->irq);
		pr_err("%s: phy suspend failed, lpm procedure aborted\n",
		       __func__);
		return -1;
	}

	/* VBUS showed up while suspending: undo and report online */
	if ((B_SESSION_VALID & readl(USB_OTGSC)) &&
	    (ui->usb_state == USB_STATE_NOTATTACHED)) {
		ui->in_lpm = 0;
		writel(((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) |
			OTGSC_BSVIS), USB_OTGSC);
		ui->flags = USB_FLAG_VBUS_ONLINE;
		ui->usb_state = USB_STATE_POWERED;
		usb_wakeup_phy(ui);
		enable_irq(ui->irq);
		return -1;
	}

	/* enable async interrupt */
	writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL, USB_USBCMD);
	/* NOTE(review): 'connected' is computed but never used */
	connected = readl(USB_USBCMD) & USBCMD_RS;

	usb_vreg_disable(ui);
	usb_clk_disable(ui);

	if (usb_lpm_config_gpio) {
		if (usb_lpm_config_gpio(1)) {
			spin_lock_irqsave(&ui->lock, flags);
			usb_lpm_exit(ui);
			spin_unlock_irqrestore(&ui->lock, flags);
			enable_irq(ui->irq);
			return -1;
		}
		enable_irq(ui->gpio_irq[0]);
		enable_irq(ui->gpio_irq[1]);
	}

	enable_irq(ui->irq);
	msm_hsusb_suspend_locks_acquire(ui, 0);
	pr_info("%s: usb in low power mode\n", __func__);
	return 0;
}

/* Unmask device interrupts and set the run/stop bit (D+ pullup on). */
static void usb_enable_pullup(struct usb_info *ui)
{
	disable_irq(ui->irq);
	writel(STS_URI | STS_SLI | STS_UI | STS_PCI, USB_USBINTR);
	writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD);
	enable_irq(ui->irq);
}

/* SW workarounds
 * Issue #1      - USB Spoof Disconnect Failure
 * Symptom       - Writing 0 to run/stop bit of USBCMD doesn't cause
 *                 disconnect
 * SW workaround - Making opmode non-driving and SuspendM set in
 *                 function register of SMSC phy
 */
/* Mask device interrupts and clear the run/stop bit (soft disconnect). */
static void usb_disable_pullup(struct usb_info *ui)
{
	disable_irq(ui->irq);
	writel(readl(USB_USBINTR) & ~(STS_URI | STS_SLI | STS_UI | STS_PCI),
	       USB_USBINTR);
	writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD);

	/* S/W workaround, Issue#1 */
	if (!is_phy_external() && !is_phy_45nm())
		ulpi_write(ui, 0x48, 0x04);

	enable_irq(ui->irq);
}

static void usb_chg_stop(struct
work_struct *w)
{
	/* Stop signalling available charge current when an SDP-type
	 * charger session ends. */
	struct usb_info *ui = the_usb_info;
	enum charger_type temp;
	unsigned long flags;

	spin_lock_irqsave(&ui->lock, flags);
	temp = ui->chg_type;
	spin_unlock_irqrestore(&ui->lock, flags);

	if (temp == USB_CHG_TYPE__SDP)
		msm_chg_usb_i_is_not_available();
}

/* Power up clocks/regulators and the PHY if we were in LPM, then
 * reset the controller for the new cable session. */
static void usb_vbus_online(struct usb_info *ui)
{
	if (ui->in_lpm) {
		if (usb_lpm_config_gpio)
			usb_lpm_config_gpio(0);
		usb_vreg_enable(ui);
		usb_clk_enable(ui);
		usb_wakeup_phy(ui);
		ui->in_lpm = 0;
	}

	usb_reset(ui);
}

/* Tear down the active session after cable disconnect and reset the
 * link + PHY to flush any state the hardware may still hold. */
static void usb_vbus_offline(struct usb_info *ui)
{
	unsigned long timeout;
	unsigned val = 0;

	if (ui->online != 0) {
		ui->online = 0;
		flush_all_endpoints(ui);
		set_configuration(ui, 0);
	}

	/* reset h/w at cable disconnetion becasuse
	 * of h/w bugs and to flush any resource that
	 * h/w might be holding
	 */
	clk_enable(ui->clk);

	if (readl(USB_PORTSC) & PORTSC_PHCD)
		usb_wakeup_phy(ui);

	if (ui->pdata->phy_reset)
		ui->pdata->phy_reset(ui->addr);
	else
		msm_hsusb_phy_reset();
	/* Give some delay to settle phy after reset */
	msleep(100);

	writel(USBCMD_RESET, USB_USBCMD);
	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
	while (readl(USB_USBCMD) & USBCMD_RESET) {
		if (time_after(jiffies, timeout)) {
			dev_err(&ui->pdev->dev, "usb link reset timeout\n");
			break;
		}
		msleep(1);
	}

	/* Disable VbusValid and SessionEnd comparators */
	val = ULPI_VBUS_VALID | ULPI_SESS_END;

	/* enable id interrupt only when transceiver is available */
	if (ui->xceiv)
		writel(readl(USB_OTGSC) | OTGSC_BSVIE | OTGSC_IDIE,
		       USB_OTGSC);
	else {
		writel((readl(USB_OTGSC) | OTGSC_BSVIE) & ~OTGSC_IDPU,
		       USB_OTGSC);
		ulpi_write(ui, ULPI_IDPU, ULPI_OTG_CTRL_CLR);
		val |= ULPI_HOST_DISCONNECT | ULPI_ID_GND;
	}
	ulpi_write(ui, val, ULPI_INT_RISE_CLR);
	ulpi_write(ui, val, ULPI_INT_FALL_CLR);

	clk_disable(ui->clk);
}

/* Deferred PHY wakeup, scheduled from usb_lpm_exit() when PORTSC.PHCD
 * was still set; on failure the controller is queued for a reset. */
static void usb_lpm_wakeup_phy(struct work_struct *w)
{
	struct usb_info *ui = the_usb_info;
	unsigned long flags;

	if (usb_wakeup_phy(ui)) {
		pr_err("fatal error: cannot bring phy out of lpm\n");
		pr_err("%s: resetting controller\n", __func__);

		spin_lock_irqsave(&ui->lock, flags);
		usb_disable_pullup(ui);
		ui->flags = USB_FLAG_RESET;
		queue_delayed_work(usb_work, &ui->work, 0);
		enable_irq(ui->irq);
		spin_unlock_irqrestore(&ui->lock, flags);
		return;
	}

	ui->in_lpm = 0;
	if (ui->xceiv)
		ui->xceiv->set_suspend(ui->xceiv, 0);
	enable_irq(ui->irq);
}

/* Force the host to re-enumerate us by bouncing the D+ pullup. */
void usb_function_reenumerate(void)
{
	struct usb_info *ui = the_usb_info;

	/* disable and re-enable the D+ pullup */
	pr_info("hsusb: disable pullup\n");
	usb_disable_pullup(ui);

	msleep(10);

	pr_info("hsusb: enable pullup\n");
	usb_enable_pullup(ui);
}

#if defined(CONFIG_DEBUG_FS)
static char debug_buffer[PAGE_SIZE];

/* debugfs 'status': dump controller registers and the per-endpoint
 * request queues into a page-sized snapshot. */
static ssize_t debug_read_status(struct file *file, char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	struct usb_info *ui = file->private_data;
	char *buf = debug_buffer;
	unsigned long flags;
	struct usb_endpoint *ept;
	struct msm_request *req;
	int n;
	int i = 0;

	spin_lock_irqsave(&ui->lock, flags);

	i += scnprintf(buf + i, PAGE_SIZE - i,
		       "regs: setup=%08x prime=%08x stat=%08x done=%08x\n",
		       readl(USB_ENDPTSETUPSTAT),
		       readl(USB_ENDPTPRIME),
		       readl(USB_ENDPTSTAT),
		       readl(USB_ENDPTCOMPLETE));
	i += scnprintf(buf + i, PAGE_SIZE - i,
		       "regs: cmd=%08x sts=%08x intr=%08x port=%08x\n\n",
		       readl(USB_USBCMD),
		       readl(USB_USBSTS),
		       readl(USB_USBINTR),
		       readl(USB_PORTSC));

	for (n = 0; n < 32; n++) {
		ept = ui->ept + n;
		if (ept->max_pkt == 0)
			continue;

		i += scnprintf(buf + i, PAGE_SIZE - i,
			       "ept%d %s cfg=%08x active=%08x next=%08x info=%08x\n",
			       ept->num,
			       (ept->flags & EPT_FLAG_IN) ? "in " : "out",
			       ept->head->config, ept->head->active,
			       ept->head->next, ept->head->info);

		for (req = ept->req; req; req = req->next)
			i += scnprintf(buf + i, PAGE_SIZE - i,
				       " req @%08x next=%08x info=%08x page0=%08x %c %c\n",
				       req->item_dma, req->item->next,
				       req->item->info, req->item->page0,
				       req->busy ? 'B' : ' ',
				       req->live ? 'L' : ' ');
	}

	i += scnprintf(buf + i, PAGE_SIZE - i,
		       "phy failure count: %d\n", ui->phy_fail_count);

	spin_unlock_irqrestore(&ui->lock, flags);

	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
}

/* debugfs 'reset': queue a controller reset on any write. */
static ssize_t debug_write_reset(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct usb_info *ui = file->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ui->lock, flags);
	ui->flags |= USB_FLAG_RESET;
	queue_delayed_work(usb_work, &ui->work, 0);
	spin_unlock_irqrestore(&ui->lock, flags);

	return count;
}

/* debugfs 'cycle': bounce the pullup to force re-enumeration. */
static ssize_t debug_write_cycle(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	usb_function_reenumerate();
	return count;
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

const struct file_operations debug_stat_ops = {
	.open = debug_open,
	.read = debug_read_status,
};

const struct file_operations debug_reset_ops = {
	.open = debug_open,
	.write = debug_write_reset,
};

const struct file_operations debug_cycle_ops = {
	.open = debug_open,
	.write = debug_write_cycle,
};

static struct dentry *debugfs_dent;
static struct dentry *debugfs_status;
static struct dentry *debugfs_reset;
static struct dentry *debugfs_cycle;

static void usb_debugfs_init(struct usb_info *ui)
{
	debugfs_dent = debugfs_create_dir("usb", 0);
	if (IS_ERR(debugfs_dent))
		return;

	debugfs_status = debugfs_create_file("status", 0444,
					     debugfs_dent, ui,
					     &debug_stat_ops);
	debugfs_reset = debugfs_create_file("reset", S_IWUSR |S_IWGRP,
					    debugfs_dent, ui,
					    &debug_reset_ops);
	debugfs_cycle = debugfs_create_file("cycle", S_IWUSR |S_IWGRP,
					    debugfs_dent, ui,
					    &debug_cycle_ops);
}

static void usb_debugfs_uninit(void)
{
	debugfs_remove(debugfs_status);
	debugfs_remove(debugfs_reset);
	debugfs_remove(debugfs_cycle);
	debugfs_remove(debugfs_dent);
}

#else
static void usb_debugfs_init(struct usb_info *ui) {}
static void usb_debugfs_uninit(void) {}
#endif

static void usb_configure_device_descriptor(struct
usb_info *ui) { desc_device.idVendor = ui->pdata->vendor_id; desc_device.idProduct = ui->composition->product_id; desc_device.bcdDevice = ui->pdata->version; if (ui->pdata->serial_number) desc_device.iSerialNumber = usb_msm_get_next_strdesc_id(ui->pdata->serial_number); if (ui->pdata->product_name) desc_device.iProduct = usb_msm_get_next_strdesc_id(ui->pdata->product_name); if (ui->pdata->manufacturer_name) desc_device.iManufacturer = usb_msm_get_next_strdesc_id( ui->pdata->manufacturer_name); /* Send Serial number to A9 for software download */ if (ui->pdata->serial_number) { msm_hsusb_is_serial_num_null(FALSE); msm_hsusb_send_serial_number(ui->pdata->serial_number); } else msm_hsusb_is_serial_num_null(TRUE); msm_hsusb_send_productID(desc_device.idProduct); } static ssize_t msm_hsusb_store_func_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { char name[20]; int enable = 0; int i; for (i = 0; buf[i] != 0; i++) { if (buf[i] == '=') break; name[i] = buf[i]; } name[i++] = 0; if (buf[i] == '0' || buf[i] == '1') enable = buf[i] - '0'; else return size; pr_info("%s: name = %s, enable = %d\n", __func__, name, enable); usb_function_enable(name, enable); return size; } static ssize_t msm_hsusb_show_compswitch(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_info *ui = the_usb_info; int i; if (ui->composition) i = scnprintf(buf, PAGE_SIZE, "composition product id = %x\n", ui->composition->product_id); else i = scnprintf(buf, PAGE_SIZE, "composition product id = 0\n"); return i; } static ssize_t msm_hsusb_store_compswitch(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { unsigned long pid; if (!strict_strtoul(buf, 16, &pid)) { pr_info("%s: Requested New Product id = %lx\n", __func__, pid); usb_switch_composition((unsigned short)pid); } else pr_info("%s: strict_strtoul conversion failed\n", __func__); return size; } static ssize_t msm_hsusb_store_autoresume(struct device *dev, 
struct device_attribute *attr, const char *buf, size_t size) { usb_remote_wakeup(); return size; } static ssize_t msm_hsusb_show_state(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_info *ui = the_usb_info; int i; char *state[] = {"USB_STATE_NOTATTACHED", "USB_STATE_ATTACHED", "USB_STATE_POWERED", "USB_STATE_UNAUTHENTICATED", "USB_STATE_RECONNECTING", "USB_STATE_DEFAULT", "USB_STATE_ADDRESS", "USB_STATE_CONFIGURED", "USB_STATE_SUSPENDED" }; i = scnprintf(buf, PAGE_SIZE, "%s\n", state[ui->usb_state]); return i; } static ssize_t msm_hsusb_show_lpm(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_info *ui = the_usb_info; int i; i = scnprintf(buf, PAGE_SIZE, "%d\n", ui->in_lpm); return i; } static ssize_t msm_hsusb_show_speed(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_info *ui = the_usb_info; int i; char *speed[] = {"USB_SPEED_UNKNOWN", "USB_SPEED_LOW", "USB_SPEED_FULL", "USB_SPEED_HIGH"}; i = scnprintf(buf, PAGE_SIZE, "%s\n", speed[ui->speed]); return i; } static DEVICE_ATTR(composition, 0664, msm_hsusb_show_compswitch, msm_hsusb_store_compswitch); static DEVICE_ATTR(func_enable, S_IWUSR, NULL, msm_hsusb_store_func_enable); static DEVICE_ATTR(autoresume, 0222, NULL, msm_hsusb_store_autoresume); static DEVICE_ATTR(state, 0664, msm_hsusb_show_state, NULL); static DEVICE_ATTR(lpm, 0664, msm_hsusb_show_lpm, NULL); static DEVICE_ATTR(speed, 0664, msm_hsusb_show_speed, NULL); static struct attribute *msm_hsusb_attrs[] = { &dev_attr_composition.attr, &dev_attr_func_enable.attr, &dev_attr_autoresume.attr, &dev_attr_state.attr, &dev_attr_lpm.attr, &dev_attr_speed.attr, NULL, }; static struct attribute_group msm_hsusb_attr_grp = { .attrs = msm_hsusb_attrs, }; #define msm_hsusb_func_attr(function, index) \ static ssize_t show_##function(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct usb_info *ui = the_usb_info; \ struct usb_function_info *fi = ui->func[index]; \ \ 
return sprintf(buf, "%d", fi->enabled); \ \ } \ \ static DEVICE_ATTR(function, S_IRUGO, show_##function, NULL); msm_hsusb_func_attr(diag, 0); msm_hsusb_func_attr(adb, 1); msm_hsusb_func_attr(modem, 2); msm_hsusb_func_attr(nmea, 3); msm_hsusb_func_attr(mass_storage, 4); msm_hsusb_func_attr(ethernet, 5); msm_hsusb_func_attr(rmnet, 6); static struct attribute *msm_hsusb_func_attrs[] = { &dev_attr_diag.attr, &dev_attr_adb.attr, &dev_attr_modem.attr, &dev_attr_nmea.attr, &dev_attr_mass_storage.attr, &dev_attr_ethernet.attr, &dev_attr_rmnet.attr, NULL, }; static struct attribute_group msm_hsusb_func_attr_grp = { .name = "functions", .attrs = msm_hsusb_func_attrs, }; static int __init usb_probe(struct platform_device *pdev) { struct resource *res; struct usb_info *ui; int irq; int ulpi_irq1 = 0; int ulpi_irq2 = 0; int i; int ret = 0; if (!pdev || !pdev->dev.platform_data) { pr_err("%s:pdev or platform data is null\n", __func__); return -ENODEV; } irq = platform_get_irq(pdev, 0); if (irq < 0) { pr_err("%s: failed to get irq num from platform_get_irq\n", __func__); return -ENODEV; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { pr_err("%s: failed to get mem resource\n", __func__); return -ENODEV; } ret = sysfs_create_group(&pdev->dev.kobj, &msm_hsusb_attr_grp); if (ret) { pr_err("%s: unable to create sysfs group\n", __func__); return ret; } usb_work = create_singlethread_workqueue("usb_work"); if (!usb_work) { pr_err("%s: unable to create work queue\n", __func__); ret = -ENOMEM; goto free_sysfs_grp; } ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL); if (!ui) { pr_err("%s: unable to allocate memory for ui\n", __func__); ret = -ENOMEM; goto free_workqueue; } ui->pdev = pdev; ui->pdata = pdev->dev.platform_data; for (i = 0; i < ui->pdata->num_compositions; i++) if (ui->pdata->compositions[i].product_id == pid) { ui->composition = &ui->pdata->compositions[i]; break; } if (!ui->composition) { pr_err("%s: unable to find the composition with pid:(%d)\n", 
__func__, pid); ret = -ENODEV; goto free_ui; } ui->phy_info = ui->pdata->phy_info; if (ui->phy_info == USB_PHY_UNDEFINED) { pr_err("undefined phy_info: (%d)\n", ui->phy_info); ret = -ENOMEM; goto free_ui; } /* zero is reserved for language id */ ui->strdesc_index = 1; ui->strdesc = kzalloc(sizeof(char *) * MAX_STRDESC_NUM, GFP_KERNEL); if (!ui->strdesc) { pr_err("%s: unable allocate mem for string descriptors\n", __func__); ret = -ENOMEM; goto free_ui; } ui->num_funcs = ui->pdata->num_functions; ui->func = kzalloc(sizeof(struct usb_function *) * ui->num_funcs, GFP_KERNEL); if (!ui->func) { pr_err("%s: unable allocate mem for functions\n", __func__); ret = -ENOMEM; goto free_str_desc; } ret = sysfs_create_group(&pdev->dev.kobj, &msm_hsusb_func_attr_grp); if (ret) { pr_err("%s: unable to create functions sysfs group\n", __func__); goto free_func; } ui->addr = ioremap(res->start, resource_size(res)); if (!ui->addr) { pr_err("%s: unable ioremap\n", __func__); ret = -ENOMEM; goto free_func_sysfs_grp; } ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL); if (!ui->buf) { pr_err("%s: failed allocate dma coherent memory\n", __func__); ret = -ENOMEM; goto free_iounmap; } ui->pool = dma_pool_create("hsusb", NULL, 32, 32, 0); if (!ui->pool) { pr_err("%s: unable to allocate dma pool\n", __func__); ret = -ENOMEM; goto free_dma_coherent; } ui->clk = clk_get(&pdev->dev, "usb_hs_clk"); if (IS_ERR(ui->clk)) { pr_err("%s: unable get usb_hs_clk\n", __func__); ret = PTR_ERR(ui->clk); goto free_dma_pool; } ui->pclk = clk_get(&pdev->dev, "usb_hs_pclk"); if (IS_ERR(ui->pclk)) { pr_err("%s: unable get usb_hs_pclk\n", __func__); ret = PTR_ERR(ui->pclk); goto free_hs_clk; } if (ui->pdata->core_clk) { ui->cclk = clk_get(&pdev->dev, "usb_hs_core_clk"); if (IS_ERR(ui->cclk)) { pr_err("%s: unable get usb_hs_core_clk\n", __func__); ret = PTR_ERR(ui->cclk); goto free_hs_pclk; } } if (ui->pdata->vreg5v_required) { ui->vreg = vreg_get(NULL, "boost"); if (IS_ERR(ui->vreg)) { 
pr_err("%s: vreg get failed\n", __func__); ui->vreg = NULL; ret = PTR_ERR(ui->vreg); goto free_hs_cclk; } } /* disable interrupts before requesting irq */ usb_clk_enable(ui); writel(0, USB_USBINTR); writel(readl(USB_OTGSC) & ~OTGSC_INTR_MASK, USB_OTGSC); usb_clk_disable(ui); ret = request_irq(irq, usb_interrupt, IRQF_SHARED, pdev->name, ui); if (ret) { pr_err("%s: request_irq failed\n", __func__); goto free_vreg5v; } ui->irq = irq; if (ui->pdata->config_gpio) { usb_lpm_config_gpio = ui->pdata->config_gpio; ulpi_irq1 = platform_get_irq_byname(pdev, "vbus_interrupt"); if (ulpi_irq1 < 0) { pr_err("%s: failed to get vbus gpio interrupt\n", __func__); return -ENODEV; } ulpi_irq2 = platform_get_irq_byname(pdev, "id_interrupt"); if (ulpi_irq2 < 0) { pr_err("%s: failed to get id gpio interrupt\n", __func__); return -ENODEV; } ret = request_irq(ulpi_irq1, &usb_lpm_gpio_isr, IRQF_TRIGGER_HIGH, "vbus_interrupt", NULL); if (ret) { pr_err("%s: failed to request vbus interrupt:(%d)\n", __func__, ulpi_irq1); goto free_irq; } ret = request_irq(ulpi_irq2, &usb_lpm_gpio_isr, IRQF_TRIGGER_RISING, "usb_ulpi_data3", NULL); if (ret) { pr_err("%s: failed to request irq ulpi_data_3:(%d)\n", __func__, ulpi_irq2); goto free_ulpi_irq1; } ui->gpio_irq[0] = ulpi_irq1; ui->gpio_irq[1] = ulpi_irq2; } ui->sdev.name = DRIVER_NAME; ui->sdev.print_name = print_switch_name; ui->sdev.print_state = print_switch_state; ret = switch_dev_register(&ui->sdev); if (ret < 0) { pr_err("%s(): switch_dev_register failed ret = %d\n", __func__, ret); goto free_ulpi_irq2; } the_usb_info = ui; ui->functions_map = ui->pdata->function_map; ui->selfpowered = 0; ui->remote_wakeup = 0; ui->maxpower = 0xFA; ui->chg_type = USB_CHG_TYPE__INVALID; /* to allow swfi latency, driver latency * must be above listed swfi latency */ ui->pdata->swfi_latency += 1; spin_lock_init(&ui->lock); msm_hsusb_suspend_locks_init(ui, 1); enable_irq_wake(irq); /* memory barrier initialization in non-interrupt context */ dmb(); 
usb_debugfs_init(ui); usb_prepare(ui); pr_info("%s: io=%p, irq=%d, dma=%p(%x)\n", __func__, ui->addr, ui->irq, ui->buf, ui->dma); return 0; free_ulpi_irq2: free_irq(ulpi_irq2, NULL); free_ulpi_irq1: free_irq(ulpi_irq1, NULL); free_irq: free_irq(ui->irq, ui); free_vreg5v: if (ui->pdata->vreg5v_required) vreg_put(ui->vreg); free_hs_cclk: clk_put(ui->cclk); free_hs_pclk: clk_put(ui->pclk); free_hs_clk: clk_put(ui->clk); free_dma_pool: dma_pool_destroy(ui->pool); free_dma_coherent: dma_free_coherent(&pdev->dev, 4096, ui->buf, ui->dma); free_iounmap: iounmap(ui->addr); free_func_sysfs_grp: sysfs_remove_group(&pdev->dev.kobj, &msm_hsusb_func_attr_grp); free_func: kfree(ui->func); free_str_desc: kfree(ui->strdesc); free_ui: kfree(ui); free_workqueue: destroy_workqueue(usb_work); free_sysfs_grp: sysfs_remove_group(&pdev->dev.kobj, &msm_hsusb_attr_grp); return ret; } #ifdef CONFIG_PM static int usb_platform_suspend(struct platform_device *pdev, pm_message_t state) { struct usb_info *ui = the_usb_info; unsigned long flags; int ret = 0; spin_lock_irqsave(&ui->lock, flags); if (!ui->active) { spin_unlock_irqrestore(&ui->lock, flags); pr_info("%s: peripheral mode is not active" "nothing to be done\n", __func__); return 0; } if (ui->in_lpm) { spin_unlock_irqrestore(&ui->lock, flags); pr_info("%s: we are already in lpm, nothing to be done\n", __func__); return 0; } spin_unlock_irqrestore(&ui->lock, flags); ret = usb_lpm_enter(ui); if (ret) pr_err("%s: failed to enter lpm\n", __func__); return ret; } #endif static struct platform_driver usb_driver = { .probe = usb_probe, #ifdef CONFIG_PM .suspend = usb_platform_suspend, #endif .driver = { .name = DRIVER_NAME, }, }; static int __init usb_module_init(void) { /* rpc connect for phy_reset */ msm_hsusb_rpc_connect(); /* rpc connect for charging */ msm_chg_rpc_connect(); return platform_driver_register(&usb_driver); } static void free_usb_info(void) { struct usb_info *ui = the_usb_info; unsigned long flags; int i; if (ui) { 
INIT_LIST_HEAD(&usb_function_list); for (i = 0; i < ui->num_funcs; i++) kfree(ui->func[i]); ui->num_funcs = 0; usb_uninit(ui); kfree(ui->strdesc); usb_ept_free_req(&ui->ep0in, ui->setup_req); if (ui->ept[0].ui == ui) flush_all_endpoints(ui); spin_lock_irqsave(&ui->lock, flags); usb_clk_disable(ui); usb_vreg_disable(ui); spin_unlock_irqrestore(&ui->lock, flags); usb_free(ui, 0); the_usb_info = NULL; } } static void usb_exit(void) { struct usb_info *ui = the_usb_info; /* free the dev state structure */ if (!ui) return; if (ui->xceiv) { ui->xceiv->set_peripheral(ui->xceiv, NULL); msm_otg_put_transceiver(ui->xceiv); } cancel_work_sync(&ui->li.wakeup_phy); destroy_workqueue(usb_work); /* free the usb_info structure */ free_usb_info(); switch_dev_unregister(&ui->sdev); sysfs_remove_group(&ui->pdev->dev.kobj, &msm_hsusb_func_attr_grp); sysfs_remove_group(&ui->pdev->dev.kobj, &msm_hsusb_attr_grp); usb_debugfs_uninit(); platform_driver_unregister(&usb_driver); msm_hsusb_rpc_close(); msm_chg_rpc_close(); msm_pm_app_unregister_vbus_sn(&msm_hsusb_set_vbus_state); msm_pm_app_rpc_deinit(); } static void __exit usb_module_exit(void) { usb_exit(); } module_param(pid, int, 0); MODULE_PARM_DESC(pid, "Product ID of the desired composition"); module_init(usb_module_init); module_exit(usb_module_exit); static void copy_string_descriptor(char *string, char *buffer) { int length, i; if (string) { length = strlen(string); buffer[0] = 2 * length + 2; buffer[1] = USB_DT_STRING; for (i = 0; i < length; i++) { buffer[2 * i + 2] = string[i]; buffer[2 * i + 3] = 0; } } } static int get_qualifier_descriptor(struct usb_qualifier_descriptor *dq) { struct usb_qualifier_descriptor *dev_qualifier = dq; dev_qualifier->bLength = sizeof(struct usb_qualifier_descriptor), dev_qualifier->bDescriptorType = USB_DT_DEVICE_QUALIFIER, dev_qualifier->bcdUSB = __constant_cpu_to_le16(0x0200), dev_qualifier->bDeviceClass = USB_CLASS_PER_INTERFACE, dev_qualifier->bDeviceSubClass = 0; dev_qualifier->bDeviceProtocol = 
0; dev_qualifier->bMaxPacketSize0 = 64; dev_qualifier->bNumConfigurations = 1; dev_qualifier->bRESERVED = 0; return sizeof(struct usb_qualifier_descriptor); } static int usb_fill_descriptors(void *ptr, struct usb_descriptor_header **descriptors) { unsigned char *buf = ptr; struct usb_descriptor_header *item = descriptors[0]; unsigned cnt = 0; while (NULL != item) { unsigned len = item->bLength; memcpy(buf, item, len); buf += len; cnt++; item = descriptors[cnt]; } return buf-(u8 *)ptr; } static int usb_find_descriptor(struct usb_info *ui, struct usb_ctrlrequest *ctl, struct usb_request *req) { int i; unsigned short id = ctl->wValue; unsigned short type = id >> 8; id &= 0xff; if ((type == USB_DT_DEVICE) && (id == 0)) { req->length = sizeof(desc_device); if (usb_msm_is_iad()) { desc_device.bDeviceClass = 0xEF; desc_device.bDeviceSubClass = 0x02; desc_device.bDeviceProtocol = 0x01; } memcpy(req->buf, &desc_device, req->length); return 0; } if ((type == USB_DT_DEVICE_QUALIFIER) && (id == 0)) { struct usb_qualifier_descriptor dq; req->length = get_qualifier_descriptor(&dq); if (usb_msm_is_iad()) { dq.bDeviceClass = 0xEF; dq.bDeviceSubClass = 0x02; dq.bDeviceProtocol = 0x01; } memcpy(req->buf, &dq, req->length); return 0; } if ((type == USB_DT_OTHER_SPEED_CONFIG) && (id == 0)) goto get_config; if ((type == USB_DT_CONFIG) && (id == 0)) { struct usb_config_descriptor cfg; unsigned ifc_count = 0; char *ptr, *start; get_config: ifc_count = 0; start = req->buf; ptr = start + USB_DT_CONFIG_SIZE; ifc_count = ui->next_ifc_num; for (i = 0; i < ui->num_funcs; i++) { struct usb_function_info *fi = ui->func[i]; struct usb_descriptor_header **dh = NULL; if (!fi || !(ui->composition->functions & (1 << i))) continue; switch (ui->speed) { case USB_SPEED_HIGH: if (type == USB_DT_OTHER_SPEED_CONFIG) dh = fi->func->fs_descriptors; else dh = fi->func->hs_descriptors; break; case USB_SPEED_FULL: if (type == USB_DT_OTHER_SPEED_CONFIG) dh = fi->func->hs_descriptors; else dh = 
fi->func->fs_descriptors; break; default: printk(KERN_ERR "Unsupported speed(%x)\n", ui->speed); return -1; } ptr += usb_fill_descriptors(ptr, dh); } #define USB_REMOTE_WAKEUP_SUPPORT 1 cfg.bLength = USB_DT_CONFIG_SIZE; if (type == USB_DT_OTHER_SPEED_CONFIG) cfg.bDescriptorType = USB_DT_OTHER_SPEED_CONFIG; else cfg.bDescriptorType = USB_DT_CONFIG; cfg.wTotalLength = ptr - start; cfg.bNumInterfaces = ifc_count; cfg.bConfigurationValue = 1; cfg.iConfiguration = 0; cfg.bmAttributes = USB_CONFIG_ATT_ONE | ui->selfpowered << USB_CONFIG_ATT_SELFPOWER_POS | USB_REMOTE_WAKEUP_SUPPORT << USB_CONFIG_ATT_WAKEUP_POS; cfg.bMaxPower = ui->maxpower; memcpy(start, &cfg, USB_DT_CONFIG_SIZE); req->length = ptr - start; return 0; } if (type == USB_DT_STRING) { char *buffer = req->buf; buffer[0] = 0; if (id > ui->strdesc_index) return -1; if (id == STRING_LANGUAGE_ID) memcpy(buffer, str_lang_desc, str_lang_desc[0]); else copy_string_descriptor(ui->strdesc[id], buffer); if (buffer[0]) { req->length = buffer[0]; return 0; } else return -1; } return -1; } /*****Gadget Framework Functions***/ struct device *usb_get_device(void) { if (the_usb_info) { if (the_usb_info->pdev) return &(the_usb_info->pdev->dev); } return NULL; } EXPORT_SYMBOL(usb_get_device); int usb_ept_cancel_xfer(struct usb_endpoint *ept, struct usb_request *_req) { struct usb_info *ui = the_usb_info; struct msm_request *req = to_msm_request(_req); struct msm_request *temp_req, *prev_req; unsigned long flags; if (!(ui && req && ept->req)) return -EINVAL; spin_lock_irqsave(&ui->lock, flags); if (req->busy) { req->req.status = 0; req->busy = 0; /* See if the request is the first request in the ept queue */ if (ept->req == req) { /* Stop the transfer */ do { writel((1 << ept->bit), USB_ENDPTFLUSH); while (readl(USB_ENDPTFLUSH) & (1 << ept->bit)) udelay(100); } while (readl(USB_ENDPTSTAT) & (1 << ept->bit)); if (!req->next) ept->last = NULL; ept->req = req->next; ept->head->next = req->item->next; goto cancel_req; } /* Request 
could be in the middle of ept queue */ prev_req = temp_req = ept->req; do { if (req == temp_req) { if (req->live) { /* Stop the transfer */ do { writel((1 << ept->bit), USB_ENDPTFLUSH); while (readl(USB_ENDPTFLUSH) & (1 << ept->bit)) udelay(100); } while (readl(USB_ENDPTSTAT) & (1 << ept->bit)); } prev_req->next = temp_req->next; prev_req->item->next = temp_req->item->next; if (!req->next) ept->last = prev_req; goto cancel_req; } prev_req = temp_req; temp_req = temp_req->next; } while (temp_req != NULL); goto error; cancel_req: if (req->live) { /* prepare the transaction descriptor item for the hardware */ req->item->next = TERMINATE; req->item->info = 0; req->live = 0; dma_unmap_single(NULL, req->dma, req->req.length, (ept->flags & EPT_FLAG_IN) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); /* Reprime the endpoint for the remaining transfers */ if (ept->req) { temp_req = ept->req; while (temp_req != NULL) { temp_req->live = 0; temp_req = temp_req->next; } usb_ept_start(ept); } } else dma_unmap_single(NULL, req->dma, req->req.length, (ept->flags & EPT_FLAG_IN) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); spin_unlock_irqrestore(&ui->lock, flags); return 0; } error: spin_unlock_irqrestore(&ui->lock, flags); return -EINVAL; } EXPORT_SYMBOL(usb_ept_cancel_xfer); int usb_ept_set_halt(struct usb_endpoint *ept) { struct usb_info *ui = ept->ui; int in = ept->flags & EPT_FLAG_IN; unsigned n; if (ui->in_lpm) { pr_err("%s: controller is in lpm, cannot proceed\n", __func__); return -1; } ept->ept_halted = 1; n = readl(USB_ENDPTCTRL(ept->num)); if (in) n |= CTRL_TXS; else n |= CTRL_RXS; writel(n, USB_ENDPTCTRL(ept->num)); return 0; } EXPORT_SYMBOL(usb_ept_set_halt); int usb_ept_clear_halt(struct usb_endpoint *ept) { struct usb_info *ui = ept->ui; int in = ept->flags & EPT_FLAG_IN; unsigned n; if (ui->in_lpm) { pr_err("%s: controller is in lpm, cannot proceed\n", __func__); return -1; } if (ept->ept_halted) ept->ept_halted = 0; n = readl(USB_ENDPTCTRL(ept->num)); /*clear stall bit and set data toggle bit*/ if (in) { n &= (~CTRL_TXS); n |= (CTRL_TXR); } else { n &= ~(CTRL_RXS); n |= (CTRL_RXR); } writel(n, USB_ENDPTCTRL(ept->num)); return 0; } EXPORT_SYMBOL(usb_ept_clear_halt); int usb_ept_is_stalled(struct usb_endpoint *ept) { struct usb_info *ui = ept->ui; int in = ept->flags & EPT_FLAG_IN; unsigned n; n = readl(USB_ENDPTCTRL(ept->num)); if (in && (n & CTRL_TXS)) return 1; else if (n & CTRL_RXS) return 1; return 0; } void usb_ept_fifo_flush(struct usb_endpoint *ept) { flush_endpoint(ept); } EXPORT_SYMBOL(usb_ept_fifo_flush); struct usb_function *usb_ept_get_function(struct usb_endpoint *ept) { return NULL; } EXPORT_SYMBOL(usb_ept_get_function); void usb_free_endpoint_all_req(struct usb_endpoint *ep) { struct msm_request *temp; struct msm_request *req; if (!ep) return; req = ep->req; while (req) { temp = req->next; req->busy = 0; if (&req->req) usb_ept_free_req(ep, &req->req); req = temp; } } EXPORT_SYMBOL(usb_free_endpoint_all_req); int usb_function_unregister(struct usb_function *func) { struct usb_info *ui = the_usb_info; int i; struct 
usb_function_info *fi; unsigned long flags; if (!func) return -EINVAL; fi = usb_find_function(func->name); if (!fi) return -EINVAL; if (ui->running) { disable_irq(ui->irq); spin_lock_irqsave(&ui->lock, flags); ui->running = 0; ui->online = 0; ui->bound = 0; spin_unlock_irqrestore(&ui->lock, flags); usb_uninit(ui); /* we should come out of lpm to access registers */ if (ui->in_lpm) { if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) { disable_irq(ui->gpio_irq[0]); disable_irq(ui->gpio_irq[1]); } usb_lpm_exit(ui); if (cancel_work_sync(&ui->li.wakeup_phy)) usb_lpm_wakeup_phy(NULL); ui->in_lpm = 0; } /* disable usb and session valid interrupts */ writel(0, USB_USBINTR); writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC); /* stop the controller */ usb_disable_pullup(ui); msleep(100); enable_irq(ui->irq); } pr_info("%s: func->name = %s\n", __func__, func->name); ui->composition = NULL; if (func->configure) func->configure(0, func->context); if (func->unbind) func->unbind(func->context); list_del(&fi->list); for (i = 0; i < ui->num_funcs; i++) if (fi == ui->func[i]) ui->func[i] = NULL; kfree(fi); return 0; } EXPORT_SYMBOL(usb_function_unregister); MODULE_LICENSE("GPL");
gpl-2.0
krizky82/semc-kernel-msm7x30-ics
drivers/usb/class/cdc-acm.c
17
48916
/* * cdc-acm.c * * Copyright (c) 1999 Armin Fuerst <fuerst@in.tum.de> * Copyright (c) 1999 Pavel Machek <pavel@suse.cz> * Copyright (c) 1999 Johannes Erdfelt <johannes@erdfelt.com> * Copyright (c) 2000 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2004 Oliver Neukum <oliver@neukum.name> * Copyright (c) 2005 David Kubicek <dave@awk.cz> * * USB Abstract Control Model driver for USB modems and ISDN adapters * * Sponsored by SuSE * * ChangeLog: * v0.9 - thorough cleaning, URBification, almost a rewrite * v0.10 - some more cleanups * v0.11 - fixed flow control, read error doesn't stop reads * v0.12 - added TIOCM ioctls, added break handling, made struct acm * kmalloced * v0.13 - added termios, added hangup * v0.14 - sized down struct acm * v0.15 - fixed flow control again - characters could be lost * v0.16 - added code for modems with swapped data and control interfaces * v0.17 - added new style probing * v0.18 - fixed new style probing for devices with more configurations * v0.19 - fixed CLOCAL handling (thanks to Richard Shih-Ping Chan) * v0.20 - switched to probing on interface (rather than device) class * v0.21 - revert to probing on device for devices with multiple configs * v0.22 - probe only the control interface. if usbcore doesn't choose the * config we want, sysadmin changes bConfigurationValue in sysfs. * v0.23 - use softirq for rx processing, as needed by tty layer * v0.24 - change probe method to evaluate CDC union descriptor * v0.25 - downstream tasks paralelized to maximize throughput * v0.26 - multiple write urbs, writesize increased */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #undef DEBUG #undef VERBOSE_DEBUG #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/serial.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <asm/byteorder.h> #include <asm/unaligned.h> #include <linux/list.h> #include "cdc-acm.h" #define ACM_CLOSE_TIMEOUT 15 /* seconds to let writes drain */ /* * Version Information */ #define DRIVER_VERSION "v0.26" #define DRIVER_AUTHOR "Armin Fuerst, Pavel Machek, Johannes Erdfelt, Vojtech Pavlik, David Kubicek" #define DRIVER_DESC "USB Abstract Control Model driver for USB modems and ISDN adapters" static struct usb_driver acm_driver; static struct tty_driver *acm_tty_driver; static struct acm *acm_table[ACM_TTY_MINORS]; static DEFINE_MUTEX(open_mutex); #define ACM_READY(acm) (acm && acm->dev && acm->port.count) static const struct tty_port_operations acm_port_ops = { }; #ifdef VERBOSE_DEBUG #define verbose 1 #else #define verbose 0 #endif /* * Functions for ACM control messages. */ static int acm_ctrl_msg(struct acm *acm, int request, int value, void *buf, int len) { int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0), request, USB_RT_ACM, value, acm->control->altsetting[0].desc.bInterfaceNumber, buf, len, 5000); dbg("acm_control_msg: rq: 0x%02x val: %#x len: %#x result: %d", request, value, len, retval); return retval < 0 ? 
retval : 0; } /* devices aren't required to support these requests. * the cdc acm descriptor tells whether they do... */ #define acm_set_control(acm, control) \ acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE, control, NULL, 0) #define acm_set_line(acm, line) \ acm_ctrl_msg(acm, USB_CDC_REQ_SET_LINE_CODING, 0, line, sizeof *(line)) #define acm_send_break(acm, ms) \ acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0) /* * Write buffer management. * All of these assume proper locks taken by the caller. */ static int acm_wb_alloc(struct acm *acm) { int i, wbn; struct acm_wb *wb; wbn = 0; i = 0; for (;;) { wb = &acm->wb[wbn]; if (!wb->use) { wb->use = 1; return wbn; } wbn = (wbn + 1) % ACM_NW; if (++i >= ACM_NW) return -1; } } static int acm_wb_is_avail(struct acm *acm) { int i, n; unsigned long flags; n = ACM_NW; spin_lock_irqsave(&acm->write_lock, flags); for (i = 0; i < ACM_NW; i++) n -= acm->wb[i].use; spin_unlock_irqrestore(&acm->write_lock, flags); return n; } /* * Finish write. Caller must hold acm->write_lock */ static void acm_write_done(struct acm *acm, struct acm_wb *wb) { wb->use = 0; acm->transmitting--; usb_autopm_put_interface_async(acm->control); } /* * Poke write. 
* * the caller is responsible for locking */ static int acm_start_wb(struct acm *acm, struct acm_wb *wb) { int rc; acm->transmitting++; wb->urb->transfer_buffer = wb->buf; wb->urb->transfer_dma = wb->dmah; wb->urb->transfer_buffer_length = wb->len; wb->urb->dev = acm->dev; rc = usb_submit_urb(wb->urb, GFP_ATOMIC); if (rc < 0) { dbg("usb_submit_urb(write bulk) failed: %d", rc); acm_write_done(acm, wb); } return rc; } static int acm_write_start(struct acm *acm, int wbn) { unsigned long flags; struct acm_wb *wb = &acm->wb[wbn]; int rc; spin_lock_irqsave(&acm->write_lock, flags); if (!acm->dev) { wb->use = 0; spin_unlock_irqrestore(&acm->write_lock, flags); return -ENODEV; } dbg("%s susp_count: %d", __func__, acm->susp_count); usb_autopm_get_interface_async(acm->control); if (acm->susp_count) { if (!acm->delayed_wb) acm->delayed_wb = wb; else usb_autopm_put_interface_async(acm->control); spin_unlock_irqrestore(&acm->write_lock, flags); return 0; /* A white lie */ } usb_mark_last_busy(acm->dev); rc = acm_start_wb(acm, wb); spin_unlock_irqrestore(&acm->write_lock, flags); return rc; } /* * attributes exported through sysfs */ static ssize_t show_caps (struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct acm *acm = usb_get_intfdata(intf); return sprintf(buf, "%d", acm->ctrl_caps); } static DEVICE_ATTR(bmCapabilities, S_IRUGO, show_caps, NULL); static ssize_t show_country_codes (struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct acm *acm = usb_get_intfdata(intf); memcpy(buf, acm->country_codes, acm->country_code_size); return acm->country_code_size; } static DEVICE_ATTR(wCountryCodes, S_IRUGO, show_country_codes, NULL); static ssize_t show_country_rel_date (struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct acm *acm = usb_get_intfdata(intf); return sprintf(buf, 
"%d", acm->country_rel_date); } static DEVICE_ATTR(iCountryCodeRelDate, S_IRUGO, show_country_rel_date, NULL); /* * Interrupt handlers for various ACM device responses */ /* control interface reports status changes with "interrupt" transfers */ static void acm_ctrl_irq(struct urb *urb) { struct acm *acm = urb->context; struct usb_cdc_notification *dr = urb->transfer_buffer; struct tty_struct *tty; unsigned char *data; int newctrl; int retval; int status = urb->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, status); return; default: dbg("%s - nonzero urb status received: %d", __func__, status); goto exit; } if (!ACM_READY(acm)) goto exit; usb_mark_last_busy(acm->dev); data = (unsigned char *)(dr + 1); switch (dr->bNotificationType) { case USB_CDC_NOTIFY_NETWORK_CONNECTION: dbg("%s network", dr->wValue ? "connected to" : "disconnected from"); break; case USB_CDC_NOTIFY_SERIAL_STATE: tty = tty_port_tty_get(&acm->port); newctrl = get_unaligned_le16(data); if (tty) { if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) { dbg("calling hangup"); tty_hangup(tty); } tty_kref_put(tty); } acm->ctrlin = newctrl; dbg("input control lines: dcd%c dsr%c break%c ring%c framing%c parity%c overrun%c", acm->ctrlin & ACM_CTRL_DCD ? '+' : '-', acm->ctrlin & ACM_CTRL_DSR ? '+' : '-', acm->ctrlin & ACM_CTRL_BRK ? '+' : '-', acm->ctrlin & ACM_CTRL_RI ? '+' : '-', acm->ctrlin & ACM_CTRL_FRAMING ? '+' : '-', acm->ctrlin & ACM_CTRL_PARITY ? '+' : '-', acm->ctrlin & ACM_CTRL_OVERRUN ? 
'+' : '-'); break; default: dbg("unknown notification %d received: index %d len %d data0 %d data1 %d", dr->bNotificationType, dr->wIndex, dr->wLength, data[0], data[1]); break; } exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with " "result %d", __func__, retval); } /* data interface returns incoming bytes, or we got unthrottled */ static void acm_read_bulk(struct urb *urb) { struct acm_rb *buf; struct acm_ru *rcv = urb->context; struct acm *acm = rcv->instance; int status = urb->status; dbg("Entering acm_read_bulk with status %d", status); if (!ACM_READY(acm)) { dev_dbg(&acm->data->dev, "Aborting, acm not ready"); return; } usb_mark_last_busy(acm->dev); if (status) dev_dbg(&acm->data->dev, "bulk rx status %d\n", status); buf = rcv->buffer; buf->size = urb->actual_length; if (likely(status == 0)) { spin_lock(&acm->read_lock); acm->processing++; list_add_tail(&rcv->list, &acm->spare_read_urbs); list_add_tail(&buf->list, &acm->filled_read_bufs); spin_unlock(&acm->read_lock); } else { /* we drop the buffer due to an error */ spin_lock(&acm->read_lock); list_add_tail(&rcv->list, &acm->spare_read_urbs); list_add(&buf->list, &acm->spare_read_bufs); spin_unlock(&acm->read_lock); /* nevertheless the tasklet must be kicked unconditionally so the queue cannot dry up */ } if (likely(!acm->susp_count)) tasklet_schedule(&acm->urb_task); } static void acm_rx_tasklet(unsigned long _acm) { struct acm *acm = (void *)_acm; struct acm_rb *buf; struct tty_struct *tty; struct acm_ru *rcv; unsigned long flags; unsigned char throttled; dbg("Entering acm_rx_tasklet"); if (!ACM_READY(acm)) { dbg("acm_rx_tasklet: ACM not ready"); return; } spin_lock_irqsave(&acm->throttle_lock, flags); throttled = acm->throttle; spin_unlock_irqrestore(&acm->throttle_lock, flags); if (throttled) { dbg("acm_rx_tasklet: throttled"); return; } tty = tty_port_tty_get(&acm->port); next_buffer: spin_lock_irqsave(&acm->read_lock, flags); if 
(list_empty(&acm->filled_read_bufs)) {
		spin_unlock_irqrestore(&acm->read_lock, flags);
		goto urbs;
	}
	buf = list_entry(acm->filled_read_bufs.next,
			struct acm_rb, list);
	list_del(&buf->list);
	spin_unlock_irqrestore(&acm->read_lock, flags);

	dbg("acm_rx_tasklet: procesing buf 0x%p, size = %d", buf, buf->size);

	if (tty) {
		/* re-check throttle: it may have been set since we started */
		spin_lock_irqsave(&acm->throttle_lock, flags);
		throttled = acm->throttle;
		spin_unlock_irqrestore(&acm->throttle_lock, flags);
		if (!throttled) {
			tty_buffer_request_room(tty, buf->size);
			tty_insert_flip_string(tty, buf->base, buf->size);
			tty_flip_buffer_push(tty);
		} else {
			/* throttled: requeue the buffer and stop for now */
			tty_kref_put(tty);
			dbg("Throttling noticed");
			spin_lock_irqsave(&acm->read_lock, flags);
			list_add(&buf->list, &acm->filled_read_bufs);
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		}
	}

	spin_lock_irqsave(&acm->read_lock, flags);
	list_add(&buf->list, &acm->spare_read_bufs);
	spin_unlock_irqrestore(&acm->read_lock, flags);
	goto next_buffer;

urbs:
	tty_kref_put(tty);

	/* re-arm as many read URBs as we have spare buffers for */
	while (!list_empty(&acm->spare_read_bufs)) {
		spin_lock_irqsave(&acm->read_lock, flags);
		if (list_empty(&acm->spare_read_urbs)) {
			acm->processing = 0;
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		}
		rcv = list_entry(acm->spare_read_urbs.next,
				struct acm_ru, list);
		list_del(&rcv->list);
		spin_unlock_irqrestore(&acm->read_lock, flags);

		buf = list_entry(acm->spare_read_bufs.next,
				struct acm_rb, list);
		list_del(&buf->list);

		rcv->buffer = buf;

		if (acm->is_int_ep)
			usb_fill_int_urb(rcv->urb, acm->dev,
					acm->rx_endpoint,
					buf->base,
					acm->readsize,
					acm_read_bulk, rcv, acm->bInterval);
		else
			usb_fill_bulk_urb(rcv->urb, acm->dev,
					acm->rx_endpoint,
					buf->base,
					acm->readsize,
					acm_read_bulk, rcv);
		rcv->urb->transfer_dma = buf->dma;
		rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		/* This shouldn't kill the driver as unsuccessful URBs are
		   returned to the free-urbs-pool and resubmitted ASAP */
		spin_lock_irqsave(&acm->read_lock, flags);
		if (acm->susp_count ||
				usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
			list_add(&buf->list, &acm->spare_read_bufs);
			list_add(&rcv->list, &acm->spare_read_urbs);
			acm->processing = 0;
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		} else {
			spin_unlock_irqrestore(&acm->read_lock, flags);
			dbg("acm_rx_tasklet: sending urb 0x%p, rcv 0x%p, buf 0x%p",
				rcv->urb, rcv, buf);
		}
	}
	spin_lock_irqsave(&acm->read_lock, flags);
	acm->processing = 0;
	spin_unlock_irqrestore(&acm->read_lock, flags);
}

/* data interface wrote those outgoing bytes */
/*
 * Bulk-OUT completion handler: releases the write buffer, then either
 * schedules the tx-wakeup work or wakes a close() waiting for drain.
 */
static void acm_write_bulk(struct urb *urb)
{
	struct acm_wb *wb = urb->context;
	struct acm *acm = wb->instance;
	unsigned long flags;

	if (verbose || urb->status
			|| (urb->actual_length != urb->transfer_buffer_length))
		dev_dbg(&acm->data->dev, "tx %d/%d bytes -- > %d\n",
			urb->actual_length,
			urb->transfer_buffer_length,
			urb->status);

	spin_lock_irqsave(&acm->write_lock, flags);
	acm_write_done(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);
	if (ACM_READY(acm))
		schedule_work(&acm->work);
	else
		wake_up_interruptible(&acm->drain_wait);
}

/* deferred work: tell the line discipline more data can be written */
static void acm_softint(struct work_struct *work)
{
	struct acm *acm = container_of(work, struct acm, work);
	struct tty_struct *tty;

	dev_vdbg(&acm->data->dev, "tx work\n");
	if (!ACM_READY(acm))
		return;
	tty = tty_port_tty_get(&acm->port);
	if (!tty)
		return;
	tty_wakeup(tty);
	tty_kref_put(tty);
}

/*
 * TTY handlers
 */

/*
 * tty open: on first open submits the notification URB, raises
 * DTR/RTS and arms the receive path.  Serialized by open_mutex and
 * acm->mutex; errors unwind through the goto ladder at the bottom.
 */
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
	struct acm *acm;
	int rv = -ENODEV;
	int i;
	dbg("Entering acm_tty_open.");

	mutex_lock(&open_mutex);

	acm = acm_table[tty->index];
	if (!acm || !acm->dev)
		goto err_out;
	else
		rv = 0;

	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);

	tty->driver_data = acm;
	tty_port_tty_set(&acm->port, tty);

	if (usb_autopm_get_interface(acm->control) < 0)
		goto early_bail;
	else
		acm->control->needs_remote_wakeup = 1;

	mutex_lock(&acm->mutex);
	if (acm->port.count++) {
		/* not the first open: PM ref already held by the first */
		usb_autopm_put_interface(acm->control);
		goto done;
	}

	acm->ctrlurb->dev = acm->dev;
	if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
		dbg("usb_submit_urb(ctrl irq) failed");
		goto
bail_out; }

	/* raise DTR/RTS; only fatal if the device claims line capability */
	if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
	    (acm->ctrl_caps & USB_CDC_CAP_LINE))
		goto full_bailout;

	usb_autopm_put_interface(acm->control);

	INIT_LIST_HEAD(&acm->spare_read_urbs);
	INIT_LIST_HEAD(&acm->spare_read_bufs);
	INIT_LIST_HEAD(&acm->filled_read_bufs);

	for (i = 0; i < acm->rx_buflimit; i++)
		list_add(&(acm->ru[i].list), &acm->spare_read_urbs);
	for (i = 0; i < acm->rx_buflimit; i++)
		list_add(&(acm->rb[i].list), &acm->spare_read_bufs);

	acm->throttle = 0;

	set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
	rv = tty_port_block_til_ready(&acm->port, tty, filp);
	/* arm the receive path */
	tasklet_schedule(&acm->urb_task);

done:
	mutex_unlock(&acm->mutex);
err_out:
	mutex_unlock(&open_mutex);
	return rv;

full_bailout:
	usb_kill_urb(acm->ctrlurb);
bail_out:
	usb_autopm_put_interface(acm->control);
	acm->port.count--;
	mutex_unlock(&acm->mutex);
early_bail:
	/* NOTE(review): tty_port_tty_set(NULL) after dropping open_mutex
	 * on this path — confirm ordering vs. a concurrent open */
	mutex_unlock(&open_mutex);
	tty_port_tty_set(&acm->port, NULL);
	return -EIO;
}

/*
 * Final teardown of a dead device: drop the tty device node and free
 * all URBs and the acm structure.  Caller holds open_mutex.
 */
static void acm_tty_unregister(struct acm *acm)
{
	int i, nr;

	nr = acm->rx_buflimit;
	tty_unregister_device(acm_tty_driver, acm->minor);
	usb_put_intf(acm->control);
	acm_table[acm->minor] = NULL;
	usb_free_urb(acm->ctrlurb);
	for (i = 0; i < ACM_NW; i++)
		usb_free_urb(acm->wb[i].urb);
	for (i = 0; i < nr; i++)
		usb_free_urb(acm->ru[i].urb);
	kfree(acm->country_codes);
	kfree(acm);
}

static int acm_tty_chars_in_buffer(struct tty_struct *tty);

/*
 * Quiesce the hardware side of the port: drop DTR/RTS, optionally
 * wait for pending writes to drain, then kill all URBs.
 */
static void acm_port_down(struct acm *acm, int drain)
{
	int i, nr = acm->rx_buflimit;
	mutex_lock(&open_mutex);
	if (acm->dev) {
		usb_autopm_get_interface(acm->control);
		acm_set_control(acm, acm->ctrlout = 0);
		/* try letting the last writes drain naturally */
		if (drain) {
			wait_event_interruptible_timeout(acm->drain_wait,
				(ACM_NW == acm_wb_is_avail(acm)) || !acm->dev,
					ACM_CLOSE_TIMEOUT * HZ);
		}
		usb_kill_urb(acm->ctrlurb);
		for (i = 0; i < ACM_NW; i++)
			usb_kill_urb(acm->wb[i].urb);
		/* keep the tasklet from re-arming reads while we kill them */
		tasklet_disable(&acm->urb_task);
		for (i = 0; i < nr; i++)
			usb_kill_urb(acm->ru[i].urb);
		tasklet_enable(&acm->urb_task);
		acm->control->needs_remote_wakeup = 0;
		usb_autopm_put_interface(acm->control);
	}
	mutex_unlock(&open_mutex);
}

static void acm_tty_hangup(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	tty_port_hangup(&acm->port);
	acm_port_down(acm, 0);
}

static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
	struct acm *acm = tty->driver_data;

	/* Perform the closing process and see if we need to do the hardware
	   shutdown */
	if (!acm)
		return;
	if (tty_port_close_start(&acm->port, tty, filp) == 0) {
		/* last close of a disconnected device: free everything */
		mutex_lock(&open_mutex);
		if (!acm->dev) {
			tty_port_tty_set(&acm->port, NULL);
			acm_tty_unregister(acm);
			tty->driver_data = NULL;
		}
		mutex_unlock(&open_mutex);
		return;
	}
	acm_port_down(acm, 0);
	tty_port_close_end(&acm->port, tty);
	tty_port_tty_set(&acm->port, NULL);
}

/*
 * tty write: copy up to writesize bytes into a free write buffer and
 * start transmission.  Returns bytes accepted, 0 if no buffer is
 * free, or a negative error.
 */
static int acm_tty_write(struct tty_struct *tty,
					const unsigned char *buf, int count)
{
	struct acm *acm = tty->driver_data;
	int stat;
	unsigned long flags;
	int wbn;
	struct acm_wb *wb;

	dbg("Entering acm_tty_write to write %d bytes,", count);

	if (!ACM_READY(acm))
		return -EINVAL;
	if (!count)
		return 0;

	spin_lock_irqsave(&acm->write_lock, flags);
	wbn = acm_wb_alloc(acm);
	if (wbn < 0) {
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;
	}
	wb = &acm->wb[wbn];

	count = (count > acm->writesize) ? acm->writesize : count;
	dbg("Get %d bytes...", count);
	memcpy(wb->buf, buf, count);
	wb->len = count;
	spin_unlock_irqrestore(&acm->write_lock, flags);

	stat = acm_write_start(acm, wbn);
	if (stat < 0)
		return stat;
	return count;
}

static int acm_tty_write_room(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	/* NOTE(review): write_room conventionally returns >= 0;
	 * -EINVAL here looks suspect — confirm against the tty core */
	if (!ACM_READY(acm))
		return -EINVAL;
	/*
	 * Do not let the line discipline to know that we have a reserve,
	 * or it might get too enthusiastic.
	 */
	return acm_wb_is_avail(acm) ? acm->writesize : 0;
}

static int acm_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	if (!ACM_READY(acm))
		return 0;
	/*
	 * This is inaccurate (overcounts), but it works.
*/
	return (ACM_NW - acm_wb_is_avail(acm)) * acm->writesize;
}

/* tty throttle: stop pushing received data into the flip buffer */
static void acm_tty_throttle(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	if (!ACM_READY(acm))
		return;
	spin_lock_bh(&acm->throttle_lock);
	acm->throttle = 1;
	spin_unlock_bh(&acm->throttle_lock);
}

/* tty unthrottle: resume delivery and kick the rx tasklet */
static void acm_tty_unthrottle(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	if (!ACM_READY(acm))
		return;
	spin_lock_bh(&acm->throttle_lock);
	acm->throttle = 0;
	spin_unlock_bh(&acm->throttle_lock);
	tasklet_schedule(&acm->urb_task);
}

/* send a CDC break: 0xffff = break on (indefinite), 0 = break off */
static int acm_tty_break_ctl(struct tty_struct *tty, int state)
{
	struct acm *acm = tty->driver_data;
	int retval;
	if (!ACM_READY(acm))
		return -EINVAL;
	retval = acm_send_break(acm, state ? 0xffff : 0);
	if (retval < 0)
		dbg("send break failed");
	return retval;
}

/*
 * Report modem control lines from the cached ctrlout/ctrlin state.
 * NOTE(review): TIOCM_CTS is reported unconditionally — the serial
 * state notification carries no CTS bit here; confirm intent.
 */
static int acm_tty_tiocmget(struct tty_struct *tty, struct file *file)
{
	struct acm *acm = tty->driver_data;

	if (!ACM_READY(acm))
		return -EINVAL;

	return (acm->ctrlout & ACM_CTRL_DTR ? TIOCM_DTR : 0) |
	       (acm->ctrlout & ACM_CTRL_RTS ? TIOCM_RTS : 0) |
	       (acm->ctrlin & ACM_CTRL_DSR ? TIOCM_DSR : 0) |
	       (acm->ctrlin & ACM_CTRL_RI ? TIOCM_RI : 0) |
	       (acm->ctrlin & ACM_CTRL_DCD ? TIOCM_CD : 0) |
	       TIOCM_CTS;
}

/* set/clear DTR and RTS; other bits in set/clear are ignored */
static int acm_tty_tiocmset(struct tty_struct *tty, struct file *file,
			    unsigned int set, unsigned int clear)
{
	struct acm *acm = tty->driver_data;
	unsigned int newctrl;

	if (!ACM_READY(acm))
		return -EINVAL;

	newctrl = acm->ctrlout;
	set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
					(set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
	clear = (clear & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
					(clear & TIOCM_RTS ? ACM_CTRL_RTS : 0);

	newctrl = (newctrl & ~clear) | set;

	if (acm->ctrlout == newctrl)
		return 0;	/* nothing changed: skip the control request */
	return acm_set_control(acm, acm->ctrlout = newctrl);
}

/* no driver-specific ioctls; let the tty core handle everything */
static int acm_tty_ioctl(struct tty_struct *tty, struct file *file,
					unsigned int cmd, unsigned long arg)
{
	struct acm *acm = tty->driver_data;

	if (!ACM_READY(acm))
		return -EINVAL;

	return -ENOIOCTLCMD;
}

/* baud rates corresponding to the Bxxx termios codes */
static const __u32 acm_tty_speed[] = {
	0, 50, 75, 110, 134, 150, 200, 300, 600,
	1200, 1800, 2400, 4800, 9600, 19200, 38400,
	57600, 115200, 230400, 460800, 500000, 576000,
	921600, 1000000, 1152000, 1500000, 2000000,
	2500000, 3000000, 3500000, 4000000
};

/* data bits indexed by (c_cflag & CSIZE) >> 4 */
static const __u8 acm_tty_size[] = {
	5, 6, 7, 8
};

/*
 * Translate termios settings into a CDC line-coding structure and
 * send it to the device if anything changed.  DTR is dropped when
 * the baud rate is set to 0 (hangup convention).
 */
static void acm_tty_set_termios(struct tty_struct *tty,
					struct ktermios *termios_old)
{
	struct acm *acm = tty->driver_data;
	struct ktermios *termios = tty->termios;
	struct usb_cdc_line_coding newline;
	int newctrl = acm->ctrlout;

	if (!ACM_READY(acm))
		return;

	newline.dwDTERate = cpu_to_le32(tty_get_baud_rate(tty));
	newline.bCharFormat = termios->c_cflag & CSTOPB ? 2 : 0;
	/* CDC parity: 0 none, 1 odd, 2 even, 3 mark, 4 space */
	newline.bParityType = termios->c_cflag & PARENB ?
				(termios->c_cflag & PARODD ? 1 : 2) +
				(termios->c_cflag & CMSPAR ? 2 : 0) : 0;
	newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
	/* FIXME: Needs to clear unsupported bits in the termios */
	acm->clocal = ((termios->c_cflag & CLOCAL) != 0);

	if (!newline.dwDTERate) {
		/* B0: keep the old rate on the wire but drop DTR */
		newline.dwDTERate = acm->line.dwDTERate;
		newctrl &= ~ACM_CTRL_DTR;
	} else
		newctrl |= ACM_CTRL_DTR;

	if (newctrl != acm->ctrlout)
		acm_set_control(acm, acm->ctrlout = newctrl);

	if (memcmp(&acm->line, &newline, sizeof newline)) {
		memcpy(&acm->line, &newline, sizeof newline);
		dbg("set line: %d %d %d %d",
			le32_to_cpu(newline.dwDTERate),
			newline.bCharFormat, newline.bParityType,
			newline.bDataBits);
		acm_set_line(acm, &acm->line);
	}
}

/*
 * USB probe and disconnect routines.
*/

/* Little helpers: write/read buffers free */
static void acm_write_buffers_free(struct acm *acm)
{
	int i;
	struct acm_wb *wb;
	struct usb_device *usb_dev = interface_to_usbdev(acm->control);

	for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++)
		usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah);
}

static void acm_read_buffers_free(struct acm *acm)
{
	struct usb_device *usb_dev = interface_to_usbdev(acm->control);
	int i, n = acm->rx_buflimit;

	for (i = 0; i < n; i++)
		usb_buffer_free(usb_dev, acm->readsize,
				acm->rb[i].base, acm->rb[i].dma);
}

/* Little helper: write buffers allocate */
static int acm_write_buffers_alloc(struct acm *acm)
{
	int i;
	struct acm_wb *wb;

	for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) {
		wb->buf = usb_buffer_alloc(acm->dev, acm->writesize, GFP_KERNEL,
		    &wb->dmah);
		if (!wb->buf) {
			/* unwind the buffers allocated so far */
			while (i != 0) {
				--i;
				--wb;
				usb_buffer_free(acm->dev, acm->writesize,
				    wb->buf, wb->dmah);
			}
			return -ENOMEM;
		}
	}
	return 0;
}

/*
 * Probe: parse the CDC class descriptors to locate the control and
 * data interfaces (with many quirk workarounds), then allocate the
 * acm structure, buffers and URBs, and register the tty device.
 */
static int acm_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	struct usb_cdc_union_desc *union_header = NULL;
	struct usb_cdc_country_functional_desc *cfd = NULL;
	unsigned char *buffer = intf->altsetting->extra;
	int buflen = intf->altsetting->extralen;
	struct usb_interface *control_interface;
	struct usb_interface *data_interface;
	struct usb_endpoint_descriptor *epctrl = NULL;
	struct usb_endpoint_descriptor *epread = NULL;
	struct usb_endpoint_descriptor *epwrite = NULL;
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct acm *acm;
	int minor;
	int ctrlsize, readsize;
	u8 *buf;
	u8 ac_management_function = 0;
	u8 call_management_function = 0;
	int call_interface_num = -1;
	int data_interface_num;
	unsigned long quirks;
	int num_rx_buf;
	int i;
	int combined_interfaces = 0;

	/* normal quirks */
	quirks = (unsigned long)id->driver_info;
	num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;

	/* handle quirks deadly to normal probing*/
	if (quirks == NO_UNION_NORMAL) {
		/* no union descriptor: assume interface 0 = control, 1 = data */
		data_interface = usb_ifnum_to_if(usb_dev, 1);
		control_interface = usb_ifnum_to_if(usb_dev, 0);
		goto skip_normal_probe;
	}

	/* normal probing*/
	if (!buffer) {
		dev_err(&intf->dev, "Weird descriptor references\n");
		return -EINVAL;
	}

	if (!buflen) {
		/* some devices attach the class descriptors to the endpoint */
		if (intf->cur_altsetting->endpoint &&
				intf->cur_altsetting->endpoint->extralen &&
				intf->cur_altsetting->endpoint->extra) {
			dev_dbg(&intf->dev,
				"Seeking extra descriptors on endpoint\n");
			buflen = intf->cur_altsetting->endpoint->extralen;
			buffer = intf->cur_altsetting->endpoint->extra;
		} else {
			dev_err(&intf->dev,
				"Zero length descriptor references\n");
			return -EINVAL;
		}
	}

	/* walk the class-specific descriptors; buffer[0] = bLength,
	 * buffer[1] = bDescriptorType, buffer[2] = bDescriptorSubType */
	while (buflen > 0) {
		if (buffer[1] != USB_DT_CS_INTERFACE) {
			dev_err(&intf->dev, "skipping garbage\n");
			goto next_desc;
		}

		switch (buffer[2]) {
		case USB_CDC_UNION_TYPE: /* we've found it */
			if (union_header) {
				dev_err(&intf->dev, "More than one "
					"union descriptor, skipping ...\n");
				goto next_desc;
			}
			union_header = (struct usb_cdc_union_desc *)buffer;
			break;
		case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
			cfd = (struct usb_cdc_country_functional_desc *)buffer;
			break;
		case USB_CDC_HEADER_TYPE: /* maybe check version */
			break; /* for now we ignore it */
		case USB_CDC_ACM_TYPE:
			ac_management_function = buffer[3];
			break;
		case USB_CDC_CALL_MANAGEMENT_TYPE:
			call_management_function = buffer[3];
			call_interface_num = buffer[4];
			if ((call_management_function & 3) != 3)
				dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
			break;
		default:
			/* there are LOTS more CDC descriptors that
			 * could legitimately be found here.
			 */
			dev_dbg(&intf->dev, "Ignoring descriptor: "
					"type %02x, length %d\n",
					buffer[2], buffer[0]);
			break;
		}
next_desc:
		buflen -= buffer[0];
		buffer += buffer[0];
	}

	if (!union_header) {
		if (call_interface_num > 0) {
			/* fall back to the call management descriptor */
			dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
			data_interface = usb_ifnum_to_if(usb_dev,
					(data_interface_num = call_interface_num));
			control_interface = intf;
		} else {
			if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
				dev_dbg(&intf->dev,"No union descriptor, giving up\n");
				return -ENODEV;
			} else {
				/* single interface with 3 endpoints: try it */
				dev_warn(&intf->dev,"No union descriptor, testing for castrated device\n");
				combined_interfaces = 1;
				control_interface = data_interface = intf;
				goto look_for_collapsed_interface;
			}
		}
	} else {
		control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
		data_interface = usb_ifnum_to_if(usb_dev,
				(data_interface_num = union_header->bSlaveInterface0));
		if (!control_interface || !data_interface) {
			dev_dbg(&intf->dev, "no interfaces\n");
			return -ENODEV;
		}
	}

	if (data_interface_num != call_interface_num)
		dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");

	if (control_interface == data_interface) {
		/* some broken devices designed for windows work this way */
		dev_warn(&intf->dev,"Control and data interfaces are not separated!\n");
		combined_interfaces = 1;
		/* a popular other OS doesn't use it */
		quirks |= NO_CAP_LINE;
		if (data_interface->cur_altsetting->desc.bNumEndpoints != 3) {
			dev_err(&intf->dev, "This needs exactly 3 endpoints\n");
			return -EINVAL;
		}
look_for_collapsed_interface:
		/* classify the 3 endpoints by type/direction */
		for (i = 0; i < 3; i++) {
			struct usb_endpoint_descriptor *ep;
			ep = &data_interface->cur_altsetting->endpoint[i].desc;

			if (usb_endpoint_is_int_in(ep))
				epctrl = ep;
			else if (usb_endpoint_is_bulk_out(ep))
				epwrite = ep;
			else if (usb_endpoint_is_bulk_in(ep))
				epread = ep;
			else
				return -EINVAL;
		}
		if (!epctrl || !epread || !epwrite)
			return -ENODEV;
		else
			goto made_compressed_probe;
	}

skip_normal_probe:

	/*workaround for switched interfaces */
	if (data_interface->cur_altsetting->desc.bInterfaceClass
						!= CDC_DATA_INTERFACE_TYPE) {
		if (control_interface->cur_altsetting->desc.bInterfaceClass
						== CDC_DATA_INTERFACE_TYPE) {
			struct usb_interface *t;
			dev_dbg(&intf->dev,
				"Your device has switched interfaces.\n");
			t = control_interface;
			control_interface = data_interface;
			data_interface = t;
		} else {
			return -EINVAL;
		}
	}

	/* Accept probe requests only for the control interface */
	if (!combined_interfaces && intf != control_interface)
		return -ENODEV;

	if (!combined_interfaces && usb_interface_claimed(data_interface)) {
		/* valid in this context */
		dev_dbg(&intf->dev, "The data interface isn't available\n");
		return -EBUSY;
	}

	if (data_interface->cur_altsetting->desc.bNumEndpoints < 2)
		return -EINVAL;

	epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
	epread = &data_interface->cur_altsetting->endpoint[0].desc;
	epwrite = &data_interface->cur_altsetting->endpoint[1].desc;

	/* workaround for switched endpoints */
	if (!usb_endpoint_dir_in(epread)) {
		/* descriptors are swapped */
		struct usb_endpoint_descriptor *t;
dev_dbg(&intf->dev,
			"The data interface has switched endpoints\n");
		t = epread;
		epread = epwrite;
		epwrite = t;
	}
made_compressed_probe:
	dbg("interfaces are valid");

	/* find a free minor number */
	for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);

	if (minor == ACM_TTY_MINORS) {
		dev_err(&intf->dev, "no more free acm devices\n");
		return -ENODEV;
	}

	acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
	if (acm == NULL) {
		dev_dbg(&intf->dev, "out of memory (acm kzalloc)\n");
		goto alloc_fail;
	}

	ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize);
	readsize = le16_to_cpu(epread->wMaxPacketSize) *
				(quirks == SINGLE_RX_URB ? 1 : 2);
	acm->combined_interfaces = combined_interfaces;
	acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20;
	acm->control = control_interface;
	acm->data = data_interface;
	acm->minor = minor;
	acm->dev = usb_dev;
	acm->ctrl_caps = ac_management_function;
	if (quirks & NO_CAP_LINE)
		acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
	acm->ctrlsize = ctrlsize;
	acm->readsize = readsize;
	acm->rx_buflimit = num_rx_buf;
	acm->urb_task.func = acm_rx_tasklet;
	acm->urb_task.data = (unsigned long) acm;
	INIT_WORK(&acm->work, acm_softint);
	init_waitqueue_head(&acm->drain_wait);
	spin_lock_init(&acm->throttle_lock);
	spin_lock_init(&acm->write_lock);
	spin_lock_init(&acm->read_lock);
	mutex_init(&acm->mutex);
	acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
	acm->is_int_ep = usb_endpoint_xfer_int(epread);
	if (acm->is_int_ep)
		acm->bInterval = epread->bInterval;
	tty_port_init(&acm->port);
	acm->port.ops = &acm_port_ops;

	buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
	if (!buf) {
		dev_dbg(&intf->dev, "out of memory (ctrl buffer alloc)\n");
		goto alloc_fail2;
	}
	acm->ctrl_buffer = buf;

	if (acm_write_buffers_alloc(acm) < 0) {
		dev_dbg(&intf->dev, "out of memory (write buffer alloc)\n");
		goto alloc_fail4;
	}

	acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!acm->ctrlurb) {
		dev_dbg(&intf->dev, "out of memory (ctrlurb kmalloc)\n");
		goto alloc_fail5;
	}
	/* allocate the read URBs */
	for (i = 0; i < num_rx_buf; i++) {
		struct acm_ru *rcv = &(acm->ru[i]);

		rcv->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (rcv->urb == NULL) {
			dev_dbg(&intf->dev,
				"out of memory (read urbs usb_alloc_urb)\n");
			goto alloc_fail6;
		}

		rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		rcv->instance = acm;
	}
	/* allocate the DMA-coherent read buffers */
	for (i = 0; i < num_rx_buf; i++) {
		struct acm_rb *rb = &(acm->rb[i]);

		rb->base = usb_buffer_alloc(acm->dev, readsize,
				GFP_KERNEL, &rb->dma);
		if (!rb->base) {
			dev_dbg(&intf->dev,
				"out of memory (read bufs usb_buffer_alloc)\n");
			goto alloc_fail7;
		}
	}
	/* allocate and prepare the write URBs */
	for (i = 0; i < ACM_NW; i++) {
		struct acm_wb *snd = &(acm->wb[i]);

		snd->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (snd->urb == NULL) {
			dev_dbg(&intf->dev,
				"out of memory (write urbs usb_alloc_urb)");
			goto alloc_fail8;
		}

		if (usb_endpoint_xfer_int(epwrite))
			usb_fill_int_urb(snd->urb, usb_dev,
				usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
				NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
		else
			usb_fill_bulk_urb(snd->urb, usb_dev,
				usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
				NULL, acm->writesize, acm_write_bulk, snd);
		snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		snd->instance = acm;
	}

	usb_set_intfdata(intf, acm);

	i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
	if (i < 0)
		goto alloc_fail8;

	if (cfd) { /* export the country data */
		/* NOTE(review): cfd->bLength is device-supplied; the
		 * 'bLength - 4' arithmetic is not range-checked here —
		 * confirm descriptor validation upstream */
		acm->country_codes = kmalloc(cfd->bLength - 4, GFP_KERNEL);
		if (!acm->country_codes)
			goto skip_countries;
		acm->country_code_size = cfd->bLength - 4;
		memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0,
							cfd->bLength - 4);
		acm->country_rel_date = cfd->iCountryCodeRelDate;

		i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
		if (i < 0) {
			kfree(acm->country_codes);
			acm->country_codes = NULL;
			acm->country_code_size = 0;
			goto skip_countries;
		}

		i = device_create_file(&intf->dev,
						&dev_attr_iCountryCodeRelDate);
		if (i < 0) {
			device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
			kfree(acm->country_codes);
			acm->country_codes = NULL;
			acm->country_code_size = 0;
			goto skip_countries;
		}
	}

skip_countries:
	usb_fill_int_urb(acm->ctrlurb, usb_dev,
			 usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
			 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
			 /* works around buggy devices */
			 epctrl->bInterval ? epctrl->bInterval : 0xff);
	acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	acm->ctrlurb->transfer_dma = acm->ctrl_dma;

	dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);

	acm_set_control(acm, acm->ctrlout);

	/* default line coding: 9600 8N1 */
	acm->line.dwDTERate = cpu_to_le32(9600);
	acm->line.bDataBits = 8;
	acm_set_line(acm, &acm->line);

	usb_driver_claim_interface(&acm_driver, data_interface, acm);
	usb_set_intfdata(data_interface, acm);

	usb_get_intf(control_interface);
	tty_register_device(acm_tty_driver, minor, &control_interface->dev);

	acm_table[minor] = acm;
	return 0;

/* error unwind: each label frees everything allocated before it */
alloc_fail8:
	for (i = 0; i < ACM_NW; i++)
		usb_free_urb(acm->wb[i].urb);
alloc_fail7:
	acm_read_buffers_free(acm);
alloc_fail6:
	for (i = 0; i < num_rx_buf; i++)
		usb_free_urb(acm->ru[i].urb);
	usb_free_urb(acm->ctrlurb);
alloc_fail5:
	acm_write_buffers_free(acm);
alloc_fail4:
	usb_buffer_free(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
	kfree(acm);
alloc_fail:
	return -ENOMEM;
}

/*
 * Kill every URB in flight and wait for the tx-wakeup work to finish.
 * The tasklet is disabled while the read URBs are killed so it cannot
 * resubmit them concurrently.
 */
static void stop_data_traffic(struct acm *acm)
{
	int i;

	dbg("Entering stop_data_traffic");

	tasklet_disable(&acm->urb_task);

	usb_kill_urb(acm->ctrlurb);
	for (i = 0; i < ACM_NW; i++)
		usb_kill_urb(acm->wb[i].urb);
	for (i = 0; i < acm->rx_buflimit; i++)
		usb_kill_urb(acm->ru[i].urb);

	tasklet_enable(&acm->urb_task);

	cancel_work_sync(&acm->work);
}

/*
 * Disconnect: tear down sysfs files, stop all traffic, free DMA
 * buffers, and either free the acm structure now (port closed) or
 * hang the tty up and let the last close free it.
 */
static void acm_disconnect(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct tty_struct *tty;

	/* sibling interface is already cleaning up */
	if (!acm)
		return;

	mutex_lock(&open_mutex);
	if (acm->country_codes) {
		device_remove_file(&acm->control->dev,
				&dev_attr_wCountryCodes);
		device_remove_file(&acm->control->dev,
				&dev_attr_iCountryCodeRelDate);
	}
	device_remove_file(&acm->control->dev,
&dev_attr_bmCapabilities);
	acm->dev = NULL;	/* marks the device as gone for all paths */
	usb_set_intfdata(acm->control, NULL);
	usb_set_intfdata(acm->data, NULL);

	stop_data_traffic(acm);

	acm_write_buffers_free(acm);
	usb_buffer_free(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
			acm->ctrl_dma);
	acm_read_buffers_free(acm);

	if (!acm->combined_interfaces)
		usb_driver_release_interface(&acm_driver, intf == acm->control ?
					acm->data : acm->control);

	if (acm->port.count == 0) {
		/* port closed: free everything right away */
		acm_tty_unregister(acm);
		mutex_unlock(&open_mutex);
		return;
	}

	mutex_unlock(&open_mutex);
	/* port still open: hang up; the last close does the final free */
	tty = tty_port_tty_get(&acm->port);
	if (tty) {
		tty_hangup(tty);
		tty_kref_put(tty);
	}
}

#ifdef CONFIG_PM
/*
 * Suspend: refuse autosuspend while I/O is in flight; on the first
 * suspend of the nested pair of interfaces, stop data traffic for an
 * open port.  Lock order: read_lock outer, write_lock inner.
 */
static int acm_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct acm *acm = usb_get_intfdata(intf);
	int cnt;

	if (message.event & PM_EVENT_AUTO) {
		int b;

		spin_lock_irq(&acm->read_lock);
		spin_lock(&acm->write_lock);
		b = acm->processing + acm->transmitting;
		spin_unlock(&acm->write_lock);
		spin_unlock_irq(&acm->read_lock);
		if (b)
			return -EBUSY;	/* busy: veto autosuspend */
	}

	spin_lock_irq(&acm->read_lock);
	spin_lock(&acm->write_lock);
	cnt = acm->susp_count++;
	spin_unlock(&acm->write_lock);
	spin_unlock_irq(&acm->read_lock);

	if (cnt)
		return 0;	/* already suspended once (both interfaces) */
	/*
	we treat opened interfaces differently,
	we must guard against open
	*/
	mutex_lock(&acm->mutex);

	if (acm->port.count)
		stop_data_traffic(acm);

	mutex_unlock(&acm->mutex);
	return 0;
}

/*
 * Resume: on the last resume of the pair, resubmit the control URB,
 * fire any write that was delayed during suspend, and restart the
 * receive tasklet for an open port.
 */
static int acm_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct acm_wb *wb;
	int rv = 0;
	int cnt;

	spin_lock_irq(&acm->read_lock);
	acm->susp_count -= 1;
	cnt = acm->susp_count;
	spin_unlock_irq(&acm->read_lock);

	if (cnt)
		return 0;	/* sibling interface still suspended */

	mutex_lock(&acm->mutex);
	if (acm->port.count) {
		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);

		spin_lock_irq(&acm->write_lock);
		if (acm->delayed_wb) {
			wb = acm->delayed_wb;
			acm->delayed_wb = NULL;
			spin_unlock_irq(&acm->write_lock);
			acm_start_wb(acm, wb);
		} else {
			spin_unlock_irq(&acm->write_lock);
		}

		/*
		 * delayed error checking because we must
		 * do the write path at all cost
		 */
		if (rv < 0)
			goto err_out;

		tasklet_schedule(&acm->urb_task);
	}

err_out:
	mutex_unlock(&acm->mutex);
	return rv;
}

#endif /* CONFIG_PM */

/* Nokia PC-Suite vendor-specific ACM interfaces */
#define NOKIA_PCSUITE_ACM_INFO(x) \
		USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \
		USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
		USB_CDC_ACM_PROTO_VENDOR)

#define SAMSUNG_PCSUITE_ACM_INFO(x) \
		USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
		USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
		USB_CDC_ACM_PROTO_VENDOR)

/*
 * USB driver structure.
 */

static struct usb_device_id acm_ids[] = {
	/* quirky and broken devices */
	{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0482, 0x0203), /* KYOCERA AH-K3001V */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x079b, 0x000f), /* BT On-Air USB MODEM */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0ace, 0x1602), /* ZyDAS 56K USB MODEM */
	.driver_info = SINGLE_RX_URB,
	},
	{ USB_DEVICE(0x0ace, 0x1608), /* ZyDAS 56K USB MODEM */
	.driver_info = SINGLE_RX_URB, /* firmware bug */
	},
	{ USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
	.driver_info = SINGLE_RX_URB, /* firmware bug */
	},
	{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0803, 0x3095), /* Zoom Telephonics Model 3095F USB MODEM */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0572, 0x1321), /* Conexant USB MODEM CX93010 */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0572, 0x1328), /*
Shiro / Aztech USB MODEM UM-3100 */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ }, /* Motorola H24 HSPA module: */ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */ { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */ { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */ { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */ { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */ { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */ { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */ { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */ { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */ .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on data interface instead of communications interface. Maybe we should define a new quirk for this. */ }, { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ }, { USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ }, /* Nokia S60 phones expose two ACM channels. The first is * a modem and is picked up by the standard AT-command * information below. The second is 'vendor-specific' but * is treated as a serial device at the S60 end, so we want * to expose it on Linux too. 
*/ { NOKIA_PCSUITE_ACM_INFO(0x042D), }, /* Nokia 3250 */ { NOKIA_PCSUITE_ACM_INFO(0x04D8), }, /* Nokia 5500 Sport */ { NOKIA_PCSUITE_ACM_INFO(0x04C9), }, /* Nokia E50 */ { NOKIA_PCSUITE_ACM_INFO(0x0419), }, /* Nokia E60 */ { NOKIA_PCSUITE_ACM_INFO(0x044D), }, /* Nokia E61 */ { NOKIA_PCSUITE_ACM_INFO(0x0001), }, /* Nokia E61i */ { NOKIA_PCSUITE_ACM_INFO(0x0475), }, /* Nokia E62 */ { NOKIA_PCSUITE_ACM_INFO(0x0508), }, /* Nokia E65 */ { NOKIA_PCSUITE_ACM_INFO(0x0418), }, /* Nokia E70 */ { NOKIA_PCSUITE_ACM_INFO(0x0425), }, /* Nokia N71 */ { NOKIA_PCSUITE_ACM_INFO(0x0486), }, /* Nokia N73 */ { NOKIA_PCSUITE_ACM_INFO(0x04DF), }, /* Nokia N75 */ { NOKIA_PCSUITE_ACM_INFO(0x000e), }, /* Nokia N77 */ { NOKIA_PCSUITE_ACM_INFO(0x0445), }, /* Nokia N80 */ { NOKIA_PCSUITE_ACM_INFO(0x042F), }, /* Nokia N91 & N91 8GB */ { NOKIA_PCSUITE_ACM_INFO(0x048E), }, /* Nokia N92 */ { NOKIA_PCSUITE_ACM_INFO(0x0420), }, /* Nokia N93 */ { NOKIA_PCSUITE_ACM_INFO(0x04E6), }, /* Nokia N93i */ { NOKIA_PCSUITE_ACM_INFO(0x04B2), }, /* Nokia 5700 XpressMusic */ { NOKIA_PCSUITE_ACM_INFO(0x0134), }, /* Nokia 6110 Navigator (China) */ { NOKIA_PCSUITE_ACM_INFO(0x046E), }, /* Nokia 6110 Navigator */ { NOKIA_PCSUITE_ACM_INFO(0x002f), }, /* Nokia 6120 classic & */ { NOKIA_PCSUITE_ACM_INFO(0x0088), }, /* Nokia 6121 classic */ { NOKIA_PCSUITE_ACM_INFO(0x00fc), }, /* Nokia 6124 classic */ { NOKIA_PCSUITE_ACM_INFO(0x0042), }, /* Nokia E51 */ { NOKIA_PCSUITE_ACM_INFO(0x00b0), }, /* Nokia E66 */ { NOKIA_PCSUITE_ACM_INFO(0x00ab), }, /* Nokia E71 */ { NOKIA_PCSUITE_ACM_INFO(0x0481), }, /* Nokia N76 */ { NOKIA_PCSUITE_ACM_INFO(0x0007), }, /* Nokia N81 & N81 8GB */ { NOKIA_PCSUITE_ACM_INFO(0x0071), }, /* Nokia N82 */ { NOKIA_PCSUITE_ACM_INFO(0x04F0), }, /* Nokia N95 & N95-3 NAM */ { NOKIA_PCSUITE_ACM_INFO(0x0070), }, /* Nokia N95 8GB */ { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */ { NOKIA_PCSUITE_ACM_INFO(0x0099), }, /* Nokia 6210 Navigator, RM-367 */ { NOKIA_PCSUITE_ACM_INFO(0x0128), }, /* 
Nokia 6210 Navigator, RM-419 */ { NOKIA_PCSUITE_ACM_INFO(0x008f), }, /* Nokia 6220 Classic */ { NOKIA_PCSUITE_ACM_INFO(0x00a0), }, /* Nokia 6650 */ { NOKIA_PCSUITE_ACM_INFO(0x007b), }, /* Nokia N78 */ { NOKIA_PCSUITE_ACM_INFO(0x0094), }, /* Nokia N85 */ { NOKIA_PCSUITE_ACM_INFO(0x003a), }, /* Nokia N96 & N96-3 */ { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */ { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */ { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */ { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */ { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */ { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */ { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */ { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */ { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */ { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */ { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */ { NOKIA_PCSUITE_ACM_INFO(0x0335), }, /* Nokia E7 */ { NOKIA_PCSUITE_ACM_INFO(0x03cd), }, /* Nokia C7 */ { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ /* Support for Owen devices */ { USB_DEVICE(0x03eb, 0x0030), }, /* Owen SI30 */ /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! 
*/ /* control interfaces without any protocol set */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_PROTO_NONE) }, /* control interfaces with various AT-command sets */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_V25TER) }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_PCCA101) }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_PCCA101_WAKE) }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_GSM) }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_3G) }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_CDMA) }, { } }; MODULE_DEVICE_TABLE(usb, acm_ids); static struct usb_driver acm_driver = { .name = "cdc_acm", .probe = acm_probe, .disconnect = acm_disconnect, #ifdef CONFIG_PM .suspend = acm_suspend, .resume = acm_resume, #endif .id_table = acm_ids, #ifdef CONFIG_PM .supports_autosuspend = 1, #endif }; /* * TTY driver structures. */ static const struct tty_operations acm_ops = { .open = acm_tty_open, .close = acm_tty_close, .hangup = acm_tty_hangup, .write = acm_tty_write, .write_room = acm_tty_write_room, .ioctl = acm_tty_ioctl, .throttle = acm_tty_throttle, .unthrottle = acm_tty_unthrottle, .chars_in_buffer = acm_tty_chars_in_buffer, .break_ctl = acm_tty_break_ctl, .set_termios = acm_tty_set_termios, .tiocmget = acm_tty_tiocmget, .tiocmset = acm_tty_tiocmset, }; /* * Init / exit. 
*/ static int __init acm_init(void) { int retval; acm_tty_driver = alloc_tty_driver(ACM_TTY_MINORS); if (!acm_tty_driver) return -ENOMEM; acm_tty_driver->owner = THIS_MODULE, acm_tty_driver->driver_name = "acm", acm_tty_driver->name = "ttyACM", acm_tty_driver->major = ACM_TTY_MAJOR, acm_tty_driver->minor_start = 0, acm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL, acm_tty_driver->subtype = SERIAL_TYPE_NORMAL, acm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; acm_tty_driver->init_termios = tty_std_termios; acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; tty_set_operations(acm_tty_driver, &acm_ops); retval = tty_register_driver(acm_tty_driver); if (retval) { put_tty_driver(acm_tty_driver); return retval; } retval = usb_register(&acm_driver); if (retval) { tty_unregister_driver(acm_tty_driver); put_tty_driver(acm_tty_driver); return retval; } printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return 0; } static void __exit acm_exit(void) { usb_deregister(&acm_driver); tty_unregister_driver(acm_tty_driver); put_tty_driver(acm_tty_driver); } module_init(acm_init); module_exit(acm_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(ACM_TTY_MAJOR);
gpl-2.0
kartzan/android_kernel_samsung_msm8930-common
net/wireless/sme.c
785
27070
/* * SME code for cfg80211's connect emulation. * * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * Copyright (C) 2009 Intel Corporation. All rights reserved. */ #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/wireless.h> #include <linux/export.h> #include <net/iw_handler.h> #include <net/cfg80211.h> #include <net/rtnetlink.h> #include "nl80211.h" #include "reg.h" struct cfg80211_conn { struct cfg80211_connect_params params; /* these are sub-states of the _CONNECTING sme_state */ enum { CFG80211_CONN_IDLE, CFG80211_CONN_SCANNING, CFG80211_CONN_SCAN_AGAIN, CFG80211_CONN_AUTHENTICATE_NEXT, CFG80211_CONN_AUTHENTICATING, CFG80211_CONN_ASSOCIATE_NEXT, CFG80211_CONN_ASSOCIATING, CFG80211_CONN_DEAUTH_ASSOC_FAIL, } state; u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; u8 *ie; size_t ie_len; bool auto_auth, prev_bssid_valid; }; static bool cfg80211_is_all_idle(void) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; bool is_all_idle = true; mutex_lock(&cfg80211_mutex); /* * All devices must be idle as otherwise if you are actively * scanning some new beacon hints could be learned and would * count as new regulatory hints. 
*/ list_for_each_entry(rdev, &cfg80211_rdev_list, list) { cfg80211_lock_rdev(rdev); list_for_each_entry(wdev, &rdev->netdev_list, list) { wdev_lock(wdev); if (wdev->sme_state != CFG80211_SME_IDLE) is_all_idle = false; wdev_unlock(wdev); } cfg80211_unlock_rdev(rdev); } mutex_unlock(&cfg80211_mutex); return is_all_idle; } static void disconnect_work(struct work_struct *work) { if (!cfg80211_is_all_idle()) return; regulatory_hint_disconnect(); } static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); static int cfg80211_conn_scan(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_scan_request *request; int n_channels, err; ASSERT_RTNL(); ASSERT_RDEV_LOCK(rdev); ASSERT_WDEV_LOCK(wdev); if (rdev->scan_req) return -EBUSY; if (wdev->conn->params.channel) { n_channels = 1; } else { enum ieee80211_band band; n_channels = 0; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (!wdev->wiphy->bands[band]) continue; n_channels += wdev->wiphy->bands[band]->n_channels; } } request = kzalloc(sizeof(*request) + sizeof(request->ssids[0]) + sizeof(request->channels[0]) * n_channels, GFP_KERNEL); if (!request) return -ENOMEM; if (wdev->conn->params.channel) request->channels[0] = wdev->conn->params.channel; else { int i = 0, j; enum ieee80211_band band; struct ieee80211_supported_band *bands; struct ieee80211_channel *channel; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { bands = wdev->wiphy->bands[band]; if (!bands) continue; for (j = 0; j < bands->n_channels; j++) { channel = &bands->channels[j]; if (channel->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i++] = channel; } request->rates[band] = (1 << bands->n_bitrates) - 1; } n_channels = i; } request->n_channels = n_channels; request->ssids = (void *)&request->channels[n_channels]; request->n_ssids = 1; memcpy(request->ssids[0].ssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len); request->ssids[0].ssid_len = 
wdev->conn->params.ssid_len; request->dev = wdev->netdev; request->wiphy = &rdev->wiphy; rdev->scan_req = request; err = rdev->ops->scan(wdev->wiphy, wdev->netdev, request); if (!err) { wdev->conn->state = CFG80211_CONN_SCANNING; nl80211_send_scan_start(rdev, wdev->netdev); dev_hold(wdev->netdev); } else { rdev->scan_req = NULL; kfree(request); } return err; } static int cfg80211_conn_do_work(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_connect_params *params; const u8 *prev_bssid = NULL; int err; ASSERT_WDEV_LOCK(wdev); if (!wdev->conn) return 0; params = &wdev->conn->params; switch (wdev->conn->state) { case CFG80211_CONN_SCAN_AGAIN: return cfg80211_conn_scan(wdev); case CFG80211_CONN_AUTHENTICATE_NEXT: BUG_ON(!rdev->ops->auth); wdev->conn->state = CFG80211_CONN_AUTHENTICATING; return __cfg80211_mlme_auth(rdev, wdev->netdev, params->channel, params->auth_type, params->bssid, params->ssid, params->ssid_len, NULL, 0, params->key, params->key_len, params->key_idx); case CFG80211_CONN_ASSOCIATE_NEXT: BUG_ON(!rdev->ops->assoc); wdev->conn->state = CFG80211_CONN_ASSOCIATING; if (wdev->conn->prev_bssid_valid) prev_bssid = wdev->conn->prev_bssid; err = __cfg80211_mlme_assoc(rdev, wdev->netdev, params->channel, params->bssid, prev_bssid, params->ssid, params->ssid_len, params->ie, params->ie_len, params->mfp != NL80211_MFP_NO, &params->crypto, params->flags, &params->ht_capa, &params->ht_capa_mask); if (err) __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); return err; case CFG80211_CONN_DEAUTH_ASSOC_FAIL: __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); /* return an error so that we call __cfg80211_connect_result() */ return -EINVAL; default: return 0; } } void cfg80211_conn_work(struct work_struct *work) { struct cfg80211_registered_device *rdev = container_of(work, struct 
cfg80211_registered_device, conn_work); struct wireless_dev *wdev; u8 bssid_buf[ETH_ALEN], *bssid = NULL; rtnl_lock(); cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->netdev_list, list) { wdev_lock(wdev); if (!netif_running(wdev->netdev)) { wdev_unlock(wdev); continue; } if (wdev->sme_state != CFG80211_SME_CONNECTING) { wdev_unlock(wdev); continue; } if (wdev->conn->params.bssid) { memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN); bssid = bssid_buf; } if (cfg80211_conn_do_work(wdev)) __cfg80211_connect_result( wdev->netdev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, false, NULL); wdev_unlock(wdev); } mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); rtnl_unlock(); } static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_bss *bss; u16 capa = WLAN_CAPABILITY_ESS; ASSERT_WDEV_LOCK(wdev); if (wdev->conn->params.privacy) capa |= WLAN_CAPABILITY_PRIVACY; bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel, wdev->conn->params.bssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len, WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY, capa); if (!bss) return NULL; memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN); wdev->conn->params.bssid = wdev->conn->bssid; wdev->conn->params.channel = bss->channel; wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; schedule_work(&rdev->conn_work); return bss; } static void __cfg80211_sme_scan_done(struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_bss *bss; ASSERT_WDEV_LOCK(wdev); if (wdev->sme_state != CFG80211_SME_CONNECTING) return; if (!wdev->conn) return; if (wdev->conn->state != CFG80211_CONN_SCANNING && wdev->conn->state != CFG80211_CONN_SCAN_AGAIN) return; bss = cfg80211_get_conn_bss(wdev); if (bss) { cfg80211_put_bss(bss); } else { 
/* not found */ if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) schedule_work(&rdev->conn_work); else __cfg80211_connect_result( wdev->netdev, wdev->conn->params.bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, false, NULL); } } void cfg80211_sme_scan_done(struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); wdev_lock(wdev); __cfg80211_sme_scan_done(dev); wdev_unlock(wdev); mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); } void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; u16 status_code = le16_to_cpu(mgmt->u.auth.status_code); ASSERT_WDEV_LOCK(wdev); /* should only RX auth frames when connecting */ if (wdev->sme_state != CFG80211_SME_CONNECTING) return; if (WARN_ON(!wdev->conn)) return; if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && wdev->conn->auto_auth && wdev->conn->params.auth_type != NL80211_AUTHTYPE_NETWORK_EAP) { /* select automatically between only open, shared, leap */ switch (wdev->conn->params.auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: if (wdev->connect_keys) wdev->conn->params.auth_type = NL80211_AUTHTYPE_SHARED_KEY; else wdev->conn->params.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; break; case NL80211_AUTHTYPE_SHARED_KEY: wdev->conn->params.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; break; default: /* huh? 
*/ wdev->conn->params.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; break; } wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; schedule_work(&rdev->conn_work); } else if (status_code != WLAN_STATUS_SUCCESS) { __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, status_code, false, NULL); } else if (wdev->sme_state == CFG80211_SME_CONNECTING && wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); } } bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); if (WARN_ON(!wdev->conn)) return false; if (!wdev->conn->prev_bssid_valid) return false; /* * Some stupid APs don't accept reassoc, so we * need to fall back to trying regular assoc. */ wdev->conn->prev_bssid_valid = false; wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); return true; } void cfg80211_sme_failed_assoc(struct wireless_dev *wdev) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); wdev->conn->state = CFG80211_CONN_DEAUTH_ASSOC_FAIL; schedule_work(&rdev->conn_work); } void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, u16 status, bool wextev, struct cfg80211_bss *bss) { struct wireless_dev *wdev = dev->ieee80211_ptr; u8 *country_ie; #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; if (wdev->sme_state != CFG80211_SME_CONNECTING) return; nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev, bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, status, GFP_KERNEL); #ifdef CONFIG_CFG80211_WEXT if (wextev) { if (req_ie && status == WLAN_STATUS_SUCCESS) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length 
= req_ie_len; wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, req_ie); } if (resp_ie && status == WLAN_STATUS_SUCCESS) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = resp_ie_len; wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, resp_ie); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; if (bssid && status == WLAN_STATUS_SUCCESS) { memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN); wdev->wext.prev_bssid_valid = true; } wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); } #endif if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; } if (wdev->conn) wdev->conn->state = CFG80211_CONN_IDLE; if (status != WLAN_STATUS_SUCCESS) { wdev->sme_state = CFG80211_SME_IDLE; if (wdev->conn) kfree(wdev->conn->ie); kfree(wdev->conn); wdev->conn = NULL; kfree(wdev->connect_keys); wdev->connect_keys = NULL; wdev->ssid_len = 0; cfg80211_put_bss(bss); return; } if (!bss) bss = cfg80211_get_bss(wdev->wiphy, wdev->conn ? 
wdev->conn->params.channel : NULL, bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); if (WARN_ON(!bss)) return; cfg80211_hold_bss(bss_from_pub(bss)); wdev->current_bss = bss_from_pub(bss); wdev->sme_state = CFG80211_SME_CONNECTED; cfg80211_upload_connect_keys(wdev); country_ie = (u8 *) ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY); if (!country_ie) return; /* * ieee80211_bss_get_ie() ensures we can access: * - country_ie + 2, the start of the country ie data, and * - and country_ie[1] which is the IE length */ regulatory_hint_11d(wdev->wiphy, bss->channel->band, country_ie + 2, country_ie[1]); } void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, u16 status, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); if (!ev) return; ev->type = EVENT_CONNECT_RESULT; if (bssid) memcpy(ev->cr.bssid, bssid, ETH_ALEN); if (req_ie_len) { ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev); ev->cr.req_ie_len = req_ie_len; memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len); } if (resp_ie_len) { ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; ev->cr.resp_ie_len = resp_ie_len; memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len); } ev->cr.status = status; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_connect_result); void __cfg80211_roamed(struct wireless_dev *wdev, struct cfg80211_bss *bss, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len) { #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif ASSERT_WDEV_LOCK(wdev); if 
(WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) goto out; if (wdev->sme_state != CFG80211_SME_CONNECTED) goto out; /* internal error -- how did we get to CONNECTED w/o BSS? */ if (WARN_ON(!wdev->current_bss)) { goto out; } cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; cfg80211_hold_bss(bss_from_pub(bss)); wdev->current_bss = bss_from_pub(bss); nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bss->bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, GFP_KERNEL); #ifdef CONFIG_CFG80211_WEXT if (req_ie) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = req_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCREQIE, &wrqu, req_ie); } if (resp_ie) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = resp_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCRESPIE, &wrqu, resp_ie); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); memcpy(wdev->wext.prev_bssid, bss->bssid, ETH_ALEN); wdev->wext.prev_bssid_valid = true; wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL); #endif return; out: cfg80211_put_bss(bss); } void cfg80211_roamed(struct net_device *dev, struct ieee80211_channel *channel, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_bss *bss; CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); if (WARN_ON(!bss)) return; cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len, resp_ie, resp_ie_len, gfp); } EXPORT_SYMBOL(cfg80211_roamed); void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) { struct wireless_dev *wdev 
= dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); if (WARN_ON(!bss)) return; ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); if (!ev) { cfg80211_put_bss(bss); return; } ev->type = EVENT_ROAMED; ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev); ev->rm.req_ie_len = req_ie_len; memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len); ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; ev->rm.resp_ie_len = resp_ie_len; memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len); ev->rm.bss = bss; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_roamed_bss); void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, size_t ie_len, u16 reason, bool from_ap) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); int i; #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; #ifndef CONFIG_CFG80211_ALLOW_RECONNECT if (wdev->sme_state != CFG80211_SME_CONNECTED) return; #endif if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); } wdev->current_bss = NULL; wdev->sme_state = CFG80211_SME_IDLE; wdev->ssid_len = 0; if (wdev->conn) { kfree(wdev->conn->ie); wdev->conn->ie = NULL; kfree(wdev->conn); wdev->conn = NULL; } nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); /* * Delete all the keys ... pairwise keys can't really * exist any more anyway, but default keys might. 
*/ if (rdev->ops->del_key) for (i = 0; i < 6; i++) rdev->ops->del_key(wdev->wiphy, dev, i, false, NULL); #ifdef CONFIG_CFG80211_WEXT memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); wdev->wext.connect.ssid_len = 0; #endif schedule_work(&cfg80211_disconnect_work); } void cfg80211_disconnected(struct net_device *dev, u16 reason, u8 *ie, size_t ie_len, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); ev = kzalloc(sizeof(*ev) + ie_len, gfp); if (!ev) return; ev->type = EVENT_DISCONNECTED; ev->dc.ie = ((u8 *)ev) + sizeof(*ev); ev->dc.ie_len = ie_len; memcpy((void *)ev->dc.ie, ie, ie_len); ev->dc.reason = reason; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_disconnected); int __cfg80211_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *connect, struct cfg80211_cached_keys *connkeys, const u8 *prev_bssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_bss *bss = NULL; int err; ASSERT_WDEV_LOCK(wdev); #ifndef CONFIG_CFG80211_ALLOW_RECONNECT if (wdev->sme_state != CFG80211_SME_IDLE) return -EALREADY; if (WARN_ON(wdev->connect_keys)) { #else if (wdev->connect_keys) { #endif kfree(wdev->connect_keys); wdev->connect_keys = NULL; } cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, rdev->wiphy.ht_capa_mod_mask); if (connkeys && connkeys->def >= 0) { int idx; u32 cipher; idx = connkeys->def; cipher = connkeys->params[idx].cipher; /* If given a WEP key we may need it for shared key auth */ if (cipher == WLAN_CIPHER_SUITE_WEP40 || cipher == WLAN_CIPHER_SUITE_WEP104) { connect->key_idx = 
idx; connect->key = connkeys->params[idx].key; connect->key_len = connkeys->params[idx].key_len; /* * If ciphers are not set (e.g. when going through * iwconfig), we have to set them appropriately here. */ if (connect->crypto.cipher_group == 0) connect->crypto.cipher_group = cipher; if (connect->crypto.n_ciphers_pairwise == 0) { connect->crypto.n_ciphers_pairwise = 1; connect->crypto.ciphers_pairwise[0] = cipher; } } } if (!rdev->ops->connect) { if (!rdev->ops->auth || !rdev->ops->assoc) return -EOPNOTSUPP; if (WARN_ON(wdev->conn)) return -EINPROGRESS; wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); if (!wdev->conn) return -ENOMEM; /* * Copy all parameters, and treat explicitly IEs, BSSID, SSID. */ memcpy(&wdev->conn->params, connect, sizeof(*connect)); if (connect->bssid) { wdev->conn->params.bssid = wdev->conn->bssid; memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); } if (connect->ie) { wdev->conn->ie = kmemdup(connect->ie, connect->ie_len, GFP_KERNEL); wdev->conn->params.ie = wdev->conn->ie; if (!wdev->conn->ie) { kfree(wdev->conn); wdev->conn = NULL; return -ENOMEM; } } if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { wdev->conn->auto_auth = true; /* start with open system ... 
should mostly work */ wdev->conn->params.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; } else { wdev->conn->auto_auth = false; } memcpy(wdev->ssid, connect->ssid, connect->ssid_len); wdev->ssid_len = connect->ssid_len; wdev->conn->params.ssid = wdev->ssid; wdev->conn->params.ssid_len = connect->ssid_len; /* see if we have the bss already */ bss = cfg80211_get_conn_bss(wdev); wdev->sme_state = CFG80211_SME_CONNECTING; wdev->connect_keys = connkeys; if (prev_bssid) { memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); wdev->conn->prev_bssid_valid = true; } /* we're good if we have a matching bss struct */ if (bss) { wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; err = cfg80211_conn_do_work(wdev); cfg80211_put_bss(bss); } else { /* otherwise we'll need to scan for the AP first */ err = cfg80211_conn_scan(wdev); /* * If we can't scan right now, then we need to scan again * after the current scan finished, since the parameters * changed (unless we find a good AP anyway). */ if (err == -EBUSY) { err = 0; wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; } } if (err) { kfree(wdev->conn->ie); kfree(wdev->conn); wdev->conn = NULL; wdev->sme_state = CFG80211_SME_IDLE; wdev->connect_keys = NULL; wdev->ssid_len = 0; } return err; } else { wdev->sme_state = CFG80211_SME_CONNECTING; wdev->connect_keys = connkeys; err = rdev->ops->connect(&rdev->wiphy, dev, connect); if (err) { wdev->connect_keys = NULL; wdev->sme_state = CFG80211_SME_IDLE; return err; } memcpy(wdev->ssid, connect->ssid, connect->ssid_len); wdev->ssid_len = connect->ssid_len; return 0; } } int cfg80211_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *connect, struct cfg80211_cached_keys *connkeys) { int err; mutex_lock(&rdev->devlist_mtx); wdev_lock(dev->ieee80211_ptr); err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); wdev_unlock(dev->ieee80211_ptr); mutex_unlock(&rdev->devlist_mtx); return err; } int __cfg80211_disconnect(struct 
cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (wdev->sme_state == CFG80211_SME_IDLE) return -EINVAL; kfree(wdev->connect_keys); wdev->connect_keys = NULL; if (!rdev->ops->disconnect) { if (!rdev->ops->deauth) return -EOPNOTSUPP; /* was it connected by userspace SME? */ if (!wdev->conn) { cfg80211_mlme_down(rdev, dev); return 0; } if (wdev->sme_state == CFG80211_SME_CONNECTING && (wdev->conn->state == CFG80211_CONN_SCANNING || wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)) { wdev->sme_state = CFG80211_SME_IDLE; kfree(wdev->conn->ie); kfree(wdev->conn); wdev->conn = NULL; wdev->ssid_len = 0; return 0; } /* wdev->conn->params.bssid must be set if > SCANNING */ err = __cfg80211_mlme_deauth(rdev, dev, wdev->conn->params.bssid, NULL, 0, reason, false); if (err) return err; } else { err = rdev->ops->disconnect(&rdev->wiphy, dev, reason); if (err) return err; } if (wdev->sme_state == CFG80211_SME_CONNECTED) __cfg80211_disconnected(dev, NULL, 0, 0, false); else if (wdev->sme_state == CFG80211_SME_CONNECTING) __cfg80211_connect_result(dev, NULL, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, wextev, NULL); return 0; } int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev) { int err; wdev_lock(dev->ieee80211_ptr); err = __cfg80211_disconnect(rdev, dev, reason, wextev); wdev_unlock(dev->ieee80211_ptr); return err; } void cfg80211_sme_disassoc(struct net_device *dev, struct cfg80211_internal_bss *bss) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); u8 bssid[ETH_ALEN]; ASSERT_WDEV_LOCK(wdev); if (!wdev->conn) return; if (wdev->conn->state == CFG80211_CONN_IDLE) return; /* * Ok, so the association was made by this SME -- we don't * want it any more so deauthenticate too. 
*/ memcpy(bssid, bss->pub.bssid, ETH_ALEN); __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); }
gpl-2.0
tanxjian/gec2440-linux
drivers/isdn/hisax/gazel.c
785
16260
/* $Id: gazel.c,v 2.19.2.4 2004/01/14 16:04:48 keil Exp $ * * low level stuff for Gazel isdn cards * * Author BeWan Systems * based on source code from Karsten Keil * Copyright by BeWan Systems * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "isac.h" #include "hscx.h" #include "isdnl1.h" #include "ipac.h" #include <linux/pci.h> static const char *gazel_revision = "$Revision: 2.19.2.4 $"; #define R647 1 #define R685 2 #define R753 3 #define R742 4 #define PLX_CNTRL 0x50 /* registre de controle PLX */ #define RESET_GAZEL 0x4 #define RESET_9050 0x40000000 #define PLX_INCSR 0x4C /* registre d'IT du 9050 */ #define INT_ISAC_EN 0x8 /* 1 = enable IT isac */ #define INT_ISAC 0x20 /* 1 = IT isac en cours */ #define INT_HSCX_EN 0x1 /* 1 = enable IT hscx */ #define INT_HSCX 0x4 /* 1 = IT hscx en cours */ #define INT_PCI_EN 0x40 /* 1 = enable IT PCI */ #define INT_IPAC_EN 0x3 /* enable IT ipac */ #define byteout(addr,val) outb(val,addr) #define bytein(addr) inb(addr) static inline u_char readreg(unsigned int adr, u_short off) { return bytein(adr + off); } static inline void writereg(unsigned int adr, u_short off, u_char data) { byteout(adr + off, data); } static inline void read_fifo(unsigned int adr, u_char * data, int size) { insb(adr, data, size); } static void write_fifo(unsigned int adr, u_char * data, int size) { outsb(adr, data, size); } static inline u_char readreg_ipac(unsigned int adr, u_short off) { register u_char ret; byteout(adr, off); ret = bytein(adr + 4); return ret; } static inline void writereg_ipac(unsigned int adr, u_short off, u_char data) { byteout(adr, off); byteout(adr + 4, data); } static inline void read_fifo_ipac(unsigned int adr, u_short off, u_char * data, int size) { byteout(adr, off); insb(adr + 4, data, size); } static void write_fifo_ipac(unsigned int adr, u_short off, u_char * data, int size) { 
byteout(adr, off); outsb(adr + 4, data, size); } /* Interface functions */ static u_char ReadISAC(struct IsdnCardState *cs, u_char offset) { u_short off2 = offset; switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); case R685: return (readreg(cs->hw.gazel.isac, off2)); case R753: case R742: return (readreg_ipac(cs->hw.gazel.ipac, 0x80 + off2)); } return 0; } static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) { u_short off2 = offset; switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); case R685: writereg(cs->hw.gazel.isac, off2, value); break; case R753: case R742: writereg_ipac(cs->hw.gazel.ipac, 0x80 + off2, value); break; } } static void ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size) { switch (cs->subtyp) { case R647: case R685: read_fifo(cs->hw.gazel.isacfifo, data, size); break; case R753: case R742: read_fifo_ipac(cs->hw.gazel.ipac, 0x80, data, size); break; } } static void WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size) { switch (cs->subtyp) { case R647: case R685: write_fifo(cs->hw.gazel.isacfifo, data, size); break; case R753: case R742: write_fifo_ipac(cs->hw.gazel.ipac, 0x80, data, size); break; } } static void ReadHSCXfifo(struct IsdnCardState *cs, int hscx, u_char * data, int size) { switch (cs->subtyp) { case R647: case R685: read_fifo(cs->hw.gazel.hscxfifo[hscx], data, size); break; case R753: case R742: read_fifo_ipac(cs->hw.gazel.ipac, hscx * 0x40, data, size); break; } } static void WriteHSCXfifo(struct IsdnCardState *cs, int hscx, u_char * data, int size) { switch (cs->subtyp) { case R647: case R685: write_fifo(cs->hw.gazel.hscxfifo[hscx], data, size); break; case R753: case R742: write_fifo_ipac(cs->hw.gazel.ipac, hscx * 0x40, data, size); break; } } static u_char ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset) { u_short off2 = offset; switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); case 
R685: return (readreg(cs->hw.gazel.hscx[hscx], off2)); case R753: case R742: return (readreg_ipac(cs->hw.gazel.ipac, hscx * 0x40 + off2)); } return 0; } static void WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) { u_short off2 = offset; switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); case R685: writereg(cs->hw.gazel.hscx[hscx], off2, value); break; case R753: case R742: writereg_ipac(cs->hw.gazel.ipac, hscx * 0x40 + off2, value); break; } } /* * fast interrupt HSCX stuff goes here */ #define READHSCX(cs, nr, reg) ReadHSCX(cs, nr, reg) #define WRITEHSCX(cs, nr, reg, data) WriteHSCX(cs, nr, reg, data) #define READHSCXFIFO(cs, nr, ptr, cnt) ReadHSCXfifo(cs, nr, ptr, cnt) #define WRITEHSCXFIFO(cs, nr, ptr, cnt) WriteHSCXfifo(cs, nr, ptr, cnt) #include "hscx_irq.c" static irqreturn_t gazel_interrupt(int intno, void *dev_id) { #define MAXCOUNT 5 struct IsdnCardState *cs = dev_id; u_char valisac, valhscx; int count = 0; u_long flags; spin_lock_irqsave(&cs->lock, flags); do { valhscx = ReadHSCX(cs, 1, HSCX_ISTA); if (valhscx) hscx_int_main(cs, valhscx); valisac = ReadISAC(cs, ISAC_ISTA); if (valisac) isac_interrupt(cs, valisac); count++; } while ((valhscx || valisac) && (count < MAXCOUNT)); WriteHSCX(cs, 0, HSCX_MASK, 0xFF); WriteHSCX(cs, 1, HSCX_MASK, 0xFF); WriteISAC(cs, ISAC_MASK, 0xFF); WriteISAC(cs, ISAC_MASK, 0x0); WriteHSCX(cs, 0, HSCX_MASK, 0x0); WriteHSCX(cs, 1, HSCX_MASK, 0x0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static irqreturn_t gazel_interrupt_ipac(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char ista, val; int count = 0; u_long flags; spin_lock_irqsave(&cs->lock, flags); ista = ReadISAC(cs, IPAC_ISTA - 0x80); do { if (ista & 0x0f) { val = ReadHSCX(cs, 1, HSCX_ISTA); if (ista & 0x01) val |= 0x01; if (ista & 0x04) val |= 0x02; if (ista & 0x08) val |= 0x04; if (val) { hscx_int_main(cs, val); } } if (ista & 0x20) { val = 0xfe & ReadISAC(cs, ISAC_ISTA); if 
(val) { isac_interrupt(cs, val); } } if (ista & 0x10) { val = 0x01; isac_interrupt(cs, val); } ista = ReadISAC(cs, IPAC_ISTA - 0x80); count++; } while ((ista & 0x3f) && (count < MAXCOUNT)); WriteISAC(cs, IPAC_MASK - 0x80, 0xFF); WriteISAC(cs, IPAC_MASK - 0x80, 0xC0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void release_io_gazel(struct IsdnCardState *cs) { unsigned int i; switch (cs->subtyp) { case R647: for (i = 0x0000; i < 0xC000; i += 0x1000) release_region(i + cs->hw.gazel.hscx[0], 16); release_region(0xC000 + cs->hw.gazel.hscx[0], 1); break; case R685: release_region(cs->hw.gazel.hscx[0], 0x100); release_region(cs->hw.gazel.cfg_reg, 0x80); break; case R753: release_region(cs->hw.gazel.ipac, 0x8); release_region(cs->hw.gazel.cfg_reg, 0x80); break; case R742: release_region(cs->hw.gazel.ipac, 8); break; } } static int reset_gazel(struct IsdnCardState *cs) { unsigned long plxcntrl, addr = cs->hw.gazel.cfg_reg; switch (cs->subtyp) { case R647: writereg(addr, 0, 0); HZDELAY(10); writereg(addr, 0, 1); HZDELAY(2); break; case R685: plxcntrl = inl(addr + PLX_CNTRL); plxcntrl |= (RESET_9050 + RESET_GAZEL); outl(plxcntrl, addr + PLX_CNTRL); plxcntrl &= ~(RESET_9050 + RESET_GAZEL); HZDELAY(4); outl(plxcntrl, addr + PLX_CNTRL); HZDELAY(10); outb(INT_ISAC_EN + INT_HSCX_EN + INT_PCI_EN, addr + PLX_INCSR); break; case R753: plxcntrl = inl(addr + PLX_CNTRL); plxcntrl |= (RESET_9050 + RESET_GAZEL); outl(plxcntrl, addr + PLX_CNTRL); plxcntrl &= ~(RESET_9050 + RESET_GAZEL); WriteISAC(cs, IPAC_POTA2 - 0x80, 0x20); HZDELAY(4); outl(plxcntrl, addr + PLX_CNTRL); HZDELAY(10); WriteISAC(cs, IPAC_POTA2 - 0x80, 0x00); WriteISAC(cs, IPAC_ACFG - 0x80, 0xff); WriteISAC(cs, IPAC_AOE - 0x80, 0x0); WriteISAC(cs, IPAC_MASK - 0x80, 0xff); WriteISAC(cs, IPAC_CONF - 0x80, 0x1); outb(INT_IPAC_EN + INT_PCI_EN, addr + PLX_INCSR); WriteISAC(cs, IPAC_MASK - 0x80, 0xc0); break; case R742: WriteISAC(cs, IPAC_POTA2 - 0x80, 0x20); HZDELAY(4); WriteISAC(cs, IPAC_POTA2 - 0x80, 
0x00); WriteISAC(cs, IPAC_ACFG - 0x80, 0xff); WriteISAC(cs, IPAC_AOE - 0x80, 0x0); WriteISAC(cs, IPAC_MASK - 0x80, 0xff); WriteISAC(cs, IPAC_CONF - 0x80, 0x1); WriteISAC(cs, IPAC_MASK - 0x80, 0xc0); break; } return (0); } static int Gazel_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_gazel(cs); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_RELEASE: release_io_gazel(cs); return (0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); inithscxisac(cs, 1); if ((cs->subtyp==R647)||(cs->subtyp==R685)) { int i; for (i=0;i<(2+MAX_WAITING_CALLS);i++) { cs->bcs[i].hw.hscx.tsaxr0 = 0x1f; cs->bcs[i].hw.hscx.tsaxr1 = 0x23; } } spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_TEST: return (0); } return (0); } static int reserve_regions(struct IsdnCard *card, struct IsdnCardState *cs) { unsigned int i, j, base = 0, adr = 0, len = 0; switch (cs->subtyp) { case R647: base = cs->hw.gazel.hscx[0]; if (!request_region(adr = (0xC000 + base), len = 1, "gazel")) goto error; for (i = 0x0000; i < 0xC000; i += 0x1000) { if (!request_region(adr = (i + base), len = 16, "gazel")) goto error; } if (i != 0xC000) { for (j = 0; j < i; j+= 0x1000) release_region(j + base, 16); release_region(0xC000 + base, 1); goto error; } break; case R685: if (!request_region(adr = cs->hw.gazel.hscx[0], len = 0x100, "gazel")) goto error; if (!request_region(adr = cs->hw.gazel.cfg_reg, len = 0x80, "gazel")) { release_region(cs->hw.gazel.hscx[0],0x100); goto error; } break; case R753: if (!request_region(adr = cs->hw.gazel.ipac, len = 0x8, "gazel")) goto error; if (!request_region(adr = cs->hw.gazel.cfg_reg, len = 0x80, "gazel")) { release_region(cs->hw.gazel.ipac, 8); goto error; } break; case R742: if (!request_region(adr = cs->hw.gazel.ipac, len = 0x8, "gazel")) goto error; break; } return 0; error: printk(KERN_WARNING "Gazel: io ports 0x%x-0x%x already in use\n", adr, adr + len); 
return 1; } static int __devinit setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs) { printk(KERN_INFO "Gazel: ISA PnP card automatic recognition\n"); // we got an irq parameter, assume it is an ISA card // R742 decodes address even in not started... // R647 returns FF if not present or not started // eventually needs improvment if (readreg_ipac(card->para[1], IPAC_ID) == 1) cs->subtyp = R742; else cs->subtyp = R647; setup_isac(cs); cs->hw.gazel.cfg_reg = card->para[1] + 0xC000; cs->hw.gazel.ipac = card->para[1]; cs->hw.gazel.isac = card->para[1] + 0x8000; cs->hw.gazel.hscx[0] = card->para[1]; cs->hw.gazel.hscx[1] = card->para[1] + 0x4000; cs->irq = card->para[0]; cs->hw.gazel.isacfifo = cs->hw.gazel.isac; cs->hw.gazel.hscxfifo[0] = cs->hw.gazel.hscx[0]; cs->hw.gazel.hscxfifo[1] = cs->hw.gazel.hscx[1]; switch (cs->subtyp) { case R647: printk(KERN_INFO "Gazel: Card ISA R647/R648 found\n"); cs->dc.isac.adf2 = 0x87; printk(KERN_INFO "Gazel: config irq:%d isac:0x%X cfg:0x%X\n", cs->irq, cs->hw.gazel.isac, cs->hw.gazel.cfg_reg); printk(KERN_INFO "Gazel: hscx A:0x%X hscx B:0x%X\n", cs->hw.gazel.hscx[0], cs->hw.gazel.hscx[1]); break; case R742: printk(KERN_INFO "Gazel: Card ISA R742 found\n"); test_and_set_bit(HW_IPAC, &cs->HW_Flags); printk(KERN_INFO "Gazel: config irq:%d ipac:0x%X\n", cs->irq, cs->hw.gazel.ipac); break; } return (0); } #ifdef CONFIG_PCI_LEGACY static struct pci_dev *dev_tel __devinitdata = NULL; static int __devinit setup_gazelpci(struct IsdnCardState *cs) { u_int pci_ioaddr0 = 0, pci_ioaddr1 = 0; u_char pci_irq = 0, found; u_int nbseek, seekcard; printk(KERN_WARNING "Gazel: PCI card automatic recognition\n"); found = 0; seekcard = PCI_DEVICE_ID_PLX_R685; for (nbseek = 0; nbseek < 4; nbseek++) { if ((dev_tel = pci_find_device(PCI_VENDOR_ID_PLX, seekcard, dev_tel))) { if (pci_enable_device(dev_tel)) return 1; pci_irq = dev_tel->irq; pci_ioaddr0 = pci_resource_start(dev_tel, 1); pci_ioaddr1 = pci_resource_start(dev_tel, 2); found = 1; } if 
(found) break; else { switch (seekcard) { case PCI_DEVICE_ID_PLX_R685: seekcard = PCI_DEVICE_ID_PLX_R753; break; case PCI_DEVICE_ID_PLX_R753: seekcard = PCI_DEVICE_ID_PLX_DJINN_ITOO; break; case PCI_DEVICE_ID_PLX_DJINN_ITOO: seekcard = PCI_DEVICE_ID_PLX_OLITEC; break; } } } if (!found) { printk(KERN_WARNING "Gazel: No PCI card found\n"); return (1); } if (!pci_irq) { printk(KERN_WARNING "Gazel: No IRQ for PCI card found\n"); return 1; } cs->hw.gazel.pciaddr[0] = pci_ioaddr0; cs->hw.gazel.pciaddr[1] = pci_ioaddr1; setup_isac(cs); pci_ioaddr1 &= 0xfffe; cs->hw.gazel.cfg_reg = pci_ioaddr0 & 0xfffe; cs->hw.gazel.ipac = pci_ioaddr1; cs->hw.gazel.isac = pci_ioaddr1 + 0x80; cs->hw.gazel.hscx[0] = pci_ioaddr1; cs->hw.gazel.hscx[1] = pci_ioaddr1 + 0x40; cs->hw.gazel.isacfifo = cs->hw.gazel.isac; cs->hw.gazel.hscxfifo[0] = cs->hw.gazel.hscx[0]; cs->hw.gazel.hscxfifo[1] = cs->hw.gazel.hscx[1]; cs->irq = pci_irq; cs->irq_flags |= IRQF_SHARED; switch (seekcard) { case PCI_DEVICE_ID_PLX_R685: printk(KERN_INFO "Gazel: Card PCI R685 found\n"); cs->subtyp = R685; cs->dc.isac.adf2 = 0x87; printk(KERN_INFO "Gazel: config irq:%d isac:0x%X cfg:0x%X\n", cs->irq, cs->hw.gazel.isac, cs->hw.gazel.cfg_reg); printk(KERN_INFO "Gazel: hscx A:0x%X hscx B:0x%X\n", cs->hw.gazel.hscx[0], cs->hw.gazel.hscx[1]); break; case PCI_DEVICE_ID_PLX_R753: case PCI_DEVICE_ID_PLX_DJINN_ITOO: case PCI_DEVICE_ID_PLX_OLITEC: printk(KERN_INFO "Gazel: Card PCI R753 found\n"); cs->subtyp = R753; test_and_set_bit(HW_IPAC, &cs->HW_Flags); printk(KERN_INFO "Gazel: config irq:%d ipac:0x%X cfg:0x%X\n", cs->irq, cs->hw.gazel.ipac, cs->hw.gazel.cfg_reg); break; } return (0); } #endif /* CONFIG_PCI_LEGACY */ int __devinit setup_gazel(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; char tmp[64]; u_char val; strcpy(tmp, gazel_revision); printk(KERN_INFO "Gazel: Driver Revision %s\n", HiSax_getrev(tmp)); if (cs->typ != ISDN_CTYPE_GAZEL) return (0); if (card->para[0]) { if (setup_gazelisa(card, cs)) return (0); } 
else { #ifdef CONFIG_PCI_LEGACY if (setup_gazelpci(cs)) return (0); #else printk(KERN_WARNING "Gazel: Card PCI requested and NO_PCI_BIOS, unable to config\n"); return (0); #endif /* CONFIG_PCI */ } if (reserve_regions(card, cs)) { return (0); } if (reset_gazel(cs)) { printk(KERN_WARNING "Gazel: wrong IRQ\n"); release_io_gazel(cs); return (0); } cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->BC_Read_Reg = &ReadHSCX; cs->BC_Write_Reg = &WriteHSCX; cs->BC_Send_Data = &hscx_fill_fifo; cs->cardmsg = &Gazel_card_msg; switch (cs->subtyp) { case R647: case R685: cs->irq_func = &gazel_interrupt; ISACVersion(cs, "Gazel:"); if (HscxVersion(cs, "Gazel:")) { printk(KERN_WARNING "Gazel: wrong HSCX versions check IO address\n"); release_io_gazel(cs); return (0); } break; case R742: case R753: cs->irq_func = &gazel_interrupt_ipac; val = ReadISAC(cs, IPAC_ID - 0x80); printk(KERN_INFO "Gazel: IPAC version %x\n", val); break; } return (1); }
gpl-2.0
andr7e/rk3188_tablet_jb
kernel/drivers/mtd/mtdchar.c
1041
26769
/* * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/device.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/backing-dev.h> #include <linux/compat.h> #include <linux/mount.h> #include <linux/blkpg.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/map.h> #include <asm/uaccess.h> #define MTD_INODE_FS_MAGIC 0x11307854 static DEFINE_MUTEX(mtd_mutex); static struct vfsmount *mtd_inode_mnt __read_mostly; /* * Data structure to hold the pointer to the mtd device as well * as mode information ofr various use cases. 
*/ struct mtd_file_info { struct mtd_info *mtd; struct inode *ino; enum mtd_file_modes mode; }; static loff_t mtd_lseek (struct file *file, loff_t offset, int orig) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; switch (orig) { case SEEK_SET: break; case SEEK_CUR: offset += file->f_pos; break; case SEEK_END: offset += mtd->size; break; default: return -EINVAL; } if (offset >= 0 && offset <= mtd->size) return file->f_pos = offset; return -EINVAL; } static int mtd_open(struct inode *inode, struct file *file) { int minor = iminor(inode); int devnum = minor >> 1; int ret = 0; struct mtd_info *mtd; struct mtd_file_info *mfi; struct inode *mtd_ino; DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n"); /* You can't open the RO devices RW */ if ((file->f_mode & FMODE_WRITE) && (minor & 1)) return -EACCES; mutex_lock(&mtd_mutex); mtd = get_mtd_device(NULL, devnum); if (IS_ERR(mtd)) { ret = PTR_ERR(mtd); goto out; } if (mtd->type == MTD_ABSENT) { put_mtd_device(mtd); ret = -ENODEV; goto out; } mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum); if (!mtd_ino) { put_mtd_device(mtd); ret = -ENOMEM; goto out; } if (mtd_ino->i_state & I_NEW) { mtd_ino->i_private = mtd; mtd_ino->i_mode = S_IFCHR; mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info; unlock_new_inode(mtd_ino); } file->f_mapping = mtd_ino->i_mapping; /* You can't open it RW if it's not a writeable device */ if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) { iput(mtd_ino); put_mtd_device(mtd); ret = -EACCES; goto out; } mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); if (!mfi) { iput(mtd_ino); put_mtd_device(mtd); ret = -ENOMEM; goto out; } mfi->ino = mtd_ino; mfi->mtd = mtd; file->private_data = mfi; out: mutex_unlock(&mtd_mutex); return ret; } /* mtd_open */ /*====================================================================*/ static int mtd_close(struct inode *inode, struct file *file) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; 
DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n"); /* Only sync if opened RW */ if ((file->f_mode & FMODE_WRITE) && mtd->sync) mtd->sync(mtd); iput(mfi->ino); put_mtd_device(mtd); file->private_data = NULL; kfree(mfi); return 0; } /* mtd_close */ /* Back in June 2001, dwmw2 wrote: * * FIXME: This _really_ needs to die. In 2.5, we should lock the * userspace buffer down and use it directly with readv/writev. * * The implementation below, using mtd_kmalloc_up_to, mitigates * allocation failures when the system is under low-memory situations * or if memory is highly fragmented at the cost of reducing the * performance of the requested transfer due to a smaller buffer size. * * A more complex but more memory-efficient implementation based on * get_user_pages and iovecs to cover extents of those pages is a * longer-term goal, as intimated by dwmw2 above. However, for the * write case, this requires yet more complex head and tail transfer * handling when those head and tail offsets and sizes are such that * alignment requirements are not met in the NAND subdriver. 
*/ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; size_t retlen=0; size_t total_retlen=0; int ret=0; int len; size_t size = count; char *kbuf; DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n"); if (*ppos + count > mtd->size) count = mtd->size - *ppos; if (!count) return 0; kbuf = mtd_kmalloc_up_to(mtd, &size); if (!kbuf) return -ENOMEM; while (count) { len = min_t(size_t, count, size); switch (mfi->mode) { case MTD_MODE_OTP_FACTORY: ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf); break; case MTD_MODE_OTP_USER: ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); break; case MTD_MODE_RAW: { struct mtd_oob_ops ops; ops.mode = MTD_OOB_RAW; ops.datbuf = kbuf; ops.oobbuf = NULL; ops.len = len; ret = mtd->read_oob(mtd, *ppos, &ops); retlen = ops.retlen; break; } default: ret = mtd->read(mtd, *ppos, len, &retlen, kbuf); } /* Nand returns -EBADMSG on ecc errors, but it returns * the data. For our userspace tools it is important * to dump areas with ecc errors ! * For kernel internal usage it also might return -EUCLEAN * to signal the caller that a bitflip has occurred and has * been corrected by the ECC algorithm. 
* Userspace software which accesses NAND this way * must be aware of the fact that it deals with NAND */ if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) { *ppos += retlen; if (copy_to_user(buf, kbuf, retlen)) { kfree(kbuf); return -EFAULT; } else total_retlen += retlen; count -= retlen; buf += retlen; if (retlen == 0) count = 0; } else { kfree(kbuf); return ret; } } kfree(kbuf); return total_retlen; } /* mtd_read */ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; size_t size = count; char *kbuf; size_t retlen; size_t total_retlen=0; int ret=0; int len; DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n"); if (*ppos == mtd->size) return -ENOSPC; if (*ppos + count > mtd->size) count = mtd->size - *ppos; if (!count) return 0; kbuf = mtd_kmalloc_up_to(mtd, &size); if (!kbuf) return -ENOMEM; while (count) { len = min_t(size_t, count, size); if (copy_from_user(kbuf, buf, len)) { kfree(kbuf); return -EFAULT; } switch (mfi->mode) { case MTD_MODE_OTP_FACTORY: ret = -EROFS; break; case MTD_MODE_OTP_USER: if (!mtd->write_user_prot_reg) { ret = -EOPNOTSUPP; break; } ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); break; case MTD_MODE_RAW: { struct mtd_oob_ops ops; ops.mode = MTD_OOB_RAW; ops.datbuf = kbuf; ops.oobbuf = NULL; ops.ooboffs = 0; ops.len = len; ret = mtd->write_oob(mtd, *ppos, &ops); retlen = ops.retlen; break; } default: ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf); } if (!ret) { *ppos += retlen; total_retlen += retlen; count -= retlen; buf += retlen; } else { kfree(kbuf); return ret; } } kfree(kbuf); return total_retlen; } /* mtd_write */ /*====================================================================== IOCTL calls for getting device parameters. 
======================================================================*/ static void mtdchar_erase_callback (struct erase_info *instr) { wake_up((wait_queue_head_t *)instr->priv); } #ifdef CONFIG_HAVE_MTD_OTP static int otp_select_filemode(struct mtd_file_info *mfi, int mode) { struct mtd_info *mtd = mfi->mtd; int ret = 0; switch (mode) { case MTD_OTP_FACTORY: if (!mtd->read_fact_prot_reg) ret = -EOPNOTSUPP; else mfi->mode = MTD_MODE_OTP_FACTORY; break; case MTD_OTP_USER: if (!mtd->read_fact_prot_reg) ret = -EOPNOTSUPP; else mfi->mode = MTD_MODE_OTP_USER; break; default: ret = -EINVAL; case MTD_OTP_OFF: break; } return ret; } #else # define otp_select_filemode(f,m) -EOPNOTSUPP #endif static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd, uint64_t start, uint32_t length, void __user *ptr, uint32_t __user *retp) { struct mtd_oob_ops ops; uint32_t retlen; int ret = 0; if (!(file->f_mode & FMODE_WRITE)) return -EPERM; if (length > 4096) return -EINVAL; if (!mtd->write_oob) ret = -EOPNOTSUPP; else ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT; if (ret) return ret; ops.ooblen = length; ops.ooboffs = start & (mtd->oobsize - 1); ops.datbuf = NULL; ops.mode = MTD_OOB_PLACE; if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) return -EINVAL; ops.oobbuf = memdup_user(ptr, length); if (IS_ERR(ops.oobbuf)) return PTR_ERR(ops.oobbuf); start &= ~((uint64_t)mtd->oobsize - 1); ret = mtd->write_oob(mtd, start, &ops); if (ops.oobretlen > 0xFFFFFFFFU) ret = -EOVERFLOW; retlen = ops.oobretlen; if (copy_to_user(retp, &retlen, sizeof(length))) ret = -EFAULT; kfree(ops.oobbuf); return ret; } static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start, uint32_t length, void __user *ptr, uint32_t __user *retp) { struct mtd_oob_ops ops; int ret = 0; if (length > 4096) return -EINVAL; if (!mtd->read_oob) ret = -EOPNOTSUPP; else ret = access_ok(VERIFY_WRITE, ptr, length) ? 
0 : -EFAULT; if (ret) return ret; ops.ooblen = length; ops.ooboffs = start & (mtd->oobsize - 1); ops.datbuf = NULL; ops.mode = MTD_OOB_PLACE; if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) return -EINVAL; ops.oobbuf = kmalloc(length, GFP_KERNEL); if (!ops.oobbuf) return -ENOMEM; start &= ~((uint64_t)mtd->oobsize - 1); ret = mtd->read_oob(mtd, start, &ops); if (put_user(ops.oobretlen, retp)) ret = -EFAULT; else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf, ops.oobretlen)) ret = -EFAULT; kfree(ops.oobbuf); return ret; } /* * Copies (and truncates, if necessary) data from the larger struct, * nand_ecclayout, to the smaller, deprecated layout struct, * nand_ecclayout_user. This is necessary only to suppport the deprecated * API ioctl ECCGETLAYOUT while allowing all new functionality to use * nand_ecclayout flexibly (i.e. the struct may change size in new * releases without requiring major rewrites). */ static int shrink_ecclayout(const struct nand_ecclayout *from, struct nand_ecclayout_user *to) { int i; if (!from || !to) return -EINVAL; memset(to, 0, sizeof(*to)); to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES); for (i = 0; i < to->eccbytes; i++) to->eccpos[i] = from->eccpos[i]; for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) { if (from->oobfree[i].length == 0 && from->oobfree[i].offset == 0) break; to->oobavail += from->oobfree[i].length; to->oobfree[i] = from->oobfree[i]; } return 0; } static int mtd_blkpg_ioctl(struct mtd_info *mtd, struct blkpg_ioctl_arg __user *arg) { struct blkpg_ioctl_arg a; struct blkpg_partition p; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg))) return -EFAULT; if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition))) return -EFAULT; switch (a.op) { case BLKPG_ADD_PARTITION: /* Only master mtd device must be used to add partitions */ if (mtd_is_partition(mtd)) return -EINVAL; return mtd_add_partition(mtd, p.devname, p.start, p.length); case 
BLKPG_DEL_PARTITION: if (p.pno < 0) return -EINVAL; return mtd_del_partition(mtd, p.pno); default: return -EINVAL; } } static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; void __user *argp = (void __user *)arg; int ret = 0; u_long size; struct mtd_info_user info; DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n"); size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; if (cmd & IOC_IN) { if (!access_ok(VERIFY_READ, argp, size)) return -EFAULT; } if (cmd & IOC_OUT) { if (!access_ok(VERIFY_WRITE, argp, size)) return -EFAULT; } switch (cmd) { case MEMGETREGIONCOUNT: if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int))) return -EFAULT; break; case MEMGETREGIONINFO: { uint32_t ur_idx; struct mtd_erase_region_info *kr; struct region_info_user __user *ur = argp; if (get_user(ur_idx, &(ur->regionindex))) return -EFAULT; if (ur_idx >= mtd->numeraseregions) return -EINVAL; kr = &(mtd->eraseregions[ur_idx]); if (put_user(kr->offset, &(ur->offset)) || put_user(kr->erasesize, &(ur->erasesize)) || put_user(kr->numblocks, &(ur->numblocks))) return -EFAULT; break; } case MEMGETINFO: memset(&info, 0, sizeof(info)); info.type = mtd->type; info.flags = mtd->flags; info.size = mtd->size; info.erasesize = mtd->erasesize; info.writesize = mtd->writesize; info.oobsize = mtd->oobsize; /* The below fields are obsolete */ info.ecctype = -1; if (copy_to_user(argp, &info, sizeof(struct mtd_info_user))) return -EFAULT; break; case MEMERASE: case MEMERASE64: { struct erase_info *erase; if(!(file->f_mode & FMODE_WRITE)) return -EPERM; erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL); if (!erase) ret = -ENOMEM; else { wait_queue_head_t waitq; DECLARE_WAITQUEUE(wait, current); init_waitqueue_head(&waitq); if (cmd == MEMERASE64) { struct erase_info_user64 einfo64; if (copy_from_user(&einfo64, argp, sizeof(struct erase_info_user64))) { kfree(erase); return -EFAULT; } erase->addr = einfo64.start; erase->len = 
einfo64.length; } else { struct erase_info_user einfo32; if (copy_from_user(&einfo32, argp, sizeof(struct erase_info_user))) { kfree(erase); return -EFAULT; } erase->addr = einfo32.start; erase->len = einfo32.length; } erase->mtd = mtd; erase->callback = mtdchar_erase_callback; erase->priv = (unsigned long)&waitq; /* FIXME: Allow INTERRUPTIBLE. Which means not having the wait_queue head on the stack. If the wq_head is on the stack, and we leave because we got interrupted, then the wq_head is no longer there when the callback routine tries to wake us up. */ ret = mtd->erase(mtd, erase); if (!ret) { set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&waitq, &wait); if (erase->state != MTD_ERASE_DONE && erase->state != MTD_ERASE_FAILED) schedule(); remove_wait_queue(&waitq, &wait); set_current_state(TASK_RUNNING); ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0; } kfree(erase); } break; } case MEMWRITEOOB: { struct mtd_oob_buf buf; struct mtd_oob_buf __user *buf_user = argp; /* NOTE: writes return length to buf_user->length */ if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else ret = mtd_do_writeoob(file, mtd, buf.start, buf.length, buf.ptr, &buf_user->length); break; } case MEMREADOOB: { struct mtd_oob_buf buf; struct mtd_oob_buf __user *buf_user = argp; /* NOTE: writes return length to buf_user->start */ if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else ret = mtd_do_readoob(mtd, buf.start, buf.length, buf.ptr, &buf_user->start); break; } case MEMWRITEOOB64: { struct mtd_oob_buf64 buf; struct mtd_oob_buf64 __user *buf_user = argp; if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else ret = mtd_do_writeoob(file, mtd, buf.start, buf.length, (void __user *)(uintptr_t)buf.usr_ptr, &buf_user->length); break; } case MEMREADOOB64: { struct mtd_oob_buf64 buf; struct mtd_oob_buf64 __user *buf_user = argp; if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else ret = mtd_do_readoob(mtd, buf.start, buf.length, (void __user 
*)(uintptr_t)buf.usr_ptr, &buf_user->length); break; } case MEMLOCK: { struct erase_info_user einfo; if (copy_from_user(&einfo, argp, sizeof(einfo))) return -EFAULT; if (!mtd->lock) ret = -EOPNOTSUPP; else ret = mtd->lock(mtd, einfo.start, einfo.length); break; } case MEMUNLOCK: { struct erase_info_user einfo; if (copy_from_user(&einfo, argp, sizeof(einfo))) return -EFAULT; if (!mtd->unlock) ret = -EOPNOTSUPP; else ret = mtd->unlock(mtd, einfo.start, einfo.length); break; } case MEMISLOCKED: { struct erase_info_user einfo; if (copy_from_user(&einfo, argp, sizeof(einfo))) return -EFAULT; if (!mtd->is_locked) ret = -EOPNOTSUPP; else ret = mtd->is_locked(mtd, einfo.start, einfo.length); break; } /* Legacy interface */ case MEMGETOOBSEL: { struct nand_oobinfo oi; if (!mtd->ecclayout) return -EOPNOTSUPP; if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos)) return -EINVAL; oi.useecc = MTD_NANDECC_AUTOPLACE; memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos)); memcpy(&oi.oobfree, mtd->ecclayout->oobfree, sizeof(oi.oobfree)); oi.eccbytes = mtd->ecclayout->eccbytes; if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo))) return -EFAULT; break; } case MEMGETBADBLOCK: { loff_t offs; if (copy_from_user(&offs, argp, sizeof(loff_t))) return -EFAULT; if (!mtd->block_isbad) ret = -EOPNOTSUPP; else return mtd->block_isbad(mtd, offs); break; } case MEMSETBADBLOCK: { loff_t offs; if (copy_from_user(&offs, argp, sizeof(loff_t))) return -EFAULT; if (!mtd->block_markbad) ret = -EOPNOTSUPP; else return mtd->block_markbad(mtd, offs); break; } #ifdef CONFIG_HAVE_MTD_OTP case OTPSELECT: { int mode; if (copy_from_user(&mode, argp, sizeof(int))) return -EFAULT; mfi->mode = MTD_MODE_NORMAL; ret = otp_select_filemode(mfi, mode); file->f_pos = 0; break; } case OTPGETREGIONCOUNT: case OTPGETREGIONINFO: { struct otp_info *buf = kmalloc(4096, GFP_KERNEL); if (!buf) return -ENOMEM; ret = -EOPNOTSUPP; switch (mfi->mode) { case MTD_MODE_OTP_FACTORY: if (mtd->get_fact_prot_info) ret = 
mtd->get_fact_prot_info(mtd, buf, 4096); break; case MTD_MODE_OTP_USER: if (mtd->get_user_prot_info) ret = mtd->get_user_prot_info(mtd, buf, 4096); break; default: break; } if (ret >= 0) { if (cmd == OTPGETREGIONCOUNT) { int nbr = ret / sizeof(struct otp_info); ret = copy_to_user(argp, &nbr, sizeof(int)); } else ret = copy_to_user(argp, buf, ret); if (ret) ret = -EFAULT; } kfree(buf); break; } case OTPLOCK: { struct otp_info oinfo; if (mfi->mode != MTD_MODE_OTP_USER) return -EINVAL; if (copy_from_user(&oinfo, argp, sizeof(oinfo))) return -EFAULT; if (!mtd->lock_user_prot_reg) return -EOPNOTSUPP; ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length); break; } #endif /* This ioctl is being deprecated - it truncates the ecc layout */ case ECCGETLAYOUT: { struct nand_ecclayout_user *usrlay; if (!mtd->ecclayout) return -EOPNOTSUPP; usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL); if (!usrlay) return -ENOMEM; shrink_ecclayout(mtd->ecclayout, usrlay); if (copy_to_user(argp, usrlay, sizeof(*usrlay))) ret = -EFAULT; kfree(usrlay); break; } case ECCGETSTATS: { if (copy_to_user(argp, &mtd->ecc_stats, sizeof(struct mtd_ecc_stats))) return -EFAULT; break; } case MTDFILEMODE: { mfi->mode = 0; switch(arg) { case MTD_MODE_OTP_FACTORY: case MTD_MODE_OTP_USER: ret = otp_select_filemode(mfi, arg); break; case MTD_MODE_RAW: if (!mtd->read_oob || !mtd->write_oob) return -EOPNOTSUPP; mfi->mode = arg; case MTD_MODE_NORMAL: break; default: ret = -EINVAL; } file->f_pos = 0; break; } case BLKPG: { ret = mtd_blkpg_ioctl(mtd, (struct blkpg_ioctl_arg __user *)arg); break; } case BLKRRPART: { /* No reread partition feature. 
	   Just return ok */
		ret = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */

/* Serialise all MTD ioctls behind a single global mutex. */
static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&mtd_mutex);
	ret = mtd_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

/* 32-bit layout of struct mtd_oob_buf for compat ioctls. */
struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)

/*
 * Compat ioctl entry point: translate the 32-bit OOB buffer layout,
 * everything else is forwarded to mtd_ioctl() with a compat_ptr()
 * converted argument.
 */
static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&mtd_mutex);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}
	default:
		ret = mtd_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&mtd_mutex);

	return ret;
}
#endif /* CONFIG_COMPAT */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (the MMU variant doesn't
 *   copy private mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtd_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	if (mtd->get_unmapped_area) {
		unsigned long offset;

		/* a fixed address is not supported */
		if (addr != 0)
			return (unsigned long) -EINVAL;

		/* requested window must lie within the device */
		if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
			return (unsigned long) -EINVAL;

		offset = pgoff << PAGE_SHIFT;
		if (offset > mtd->size - len)
			return (unsigned long) -EINVAL;

		return mtd->get_unmapped_area(mtd, len, offset, flags);
	}

	/* can't map directly */
	return (unsigned long) -ENOSYS;
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;
	unsigned long start;
	unsigned long off;
	u32 len;

	/* only directly-mapped RAM/ROM devices can be mmap'd */
	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
		off = vma->vm_pgoff << PAGE_SHIFT;
		start = map->phys;
		len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
		start &= PAGE_MASK;
		if ((vma->vm_end - vma->vm_start + off) > len)
			return -EINVAL;

		off += start;
		vma->vm_pgoff = off >> PAGE_SHIFT;
		vma->vm_flags |= VM_IO | VM_RESERVED;

#ifdef pgprot_noncached
		/* device memory above RAM is mapped uncached */
		if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;

		return 0;
	}

	return -ENOSYS;
#else
	return vma->vm_flags & VM_SHARED ?
0 : -ENOSYS; #endif } static const struct file_operations mtd_fops = { .owner = THIS_MODULE, .llseek = mtd_lseek, .read = mtd_read, .write = mtd_write, .unlocked_ioctl = mtd_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mtd_compat_ioctl, #endif .open = mtd_open, .release = mtd_close, .mmap = mtd_mmap, #ifndef CONFIG_MMU .get_unmapped_area = mtd_get_unmapped_area, #endif }; static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC); } static struct file_system_type mtd_inodefs_type = { .name = "mtd_inodefs", .mount = mtd_inodefs_mount, .kill_sb = kill_anon_super, }; static void mtdchar_notify_add(struct mtd_info *mtd) { } static void mtdchar_notify_remove(struct mtd_info *mtd) { struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index); if (mtd_ino) { /* Destroy the inode if it exists */ mtd_ino->i_nlink = 0; iput(mtd_ino); } } static struct mtd_notifier mtdchar_notifier = { .add = mtdchar_notify_add, .remove = mtdchar_notify_remove, }; static int __init init_mtdchar(void) { int ret; ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd", &mtd_fops); if (ret < 0) { pr_notice("Can't allocate major number %d for " "Memory Technology Devices.\n", MTD_CHAR_MAJOR); return ret; } ret = register_filesystem(&mtd_inodefs_type); if (ret) { pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret); goto err_unregister_chdev; } mtd_inode_mnt = kern_mount(&mtd_inodefs_type); if (IS_ERR(mtd_inode_mnt)) { ret = PTR_ERR(mtd_inode_mnt); pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret); goto err_unregister_filesystem; } register_mtd_user(&mtdchar_notifier); return ret; err_unregister_filesystem: unregister_filesystem(&mtd_inodefs_type); err_unregister_chdev: __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); return ret; } static void __exit cleanup_mtdchar(void) { 
unregister_mtd_user(&mtdchar_notifier); mntput(mtd_inode_mnt); unregister_filesystem(&mtd_inodefs_type); __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); } module_init(init_mtdchar); module_exit(cleanup_mtdchar); MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("Direct character-device access to MTD devices"); MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
gpl-2.0
mfornero/linux
drivers/media/platform/davinci/dm644x_ccdc.c
1041
31079
/*
 * Copyright (C) 2006-2009 Texas Instruments Inc
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * CCDC hardware module for DM6446
 * ------------------------------
 *
 * This module is for configuring CCD controller of DM6446 VPFE to capture
 * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules
 * such as Defect Pixel Correction, Color Space Conversion etc to
 * pre-process the Raw Bayer RGB data, before writing it to SDRAM. This
 * module also allows application to configure individual
 * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL.
 * To do so, application includes dm644x_ccdc.h and vpfe_capture.h header
 * files. The setparams() API is called by vpfe_capture driver
 * to configure module parameters. This file is named DM644x so that other
 * variants such DM6443 may be supported using the same module.
 *
 * TODO: Test Raw bayer parameter settings and bayer capture
 *	 Split module parameter structure to module specific ioctl structs
 *	 investigate if enum used for user space type definition
 *	 to be replaced by #defines or integer
 */
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/videodev2.h>
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/module.h>

#include <media/davinci/dm644x_ccdc.h>
#include <media/davinci/vpss.h>

#include "dm644x_ccdc_regs.h"
#include "ccdc_hw_device.h"

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CCDC Driver for DM6446");
MODULE_AUTHOR("Texas Instruments");

/*
 * Single global operating configuration for the one CCDC instance.
 * Initialised with sane defaults for both Bayer and YCbCr capture.
 */
static struct ccdc_oper_config {
	struct device *dev;
	/* CCDC interface type */
	enum vpfe_hw_if_type if_type;
	/* Raw Bayer configuration */
	struct ccdc_params_raw bayer;
	/* YCbCr configuration */
	struct ccdc_params_ycbcr ycbcr;
	/* ccdc base address */
	void __iomem *base_addr;
} ccdc_cfg = {
	/* Raw configurations */
	.bayer = {
		.pix_fmt = CCDC_PIXFMT_RAW,
		.frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
		.win = CCDC_WIN_VGA,
		.fid_pol = VPFE_PINPOL_POSITIVE,
		.vd_pol = VPFE_PINPOL_POSITIVE,
		.hd_pol = VPFE_PINPOL_POSITIVE,
		.config_params = {
			.data_sz = CCDC_DATA_10BITS,
		},
	},
	.ycbcr = {
		.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
		.frm_fmt = CCDC_FRMFMT_INTERLACED,
		.win = CCDC_WIN_PAL,
		.fid_pol = VPFE_PINPOL_POSITIVE,
		.vd_pol = VPFE_PINPOL_POSITIVE,
		.hd_pol = VPFE_PINPOL_POSITIVE,
		.bt656_enable = 1,
		.pix_order = CCDC_PIXORDER_CBYCRY,
		.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED
	},
};

#define CCDC_MAX_RAW_YUV_FORMATS	2

/* Raw Bayer formats */
static u32 ccdc_raw_bayer_pix_formats[] =
	{V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};

/* Raw YUV formats */
static u32 ccdc_raw_yuv_pix_formats[] =
	{V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};

/* CCDC Save/Restore context, indexed by register offset / 4 */
static u32 ccdc_ctx[CCDC_REG_END / sizeof(u32)];

/* register access routines */
static inline u32 regr(u32 offset)
{
	return __raw_readl(ccdc_cfg.base_addr + offset);
}

static inline void regw(u32 val, u32 offset)
{
	__raw_writel(val, ccdc_cfg.base_addr + offset);
}

/* Set/clear the CCDC enable bit in PCR. */
static void ccdc_enable(int flag)
{
	regw(flag, CCDC_PCR);
}

static void ccdc_enable_vport(int flag)
{
	if (flag)
		/* enable video port */
		regw(CCDC_ENABLE_VIDEO_PORT, CCDC_FMTCFG);
	else
		regw(CCDC_DISABLE_VIDEO_PORT, CCDC_FMTCFG);
}

/*
 * ccdc_setwin()
 * This function will configure the window size
 * to be capture in CCDC reg
 */
static void ccdc_setwin(struct v4l2_rect *image_win,
			enum ccdc_frmfmt frm_fmt,
			int ppc)
{
	int horz_start, horz_nr_pixels;
	int vert_start, vert_nr_lines;
	int val = 0, mid_img = 0;

	dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin...");
	/*
	 * ppc - per pixel count. indicates how many pixels per cell
	 * output to SDRAM. example, for ycbcr, it is one y and one c, so 2.
	 * raw capture this is 1
	 */
	horz_start = image_win->left << (ppc - 1);
	horz_nr_pixels = (image_win->width << (ppc - 1)) - 1;
	regw((horz_start << CCDC_HORZ_INFO_SPH_SHIFT) | horz_nr_pixels,
	     CCDC_HORZ_INFO);

	vert_start = image_win->top;

	if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
		/* each field carries half the lines */
		vert_nr_lines = (image_win->height >> 1) - 1;
		vert_start >>= 1;
		/* Since first line doesn't have any data */
		vert_start += 1;
		/* configure VDINT0 */
		val = (vert_start << CCDC_VDINT_VDINT0_SHIFT);
		regw(val, CCDC_VDINT);

	} else {
		/* Since first line doesn't have any data */
		vert_start += 1;
		vert_nr_lines = image_win->height - 1;
		/*
		 * configure VDINT0 and VDINT1. VDINT1 will be at half
		 * of image height
		 */
		mid_img = vert_start + (image_win->height / 2);
		val = (vert_start << CCDC_VDINT_VDINT0_SHIFT) |
		    (mid_img & CCDC_VDINT_VDINT1_MASK);
		regw(val, CCDC_VDINT);
	}
	regw((vert_start << CCDC_VERT_START_SLV0_SHIFT) | vert_start,
	     CCDC_VERT_START);
	regw(vert_nr_lines, CCDC_VERT_LINES);
	dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
}

/* Dump the main CCDC registers to the kernel log for debugging. */
static void ccdc_readregs(void)
{
	unsigned int val = 0;

	val = regr(CCDC_ALAW);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to ALAW...\n", val);
	val = regr(CCDC_CLAMP);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to CLAMP...\n", val);
	val = regr(CCDC_DCSUB);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to DCSUB...\n", val);
	val = regr(CCDC_BLKCMP);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to BLKCMP...\n", val);
	val = regr(CCDC_FPC_ADDR);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC_ADDR...\n", val);
	val = regr(CCDC_FPC);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC...\n", val);
	val = regr(CCDC_FMTCFG);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMTCFG...\n", val);
	val = regr(CCDC_COLPTN);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to COLPTN...\n", val);
	val = regr(CCDC_FMT_HORZ);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_HORZ...\n", val);
	val = regr(CCDC_FMT_VERT);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_VERT...\n", val);
	val = regr(CCDC_HSIZE_OFF);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HSIZE_OFF...\n", val);
	val = regr(CCDC_SDOFST);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SDOFST...\n", val);
	val = regr(CCDC_VP_OUT);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VP_OUT...\n", val);
	val = regr(CCDC_SYN_MODE);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SYN_MODE...\n", val);
	val = regr(CCDC_HORZ_INFO);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HORZ_INFO...\n", val);
	val = regr(CCDC_VERT_START);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_START...\n", val);
	val = regr(CCDC_VERT_LINES);
	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val);
}

static int
validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam)
{
	/* Reject A-law gamma widths outside the supported range, or
	 * wider than the configured CCDC data size. */
	if (ccdcparam->alaw.enable) {
		u8 max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
		u8 max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);

		if ((ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) ||
		    (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_15_6) ||
		    (max_gamma > max_data)) {
			dev_dbg(ccdc_cfg.dev, "\nInvalid data line select");
			return -1;
		}
	}
	return 0;
}

/*
 * Copy user-supplied raw-capture parameters into ccdc_cfg and,
 * when fault pixel correction is enabled, (re)allocate the FPC
 * table and fill it from user space.  The table's physical address
 * is stored back into fpc_table_addr for the hardware.
 */
static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params)
{
	struct ccdc_config_params_raw *config_params =
				&ccdc_cfg.bayer.config_params;
	unsigned int *fpc_virtaddr = NULL;
	unsigned int *fpc_physaddr = NULL;

	memcpy(config_params, raw_params, sizeof(*raw_params));
	/*
	 * allocate memory for fault pixel table and copy the user
	 * values to the table
	 */
	if (!config_params->fault_pxl.enable)
		return 0;

	fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;
	fpc_virtaddr = (unsigned int *)phys_to_virt(
				(unsigned long)fpc_physaddr);
	/*
	 * Allocate memory for FPC table if current
	 * FPC table buffer is not big enough to
	 * accommodate FPC Number requested
	 */
	if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) {
		if (fpc_physaddr != NULL) {
			free_pages((unsigned long)fpc_physaddr,
				   get_order
				   (config_params->fault_pxl.fp_num *
				   FP_NUM_BYTES));
		}

		/* Allocate memory for FPC table */
		fpc_virtaddr =
			(unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA,
							 get_order(raw_params->
							 fault_pxl.fp_num *
							 FP_NUM_BYTES));

		if (fpc_virtaddr == NULL) {
			dev_dbg(ccdc_cfg.dev,
				"\nUnable to allocate memory for FPC");
			return -EFAULT;
		}
		fpc_physaddr =
		    (unsigned int *)virt_to_phys((void *)fpc_virtaddr);
	}

	/* Copy number of fault pixels and FPC table */
	config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num;
	if (copy_from_user(fpc_virtaddr,
			(void __user *)raw_params->fault_pxl.fpc_table_addr,
			config_params->fault_pxl.fp_num * FP_NUM_BYTES)) {
		dev_dbg(ccdc_cfg.dev, "\n copy_from_user failed");
		return -EFAULT;
	}
	config_params->fault_pxl.fpc_table_addr = (unsigned long)fpc_physaddr;
	return 0;
}

/* Release the FPC table pages when the device is closed. */
static int ccdc_close(struct device *dev)
{
	struct ccdc_config_params_raw *config_params =
				&ccdc_cfg.bayer.config_params;
	unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL;

	fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;

	if (fpc_physaddr != NULL) {
		fpc_virtaddr = (unsigned int *)
		    phys_to_virt((unsigned long)fpc_physaddr);
		free_pages((unsigned long)fpc_virtaddr,
			   get_order(config_params->fault_pxl.fp_num *
			   FP_NUM_BYTES));
	}
	return 0;
}

/*
 * ccdc_restore_defaults()
 * This function will write defaults to all CCDC registers
 */
static void ccdc_restore_defaults(void)
{
	int i;

	/* disable CCDC */
	ccdc_enable(0);
	/* set all registers to default value */
	for (i = 4; i <= 0x94; i += 4)
		regw(0, i);
	regw(CCDC_NO_CULLING, CCDC_CULLING);
	regw(CCDC_GAMMA_BITS_11_2, CCDC_ALAW);
}

static int ccdc_open(struct device *device)
{
	ccdc_restore_defaults();
	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
		ccdc_enable_vport(1);
	return 0;
}

/* Clear a VPSS write-buffer-logic overflow for the CCDC. */
static void ccdc_sbl_reset(void)
{
	vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O);
}

/* Parameter operations */
static int ccdc_set_params(void __user *params)
{
	struct ccdc_config_params_raw ccdc_raw_params;
	int x;

	/* raw parameters only make sense for Bayer capture */
	if (ccdc_cfg.if_type != VPFE_RAW_BAYER)
		return -EINVAL;

	x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
	if (x) {
		/* NOTE(review): the adjacent string literals concatenate
		 * to "copyingccdc params" - the message lacks a space. */
		dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying"
			   "ccdc params, %d\n", x);
		return -EFAULT;
	}

	if (!validate_ccdc_param(&ccdc_raw_params)) {
		if (!ccdc_update_raw_params(&ccdc_raw_params))
			return 0;
	}
	return -EINVAL;
}

/*
 * ccdc_config_ycbcr()
 * This function will configure CCDC for YCbCr video capture
 */
static void ccdc_config_ycbcr(void)
{
	struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
	u32 syn_mode;

	dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr...");
	/*
	 * first restore the CCDC registers to default values
	 * This is important since we assume default values to be set in
	 * a lot of registers
	 * that we didn't touch */
	ccdc_restore_defaults();
	/*
	 * configure pixel format, frame format, configure video frame
	 * format, enable output to SDRAM, enable internal timing generator
	 * and 8bit pack mode
	 */
	syn_mode = (((params->pix_fmt & CCDC_SYN_MODE_INPMOD_MASK) <<
		    CCDC_SYN_MODE_INPMOD_SHIFT) |
		    ((params->frm_fmt & CCDC_SYN_FLDMODE_MASK) <<
		    CCDC_SYN_FLDMODE_SHIFT) | CCDC_VDHDEN_ENABLE |
		    CCDC_WEN_ENABLE | CCDC_DATA_PACK_ENABLE);

	/* setup BT.656 sync mode */
	if (params->bt656_enable) {
		regw(CCDC_REC656IF_BT656_EN, CCDC_REC656IF);

		/*
		 * configure the FID, VD, HD pin polarity,
		 * fld,hd pol positive, vd negative, 8-bit data
		 */
		syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE;
		if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
			syn_mode |= CCDC_SYN_MODE_10BITS;
		else
			syn_mode |= CCDC_SYN_MODE_8BITS;
	} else {
		/* y/c external sync mode */
		syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
			     CCDC_FID_POL_SHIFT) |
			     ((params->hd_pol & CCDC_HD_POL_MASK) <<
			     CCDC_HD_POL_SHIFT) |
			     ((params->vd_pol & CCDC_VD_POL_MASK) <<
			     CCDC_VD_POL_SHIFT));
	}
	regw(syn_mode, CCDC_SYN_MODE);

	/* configure video window */
	ccdc_setwin(&params->win, params->frm_fmt, 2);

	/*
	 * configure the order of y cb cr in SDRAM, and disable latch
	 * internal register on vsync
	 */
	if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
		regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
			CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_BW656_10BIT,
			CCDC_CCDCFG);
	else
		regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
			CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);

	/*
	 * configure the horizontal line offset. This should be a
	 * on 32 byte boundary. So clear LSB 5 bits
	 */
	regw(((params->win.width * 2 + 31) & ~0x1f), CCDC_HSIZE_OFF);

	/* configure the memory line offset */
	if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
		/* two fields are interleaved in memory */
		regw(CCDC_SDOFST_FIELD_INTERLEAVED, CCDC_SDOFST);

	ccdc_sbl_reset();
	dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
}

/* Program black clamping; with clamping disabled only DCSub is used. */
static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
{
	u32 val;

	if (!bclamp->enable) {
		/* configure DCSub */
		val = (bclamp->dc_sub) & CCDC_BLK_DC_SUB_MASK;
		regw(val, CCDC_DCSUB);
		dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to DCSUB...\n", val);
		regw(CCDC_CLAMP_DEFAULT_VAL, CCDC_CLAMP);
		dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to CLAMP...\n");
		return;
	}
	/*
	 * Configure gain, Start pixel, No of line to be avg,
	 * No of pixel/line to be avg, & Enable the Black clamping
	 */
	val = ((bclamp->sgain & CCDC_BLK_SGAIN_MASK) |
	       ((bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) <<
		CCDC_BLK_ST_PXL_SHIFT) |
	       ((bclamp->sample_ln & CCDC_BLK_SAMPLE_LINE_MASK) <<
		CCDC_BLK_SAMPLE_LINE_SHIFT) |
	       ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) <<
		CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE);
	regw(val, CCDC_CLAMP);
	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to CLAMP...\n", val);
	/* If Black clamping is enable then make dcsub 0 */
	regw(CCDC_DCSUB_DEFAULT_VAL, CCDC_DCSUB);
	dev_dbg(ccdc_cfg.dev, "\nWriting 0x00000000 to DCSUB...\n");
}

/* Per-color (R/Gr/Gb/B) black level compensation. */
static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
{
	u32 val;

	val = ((bcomp->b & CCDC_BLK_COMP_MASK) |
	      ((bcomp->gb & CCDC_BLK_COMP_MASK) <<
	       CCDC_BLK_COMP_GB_COMP_SHIFT) |
	      ((bcomp->gr & CCDC_BLK_COMP_MASK) <<
	       CCDC_BLK_COMP_GR_COMP_SHIFT) |
	      ((bcomp->r & CCDC_BLK_COMP_MASK) <<
	       CCDC_BLK_COMP_R_COMP_SHIFT));
	regw(val, CCDC_BLKCMP);
}

/*
 * Program the fault-pixel-correction table address and entry count,
 * then enable FPC if requested.
 */
static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc)
{
	u32 val;

	/* Initially disable FPC */
	val = CCDC_FPC_DISABLE;
	regw(val, CCDC_FPC);

	if (!fpc->enable)
		return;

	/* Configure Fault pixel if needed */
	regw(fpc->fpc_table_addr, CCDC_FPC_ADDR);
	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%lx to FPC_ADDR...\n",
		       (fpc->fpc_table_addr));

	/* Write the FPC params with FPC disable */
	val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK;
	regw(val, CCDC_FPC);

	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
	/* read the FPC register */
	val = regr(CCDC_FPC) | CCDC_FPC_ENABLE;
	regw(val, CCDC_FPC);
	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
}

/*
 * ccdc_config_raw()
 * This function will configure CCDC for Raw capture mode
 */
static void ccdc_config_raw(void)
{
	struct ccdc_params_raw *params = &ccdc_cfg.bayer;
	struct ccdc_config_params_raw *config_params =
				&ccdc_cfg.bayer.config_params;
	unsigned int syn_mode = 0;
	unsigned int val;

	dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw...");

	/* Reset CCDC */
	ccdc_restore_defaults();

	/* Disable latching function registers on VSYNC */
	regw(CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);

	/*
	 * Configure the vertical sync polarity(SYN_MODE.VDPOL),
	 * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity
	 * (SYN_MODE.FLDPOL), frame format(progressive or interlace),
	 * data size(SYNMODE.DATSIZ), &pixel format (Input mode), output
	 * SDRAM, enable internal timing generator
	 */
	syn_mode =
		(((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) |
		((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) |
		((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) |
		((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) |
		((config_params->data_sz & CCDC_DATA_SZ_MASK) <<
		CCDC_DATA_SZ_SHIFT) |
		((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT) |
		CCDC_WEN_ENABLE | CCDC_VDHDEN_ENABLE);

	/* Enable and configure aLaw register if needed */
	if (config_params->alaw.enable) {
		val = ((config_params->alaw.gamma_wd &
		      CCDC_ALAW_GAMMA_WD_MASK) | CCDC_ALAW_ENABLE);
		regw(val, CCDC_ALAW);
		dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to ALAW...\n", val);
	}

	/* Configure video window */
	ccdc_setwin(&params->win, params->frm_fmt, CCDC_PPC_RAW);

	/* Configure Black Clamp */
	ccdc_config_black_clamp(&config_params->blk_clamp);

	/* Configure Black level compensation */
	ccdc_config_black_compense(&config_params->blk_comp);

	/* Configure Fault Pixel Correction */
	ccdc_config_fpc(&config_params->fault_pxl);

	/* If data size is 8 bit then pack the data */
	if ((config_params->data_sz == CCDC_DATA_8BITS) ||
	     config_params->alaw.enable)
		syn_mode |= CCDC_DATA_PACK_ENABLE;

	/* disable video port */
	val = CCDC_DISABLE_VIDEO_PORT;

	if (config_params->data_sz == CCDC_DATA_8BITS)
		val |= (CCDC_DATA_10BITS & CCDC_FMTCFG_VPIN_MASK)
		    << CCDC_FMTCFG_VPIN_SHIFT;
	else
		val |= (config_params->data_sz & CCDC_FMTCFG_VPIN_MASK)
		    << CCDC_FMTCFG_VPIN_SHIFT;
	/* Write value in FMTCFG */
	regw(val, CCDC_FMTCFG);

	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMTCFG...\n", val);
	/* Configure the color pattern according to mt9t001 sensor */
	regw(CCDC_COLPTN_VAL, CCDC_COLPTN);

	dev_dbg(ccdc_cfg.dev, "\nWriting 0xBB11BB11 to COLPTN...\n");
	/*
	 * Configure Data formatter(Video port) pixel selection
	 * (FMT_HORZ, FMT_VERT)
	 */
	val = ((params->win.left & CCDC_FMT_HORZ_FMTSPH_MASK) <<
	      CCDC_FMT_HORZ_FMTSPH_SHIFT) |
	      (params->win.width & CCDC_FMT_HORZ_FMTLNH_MASK);
	regw(val, CCDC_FMT_HORZ);

	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_HORZ...\n", val);
	val = (params->win.top & CCDC_FMT_VERT_FMTSLV_MASK) <<
	      CCDC_FMT_VERT_FMTSLV_SHIFT;
	if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
		val |= (params->win.height) & CCDC_FMT_VERT_FMTLNV_MASK;
	else
		/* interlaced: each field carries half the lines */
		val |= (params->win.height >> 1) & CCDC_FMT_VERT_FMTLNV_MASK;

	dev_dbg(ccdc_cfg.dev, "\nparams->win.height 0x%x ...\n",
		params->win.height);
	regw(val, CCDC_FMT_VERT);

	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_VERT...\n", val);

	dev_dbg(ccdc_cfg.dev, "\nbelow regw(val, FMT_VERT)...");

	/*
	 * Configure Horizontal offset register. If pack 8 is enabled then
	 * 1 pixel will take 1 byte
	 */
	if ((config_params->data_sz == CCDC_DATA_8BITS) ||
	     config_params->alaw.enable)
		regw((params->win.width + CCDC_32BYTE_ALIGN_VAL) &
		    CCDC_HSIZE_OFF_MASK, CCDC_HSIZE_OFF);
	else
		/* else one pixel will take 2 byte */
		regw(((params->win.width * CCDC_TWO_BYTES_PER_PIXEL) +
		    CCDC_32BYTE_ALIGN_VAL) & CCDC_HSIZE_OFF_MASK,
		    CCDC_HSIZE_OFF);

	/* Set value for SDOFST */
	if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
		if (params->image_invert_enable) {
			/* For intelace inverse mode */
			regw(CCDC_INTERLACED_IMAGE_INVERT, CCDC_SDOFST);
			dev_dbg(ccdc_cfg.dev, "\nWriting 0x4B6D to SDOFST..\n");
		} else {
			/* For intelace non inverse mode */
			regw(CCDC_INTERLACED_NO_IMAGE_INVERT, CCDC_SDOFST);
			dev_dbg(ccdc_cfg.dev, "\nWriting 0x0249 to SDOFST..\n");
		}
	} else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
		regw(CCDC_PROGRESSIVE_NO_IMAGE_INVERT, CCDC_SDOFST);
		dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to SDOFST...\n");
	}

	/*
	 * Configure video port pixel selection (VPOUT)
	 * Here -1 is to make the height value less than FMT_VERT.FMTLNV
	 */
	if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
		val = (((params->win.height - 1) & CCDC_VP_OUT_VERT_NUM_MASK))
		    << CCDC_VP_OUT_VERT_NUM_SHIFT;
	else
		val =
		    ((((params->win.height >> CCDC_INTERLACED_HEIGHT_SHIFT) -
		     1) & CCDC_VP_OUT_VERT_NUM_MASK)) <<
		    CCDC_VP_OUT_VERT_NUM_SHIFT;

	val |= ((((params->win.width))) & CCDC_VP_OUT_HORZ_NUM_MASK)
	    << CCDC_VP_OUT_HORZ_NUM_SHIFT;
	val |= (params->win.left) & CCDC_VP_OUT_HORZ_ST_MASK;
	regw(val, CCDC_VP_OUT);

	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to VP_OUT...\n", val);
	regw(syn_mode, CCDC_SYN_MODE);
	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to SYN_MODE...\n", syn_mode);

	ccdc_sbl_reset();
	dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw...");
	ccdc_readregs();
}

/* Dispatch to the raw-Bayer or YCbCr configuration path. */
static int ccdc_configure(void)
{
	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
		ccdc_config_raw();
	else
		ccdc_config_ycbcr();
	return 0;
}

static int ccdc_set_buftype(enum ccdc_buftype buf_type)
{
	if (ccdc_cfg.if_type ==
			VPFE_RAW_BAYER)
		ccdc_cfg.bayer.buf_type = buf_type;
	else
		ccdc_cfg.ycbcr.buf_type = buf_type;
	return 0;
}

static enum ccdc_buftype ccdc_get_buftype(void)
{
	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
		return ccdc_cfg.bayer.buf_type;
	return ccdc_cfg.ycbcr.buf_type;
}

/* Enumerate the pixel formats supported by the active interface. */
static int ccdc_enum_pix(u32 *pix, int i)
{
	int ret = -EINVAL;

	if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
		if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) {
			*pix = ccdc_raw_bayer_pix_formats[i];
			ret = 0;
		}
	} else {
		if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) {
			*pix = ccdc_raw_yuv_pix_formats[i];
			ret = 0;
		}
	}
	return ret;
}

static int ccdc_set_pixel_format(u32 pixfmt)
{
	if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
		ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
		/* SBGGR8 selects 8-bit output via A-law compression */
		if (pixfmt == V4L2_PIX_FMT_SBGGR8)
			ccdc_cfg.bayer.config_params.alaw.enable = 1;
		else if (pixfmt != V4L2_PIX_FMT_SBGGR16)
			return -EINVAL;
	} else {
		if (pixfmt == V4L2_PIX_FMT_YUYV)
			ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
		else if (pixfmt == V4L2_PIX_FMT_UYVY)
			ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
		else
			return -EINVAL;
	}
	return 0;
}

static u32 ccdc_get_pixel_format(void)
{
	struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
	u32 pixfmt;

	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
		if (alaw->enable)
			pixfmt = V4L2_PIX_FMT_SBGGR8;
		else
			pixfmt = V4L2_PIX_FMT_SBGGR16;
	else {
		if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
			pixfmt = V4L2_PIX_FMT_YUYV;
		else
			pixfmt = V4L2_PIX_FMT_UYVY;
	}
	return pixfmt;
}

static int ccdc_set_image_window(struct v4l2_rect *win)
{
	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
		ccdc_cfg.bayer.win = *win;
	else
		ccdc_cfg.ycbcr.win = *win;
	return 0;
}

static void ccdc_get_image_window(struct v4l2_rect *win)
{
	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
		*win = ccdc_cfg.bayer.win;
	else
		*win = ccdc_cfg.ycbcr.win;
}

/* Bytes per line, rounded up to a 32-byte boundary. */
static unsigned int ccdc_get_line_length(void)
{
	struct ccdc_config_params_raw *config_params =
				&ccdc_cfg.bayer.config_params;
	unsigned int len;

	if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
		/* 8-bit (packed or A-law) data: 1 byte/pixel, else 2 */
		if ((config_params->alaw.enable) ||
		    (config_params->data_sz == CCDC_DATA_8BITS))
			len = ccdc_cfg.bayer.win.width;
		else
			len = ccdc_cfg.bayer.win.width * 2;
	} else
		len = ccdc_cfg.ycbcr.win.width * 2;
	return ALIGN(len, 32);
}

static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt)
{
	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
		ccdc_cfg.bayer.frm_fmt = frm_fmt;
	else
		ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
	return 0;
}

static enum ccdc_frmfmt ccdc_get_frame_format(void)
{
	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
		return ccdc_cfg.bayer.frm_fmt;
	else
		return ccdc_cfg.ycbcr.frm_fmt;
}

/* Current field id: bit 15 of SYN_MODE. */
static int ccdc_getfid(void)
{
	return (regr(CCDC_SYN_MODE) >> 15) & 1;
}

/* misc operations */
static inline void ccdc_setfbaddr(unsigned long addr)
{
	/* frame buffer address must be 32-byte aligned */
	regw(addr & 0xffffffe0, CCDC_SDR_ADDR);
}

static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
{
	ccdc_cfg.if_type = params->if_type;

	switch (params->if_type) {
	case VPFE_BT656:
	case VPFE_YCBCR_SYNC_16:
	case VPFE_YCBCR_SYNC_8:
	case VPFE_BT656_10BIT:
		ccdc_cfg.ycbcr.vd_pol = params->vdpol;
		ccdc_cfg.ycbcr.hd_pol = params->hdpol;
		break;
	default:
		/* TODO add support for raw bayer here */
		return -EINVAL;
	}
	return 0;
}

/* Save all CCDC registers into ccdc_ctx across suspend. */
static void ccdc_save_context(void)
{
	ccdc_ctx[CCDC_PCR >> 2] = regr(CCDC_PCR);
	ccdc_ctx[CCDC_SYN_MODE >> 2] = regr(CCDC_SYN_MODE);
	ccdc_ctx[CCDC_HD_VD_WID >> 2] = regr(CCDC_HD_VD_WID);
	ccdc_ctx[CCDC_PIX_LINES >> 2] = regr(CCDC_PIX_LINES);
	ccdc_ctx[CCDC_HORZ_INFO >> 2] = regr(CCDC_HORZ_INFO);
	ccdc_ctx[CCDC_VERT_START >> 2] = regr(CCDC_VERT_START);
	ccdc_ctx[CCDC_VERT_LINES >> 2] = regr(CCDC_VERT_LINES);
	ccdc_ctx[CCDC_CULLING >> 2] = regr(CCDC_CULLING);
	ccdc_ctx[CCDC_HSIZE_OFF >> 2] = regr(CCDC_HSIZE_OFF);
	ccdc_ctx[CCDC_SDOFST >> 2] = regr(CCDC_SDOFST);
	ccdc_ctx[CCDC_SDR_ADDR >> 2] = regr(CCDC_SDR_ADDR);
	ccdc_ctx[CCDC_CLAMP >> 2] = regr(CCDC_CLAMP);
	ccdc_ctx[CCDC_DCSUB >> 2] = regr(CCDC_DCSUB);
	ccdc_ctx[CCDC_COLPTN >> 2] = regr(CCDC_COLPTN);
	ccdc_ctx[CCDC_BLKCMP >> 2] = regr(CCDC_BLKCMP);
	ccdc_ctx[CCDC_FPC >> 2] = regr(CCDC_FPC);
	ccdc_ctx[CCDC_FPC_ADDR >> 2] =
		regr(CCDC_FPC_ADDR);
	ccdc_ctx[CCDC_VDINT >> 2] = regr(CCDC_VDINT);
	ccdc_ctx[CCDC_ALAW >> 2] = regr(CCDC_ALAW);
	ccdc_ctx[CCDC_REC656IF >> 2] = regr(CCDC_REC656IF);
	ccdc_ctx[CCDC_CCDCFG >> 2] = regr(CCDC_CCDCFG);
	ccdc_ctx[CCDC_FMTCFG >> 2] = regr(CCDC_FMTCFG);
	ccdc_ctx[CCDC_FMT_HORZ >> 2] = regr(CCDC_FMT_HORZ);
	ccdc_ctx[CCDC_FMT_VERT >> 2] = regr(CCDC_FMT_VERT);
	ccdc_ctx[CCDC_FMT_ADDR0 >> 2] = regr(CCDC_FMT_ADDR0);
	ccdc_ctx[CCDC_FMT_ADDR1 >> 2] = regr(CCDC_FMT_ADDR1);
	ccdc_ctx[CCDC_FMT_ADDR2 >> 2] = regr(CCDC_FMT_ADDR2);
	ccdc_ctx[CCDC_FMT_ADDR3 >> 2] = regr(CCDC_FMT_ADDR3);
	ccdc_ctx[CCDC_FMT_ADDR4 >> 2] = regr(CCDC_FMT_ADDR4);
	ccdc_ctx[CCDC_FMT_ADDR5 >> 2] = regr(CCDC_FMT_ADDR5);
	ccdc_ctx[CCDC_FMT_ADDR6 >> 2] = regr(CCDC_FMT_ADDR6);
	ccdc_ctx[CCDC_FMT_ADDR7 >> 2] = regr(CCDC_FMT_ADDR7);
	ccdc_ctx[CCDC_PRGEVEN_0 >> 2] = regr(CCDC_PRGEVEN_0);
	ccdc_ctx[CCDC_PRGEVEN_1 >> 2] = regr(CCDC_PRGEVEN_1);
	ccdc_ctx[CCDC_PRGODD_0 >> 2] = regr(CCDC_PRGODD_0);
	ccdc_ctx[CCDC_PRGODD_1 >> 2] = regr(CCDC_PRGODD_1);
	ccdc_ctx[CCDC_VP_OUT >> 2] = regr(CCDC_VP_OUT);
}

/* Restore registers on resume; PCR (the enable bit) is written last. */
static void ccdc_restore_context(void)
{
	regw(ccdc_ctx[CCDC_SYN_MODE >> 2], CCDC_SYN_MODE);
	regw(ccdc_ctx[CCDC_HD_VD_WID >> 2], CCDC_HD_VD_WID);
	regw(ccdc_ctx[CCDC_PIX_LINES >> 2], CCDC_PIX_LINES);
	regw(ccdc_ctx[CCDC_HORZ_INFO >> 2], CCDC_HORZ_INFO);
	regw(ccdc_ctx[CCDC_VERT_START >> 2], CCDC_VERT_START);
	regw(ccdc_ctx[CCDC_VERT_LINES >> 2], CCDC_VERT_LINES);
	regw(ccdc_ctx[CCDC_CULLING >> 2], CCDC_CULLING);
	regw(ccdc_ctx[CCDC_HSIZE_OFF >> 2], CCDC_HSIZE_OFF);
	regw(ccdc_ctx[CCDC_SDOFST >> 2], CCDC_SDOFST);
	regw(ccdc_ctx[CCDC_SDR_ADDR >> 2], CCDC_SDR_ADDR);
	regw(ccdc_ctx[CCDC_CLAMP >> 2], CCDC_CLAMP);
	regw(ccdc_ctx[CCDC_DCSUB >> 2], CCDC_DCSUB);
	regw(ccdc_ctx[CCDC_COLPTN >> 2], CCDC_COLPTN);
	regw(ccdc_ctx[CCDC_BLKCMP >> 2], CCDC_BLKCMP);
	regw(ccdc_ctx[CCDC_FPC >> 2], CCDC_FPC);
	regw(ccdc_ctx[CCDC_FPC_ADDR >> 2], CCDC_FPC_ADDR);
	regw(ccdc_ctx[CCDC_VDINT >> 2], CCDC_VDINT);
	regw(ccdc_ctx[CCDC_ALAW >> 2], CCDC_ALAW);
	regw(ccdc_ctx[CCDC_REC656IF >> 2], CCDC_REC656IF);
	regw(ccdc_ctx[CCDC_CCDCFG >> 2], CCDC_CCDCFG);
	regw(ccdc_ctx[CCDC_FMTCFG >> 2], CCDC_FMTCFG);
	regw(ccdc_ctx[CCDC_FMT_HORZ >> 2], CCDC_FMT_HORZ);
	regw(ccdc_ctx[CCDC_FMT_VERT >> 2], CCDC_FMT_VERT);
	regw(ccdc_ctx[CCDC_FMT_ADDR0 >> 2], CCDC_FMT_ADDR0);
	regw(ccdc_ctx[CCDC_FMT_ADDR1 >> 2], CCDC_FMT_ADDR1);
	regw(ccdc_ctx[CCDC_FMT_ADDR2 >> 2], CCDC_FMT_ADDR2);
	regw(ccdc_ctx[CCDC_FMT_ADDR3 >> 2], CCDC_FMT_ADDR3);
	regw(ccdc_ctx[CCDC_FMT_ADDR4 >> 2], CCDC_FMT_ADDR4);
	regw(ccdc_ctx[CCDC_FMT_ADDR5 >> 2], CCDC_FMT_ADDR5);
	regw(ccdc_ctx[CCDC_FMT_ADDR6 >> 2], CCDC_FMT_ADDR6);
	regw(ccdc_ctx[CCDC_FMT_ADDR7 >> 2], CCDC_FMT_ADDR7);
	regw(ccdc_ctx[CCDC_PRGEVEN_0 >> 2], CCDC_PRGEVEN_0);
	regw(ccdc_ctx[CCDC_PRGEVEN_1 >> 2], CCDC_PRGEVEN_1);
	regw(ccdc_ctx[CCDC_PRGODD_0 >> 2], CCDC_PRGODD_0);
	regw(ccdc_ctx[CCDC_PRGODD_1 >> 2], CCDC_PRGODD_1);
	regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
	regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
}

/* Operations table exported to the vpfe_capture bridge driver. */
static struct ccdc_hw_device ccdc_hw_dev = {
	.name = "DM6446 CCDC",
	.owner = THIS_MODULE,
	.hw_ops = {
		.open = ccdc_open,
		.close = ccdc_close,
		.reset = ccdc_sbl_reset,
		.enable = ccdc_enable,
		.set_hw_if_params = ccdc_set_hw_if_params,
		.set_params = ccdc_set_params,
		.configure = ccdc_configure,
		.set_buftype = ccdc_set_buftype,
		.get_buftype = ccdc_get_buftype,
		.enum_pix = ccdc_enum_pix,
		.set_pixel_format = ccdc_set_pixel_format,
		.get_pixel_format = ccdc_get_pixel_format,
		.set_frame_format = ccdc_set_frame_format,
		.get_frame_format = ccdc_get_frame_format,
		.set_image_window = ccdc_set_image_window,
		.get_image_window = ccdc_get_image_window,
		.get_line_length = ccdc_get_line_length,
		.setfbaddr = ccdc_setfbaddr,
		.getfid = ccdc_getfid,
	},
};

static int dm644x_ccdc_probe(struct platform_device *pdev)
{
	struct resource	*res;
	int status = 0;

	/*
	 * first try to register with vpfe.
If not correct platform, then we * don't have to iomap */ status = vpfe_register_ccdc_device(&ccdc_hw_dev); if (status < 0) return status; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { status = -ENODEV; goto fail_nores; } res = request_mem_region(res->start, resource_size(res), res->name); if (!res) { status = -EBUSY; goto fail_nores; } ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res)); if (!ccdc_cfg.base_addr) { status = -ENOMEM; goto fail_nomem; } ccdc_cfg.dev = &pdev->dev; printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name); return 0; fail_nomem: release_mem_region(res->start, resource_size(res)); fail_nores: vpfe_unregister_ccdc_device(&ccdc_hw_dev); return status; } static int dm644x_ccdc_remove(struct platform_device *pdev) { struct resource *res; iounmap(ccdc_cfg.base_addr); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) release_mem_region(res->start, resource_size(res)); vpfe_unregister_ccdc_device(&ccdc_hw_dev); return 0; } static int dm644x_ccdc_suspend(struct device *dev) { /* Save CCDC context */ ccdc_save_context(); /* Disable CCDC */ ccdc_enable(0); return 0; } static int dm644x_ccdc_resume(struct device *dev) { /* Restore CCDC context */ ccdc_restore_context(); return 0; } static const struct dev_pm_ops dm644x_ccdc_pm_ops = { .suspend = dm644x_ccdc_suspend, .resume = dm644x_ccdc_resume, }; static struct platform_driver dm644x_ccdc_driver = { .driver = { .name = "dm644x_ccdc", .pm = &dm644x_ccdc_pm_ops, }, .remove = dm644x_ccdc_remove, .probe = dm644x_ccdc_probe, }; module_platform_driver(dm644x_ccdc_driver);
gpl-2.0
TheJJ/linux
drivers/tty/serial/sccnxp.c
1297
26334
/* * NXP (Philips) SCC+++(SCN+++) serial driver * * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru> * * Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #if defined(CONFIG_SERIAL_SCCNXP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/clk.h> #include <linux/err.h> #include <linux/module.h> #include <linux/device.h> #include <linux/console.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/io.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/spinlock.h> #include <linux/platform_device.h> #include <linux/platform_data/serial-sccnxp.h> #include <linux/regulator/consumer.h> #define SCCNXP_NAME "uart-sccnxp" #define SCCNXP_MAJOR 204 #define SCCNXP_MINOR 205 #define SCCNXP_MR_REG (0x00) # define MR0_BAUD_NORMAL (0 << 0) # define MR0_BAUD_EXT1 (1 << 0) # define MR0_BAUD_EXT2 (5 << 0) # define MR0_FIFO (1 << 3) # define MR0_TXLVL (1 << 4) # define MR1_BITS_5 (0 << 0) # define MR1_BITS_6 (1 << 0) # define MR1_BITS_7 (2 << 0) # define MR1_BITS_8 (3 << 0) # define MR1_PAR_EVN (0 << 2) # define MR1_PAR_ODD (1 << 2) # define MR1_PAR_NO (4 << 2) # define MR2_STOP1 (7 << 0) # define MR2_STOP2 (0xf << 0) #define SCCNXP_SR_REG (0x01) #define SCCNXP_CSR_REG SCCNXP_SR_REG # define SR_RXRDY (1 << 0) # define SR_FULL (1 << 1) # define SR_TXRDY (1 << 2) # define SR_TXEMT (1 << 3) # define SR_OVR (1 << 4) # define SR_PE (1 << 5) # define SR_FE (1 << 6) # define SR_BRK (1 << 7) #define SCCNXP_CR_REG (0x02) # define CR_RX_ENABLE (1 << 0) # define CR_RX_DISABLE (1 << 1) # define CR_TX_ENABLE (1 << 2) # define CR_TX_DISABLE (1 << 3) # define CR_CMD_MRPTR1 (0x01 << 4) # define CR_CMD_RX_RESET (0x02 << 4) # define CR_CMD_TX_RESET (0x03 << 
4) # define CR_CMD_STATUS_RESET (0x04 << 4) # define CR_CMD_BREAK_RESET (0x05 << 4) # define CR_CMD_START_BREAK (0x06 << 4) # define CR_CMD_STOP_BREAK (0x07 << 4) # define CR_CMD_MRPTR0 (0x0b << 4) #define SCCNXP_RHR_REG (0x03) #define SCCNXP_THR_REG SCCNXP_RHR_REG #define SCCNXP_IPCR_REG (0x04) #define SCCNXP_ACR_REG SCCNXP_IPCR_REG # define ACR_BAUD0 (0 << 7) # define ACR_BAUD1 (1 << 7) # define ACR_TIMER_MODE (6 << 4) #define SCCNXP_ISR_REG (0x05) #define SCCNXP_IMR_REG SCCNXP_ISR_REG # define IMR_TXRDY (1 << 0) # define IMR_RXRDY (1 << 1) # define ISR_TXRDY(x) (1 << ((x * 4) + 0)) # define ISR_RXRDY(x) (1 << ((x * 4) + 1)) #define SCCNXP_IPR_REG (0x0d) #define SCCNXP_OPCR_REG SCCNXP_IPR_REG #define SCCNXP_SOP_REG (0x0e) #define SCCNXP_ROP_REG (0x0f) /* Route helpers */ #define MCTRL_MASK(sig) (0xf << (sig)) #define MCTRL_IBIT(cfg, sig) ((((cfg) >> (sig)) & 0xf) - LINE_IP0) #define MCTRL_OBIT(cfg, sig) ((((cfg) >> (sig)) & 0xf) - LINE_OP0) #define SCCNXP_HAVE_IO 0x00000001 #define SCCNXP_HAVE_MR0 0x00000002 struct sccnxp_chip { const char *name; unsigned int nr; unsigned long freq_min; unsigned long freq_std; unsigned long freq_max; unsigned int flags; unsigned int fifosize; }; struct sccnxp_port { struct uart_driver uart; struct uart_port port[SCCNXP_MAX_UARTS]; bool opened[SCCNXP_MAX_UARTS]; int irq; u8 imr; struct sccnxp_chip *chip; #ifdef CONFIG_SERIAL_SCCNXP_CONSOLE struct console console; #endif spinlock_t lock; bool poll; struct timer_list timer; struct sccnxp_pdata pdata; struct regulator *regulator; }; static const struct sccnxp_chip sc2681 = { .name = "SC2681", .nr = 2, .freq_min = 1000000, .freq_std = 3686400, .freq_max = 4000000, .flags = SCCNXP_HAVE_IO, .fifosize = 3, }; static const struct sccnxp_chip sc2691 = { .name = "SC2691", .nr = 1, .freq_min = 1000000, .freq_std = 3686400, .freq_max = 4000000, .flags = 0, .fifosize = 3, }; static const struct sccnxp_chip sc2692 = { .name = "SC2692", .nr = 2, .freq_min = 1000000, .freq_std = 3686400, 
.freq_max = 4000000, .flags = SCCNXP_HAVE_IO, .fifosize = 3, }; static const struct sccnxp_chip sc2891 = { .name = "SC2891", .nr = 1, .freq_min = 100000, .freq_std = 3686400, .freq_max = 8000000, .flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0, .fifosize = 16, }; static const struct sccnxp_chip sc2892 = { .name = "SC2892", .nr = 2, .freq_min = 100000, .freq_std = 3686400, .freq_max = 8000000, .flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0, .fifosize = 16, }; static const struct sccnxp_chip sc28202 = { .name = "SC28202", .nr = 2, .freq_min = 1000000, .freq_std = 14745600, .freq_max = 50000000, .flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0, .fifosize = 256, }; static const struct sccnxp_chip sc68681 = { .name = "SC68681", .nr = 2, .freq_min = 1000000, .freq_std = 3686400, .freq_max = 4000000, .flags = SCCNXP_HAVE_IO, .fifosize = 3, }; static const struct sccnxp_chip sc68692 = { .name = "SC68692", .nr = 2, .freq_min = 1000000, .freq_std = 3686400, .freq_max = 4000000, .flags = SCCNXP_HAVE_IO, .fifosize = 3, }; static inline u8 sccnxp_read(struct uart_port *port, u8 reg) { return readb(port->membase + (reg << port->regshift)); } static inline void sccnxp_write(struct uart_port *port, u8 reg, u8 v) { writeb(v, port->membase + (reg << port->regshift)); } static inline u8 sccnxp_port_read(struct uart_port *port, u8 reg) { return sccnxp_read(port, (port->line << 3) + reg); } static inline void sccnxp_port_write(struct uart_port *port, u8 reg, u8 v) { sccnxp_write(port, (port->line << 3) + reg, v); } static int sccnxp_update_best_err(int a, int b, int *besterr) { int err = abs(a - b); if ((*besterr < 0) || (*besterr > err)) { *besterr = err; return 0; } return 1; } static const struct { u8 csr; u8 acr; u8 mr0; int baud; } baud_std[] = { { 0, ACR_BAUD0, MR0_BAUD_NORMAL, 50, }, { 0, ACR_BAUD1, MR0_BAUD_NORMAL, 75, }, { 1, ACR_BAUD0, MR0_BAUD_NORMAL, 110, }, { 2, ACR_BAUD0, MR0_BAUD_NORMAL, 134, }, { 3, ACR_BAUD1, MR0_BAUD_NORMAL, 150, }, { 3, ACR_BAUD0, MR0_BAUD_NORMAL, 200, }, { 4, 
ACR_BAUD0, MR0_BAUD_NORMAL, 300, }, { 0, ACR_BAUD1, MR0_BAUD_EXT1, 450, }, { 1, ACR_BAUD0, MR0_BAUD_EXT2, 880, }, { 3, ACR_BAUD1, MR0_BAUD_EXT1, 900, }, { 5, ACR_BAUD0, MR0_BAUD_NORMAL, 600, }, { 7, ACR_BAUD0, MR0_BAUD_NORMAL, 1050, }, { 2, ACR_BAUD0, MR0_BAUD_EXT2, 1076, }, { 6, ACR_BAUD0, MR0_BAUD_NORMAL, 1200, }, { 10, ACR_BAUD1, MR0_BAUD_NORMAL, 1800, }, { 7, ACR_BAUD1, MR0_BAUD_NORMAL, 2000, }, { 8, ACR_BAUD0, MR0_BAUD_NORMAL, 2400, }, { 5, ACR_BAUD1, MR0_BAUD_EXT1, 3600, }, { 9, ACR_BAUD0, MR0_BAUD_NORMAL, 4800, }, { 10, ACR_BAUD0, MR0_BAUD_NORMAL, 7200, }, { 11, ACR_BAUD0, MR0_BAUD_NORMAL, 9600, }, { 8, ACR_BAUD0, MR0_BAUD_EXT1, 14400, }, { 12, ACR_BAUD1, MR0_BAUD_NORMAL, 19200, }, { 9, ACR_BAUD0, MR0_BAUD_EXT1, 28800, }, { 12, ACR_BAUD0, MR0_BAUD_NORMAL, 38400, }, { 11, ACR_BAUD0, MR0_BAUD_EXT1, 57600, }, { 12, ACR_BAUD1, MR0_BAUD_EXT1, 115200, }, { 12, ACR_BAUD0, MR0_BAUD_EXT1, 230400, }, { 0, 0, 0, 0 } }; static int sccnxp_set_baud(struct uart_port *port, int baud) { struct sccnxp_port *s = dev_get_drvdata(port->dev); int div_std, tmp_baud, bestbaud = baud, besterr = -1; struct sccnxp_chip *chip = s->chip; u8 i, acr = 0, csr = 0, mr0 = 0; /* Find best baud from table */ for (i = 0; baud_std[i].baud && besterr; i++) { if (baud_std[i].mr0 && !(chip->flags & SCCNXP_HAVE_MR0)) continue; div_std = DIV_ROUND_CLOSEST(chip->freq_std, baud_std[i].baud); tmp_baud = DIV_ROUND_CLOSEST(port->uartclk, div_std); if (!sccnxp_update_best_err(baud, tmp_baud, &besterr)) { acr = baud_std[i].acr; csr = baud_std[i].csr; mr0 = baud_std[i].mr0; bestbaud = tmp_baud; } } if (chip->flags & SCCNXP_HAVE_MR0) { /* Enable FIFO, set half level for TX */ mr0 |= MR0_FIFO | MR0_TXLVL; /* Update MR0 */ sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_MRPTR0); sccnxp_port_write(port, SCCNXP_MR_REG, mr0); } sccnxp_port_write(port, SCCNXP_ACR_REG, acr | ACR_TIMER_MODE); sccnxp_port_write(port, SCCNXP_CSR_REG, (csr << 4) | csr); if (baud != bestbaud) dev_dbg(port->dev, "Baudrate desired: %i, 
calculated: %i\n", baud, bestbaud); return bestbaud; } static void sccnxp_enable_irq(struct uart_port *port, int mask) { struct sccnxp_port *s = dev_get_drvdata(port->dev); s->imr |= mask << (port->line * 4); sccnxp_write(port, SCCNXP_IMR_REG, s->imr); } static void sccnxp_disable_irq(struct uart_port *port, int mask) { struct sccnxp_port *s = dev_get_drvdata(port->dev); s->imr &= ~(mask << (port->line * 4)); sccnxp_write(port, SCCNXP_IMR_REG, s->imr); } static void sccnxp_set_bit(struct uart_port *port, int sig, int state) { u8 bitmask; struct sccnxp_port *s = dev_get_drvdata(port->dev); if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(sig)) { bitmask = 1 << MCTRL_OBIT(s->pdata.mctrl_cfg[port->line], sig); if (state) sccnxp_write(port, SCCNXP_SOP_REG, bitmask); else sccnxp_write(port, SCCNXP_ROP_REG, bitmask); } } static void sccnxp_handle_rx(struct uart_port *port) { u8 sr; unsigned int ch, flag; for (;;) { sr = sccnxp_port_read(port, SCCNXP_SR_REG); if (!(sr & SR_RXRDY)) break; sr &= SR_PE | SR_FE | SR_OVR | SR_BRK; ch = sccnxp_port_read(port, SCCNXP_RHR_REG); port->icount.rx++; flag = TTY_NORMAL; if (unlikely(sr)) { if (sr & SR_BRK) { port->icount.brk++; sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_BREAK_RESET); if (uart_handle_break(port)) continue; } else if (sr & SR_PE) port->icount.parity++; else if (sr & SR_FE) port->icount.frame++; else if (sr & SR_OVR) { port->icount.overrun++; sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_STATUS_RESET); } sr &= port->read_status_mask; if (sr & SR_BRK) flag = TTY_BREAK; else if (sr & SR_PE) flag = TTY_PARITY; else if (sr & SR_FE) flag = TTY_FRAME; else if (sr & SR_OVR) flag = TTY_OVERRUN; } if (uart_handle_sysrq_char(port, ch)) continue; if (sr & port->ignore_status_mask) continue; uart_insert_char(port, sr, SR_OVR, ch, flag); } tty_flip_buffer_push(&port->state->port); } static void sccnxp_handle_tx(struct uart_port *port) { u8 sr; struct circ_buf *xmit = &port->state->xmit; struct sccnxp_port *s = 
dev_get_drvdata(port->dev); if (unlikely(port->x_char)) { sccnxp_port_write(port, SCCNXP_THR_REG, port->x_char); port->icount.tx++; port->x_char = 0; return; } if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { /* Disable TX if FIFO is empty */ if (sccnxp_port_read(port, SCCNXP_SR_REG) & SR_TXEMT) { sccnxp_disable_irq(port, IMR_TXRDY); /* Set direction to input */ if (s->chip->flags & SCCNXP_HAVE_IO) sccnxp_set_bit(port, DIR_OP, 0); } return; } while (!uart_circ_empty(xmit)) { sr = sccnxp_port_read(port, SCCNXP_SR_REG); if (!(sr & SR_TXRDY)) break; sccnxp_port_write(port, SCCNXP_THR_REG, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); } static void sccnxp_handle_events(struct sccnxp_port *s) { int i; u8 isr; do { isr = sccnxp_read(&s->port[0], SCCNXP_ISR_REG); isr &= s->imr; if (!isr) break; for (i = 0; i < s->uart.nr; i++) { if (s->opened[i] && (isr & ISR_RXRDY(i))) sccnxp_handle_rx(&s->port[i]); if (s->opened[i] && (isr & ISR_TXRDY(i))) sccnxp_handle_tx(&s->port[i]); } } while (1); } static void sccnxp_timer(unsigned long data) { struct sccnxp_port *s = (struct sccnxp_port *)data; unsigned long flags; spin_lock_irqsave(&s->lock, flags); sccnxp_handle_events(s); spin_unlock_irqrestore(&s->lock, flags); mod_timer(&s->timer, jiffies + usecs_to_jiffies(s->pdata.poll_time_us)); } static irqreturn_t sccnxp_ist(int irq, void *dev_id) { struct sccnxp_port *s = (struct sccnxp_port *)dev_id; unsigned long flags; spin_lock_irqsave(&s->lock, flags); sccnxp_handle_events(s); spin_unlock_irqrestore(&s->lock, flags); return IRQ_HANDLED; } static void sccnxp_start_tx(struct uart_port *port) { struct sccnxp_port *s = dev_get_drvdata(port->dev); unsigned long flags; spin_lock_irqsave(&s->lock, flags); /* Set direction to output */ if (s->chip->flags & SCCNXP_HAVE_IO) sccnxp_set_bit(port, DIR_OP, 1); sccnxp_enable_irq(port, IMR_TXRDY); 
spin_unlock_irqrestore(&s->lock, flags); } static void sccnxp_stop_tx(struct uart_port *port) { /* Do nothing */ } static void sccnxp_stop_rx(struct uart_port *port) { struct sccnxp_port *s = dev_get_drvdata(port->dev); unsigned long flags; spin_lock_irqsave(&s->lock, flags); sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_DISABLE); spin_unlock_irqrestore(&s->lock, flags); } static unsigned int sccnxp_tx_empty(struct uart_port *port) { u8 val; unsigned long flags; struct sccnxp_port *s = dev_get_drvdata(port->dev); spin_lock_irqsave(&s->lock, flags); val = sccnxp_port_read(port, SCCNXP_SR_REG); spin_unlock_irqrestore(&s->lock, flags); return (val & SR_TXEMT) ? TIOCSER_TEMT : 0; } static void sccnxp_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct sccnxp_port *s = dev_get_drvdata(port->dev); unsigned long flags; if (!(s->chip->flags & SCCNXP_HAVE_IO)) return; spin_lock_irqsave(&s->lock, flags); sccnxp_set_bit(port, DTR_OP, mctrl & TIOCM_DTR); sccnxp_set_bit(port, RTS_OP, mctrl & TIOCM_RTS); spin_unlock_irqrestore(&s->lock, flags); } static unsigned int sccnxp_get_mctrl(struct uart_port *port) { u8 bitmask, ipr; unsigned long flags; struct sccnxp_port *s = dev_get_drvdata(port->dev); unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR; if (!(s->chip->flags & SCCNXP_HAVE_IO)) return mctrl; spin_lock_irqsave(&s->lock, flags); ipr = ~sccnxp_read(port, SCCNXP_IPCR_REG); if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(DSR_IP)) { bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line], DSR_IP); mctrl &= ~TIOCM_DSR; mctrl |= (ipr & bitmask) ? TIOCM_DSR : 0; } if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(CTS_IP)) { bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line], CTS_IP); mctrl &= ~TIOCM_CTS; mctrl |= (ipr & bitmask) ? TIOCM_CTS : 0; } if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(DCD_IP)) { bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line], DCD_IP); mctrl &= ~TIOCM_CAR; mctrl |= (ipr & bitmask) ? 
TIOCM_CAR : 0; } if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(RNG_IP)) { bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line], RNG_IP); mctrl &= ~TIOCM_RNG; mctrl |= (ipr & bitmask) ? TIOCM_RNG : 0; } spin_unlock_irqrestore(&s->lock, flags); return mctrl; } static void sccnxp_break_ctl(struct uart_port *port, int break_state) { struct sccnxp_port *s = dev_get_drvdata(port->dev); unsigned long flags; spin_lock_irqsave(&s->lock, flags); sccnxp_port_write(port, SCCNXP_CR_REG, break_state ? CR_CMD_START_BREAK : CR_CMD_STOP_BREAK); spin_unlock_irqrestore(&s->lock, flags); } static void sccnxp_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct sccnxp_port *s = dev_get_drvdata(port->dev); unsigned long flags; u8 mr1, mr2; int baud; spin_lock_irqsave(&s->lock, flags); /* Mask termios capabilities we don't support */ termios->c_cflag &= ~CMSPAR; /* Disable RX & TX, reset break condition, status and FIFOs */ sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_RX_RESET | CR_RX_DISABLE | CR_TX_DISABLE); sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_TX_RESET); sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_STATUS_RESET); sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_BREAK_RESET); /* Word size */ switch (termios->c_cflag & CSIZE) { case CS5: mr1 = MR1_BITS_5; break; case CS6: mr1 = MR1_BITS_6; break; case CS7: mr1 = MR1_BITS_7; break; case CS8: default: mr1 = MR1_BITS_8; break; } /* Parity */ if (termios->c_cflag & PARENB) { if (termios->c_cflag & PARODD) mr1 |= MR1_PAR_ODD; } else mr1 |= MR1_PAR_NO; /* Stop bits */ mr2 = (termios->c_cflag & CSTOPB) ? 
MR2_STOP2 : MR2_STOP1; /* Update desired format */ sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_MRPTR1); sccnxp_port_write(port, SCCNXP_MR_REG, mr1); sccnxp_port_write(port, SCCNXP_MR_REG, mr2); /* Set read status mask */ port->read_status_mask = SR_OVR; if (termios->c_iflag & INPCK) port->read_status_mask |= SR_PE | SR_FE; if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) port->read_status_mask |= SR_BRK; /* Set status ignore mask */ port->ignore_status_mask = 0; if (termios->c_iflag & IGNBRK) port->ignore_status_mask |= SR_BRK; if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= SR_PE; if (!(termios->c_cflag & CREAD)) port->ignore_status_mask |= SR_PE | SR_OVR | SR_FE | SR_BRK; /* Setup baudrate */ baud = uart_get_baud_rate(port, termios, old, 50, (s->chip->flags & SCCNXP_HAVE_MR0) ? 230400 : 38400); baud = sccnxp_set_baud(port, baud); /* Update timeout according to new baud rate */ uart_update_timeout(port, termios->c_cflag, baud); /* Report actual baudrate back to core */ if (tty_termios_baud_rate(termios)) tty_termios_encode_baud_rate(termios, baud, baud); /* Enable RX & TX */ sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_ENABLE | CR_TX_ENABLE); spin_unlock_irqrestore(&s->lock, flags); } static int sccnxp_startup(struct uart_port *port) { struct sccnxp_port *s = dev_get_drvdata(port->dev); unsigned long flags; spin_lock_irqsave(&s->lock, flags); if (s->chip->flags & SCCNXP_HAVE_IO) { /* Outputs are controlled manually */ sccnxp_write(port, SCCNXP_OPCR_REG, 0); } /* Reset break condition, status and FIFOs */ sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_RX_RESET); sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_TX_RESET); sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_STATUS_RESET); sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_BREAK_RESET); /* Enable RX & TX */ sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_ENABLE | CR_TX_ENABLE); /* Enable RX interrupt */ sccnxp_enable_irq(port, IMR_RXRDY); s->opened[port->line] = 1; spin_unlock_irqrestore(&s->lock, 
flags); return 0; } static void sccnxp_shutdown(struct uart_port *port) { struct sccnxp_port *s = dev_get_drvdata(port->dev); unsigned long flags; spin_lock_irqsave(&s->lock, flags); s->opened[port->line] = 0; /* Disable interrupts */ sccnxp_disable_irq(port, IMR_TXRDY | IMR_RXRDY); /* Disable TX & RX */ sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_DISABLE | CR_TX_DISABLE); /* Leave direction to input */ if (s->chip->flags & SCCNXP_HAVE_IO) sccnxp_set_bit(port, DIR_OP, 0); spin_unlock_irqrestore(&s->lock, flags); } static const char *sccnxp_type(struct uart_port *port) { struct sccnxp_port *s = dev_get_drvdata(port->dev); return (port->type == PORT_SC26XX) ? s->chip->name : NULL; } static void sccnxp_release_port(struct uart_port *port) { /* Do nothing */ } static int sccnxp_request_port(struct uart_port *port) { /* Do nothing */ return 0; } static void sccnxp_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) port->type = PORT_SC26XX; } static int sccnxp_verify_port(struct uart_port *port, struct serial_struct *s) { if ((s->type == PORT_UNKNOWN) || (s->type == PORT_SC26XX)) return 0; if (s->irq == port->irq) return 0; return -EINVAL; } static const struct uart_ops sccnxp_ops = { .tx_empty = sccnxp_tx_empty, .set_mctrl = sccnxp_set_mctrl, .get_mctrl = sccnxp_get_mctrl, .stop_tx = sccnxp_stop_tx, .start_tx = sccnxp_start_tx, .stop_rx = sccnxp_stop_rx, .break_ctl = sccnxp_break_ctl, .startup = sccnxp_startup, .shutdown = sccnxp_shutdown, .set_termios = sccnxp_set_termios, .type = sccnxp_type, .release_port = sccnxp_release_port, .request_port = sccnxp_request_port, .config_port = sccnxp_config_port, .verify_port = sccnxp_verify_port, }; #ifdef CONFIG_SERIAL_SCCNXP_CONSOLE static void sccnxp_console_putchar(struct uart_port *port, int c) { int tryes = 100000; while (tryes--) { if (sccnxp_port_read(port, SCCNXP_SR_REG) & SR_TXRDY) { sccnxp_port_write(port, SCCNXP_THR_REG, c); break; } barrier(); } } static void sccnxp_console_write(struct 
console *co, const char *c, unsigned n) { struct sccnxp_port *s = (struct sccnxp_port *)co->data; struct uart_port *port = &s->port[co->index]; unsigned long flags; spin_lock_irqsave(&s->lock, flags); uart_console_write(port, c, n, sccnxp_console_putchar); spin_unlock_irqrestore(&s->lock, flags); } static int sccnxp_console_setup(struct console *co, char *options) { struct sccnxp_port *s = (struct sccnxp_port *)co->data; struct uart_port *port = &s->port[(co->index > 0) ? co->index : 0]; int baud = 9600, bits = 8, parity = 'n', flow = 'n'; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(port, co, baud, parity, bits, flow); } #endif static const struct platform_device_id sccnxp_id_table[] = { { .name = "sc2681", .driver_data = (kernel_ulong_t)&sc2681, }, { .name = "sc2691", .driver_data = (kernel_ulong_t)&sc2691, }, { .name = "sc2692", .driver_data = (kernel_ulong_t)&sc2692, }, { .name = "sc2891", .driver_data = (kernel_ulong_t)&sc2891, }, { .name = "sc2892", .driver_data = (kernel_ulong_t)&sc2892, }, { .name = "sc28202", .driver_data = (kernel_ulong_t)&sc28202, }, { .name = "sc68681", .driver_data = (kernel_ulong_t)&sc68681, }, { .name = "sc68692", .driver_data = (kernel_ulong_t)&sc68692, }, { } }; MODULE_DEVICE_TABLE(platform, sccnxp_id_table); static int sccnxp_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct sccnxp_pdata *pdata = dev_get_platdata(&pdev->dev); int i, ret, uartclk; struct sccnxp_port *s; void __iomem *membase; struct clk *clk; membase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(membase)) return PTR_ERR(membase); s = devm_kzalloc(&pdev->dev, sizeof(struct sccnxp_port), GFP_KERNEL); if (!s) { dev_err(&pdev->dev, "Error allocating port structure\n"); return -ENOMEM; } platform_set_drvdata(pdev, s); spin_lock_init(&s->lock); s->chip = (struct sccnxp_chip *)pdev->id_entry->driver_data; s->regulator = devm_regulator_get(&pdev->dev, 
"vcc"); if (!IS_ERR(s->regulator)) { ret = regulator_enable(s->regulator); if (ret) { dev_err(&pdev->dev, "Failed to enable regulator: %i\n", ret); return ret; } } else if (PTR_ERR(s->regulator) == -EPROBE_DEFER) return -EPROBE_DEFER; clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { if (PTR_ERR(clk) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto err_out; } dev_notice(&pdev->dev, "Using default clock frequency\n"); uartclk = s->chip->freq_std; } else uartclk = clk_get_rate(clk); /* Check input frequency */ if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) { dev_err(&pdev->dev, "Frequency out of bounds\n"); ret = -EINVAL; goto err_out; } if (pdata) memcpy(&s->pdata, pdata, sizeof(struct sccnxp_pdata)); if (s->pdata.poll_time_us) { dev_info(&pdev->dev, "Using poll mode, resolution %u usecs\n", s->pdata.poll_time_us); s->poll = 1; } if (!s->poll) { s->irq = platform_get_irq(pdev, 0); if (s->irq < 0) { dev_err(&pdev->dev, "Missing irq resource data\n"); ret = -ENXIO; goto err_out; } } s->uart.owner = THIS_MODULE; s->uart.dev_name = "ttySC"; s->uart.major = SCCNXP_MAJOR; s->uart.minor = SCCNXP_MINOR; s->uart.nr = s->chip->nr; #ifdef CONFIG_SERIAL_SCCNXP_CONSOLE s->uart.cons = &s->console; s->uart.cons->device = uart_console_device; s->uart.cons->write = sccnxp_console_write; s->uart.cons->setup = sccnxp_console_setup; s->uart.cons->flags = CON_PRINTBUFFER; s->uart.cons->index = -1; s->uart.cons->data = s; strcpy(s->uart.cons->name, "ttySC"); #endif ret = uart_register_driver(&s->uart); if (ret) { dev_err(&pdev->dev, "Registering UART driver failed\n"); goto err_out; } for (i = 0; i < s->uart.nr; i++) { s->port[i].line = i; s->port[i].dev = &pdev->dev; s->port[i].irq = s->irq; s->port[i].type = PORT_SC26XX; s->port[i].fifosize = s->chip->fifosize; s->port[i].flags = UPF_SKIP_TEST | UPF_FIXED_TYPE; s->port[i].iotype = UPIO_MEM; s->port[i].mapbase = res->start; s->port[i].membase = membase; s->port[i].regshift = s->pdata.reg_shift; 
s->port[i].uartclk = uartclk; s->port[i].ops = &sccnxp_ops; uart_add_one_port(&s->uart, &s->port[i]); /* Set direction to input */ if (s->chip->flags & SCCNXP_HAVE_IO) sccnxp_set_bit(&s->port[i], DIR_OP, 0); } /* Disable interrupts */ s->imr = 0; sccnxp_write(&s->port[0], SCCNXP_IMR_REG, 0); if (!s->poll) { ret = devm_request_threaded_irq(&pdev->dev, s->irq, NULL, sccnxp_ist, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, dev_name(&pdev->dev), s); if (!ret) return 0; dev_err(&pdev->dev, "Unable to reguest IRQ %i\n", s->irq); } else { init_timer(&s->timer); setup_timer(&s->timer, sccnxp_timer, (unsigned long)s); mod_timer(&s->timer, jiffies + usecs_to_jiffies(s->pdata.poll_time_us)); return 0; } uart_unregister_driver(&s->uart); err_out: if (!IS_ERR(s->regulator)) return regulator_disable(s->regulator); return ret; } static int sccnxp_remove(struct platform_device *pdev) { int i; struct sccnxp_port *s = platform_get_drvdata(pdev); if (!s->poll) devm_free_irq(&pdev->dev, s->irq, s); else del_timer_sync(&s->timer); for (i = 0; i < s->uart.nr; i++) uart_remove_one_port(&s->uart, &s->port[i]); uart_unregister_driver(&s->uart); if (!IS_ERR(s->regulator)) return regulator_disable(s->regulator); return 0; } static struct platform_driver sccnxp_uart_driver = { .driver = { .name = SCCNXP_NAME, }, .probe = sccnxp_probe, .remove = sccnxp_remove, .id_table = sccnxp_id_table, }; module_platform_driver(sccnxp_uart_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); MODULE_DESCRIPTION("SCCNXP serial driver");
gpl-2.0
Drgravy/g3stock
drivers/input/misc/lis3dh_acc.c
1809
40271
/******************** (C) COPYRIGHT 2010 STMicroelectronics ******************** * * File Name : lis3dh_acc.c * Authors : MSH - Motion Mems BU - Application Team * : Matteo Dameno (matteo.dameno@st.com) * : Carmine Iascone (carmine.iascone@st.com) * : Samuel Huo (samuel.huo@st.com) * Version : V.1.1.0 * Date : 07/10/2012 * Description : LIS3DH accelerometer sensor driver * ******************************************************************************* * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THE PRESENT SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, FOR THE SOLE * PURPOSE TO SUPPORT YOUR APPLICATION DEVELOPMENT. * AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT, * INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE * CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING * INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. * * THIS SOFTWARE IS SPECIFICALLY DESIGNED FOR EXCLUSIVE USE WITH ST PARTS. * ****************************************************************************** Revision 1.0.0 05/11/09 First Release; Revision 1.0.3 22/01/2010 Linux K&R Compliant Release; Revision 1.0.5 16/08/2010 modified _get_acceleration_data function; modified _update_odr function; manages 2 interrupts; Revision 1.0.6 15/11/2010 supports sysfs; no more support for ioctl; Revision 1.0.7 26/11/2010 checks for availability of interrupts pins correction on FUZZ and FLAT values; Revision 1.0.8 2010/Apr/01 corrects a bug in interrupt pin management in 1.0.7 Revision 1.0.9 07/25/2011 Romove several unused functions,add 5ms delay in init,change sysfs attributes. 
Revision 1.1.0 07/10/2012 To replace some deprecated functions for 3.4 kernel; To pass the checkpatch's formatting requirement; To add regulator request; ******************************************************************************/ #include <linux/err.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/uaccess.h> #include <linux/workqueue.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/pm.h> #include <linux/input/lis3dh.h> #include <linux/module.h> #include <linux/regulator/consumer.h> #define DEBUG 1 #define G_MAX 16000 #define SENSITIVITY_2G 1 /** mg/LSB */ #define SENSITIVITY_4G 2 /** mg/LSB */ #define SENSITIVITY_8G 4 /** mg/LSB */ #define SENSITIVITY_16G 12 /** mg/LSB */ /* Accelerometer Sensor Operating Mode */ #define LIS3DH_ACC_ENABLE 0x01 #define LIS3DH_ACC_DISABLE 0x00 #define HIGH_RESOLUTION 0x08 #define AXISDATA_REG 0x28 #define WHOAMI_LIS3DH_ACC 0x33 /* Expected content for WAI */ /* CONTROL REGISTERS */ #define WHO_AM_I 0x0F /* WhoAmI register */ #define TEMP_CFG_REG 0x1F /* temper sens control reg */ /* ctrl 1: ODR3 ODR2 ODR ODR0 LPen Zenable Yenable Zenable */ #define CTRL_REG1 0x20 /* control reg 1 */ #define CTRL_REG2 0x21 /* control reg 2 */ #define CTRL_REG3 0x22 /* control reg 3 */ #define CTRL_REG4 0x23 /* control reg 4 */ #define CTRL_REG5 0x24 /* control reg 5 */ #define CTRL_REG6 0x25 /* control reg 6 */ #define FIFO_CTRL_REG 0x2E /* FiFo control reg */ #define INT_CFG1 0x30 /* interrupt 1 config */ #define INT_SRC1 0x31 /* interrupt 1 source */ #define INT_THS1 0x32 /* interrupt 1 threshold */ #define INT_DUR1 0x33 /* interrupt 1 duration */ #define TT_CFG 0x38 /* tap config */ #define TT_SRC 0x39 /* tap source */ #define TT_THS 0x3A /* tap threshold */ #define TT_LIM 0x3B /* tap time limit */ #define TT_TLAT 0x3C /* tap time latency */ #define TT_TW 0x3D /* tap time window */ /* 
end CONTROL REGISTRES */ #define ENABLE_HIGH_RESOLUTION 1 #define LIS3DH_ACC_PM_OFF 0x00 #define LIS3DH_ACC_ENABLE_ALL_AXES 0x07 #define PMODE_MASK 0x08 #define ODR_MASK 0XF0 #define ODR1 0x10 /* 1Hz output data rate */ #define ODR10 0x20 /* 10Hz output data rate */ #define ODR25 0x30 /* 25Hz output data rate */ #define ODR50 0x40 /* 50Hz output data rate */ #define ODR100 0x50 /* 100Hz output data rate */ #define ODR200 0x60 /* 200Hz output data rate */ #define ODR400 0x70 /* 400Hz output data rate */ #define ODR1250 0x90 /* 1250Hz output data rate */ #define IA 0x40 #define ZH 0x20 #define ZL 0x10 #define YH 0x08 #define YL 0x04 #define XH 0x02 #define XL 0x01 /* */ /* CTRL REG BITS*/ #define CTRL_REG3_I1_AOI1 0x40 #define CTRL_REG6_I2_TAPEN 0x80 #define CTRL_REG6_HLACTIVE 0x02 /* */ #define NO_MASK 0xFF #define INT1_DURATION_MASK 0x7F #define INT1_THRESHOLD_MASK 0x7F #define TAP_CFG_MASK 0x3F #define TAP_THS_MASK 0x7F #define TAP_TLIM_MASK 0x7F #define TAP_TLAT_MASK NO_MASK #define TAP_TW_MASK NO_MASK /* TAP_SOURCE_REG BIT */ #define DTAP 0x20 #define STAP 0x10 #define SIGNTAP 0x08 #define ZTAP 0x04 #define YTAP 0x02 #define XTAZ 0x01 #define FUZZ 0 #define FLAT 0 #define I2C_RETRY_DELAY 5 #define I2C_RETRIES 5 #define I2C_AUTO_INCREMENT 0x80 /* RESUME STATE INDICES */ #define RES_CTRL_REG1 0 #define RES_CTRL_REG2 1 #define RES_CTRL_REG3 2 #define RES_CTRL_REG4 3 #define RES_CTRL_REG5 4 #define RES_CTRL_REG6 5 #define RES_INT_CFG1 6 #define RES_INT_THS1 7 #define RES_INT_DUR1 8 #define RES_TT_CFG 9 #define RES_TT_THS 10 #define RES_TT_LIM 11 #define RES_TT_TLAT 12 #define RES_TT_TW 13 #define RES_TEMP_CFG_REG 14 #define RES_REFERENCE_REG 15 #define RES_FIFO_CTRL_REG 16 #define RESUME_ENTRIES 17 /* end RESUME STATE INDICES */ struct { unsigned int cutoff_ms; unsigned int mask; } lis3dh_acc_odr_table[] = { { 1, ODR1250 }, { 3, ODR400 }, { 5, ODR200 }, { 10, ODR100 }, { 20, ODR50 }, { 40, ODR25 }, { 100, ODR10 }, { 1000, ODR1 }, }; struct lis3dh_acc_data { struct 
i2c_client *client; struct lis3dh_acc_platform_data *pdata; struct mutex lock; struct delayed_work input_work; struct input_dev *input_dev; int hw_initialized; /* hw_working=-1 means not tested yet */ int hw_working; atomic_t enabled; int on_before_suspend; u8 sensitivity; u8 resume_state[RESUME_ENTRIES]; int irq1; struct work_struct irq1_work; struct workqueue_struct *irq1_work_queue; int irq2; struct work_struct irq2_work; struct workqueue_struct *irq2_work_queue; #ifdef DEBUG u8 reg_addr; #endif }; struct sensor_regulator { struct regulator *vreg; const char *name; u32 min_uV; u32 max_uV; }; struct sensor_regulator lis3dh_acc_vreg[] = { {NULL, "vdd", 1700000, 3600000}, {NULL, "vddio", 1700000, 3600000}, }; static int lis3dh_acc_config_regulator(struct lis3dh_acc_data *acc, bool on) { int rc = 0, i; int num_reg = sizeof(lis3dh_acc_vreg) / sizeof(struct sensor_regulator); if (on) { for (i = 0; i < num_reg; i++) { lis3dh_acc_vreg[i].vreg = regulator_get(&acc->client->dev, lis3dh_acc_vreg[i].name); if (IS_ERR(lis3dh_acc_vreg[i].vreg)) { rc = PTR_ERR(lis3dh_acc_vreg[i].vreg); pr_err("%s:regulator get failed rc=%d\n", __func__, rc); lis3dh_acc_vreg[i].vreg = NULL; goto error_vdd; } if (regulator_count_voltages( lis3dh_acc_vreg[i].vreg) > 0) { rc = regulator_set_voltage( lis3dh_acc_vreg[i].vreg, lis3dh_acc_vreg[i].min_uV, lis3dh_acc_vreg[i].max_uV); if (rc) { pr_err("%s: set voltage failed rc=%d\n", __func__, rc); regulator_put(lis3dh_acc_vreg[i].vreg); lis3dh_acc_vreg[i].vreg = NULL; goto error_vdd; } } rc = regulator_enable(lis3dh_acc_vreg[i].vreg); if (rc) { pr_err("%s: regulator_enable failed rc =%d\n", __func__, rc); if (regulator_count_voltages( lis3dh_acc_vreg[i].vreg) > 0) { regulator_set_voltage( lis3dh_acc_vreg[i].vreg, 0, lis3dh_acc_vreg[i].max_uV); } regulator_put(lis3dh_acc_vreg[i].vreg); lis3dh_acc_vreg[i].vreg = NULL; goto error_vdd; } } return rc; } else { i = num_reg; } error_vdd: while (--i >= 0) { if (!IS_ERR_OR_NULL(lis3dh_acc_vreg[i].vreg)) { if 
(regulator_count_voltages( lis3dh_acc_vreg[i].vreg) > 0) { regulator_set_voltage(lis3dh_acc_vreg[i].vreg, 0, lis3dh_acc_vreg[i].max_uV); } regulator_disable(lis3dh_acc_vreg[i].vreg); regulator_put(lis3dh_acc_vreg[i].vreg); lis3dh_acc_vreg[i].vreg = NULL; } } return rc; } static int lis3dh_acc_i2c_read(struct lis3dh_acc_data *acc, u8 *buf, int len) { int err; int tries = 0; struct i2c_msg msgs[] = { { .addr = acc->client->addr, .flags = acc->client->flags & I2C_M_TEN, .len = 1, .buf = buf, }, { .addr = acc->client->addr, .flags = (acc->client->flags & I2C_M_TEN) | I2C_M_RD, .len = len, .buf = buf, }, }; do { err = i2c_transfer(acc->client->adapter, msgs, 2); if (err != 2) msleep_interruptible(I2C_RETRY_DELAY); } while ((err != 2) && (++tries < I2C_RETRIES)); if (err != 2) { dev_err(&acc->client->dev, "read transfer error\n"); err = -EIO; } else { err = 0; } return err; } static int lis3dh_acc_i2c_write(struct lis3dh_acc_data *acc, u8 *buf, int len) { int err; int tries = 0; struct i2c_msg msgs[] = { { .addr = acc->client->addr, .flags = acc->client->flags & I2C_M_TEN, .len = len + 1, .buf = buf, }, }; do { err = i2c_transfer(acc->client->adapter, msgs, 1); if (err != 1) msleep_interruptible(I2C_RETRY_DELAY); } while ((err != 1) && (++tries < I2C_RETRIES)); if (err != 1) { dev_err(&acc->client->dev, "write transfer error\n"); err = -EIO; } else { err = 0; } return err; } static int lis3dh_acc_hw_init(struct lis3dh_acc_data *acc) { int err = -1; u8 buf[7]; printk(KERN_INFO "%s: hw init start\n", LIS3DH_ACC_DEV_NAME); buf[0] = WHO_AM_I; err = lis3dh_acc_i2c_read(acc, buf, 1); if (err < 0) { dev_warn(&acc->client->dev, "Error reading WHO_AM_I: is device available/working?\n"); goto err_firstread; } else acc->hw_working = 1; if (buf[0] != WHOAMI_LIS3DH_ACC) { dev_err(&acc->client->dev, "device unknown. 
Expected: 0x%x, Replies: 0x%x\n", WHOAMI_LIS3DH_ACC, buf[0]); err = -1; /* choose the right coded error */ goto err_unknown_device; } buf[0] = CTRL_REG1; buf[1] = acc->resume_state[RES_CTRL_REG1]; err = lis3dh_acc_i2c_write(acc, buf, 1); if (err < 0) goto err_resume_state; buf[0] = TEMP_CFG_REG; buf[1] = acc->resume_state[RES_TEMP_CFG_REG]; err = lis3dh_acc_i2c_write(acc, buf, 1); if (err < 0) goto err_resume_state; buf[0] = FIFO_CTRL_REG; buf[1] = acc->resume_state[RES_FIFO_CTRL_REG]; err = lis3dh_acc_i2c_write(acc, buf, 1); if (err < 0) goto err_resume_state; buf[0] = (I2C_AUTO_INCREMENT | TT_THS); buf[1] = acc->resume_state[RES_TT_THS]; buf[2] = acc->resume_state[RES_TT_LIM]; buf[3] = acc->resume_state[RES_TT_TLAT]; buf[4] = acc->resume_state[RES_TT_TW]; err = lis3dh_acc_i2c_write(acc, buf, 4); if (err < 0) goto err_resume_state; buf[0] = TT_CFG; buf[1] = acc->resume_state[RES_TT_CFG]; err = lis3dh_acc_i2c_write(acc, buf, 1); if (err < 0) goto err_resume_state; buf[0] = (I2C_AUTO_INCREMENT | INT_THS1); buf[1] = acc->resume_state[RES_INT_THS1]; buf[2] = acc->resume_state[RES_INT_DUR1]; err = lis3dh_acc_i2c_write(acc, buf, 2); if (err < 0) goto err_resume_state; buf[0] = INT_CFG1; buf[1] = acc->resume_state[RES_INT_CFG1]; err = lis3dh_acc_i2c_write(acc, buf, 1); if (err < 0) goto err_resume_state; buf[0] = (I2C_AUTO_INCREMENT | CTRL_REG2); buf[1] = acc->resume_state[RES_CTRL_REG2]; buf[2] = acc->resume_state[RES_CTRL_REG3]; buf[3] = acc->resume_state[RES_CTRL_REG4]; buf[4] = acc->resume_state[RES_CTRL_REG5]; buf[5] = acc->resume_state[RES_CTRL_REG6]; err = lis3dh_acc_i2c_write(acc, buf, 5); if (err < 0) goto err_resume_state; acc->hw_initialized = 1; printk(KERN_INFO "%s: hw init done\n", LIS3DH_ACC_DEV_NAME); return 0; err_firstread: acc->hw_working = 0; err_unknown_device: err_resume_state: acc->hw_initialized = 0; dev_err(&acc->client->dev, "hw init error 0x%x,0x%x: %d\n", buf[0], buf[1], err); return err; } static void lis3dh_acc_device_power_off(struct 
lis3dh_acc_data *acc) { int err; u8 buf[2] = { CTRL_REG1, LIS3DH_ACC_PM_OFF }; err = lis3dh_acc_i2c_write(acc, buf, 1); if (err < 0) dev_err(&acc->client->dev, "soft power off failed: %d\n", err); if (acc->pdata->gpio_int1) disable_irq_nosync(acc->irq1); if (acc->pdata->gpio_int2) disable_irq_nosync(acc->irq2); lis3dh_acc_config_regulator(acc, false); if (acc->hw_initialized) { if (acc->pdata->gpio_int1) disable_irq_nosync(acc->irq1); if (acc->pdata->gpio_int2) disable_irq_nosync(acc->irq2); acc->hw_initialized = 0; } } static int lis3dh_acc_device_power_on(struct lis3dh_acc_data *acc) { int err = -1; err = lis3dh_acc_config_regulator(acc, true); if (err < 0) { dev_err(&acc->client->dev, "power_on failed: %d\n", err); return err; } if (acc->pdata->gpio_int1 >= 0) enable_irq(acc->irq1); if (acc->pdata->gpio_int2 >= 0) enable_irq(acc->irq2); msleep(20); if (!acc->hw_initialized) { err = lis3dh_acc_hw_init(acc); if (acc->hw_working == 1 && err < 0) { lis3dh_acc_device_power_off(acc); return err; } } if (acc->hw_initialized) { if (acc->pdata->gpio_int1 >= 0) enable_irq(acc->irq1); if (acc->pdata->gpio_int2 >= 0) enable_irq(acc->irq2); } return 0; } static irqreturn_t lis3dh_acc_isr1(int irq, void *dev) { struct lis3dh_acc_data *acc = dev; disable_irq_nosync(irq); queue_work(acc->irq1_work_queue, &acc->irq1_work); #ifdef DEBUG printk(KERN_INFO "%s: isr1 queued\n", LIS3DH_ACC_DEV_NAME); #endif return IRQ_HANDLED; } static irqreturn_t lis3dh_acc_isr2(int irq, void *dev) { struct lis3dh_acc_data *acc = dev; disable_irq_nosync(irq); queue_work(acc->irq2_work_queue, &acc->irq2_work); #ifdef DEBUG printk(KERN_INFO "%s: isr2 queued\n", LIS3DH_ACC_DEV_NAME); #endif return IRQ_HANDLED; } static void lis3dh_acc_irq1_work_func(struct work_struct *work) { struct lis3dh_acc_data *acc = container_of(work, struct lis3dh_acc_data, irq1_work); /* TODO add interrupt service procedure. 
ie:lis3dh_acc_get_int1_source(acc); */ ; /* */ printk(KERN_INFO "%s: IRQ1 triggered\n", LIS3DH_ACC_DEV_NAME); goto exit; exit: enable_irq(acc->irq1); } static void lis3dh_acc_irq2_work_func(struct work_struct *work) { struct lis3dh_acc_data *acc = container_of(work, struct lis3dh_acc_data, irq2_work); /* TODO add interrupt service procedure. ie:lis3dh_acc_get_tap_source(acc); */ ; /* */ printk(KERN_INFO "%s: IRQ2 triggered\n", LIS3DH_ACC_DEV_NAME); goto exit; exit: enable_irq(acc->irq2); } int lis3dh_acc_update_g_range(struct lis3dh_acc_data *acc, u8 new_g_range) { int err = -1; u8 sensitivity; u8 buf[2]; u8 updated_val; u8 init_val; u8 new_val; u8 mask = LIS3DH_ACC_FS_MASK | HIGH_RESOLUTION; switch (new_g_range) { case LIS3DH_ACC_G_2G: sensitivity = SENSITIVITY_2G; break; case LIS3DH_ACC_G_4G: sensitivity = SENSITIVITY_4G; break; case LIS3DH_ACC_G_8G: sensitivity = SENSITIVITY_8G; break; case LIS3DH_ACC_G_16G: sensitivity = SENSITIVITY_16G; break; default: dev_err(&acc->client->dev, "invalid g range requested: %u\n", new_g_range); return -EINVAL; } if (atomic_read(&acc->enabled)) { /* Updates configuration register 4, * which contains g range setting */ buf[0] = CTRL_REG4; err = lis3dh_acc_i2c_read(acc, buf, 1); if (err < 0) goto error; init_val = buf[0]; acc->resume_state[RES_CTRL_REG4] = init_val; new_val = new_g_range | HIGH_RESOLUTION; updated_val = ((mask & new_val) | ((~mask) & init_val)); buf[1] = updated_val; buf[0] = CTRL_REG4; err = lis3dh_acc_i2c_write(acc, buf, 1); if (err < 0) goto error; acc->resume_state[RES_CTRL_REG4] = updated_val; acc->sensitivity = sensitivity; } return err; error: dev_err(&acc->client->dev, "update g range failed 0x%x,0x%x: %d\n", buf[0], buf[1], err); return err; } int lis3dh_acc_update_odr(struct lis3dh_acc_data *acc, int poll_interval_ms) { int err = -1; int i; u8 config[2]; /* Following, looks for the longest possible odr interval scrolling the * odr_table vector from the end (shortest interval) backward (longest * 
interval), to support the poll_interval requested by the system. * It must be the longest interval lower then the poll interval.*/ for (i = ARRAY_SIZE(lis3dh_acc_odr_table) - 1; i >= 0; i--) { if (lis3dh_acc_odr_table[i].cutoff_ms <= poll_interval_ms) break; } config[1] = lis3dh_acc_odr_table[i].mask; config[1] |= LIS3DH_ACC_ENABLE_ALL_AXES; /* If device is currently enabled, we need to write new * configuration out to it */ if (atomic_read(&acc->enabled)) { config[0] = CTRL_REG1; err = lis3dh_acc_i2c_write(acc, config, 1); if (err < 0) goto error; acc->resume_state[RES_CTRL_REG1] = config[1]; } return err; error: dev_err(&acc->client->dev, "update odr failed 0x%x,0x%x: %d\n", config[0], config[1], err); return err; } static int lis3dh_acc_register_write(struct lis3dh_acc_data *acc, u8 *buf, u8 reg_address, u8 new_value) { int err = -1; /* Sets configuration register at reg_address * NOTE: this is a straight overwrite */ buf[0] = reg_address; buf[1] = new_value; err = lis3dh_acc_i2c_write(acc, buf, 1); if (err < 0) return err; return err; } static int lis3dh_acc_get_acceleration_data(struct lis3dh_acc_data *acc, int *xyz) { int err = -1; /* Data bytes from hardware xL, xH, yL, yH, zL, zH */ u8 acc_data[6]; /* x,y,z hardware data */ s16 hw_d[3] = { 0 }; acc_data[0] = (I2C_AUTO_INCREMENT | AXISDATA_REG); err = lis3dh_acc_i2c_read(acc, acc_data, 6); if (err < 0) return err; hw_d[0] = (((s16) ((acc_data[1] << 8) | acc_data[0])) >> 4); hw_d[1] = (((s16) ((acc_data[3] << 8) | acc_data[2])) >> 4); hw_d[2] = (((s16) ((acc_data[5] << 8) | acc_data[4])) >> 4); hw_d[0] = hw_d[0] * acc->sensitivity; hw_d[1] = hw_d[1] * acc->sensitivity; hw_d[2] = hw_d[2] * acc->sensitivity; xyz[0] = ((acc->pdata->negate_x) ? (-hw_d[acc->pdata->axis_map_x]) : (hw_d[acc->pdata->axis_map_x])); xyz[1] = ((acc->pdata->negate_y) ? (-hw_d[acc->pdata->axis_map_y]) : (hw_d[acc->pdata->axis_map_y])); xyz[2] = ((acc->pdata->negate_z) ? 
(-hw_d[acc->pdata->axis_map_z]) : (hw_d[acc->pdata->axis_map_z])); #ifdef DEBUG /* printk(KERN_INFO "%s read x=%d, y=%d, z=%d\n", LIS3DH_ACC_DEV_NAME, xyz[0], xyz[1], xyz[2]); */ #endif return err; } static void lis3dh_acc_report_values(struct lis3dh_acc_data *acc, int *xyz) { input_report_abs(acc->input_dev, ABS_X, xyz[0]); input_report_abs(acc->input_dev, ABS_Y, xyz[1]); input_report_abs(acc->input_dev, ABS_Z, xyz[2]); input_sync(acc->input_dev); } static int lis3dh_acc_enable(struct lis3dh_acc_data *acc) { int err; if (!atomic_cmpxchg(&acc->enabled, 0, 1)) { err = lis3dh_acc_device_power_on(acc); if (err < 0) { atomic_set(&acc->enabled, 0); return err; } schedule_delayed_work(&acc->input_work, msecs_to_jiffies(acc->pdata->poll_interval)); } return 0; } static int lis3dh_acc_disable(struct lis3dh_acc_data *acc) { if (atomic_cmpxchg(&acc->enabled, 1, 0)) { cancel_delayed_work_sync(&acc->input_work); lis3dh_acc_device_power_off(acc); } return 0; } static ssize_t read_single_reg(struct device *dev, char *buf, u8 reg) { ssize_t ret; struct lis3dh_acc_data *acc = dev_get_drvdata(dev); int err; u8 data = reg; err = lis3dh_acc_i2c_read(acc, &data, 1); if (err < 0) return err; ret = snprintf(buf, 4, "0x%02x\n", data); return ret; } static int write_reg(struct device *dev, const char *buf, u8 reg, u8 mask, int resumeIndex) { int err = -1; struct lis3dh_acc_data *acc = dev_get_drvdata(dev); u8 x[2]; u8 new_val; unsigned long val; if (kstrtoul(buf, 16, &val)) return -EINVAL; new_val = ((u8) val & mask); x[0] = reg; x[1] = new_val; err = lis3dh_acc_register_write(acc, x, reg, new_val); if (err < 0) return err; acc->resume_state[resumeIndex] = new_val; return err; } static ssize_t attr_get_polling_rate(struct device *dev, struct device_attribute *attr, char *buf) { int val; struct lis3dh_acc_data *acc = dev_get_drvdata(dev); mutex_lock(&acc->lock); val = acc->pdata->poll_interval; mutex_unlock(&acc->lock); return snprintf(buf, 8, "%d\n", val); } static ssize_t 
attr_set_polling_rate(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct lis3dh_acc_data *acc = dev_get_drvdata(dev); unsigned long interval_ms; if (kstrtoul(buf, 10, &interval_ms)) return -EINVAL; if (!interval_ms) return -EINVAL; mutex_lock(&acc->lock); acc->pdata->poll_interval = interval_ms; lis3dh_acc_update_odr(acc, interval_ms); mutex_unlock(&acc->lock); return size; } static ssize_t attr_get_range(struct device *dev, struct device_attribute *attr, char *buf) { char val; struct lis3dh_acc_data *acc = dev_get_drvdata(dev); char range = 2; mutex_lock(&acc->lock); val = acc->pdata->g_range ; switch (val) { case LIS3DH_ACC_G_2G: range = 2; break; case LIS3DH_ACC_G_4G: range = 4; break; case LIS3DH_ACC_G_8G: range = 8; break; case LIS3DH_ACC_G_16G: range = 16; break; } mutex_unlock(&acc->lock); return snprintf(buf, 4, "%d\n", range); } static ssize_t attr_set_range(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct lis3dh_acc_data *acc = dev_get_drvdata(dev); unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; mutex_lock(&acc->lock); acc->pdata->g_range = val; lis3dh_acc_update_g_range(acc, val); mutex_unlock(&acc->lock); return size; } static ssize_t attr_get_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct lis3dh_acc_data *acc = dev_get_drvdata(dev); int val = atomic_read(&acc->enabled); return snprintf(buf, sizeof(val) + 2, "%d\n", val); } static ssize_t attr_set_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct lis3dh_acc_data *acc = dev_get_drvdata(dev); unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; if (val) lis3dh_acc_enable(acc); else lis3dh_acc_disable(acc); return size; } static ssize_t attr_set_intconfig1(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return write_reg(dev, buf, INT_CFG1, NO_MASK, RES_INT_CFG1); } static ssize_t 
attr_get_intconfig1(struct device *dev, struct device_attribute *attr, char *buf) { return read_single_reg(dev, buf, INT_CFG1); } static ssize_t attr_set_duration1(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return write_reg(dev, buf, INT_DUR1, INT1_DURATION_MASK, RES_INT_DUR1); } static ssize_t attr_get_duration1(struct device *dev, struct device_attribute *attr, char *buf) { return read_single_reg(dev, buf, INT_DUR1); } static ssize_t attr_set_thresh1(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return write_reg(dev, buf, INT_THS1, INT1_THRESHOLD_MASK, RES_INT_THS1); } static ssize_t attr_get_thresh1(struct device *dev, struct device_attribute *attr, char *buf) { return read_single_reg(dev, buf, INT_THS1); } static ssize_t attr_get_source1(struct device *dev, struct device_attribute *attr, char *buf) { return read_single_reg(dev, buf, INT_SRC1); } static ssize_t attr_set_click_cfg(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return write_reg(dev, buf, TT_CFG, TAP_CFG_MASK, RES_TT_CFG); } static ssize_t attr_get_click_cfg(struct device *dev, struct device_attribute *attr, char *buf) { return read_single_reg(dev, buf, TT_CFG); } static ssize_t attr_get_click_source(struct device *dev, struct device_attribute *attr, char *buf) { return read_single_reg(dev, buf, TT_SRC); } static ssize_t attr_set_click_ths(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return write_reg(dev, buf, TT_THS, TAP_THS_MASK, RES_TT_THS); } static ssize_t attr_get_click_ths(struct device *dev, struct device_attribute *attr, char *buf) { return read_single_reg(dev, buf, TT_THS); } static ssize_t attr_set_click_tlim(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return write_reg(dev, buf, TT_LIM, TAP_TLIM_MASK, RES_TT_LIM); } static ssize_t attr_get_click_tlim(struct device *dev, struct device_attribute *attr, 
char *buf) { return read_single_reg(dev, buf, TT_LIM); } static ssize_t attr_set_click_tlat(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return write_reg(dev, buf, TT_TLAT, TAP_TLAT_MASK, RES_TT_TLAT); } static ssize_t attr_get_click_tlat(struct device *dev, struct device_attribute *attr, char *buf) { return read_single_reg(dev, buf, TT_TLAT); } static ssize_t attr_set_click_tw(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return write_reg(dev, buf, TT_TLAT, TAP_TW_MASK, RES_TT_TLAT); } static ssize_t attr_get_click_tw(struct device *dev, struct device_attribute *attr, char *buf) { return read_single_reg(dev, buf, TT_TLAT); } #ifdef DEBUG /* PAY ATTENTION: These DEBUG funtions don't manage resume_state */ static ssize_t attr_reg_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int rc; struct lis3dh_acc_data *acc = dev_get_drvdata(dev); u8 x[2]; unsigned long val; if (kstrtoul(buf, 16, &val)) return -EINVAL; mutex_lock(&acc->lock); x[0] = acc->reg_addr; mutex_unlock(&acc->lock); x[1] = val; rc = lis3dh_acc_i2c_write(acc, x, 1); /*TODO: error need to be managed */ return size; } static ssize_t attr_reg_get(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret; struct lis3dh_acc_data *acc = dev_get_drvdata(dev); int rc; u8 data; mutex_lock(&acc->lock); data = acc->reg_addr; mutex_unlock(&acc->lock); rc = lis3dh_acc_i2c_read(acc, &data, 1); /* TODO: error need to be managed */ ret = snprintf(buf, 8, "0x%02x\n", data); return ret; } static ssize_t attr_addr_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct lis3dh_acc_data *acc = dev_get_drvdata(dev); unsigned long val; if (kstrtoul(buf, 16, &val)) return -EINVAL; mutex_lock(&acc->lock); acc->reg_addr = val; mutex_unlock(&acc->lock); return size; } #endif static struct device_attribute attributes[] = { __ATTR(poll_delay, 0664, 
attr_get_polling_rate, attr_set_polling_rate), __ATTR(range, 0664, attr_get_range, attr_set_range), __ATTR(enable, 0664, attr_get_enable, attr_set_enable), __ATTR(int1_config, 0664, attr_get_intconfig1, attr_set_intconfig1), __ATTR(int1_duration, 0664, attr_get_duration1, attr_set_duration1), __ATTR(int1_threshold, 0664, attr_get_thresh1, attr_set_thresh1), __ATTR(int1_source, 0444, attr_get_source1, NULL), __ATTR(click_config, 0664, attr_get_click_cfg, attr_set_click_cfg), __ATTR(click_source, 0444, attr_get_click_source, NULL), __ATTR(click_threshold, 0664, attr_get_click_ths, attr_set_click_ths), __ATTR(click_timelimit, 0664, attr_get_click_tlim, attr_set_click_tlim), __ATTR(click_timelatency, 0664, attr_get_click_tlat, attr_set_click_tlat), __ATTR(click_timewindow, 0664, attr_get_click_tw, attr_set_click_tw), #ifdef DEBUG __ATTR(reg_value, 0664, attr_reg_get, attr_reg_set), __ATTR(reg_addr, 0220, NULL, attr_addr_set), #endif }; static int create_sysfs_interfaces(struct device *dev) { int i; int err; for (i = 0; i < ARRAY_SIZE(attributes); i++) { err = device_create_file(dev, attributes + i); if (err) goto error; } return 0; error: for ( ; i >= 0; i--) device_remove_file(dev, attributes + i); dev_err(dev, "%s:Unable to create interface\n", __func__); return err; } static int remove_sysfs_interfaces(struct device *dev) { int i; for (i = 0; i < ARRAY_SIZE(attributes); i++) device_remove_file(dev, attributes + i); return 0; } static void lis3dh_acc_input_work_func(struct work_struct *work) { struct lis3dh_acc_data *acc; int xyz[3] = { 0 }; int err; acc = container_of((struct delayed_work *)work, struct lis3dh_acc_data, input_work); mutex_lock(&acc->lock); err = lis3dh_acc_get_acceleration_data(acc, xyz); if (err < 0) dev_err(&acc->client->dev, "get_acceleration_data failed\n"); else lis3dh_acc_report_values(acc, xyz); schedule_delayed_work(&acc->input_work, msecs_to_jiffies( acc->pdata->poll_interval)); mutex_unlock(&acc->lock); } int lis3dh_acc_input_open(struct 
input_dev *input) { struct lis3dh_acc_data *acc = input_get_drvdata(input); return lis3dh_acc_enable(acc); } void lis3dh_acc_input_close(struct input_dev *dev) { struct lis3dh_acc_data *acc = input_get_drvdata(dev); lis3dh_acc_disable(acc); } static int lis3dh_acc_validate_pdata(struct lis3dh_acc_data *acc) { acc->pdata->poll_interval = max(acc->pdata->poll_interval, acc->pdata->min_interval); if (acc->pdata->axis_map_x > 2 || acc->pdata->axis_map_y > 2 || acc->pdata->axis_map_z > 2) { dev_err(&acc->client->dev, "invalid axis_map value x:%u y:%u z%u\n", acc->pdata->axis_map_x, acc->pdata->axis_map_y, acc->pdata->axis_map_z); return -EINVAL; } /* Only allow 0 and 1 for negation boolean flag */ if (acc->pdata->negate_x > 1 || acc->pdata->negate_y > 1 || acc->pdata->negate_z > 1) { dev_err(&acc->client->dev, "invalid negate value x:%u y:%u z:%u\n", acc->pdata->negate_x, acc->pdata->negate_y, acc->pdata->negate_z); return -EINVAL; } /* Enforce minimum polling interval */ if (acc->pdata->poll_interval < acc->pdata->min_interval) { dev_err(&acc->client->dev, "minimum poll interval violated\n"); return -EINVAL; } return 0; } static int lis3dh_acc_input_init(struct lis3dh_acc_data *acc) { int err; INIT_DELAYED_WORK(&acc->input_work, lis3dh_acc_input_work_func); acc->input_dev = input_allocate_device(); if (!acc->input_dev) { err = -ENOMEM; dev_err(&acc->client->dev, "input device allocation failed\n"); goto err0; } acc->input_dev->open = lis3dh_acc_input_open; acc->input_dev->close = lis3dh_acc_input_close; acc->input_dev->name = ACCEL_INPUT_DEV_NAME; acc->input_dev->id.bustype = BUS_I2C; acc->input_dev->dev.parent = &acc->client->dev; input_set_drvdata(acc->input_dev, acc); set_bit(EV_ABS, acc->input_dev->evbit); /* next is used for interruptA sources data if the case */ set_bit(ABS_MISC, acc->input_dev->absbit); /* next is used for interruptB sources data if the case */ set_bit(ABS_WHEEL, acc->input_dev->absbit); input_set_abs_params(acc->input_dev, ABS_X, -G_MAX, G_MAX, 
FUZZ, FLAT); input_set_abs_params(acc->input_dev, ABS_Y, -G_MAX, G_MAX, FUZZ, FLAT); input_set_abs_params(acc->input_dev, ABS_Z, -G_MAX, G_MAX, FUZZ, FLAT); /* next is used for interruptA sources data if the case */ input_set_abs_params(acc->input_dev, ABS_MISC, INT_MIN, INT_MAX, 0, 0); /* next is used for interruptB sources data if the case */ input_set_abs_params(acc->input_dev, ABS_WHEEL, INT_MIN, INT_MAX, 0, 0); err = input_register_device(acc->input_dev); if (err) { dev_err(&acc->client->dev, "unable to register input device %s\n", acc->input_dev->name); goto err1; } return 0; err1: input_free_device(acc->input_dev); err0: return err; } static void lis3dh_acc_input_cleanup(struct lis3dh_acc_data *acc) { input_unregister_device(acc->input_dev); input_free_device(acc->input_dev); } static int lis3dh_acc_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lis3dh_acc_data *acc; int err = -1; pr_info("%s: probe start.\n", LIS3DH_ACC_DEV_NAME); if (client->dev.platform_data == NULL) { dev_err(&client->dev, "platform data is NULL. 
exiting.\n"); err = -ENODEV; goto exit_check_functionality_failed; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "client not i2c capable\n"); err = -ENODEV; goto exit_check_functionality_failed; } /* if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) { dev_err(&client->dev, "client not smb-i2c capable:2\n"); err = -EIO; goto exit_check_functionality_failed; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { dev_err(&client->dev, "client not smb-i2c capable:3\n"); err = -EIO; goto exit_check_functionality_failed; } */ acc = kzalloc(sizeof(struct lis3dh_acc_data), GFP_KERNEL); if (acc == NULL) { err = -ENOMEM; dev_err(&client->dev, "failed to allocate memory for module data: " "%d\n", err); goto exit_check_functionality_failed; } mutex_init(&acc->lock); mutex_lock(&acc->lock); acc->client = client; i2c_set_clientdata(client, acc); acc->pdata = kmalloc(sizeof(*acc->pdata), GFP_KERNEL); if (acc->pdata == NULL) { err = -ENOMEM; dev_err(&client->dev, "failed to allocate memory for pdata: %d\n", err); goto err_mutexunlock; } memcpy(acc->pdata, client->dev.platform_data, sizeof(*acc->pdata)); err = lis3dh_acc_validate_pdata(acc); if (err < 0) { dev_err(&client->dev, "failed to validate platform data\n"); goto exit_kfree_pdata; } if (acc->pdata->init) { err = acc->pdata->init(); if (err < 0) { dev_err(&client->dev, "init failed: %d\n", err); goto err_pdata_init; } } if (acc->pdata->gpio_int1 >= 0) { acc->irq1 = gpio_to_irq(acc->pdata->gpio_int1); printk(KERN_INFO "%s: %s has set irq1 to irq: %d\n", LIS3DH_ACC_DEV_NAME, __func__, acc->irq1); printk(KERN_INFO "%s: %s has mapped irq1 on gpio: %d\n", LIS3DH_ACC_DEV_NAME, __func__, acc->pdata->gpio_int1); } if (acc->pdata->gpio_int2 >= 0) { acc->irq2 = gpio_to_irq(acc->pdata->gpio_int2); printk(KERN_INFO "%s: %s has set irq2 to irq: %d\n", LIS3DH_ACC_DEV_NAME, __func__, acc->irq2); 
printk(KERN_INFO "%s: %s has mapped irq2 on gpio: %d\n", LIS3DH_ACC_DEV_NAME, __func__, acc->pdata->gpio_int2); } memset(acc->resume_state, 0, ARRAY_SIZE(acc->resume_state)); acc->resume_state[RES_CTRL_REG1] = LIS3DH_ACC_ENABLE_ALL_AXES; acc->resume_state[RES_CTRL_REG2] = 0x00; acc->resume_state[RES_CTRL_REG3] = 0x00; acc->resume_state[RES_CTRL_REG4] = 0x00; acc->resume_state[RES_CTRL_REG5] = 0x00; acc->resume_state[RES_CTRL_REG6] = 0x00; acc->resume_state[RES_TEMP_CFG_REG] = 0x00; acc->resume_state[RES_FIFO_CTRL_REG] = 0x00; acc->resume_state[RES_INT_CFG1] = 0x00; acc->resume_state[RES_INT_THS1] = 0x00; acc->resume_state[RES_INT_DUR1] = 0x00; acc->resume_state[RES_TT_CFG] = 0x00; acc->resume_state[RES_TT_THS] = 0x00; acc->resume_state[RES_TT_LIM] = 0x00; acc->resume_state[RES_TT_TLAT] = 0x00; acc->resume_state[RES_TT_TW] = 0x00; err = lis3dh_acc_device_power_on(acc); if (err < 0) { dev_err(&client->dev, "power on failed: %d\n", err); goto err_pdata_init; } atomic_set(&acc->enabled, 1); err = lis3dh_acc_update_g_range(acc, acc->pdata->g_range); if (err < 0) { dev_err(&client->dev, "update_g_range failed\n"); goto err_power_off; } err = lis3dh_acc_update_odr(acc, acc->pdata->poll_interval); if (err < 0) { dev_err(&client->dev, "update_odr failed\n"); goto err_power_off; } err = lis3dh_acc_input_init(acc); if (err < 0) { dev_err(&client->dev, "input init failed\n"); goto err_power_off; } err = create_sysfs_interfaces(&client->dev); if (err < 0) { dev_err(&client->dev, "device LIS3DH_ACC_DEV_NAME sysfs register failed\n"); goto err_input_cleanup; } lis3dh_acc_device_power_off(acc); /* As default, do not report information */ atomic_set(&acc->enabled, 0); if (acc->pdata->gpio_int1 >= 0) { INIT_WORK(&acc->irq1_work, lis3dh_acc_irq1_work_func); acc->irq1_work_queue = create_singlethread_workqueue("lis3dh_acc_wq1"); if (!acc->irq1_work_queue) { err = -ENOMEM; dev_err(&client->dev, "cannot create work queue1: %d\n", err); goto err_remove_sysfs_int; } err = 
request_irq(acc->irq1, lis3dh_acc_isr1, IRQF_TRIGGER_RISING, "lis3dh_acc_irq1", acc); if (err < 0) { dev_err(&client->dev, "request irq1 failed: %d\n", err); goto err_destoyworkqueue1; } disable_irq_nosync(acc->irq1); } if (acc->pdata->gpio_int2 >= 0) { INIT_WORK(&acc->irq2_work, lis3dh_acc_irq2_work_func); acc->irq2_work_queue = create_singlethread_workqueue("lis3dh_acc_wq2"); if (!acc->irq2_work_queue) { err = -ENOMEM; dev_err(&client->dev, "cannot create work queue2: %d\n", err); goto err_free_irq1; } err = request_irq(acc->irq2, lis3dh_acc_isr2, IRQF_TRIGGER_RISING, "lis3dh_acc_irq2", acc); if (err < 0) { dev_err(&client->dev, "request irq2 failed: %d\n", err); goto err_destoyworkqueue2; } disable_irq_nosync(acc->irq2); } mutex_unlock(&acc->lock); dev_info(&client->dev, "%s: probed\n", LIS3DH_ACC_DEV_NAME); return 0; err_destoyworkqueue2: if (acc->pdata->gpio_int2 >= 0) destroy_workqueue(acc->irq2_work_queue); err_free_irq1: free_irq(acc->irq1, acc); err_destoyworkqueue1: if (acc->pdata->gpio_int1 >= 0) destroy_workqueue(acc->irq1_work_queue); err_remove_sysfs_int: remove_sysfs_interfaces(&client->dev); err_input_cleanup: lis3dh_acc_input_cleanup(acc); err_power_off: lis3dh_acc_device_power_off(acc); err_pdata_init: if (acc->pdata->exit) acc->pdata->exit(); exit_kfree_pdata: kfree(acc->pdata); err_mutexunlock: mutex_unlock(&acc->lock); kfree(acc); exit_check_functionality_failed: printk(KERN_ERR "%s: Driver Init failed\n", LIS3DH_ACC_DEV_NAME); return err; } static int __devexit lis3dh_acc_remove(struct i2c_client *client) { struct lis3dh_acc_data *acc = i2c_get_clientdata(client); if (acc->pdata->gpio_int1 >= 0) { free_irq(acc->irq1, acc); gpio_free(acc->pdata->gpio_int1); destroy_workqueue(acc->irq1_work_queue); } if (acc->pdata->gpio_int2 >= 0) { free_irq(acc->irq2, acc); gpio_free(acc->pdata->gpio_int2); destroy_workqueue(acc->irq2_work_queue); } lis3dh_acc_input_cleanup(acc); lis3dh_acc_device_power_off(acc); remove_sysfs_interfaces(&client->dev); if 
(acc->pdata->exit) acc->pdata->exit(); kfree(acc->pdata); kfree(acc); return 0; } #ifdef CONFIG_PM static int lis3dh_acc_resume(struct i2c_client *client) { struct lis3dh_acc_data *acc = i2c_get_clientdata(client); if (acc->on_before_suspend) return lis3dh_acc_enable(acc); return 0; } static int lis3dh_acc_suspend(struct i2c_client *client, pm_message_t mesg) { struct lis3dh_acc_data *acc = i2c_get_clientdata(client); acc->on_before_suspend = atomic_read(&acc->enabled); return lis3dh_acc_disable(acc); } #else #define lis3dh_acc_suspend NULL #define lis3dh_acc_resume NULL #endif /* CONFIG_PM */ static const struct i2c_device_id lis3dh_acc_id[] = { { LIS3DH_ACC_DEV_NAME, 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, lis3dh_acc_id); static struct i2c_driver lis3dh_acc_driver = { .driver = { .owner = THIS_MODULE, .name = LIS3DH_ACC_DEV_NAME, }, .probe = lis3dh_acc_probe, .remove = __devexit_p(lis3dh_acc_remove), .suspend = lis3dh_acc_suspend, .resume = lis3dh_acc_resume, .id_table = lis3dh_acc_id, }; static int __init lis3dh_acc_init(void) { printk(KERN_INFO "%s accelerometer driver: init\n", LIS3DH_ACC_DEV_NAME); return i2c_add_driver(&lis3dh_acc_driver); } static void __exit lis3dh_acc_exit(void) { #ifdef DEBUG printk(KERN_INFO "%s accelerometer driver exit\n", LIS3DH_ACC_DEV_NAME); #endif /* DEBUG */ i2c_del_driver(&lis3dh_acc_driver); return; } module_init(lis3dh_acc_init); module_exit(lis3dh_acc_exit); MODULE_DESCRIPTION("lis3dh digital accelerometer sysfs driver"); MODULE_AUTHOR("Matteo Dameno, Carmine Iascone, Samuel Huo, STMicroelectronics"); MODULE_LICENSE("GPL");
gpl-2.0
kornyone/htc-kernel-doubleshot_old
init/noinitramfs.c
1809
1426
/* * init/noinitramfs.c * * Copyright (C) 2006, NXP Semiconductors, All Rights Reserved * Author: Jean-Paul Saman <jean-paul.saman@nxp.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/stat.h> #include <linux/kdev_t.h> #include <linux/syscalls.h> /* * Create a simple rootfs that is similar to the default initramfs */ static int __init default_rootfs(void) { int err; err = sys_mkdir("/dev", 0755); if (err < 0) goto out; err = sys_mknod((const char __user *) "/dev/console", S_IFCHR | S_IRUSR | S_IWUSR, new_encode_dev(MKDEV(5, 1))); if (err < 0) goto out; err = sys_mkdir("/root", 0700); if (err < 0) goto out; return 0; out: printk(KERN_WARNING "Failed to create a rootfs\n"); return err; } rootfs_initcall(default_rootfs);
gpl-2.0
kirananto/android_kernel_motorola_msm8916
drivers/s390/scsi/zfcp_cfdc.c
2321
12479
/* * zfcp device driver * * Userspace interface for accessing the * Access Control Lists / Control File Data Channel; * handling of response code and states for ports and LUNs. * * Copyright IBM Corp. 2008, 2010 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/compat.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <asm/compat.h> #include <asm/ccwdev.h> #include "zfcp_def.h" #include "zfcp_ext.h" #include "zfcp_fsf.h" #define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001 #define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101 #define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201 #define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401 #define ZFCP_CFDC_CMND_UPLOAD 0x00010002 #define ZFCP_CFDC_DOWNLOAD 0x00000001 #define ZFCP_CFDC_UPLOAD 0x00000002 #define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000 #define ZFCP_CFDC_IOC_MAGIC 0xDD #define ZFCP_CFDC_IOC \ _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data) /** * struct zfcp_cfdc_data - data for ioctl cfdc interface * @signature: request signature * @devno: FCP adapter device number * @command: command code * @fsf_status: returns status of FSF command to userspace * @fsf_status_qual: returned to userspace * @payloads: access conflicts list * @control_file: access control table */ struct zfcp_cfdc_data { u32 signature; u32 devno; u32 command; u32 fsf_status; u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; u8 payloads[256]; u8 control_file[0]; }; static int zfcp_cfdc_copy_from_user(struct scatterlist *sg, void __user *user_buffer) { unsigned int length; unsigned int size = ZFCP_CFDC_MAX_SIZE; while (size) { length = min((unsigned int)size, sg->length); if (copy_from_user(sg_virt(sg++), user_buffer, length)) return -EFAULT; user_buffer += length; size -= length; } return 0; } static int zfcp_cfdc_copy_to_user(void __user *user_buffer, struct scatterlist *sg) { unsigned int length; unsigned int size = ZFCP_CFDC_MAX_SIZE; while (size) { length = min((unsigned int) 
size, sg->length); if (copy_to_user(user_buffer, sg_virt(sg++), length)) return -EFAULT; user_buffer += length; size -= length; } return 0; } static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno) { char busid[9]; struct ccw_device *cdev; struct zfcp_adapter *adapter; snprintf(busid, sizeof(busid), "0.0.%04x", devno); cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); if (!cdev) return NULL; adapter = zfcp_ccw_adapter_by_cdev(cdev); put_device(&cdev->dev); return adapter; } static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command) { switch (command) { case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL: fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE; break; case ZFCP_CFDC_CMND_DOWNLOAD_FORCE: fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; fsf_cfdc->option = FSF_CFDC_OPTION_FORCE; break; case ZFCP_CFDC_CMND_FULL_ACCESS: fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS; break; case ZFCP_CFDC_CMND_RESTRICTED_ACCESS: fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS; break; case ZFCP_CFDC_CMND_UPLOAD: fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE; fsf_cfdc->option = 0; break; default: return -EINVAL; } return 0; } static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg, u8 __user *control_file) { int retval; retval = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES); if (retval) return retval; sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE; if (command & ZFCP_CFDC_WITH_CONTROL_FILE && command & ZFCP_CFDC_DOWNLOAD) { retval = zfcp_cfdc_copy_from_user(sg, control_file); if (retval) { zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES); return -EFAULT; } } return 0; } static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data, struct zfcp_fsf_req *req) { data->fsf_status = req->qtcb->header.fsf_status; memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual, sizeof(union 
fsf_status_qual)); memcpy(&data->payloads, &req->qtcb->bottom.support.els, sizeof(req->qtcb->bottom.support.els)); } static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, unsigned long arg) { struct zfcp_cfdc_data *data; struct zfcp_cfdc_data __user *data_user; struct zfcp_adapter *adapter; struct zfcp_fsf_req *req; struct zfcp_fsf_cfdc *fsf_cfdc; int retval; if (command != ZFCP_CFDC_IOC) return -ENOTTY; if (is_compat_task()) data_user = compat_ptr(arg); else data_user = (void __user *)arg; if (!data_user) return -EINVAL; fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL); if (!fsf_cfdc) return -ENOMEM; data = memdup_user(data_user, sizeof(*data_user)); if (IS_ERR(data)) { retval = PTR_ERR(data); goto no_mem_sense; } if (data->signature != 0xCFDCACDF) { retval = -EINVAL; goto free_buffer; } retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command); adapter = zfcp_cfdc_get_adapter(data->devno); if (!adapter) { retval = -ENXIO; goto free_buffer; } retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg, data_user->control_file); if (retval) goto adapter_put; req = zfcp_fsf_control_file(adapter, fsf_cfdc); if (IS_ERR(req)) { retval = PTR_ERR(req); goto free_sg; } if (req->status & ZFCP_STATUS_FSFREQ_ERROR) { retval = -ENXIO; goto free_fsf; } zfcp_cfdc_req_to_sense(data, req); retval = copy_to_user(data_user, data, sizeof(*data_user)); if (retval) { retval = -EFAULT; goto free_fsf; } if (data->command & ZFCP_CFDC_UPLOAD) retval = zfcp_cfdc_copy_to_user(&data_user->control_file, fsf_cfdc->sg); free_fsf: zfcp_fsf_req_free(req); free_sg: zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES); adapter_put: zfcp_ccw_adapter_put(adapter); free_buffer: kfree(data); no_mem_sense: kfree(fsf_cfdc); return retval; } static const struct file_operations zfcp_cfdc_fops = { .open = nonseekable_open, .unlocked_ioctl = zfcp_cfdc_dev_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = zfcp_cfdc_dev_ioctl, #endif .llseek = no_llseek, }; struct miscdevice zfcp_cfdc_misc = { 
.minor = MISC_DYNAMIC_MINOR, .name = "zfcp_cfdc", .fops = &zfcp_cfdc_fops, }; /** * zfcp_cfdc_adapter_access_changed - Process change in adapter ACT * @adapter: Adapter where the Access Control Table (ACT) changed * * After a change in the adapter ACT, check if access to any * previously denied resources is now possible. */ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter) { unsigned long flags; struct zfcp_port *port; struct scsi_device *sdev; struct zfcp_scsi_dev *zfcp_sdev; int status; if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) return; read_lock_irqsave(&adapter->port_list_lock, flags); list_for_each_entry(port, &adapter->port_list, list) { status = atomic_read(&port->status); if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) || (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "cfaac_1"); } read_unlock_irqrestore(&adapter->port_list_lock, flags); shost_for_each_device(sdev, adapter->scsi_host) { zfcp_sdev = sdev_to_zfcp(sdev); status = atomic_read(&zfcp_sdev->status); if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) || (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, "cfaac_2"); } } static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) { u16 subtable = table >> 16; u16 rule = table & 0xffff; const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" }; if (subtable && subtable < ARRAY_SIZE(act_type)) dev_warn(&adapter->ccw_device->dev, "Access denied according to ACT rule type %s, " "rule %d\n", act_type[subtable], rule); } /** * zfcp_cfdc_port_denied - Process "access denied" for port * @port: The port where the access has been denied * @qual: The FSF status qualifier for the access denied FSF status */ void zfcp_cfdc_port_denied(struct zfcp_port *port, union fsf_status_qual *qual) { dev_warn(&port->adapter->ccw_device->dev, "Access denied to port 0x%016Lx\n", (unsigned long long)port->wwpn); 
zfcp_act_eval_err(port->adapter, qual->halfword[0]); zfcp_act_eval_err(port->adapter, qual->halfword[1]); zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED | ZFCP_STATUS_COMMON_ACCESS_DENIED); } /** * zfcp_cfdc_lun_denied - Process "access denied" for LUN * @sdev: The SCSI device / LUN where the access has been denied * @qual: The FSF status qualifier for the access denied FSF status */ void zfcp_cfdc_lun_denied(struct scsi_device *sdev, union fsf_status_qual *qual) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev, "Access denied to LUN 0x%016Lx on port 0x%016Lx\n", zfcp_scsi_dev_lun(sdev), (unsigned long long)zfcp_sdev->port->wwpn); zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]); zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]); zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED | ZFCP_STATUS_COMMON_ACCESS_DENIED); atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); } /** * zfcp_cfdc_lun_shrng_vltn - Evaluate LUN sharing violation status * @sdev: The LUN / SCSI device where sharing violation occurred * @qual: The FSF status qualifier from the LUN sharing violation */ void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev, union fsf_status_qual *qual) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); if (qual->word[0]) dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev, "LUN 0x%Lx on port 0x%Lx is already in " "use by CSS%d, MIF Image ID %x\n", zfcp_scsi_dev_lun(sdev), (unsigned long long)zfcp_sdev->port->wwpn, qual->fsf_queue_designator.cssid, qual->fsf_queue_designator.hla); else zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]); zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED | ZFCP_STATUS_COMMON_ACCESS_DENIED); atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); } /** * 
zfcp_cfdc_open_lun_eval - Eval access ctrl. status for successful "open lun" * @sdev: The SCSI device / LUN where to evaluate the status * @bottom: The qtcb bottom with the status from the "open lun" * * Returns: 0 if LUN is usable, -EACCES if the access control table * reports an unsupported configuration. */ int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev, struct fsf_qtcb_bottom_support *bottom) { int shared, rw; struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; if ((adapter->connection_features & FSF_FEATURE_NPIV_MODE) || !(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) || zfcp_ccw_priv_sch(adapter)) return 0; shared = !(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE); rw = (bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER); if (shared) atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); if (!rw) { atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); dev_info(&adapter->ccw_device->dev, "SCSI device at LUN " "0x%016Lx on port 0x%016Lx opened read-only\n", zfcp_scsi_dev_lun(sdev), (unsigned long long)zfcp_sdev->port->wwpn); } if (!shared && !rw) { dev_err(&adapter->ccw_device->dev, "Exclusive read-only access " "not supported (LUN 0x%016Lx, port 0x%016Lx)\n", zfcp_scsi_dev_lun(sdev), (unsigned long long)zfcp_sdev->port->wwpn); zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6"); return -EACCES; } if (shared && rw) { dev_err(&adapter->ccw_device->dev, "Shared read-write access not supported " "(LUN 0x%016Lx, port 0x%016Lx)\n", zfcp_scsi_dev_lun(sdev), (unsigned long long)zfcp_sdev->port->wwpn); zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8"); return -EACCES; } return 0; }
gpl-2.0
YAOSP/kernel_huawei_angler
drivers/regulator/max8907-regulator.c
2321
11297
/* * max8907-regulator.c -- support regulators in max8907 * * Copyright (C) 2010 Gyungoh Yoo <jack.yoo@maxim-ic.com> * Copyright (C) 2010-2012, NVIDIA CORPORATION. All rights reserved. * * Portions based on drivers/regulator/tps65910-regulator.c, * Copyright 2010 Texas Instruments Inc. * Author: Graeme Gregory <gg@slimlogic.co.uk> * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/err.h> #include <linux/init.h> #include <linux/mfd/core.h> #include <linux/mfd/max8907.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/regmap.h> #include <linux/slab.h> #define MAX8907_II2RR_VERSION_MASK 0xF0 #define MAX8907_II2RR_VERSION_REV_A 0x00 #define MAX8907_II2RR_VERSION_REV_B 0x10 #define MAX8907_II2RR_VERSION_REV_C 0x30 struct max8907_regulator { struct regulator_desc desc[MAX8907_NUM_REGULATORS]; struct regulator_dev *rdev[MAX8907_NUM_REGULATORS]; }; #define REG_MBATT() \ [MAX8907_MBATT] = { \ .name = "MBATT", \ .supply_name = "mbatt", \ .id = MAX8907_MBATT, \ .ops = &max8907_mbatt_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ } #define REG_LDO(ids, supply, base, min, max, step) \ [MAX8907_##ids] = { \ .name = #ids, \ .supply_name = supply, \ .id = MAX8907_##ids, \ .n_voltages = ((max) - (min)) / (step) + 1, \ .ops = &max8907_ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ .min_uV = (min), \ .uV_step = (step), \ .vsel_reg = (base) + MAX8907_VOUT, \ .vsel_mask = 0x3f, \ .enable_reg = (base) + MAX8907_CTL, \ .enable_mask = MAX8907_MASK_LDO_EN, \ } #define REG_FIXED(ids, supply, voltage) \ [MAX8907_##ids] = { \ .name = #ids, \ .supply_name = supply, \ .id = MAX8907_##ids, \ 
.n_voltages = 1, \ .ops = &max8907_fixed_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ .min_uV = (voltage), \ } #define REG_OUT5V(ids, supply, base, voltage) \ [MAX8907_##ids] = { \ .name = #ids, \ .supply_name = supply, \ .id = MAX8907_##ids, \ .n_voltages = 1, \ .ops = &max8907_out5v_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ .min_uV = (voltage), \ .enable_reg = (base), \ .enable_mask = MAX8907_MASK_OUT5V_EN, \ } #define REG_BBAT(ids, supply, base, min, max, step) \ [MAX8907_##ids] = { \ .name = #ids, \ .supply_name = supply, \ .id = MAX8907_##ids, \ .n_voltages = ((max) - (min)) / (step) + 1, \ .ops = &max8907_bbat_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ .min_uV = (min), \ .uV_step = (step), \ .vsel_reg = (base), \ .vsel_mask = MAX8907_MASK_VBBATTCV, \ } #define LDO_750_50(id, supply, base) REG_LDO(id, supply, (base), \ 750000, 3900000, 50000) #define LDO_650_25(id, supply, base) REG_LDO(id, supply, (base), \ 650000, 2225000, 25000) static struct regulator_ops max8907_mbatt_ops = { }; static struct regulator_ops max8907_ldo_ops = { .list_voltage = regulator_list_voltage_linear, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, }; static struct regulator_ops max8907_ldo_hwctl_ops = { .list_voltage = regulator_list_voltage_linear, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, }; static struct regulator_ops max8907_fixed_ops = { .list_voltage = regulator_list_voltage_linear, }; static struct regulator_ops max8907_out5v_ops = { .list_voltage = regulator_list_voltage_linear, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, }; static struct regulator_ops max8907_out5v_hwctl_ops = { .list_voltage = 
regulator_list_voltage_linear, }; static struct regulator_ops max8907_bbat_ops = { .list_voltage = regulator_list_voltage_linear, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, }; static struct regulator_desc max8907_regulators[] = { REG_MBATT(), REG_LDO(SD1, "in-v1", MAX8907_REG_SDCTL1, 650000, 2225000, 25000), REG_LDO(SD2, "in-v2", MAX8907_REG_SDCTL2, 637500, 1425000, 12500), REG_LDO(SD3, "in-v3", MAX8907_REG_SDCTL3, 750000, 3900000, 50000), LDO_750_50(LDO1, "in1", MAX8907_REG_LDOCTL1), LDO_650_25(LDO2, "in2", MAX8907_REG_LDOCTL2), LDO_650_25(LDO3, "in3", MAX8907_REG_LDOCTL3), LDO_750_50(LDO4, "in4", MAX8907_REG_LDOCTL4), LDO_750_50(LDO5, "in5", MAX8907_REG_LDOCTL5), LDO_750_50(LDO6, "in6", MAX8907_REG_LDOCTL6), LDO_750_50(LDO7, "in7", MAX8907_REG_LDOCTL7), LDO_750_50(LDO8, "in8", MAX8907_REG_LDOCTL8), LDO_750_50(LDO9, "in9", MAX8907_REG_LDOCTL9), LDO_750_50(LDO10, "in10", MAX8907_REG_LDOCTL10), LDO_750_50(LDO11, "in11", MAX8907_REG_LDOCTL11), LDO_750_50(LDO12, "in12", MAX8907_REG_LDOCTL12), LDO_750_50(LDO13, "in13", MAX8907_REG_LDOCTL13), LDO_750_50(LDO14, "in14", MAX8907_REG_LDOCTL14), LDO_750_50(LDO15, "in15", MAX8907_REG_LDOCTL15), LDO_750_50(LDO16, "in16", MAX8907_REG_LDOCTL16), LDO_650_25(LDO17, "in17", MAX8907_REG_LDOCTL17), LDO_650_25(LDO18, "in18", MAX8907_REG_LDOCTL18), LDO_750_50(LDO19, "in19", MAX8907_REG_LDOCTL19), LDO_750_50(LDO20, "in20", MAX8907_REG_LDOCTL20), REG_OUT5V(OUT5V, "mbatt", MAX8907_REG_OUT5VEN, 5000000), REG_OUT5V(OUT33V, "mbatt", MAX8907_REG_OUT33VEN, 3300000), REG_BBAT(BBAT, "MBATT", MAX8907_REG_BBAT_CNFG, 2400000, 3000000, 200000), REG_FIXED(SDBY, "MBATT", 1200000), REG_FIXED(VRTC, "MBATT", 3300000), }; #ifdef CONFIG_OF #define MATCH(_name, _id) \ [MAX8907_##_id] = { \ .name = #_name, \ .driver_data = (void *)&max8907_regulators[MAX8907_##_id], \ } static struct of_regulator_match max8907_matches[] = { MATCH(mbatt, MBATT), MATCH(sd1, SD1), MATCH(sd2, SD2), MATCH(sd3, SD3), 
MATCH(ldo1, LDO1), MATCH(ldo2, LDO2), MATCH(ldo3, LDO3), MATCH(ldo4, LDO4), MATCH(ldo5, LDO5), MATCH(ldo6, LDO6), MATCH(ldo7, LDO7), MATCH(ldo8, LDO8), MATCH(ldo9, LDO9), MATCH(ldo10, LDO10), MATCH(ldo11, LDO11), MATCH(ldo12, LDO12), MATCH(ldo13, LDO13), MATCH(ldo14, LDO14), MATCH(ldo15, LDO15), MATCH(ldo16, LDO16), MATCH(ldo17, LDO17), MATCH(ldo18, LDO18), MATCH(ldo19, LDO19), MATCH(ldo20, LDO20), MATCH(out5v, OUT5V), MATCH(out33v, OUT33V), MATCH(bbat, BBAT), MATCH(sdby, SDBY), MATCH(vrtc, VRTC), }; static int max8907_regulator_parse_dt(struct platform_device *pdev) { struct device_node *np, *regulators; int ret; np = of_node_get(pdev->dev.parent->of_node); if (!np) return 0; regulators = of_find_node_by_name(np, "regulators"); if (!regulators) { dev_err(&pdev->dev, "regulators node not found\n"); return -EINVAL; } ret = of_regulator_match(&pdev->dev, regulators, max8907_matches, ARRAY_SIZE(max8907_matches)); of_node_put(regulators); if (ret < 0) { dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); return ret; } return 0; } static inline struct regulator_init_data *match_init_data(int index) { return max8907_matches[index].init_data; } static inline struct device_node *match_of_node(int index) { return max8907_matches[index].of_node; } #else static int max8907_regulator_parse_dt(struct platform_device *pdev) { return 0; } static inline struct regulator_init_data *match_init_data(int index) { return NULL; } static inline struct device_node *match_of_node(int index) { return NULL; } #endif static int max8907_regulator_probe(struct platform_device *pdev) { struct max8907 *max8907 = dev_get_drvdata(pdev->dev.parent); struct max8907_platform_data *pdata = dev_get_platdata(max8907->dev); int ret; struct max8907_regulator *pmic; unsigned int val; int i; struct regulator_config config = {}; struct regulator_init_data *idata; const char *mbatt_rail_name = NULL; ret = max8907_regulator_parse_dt(pdev); if (ret) return ret; pmic = devm_kzalloc(&pdev->dev, 
sizeof(*pmic), GFP_KERNEL); if (!pmic) { dev_err(&pdev->dev, "Failed to alloc pmic\n"); return -ENOMEM; } platform_set_drvdata(pdev, pmic); memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc)); /* Backwards compatibility with MAX8907B; SD1 uses different voltages */ regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val); if ((val & MAX8907_II2RR_VERSION_MASK) == MAX8907_II2RR_VERSION_REV_B) { pmic->desc[MAX8907_SD1].min_uV = 637500; pmic->desc[MAX8907_SD1].uV_step = 12500; pmic->desc[MAX8907_SD1].n_voltages = (1425000 - 637500) / 12500 + 1; } for (i = 0; i < MAX8907_NUM_REGULATORS; i++) { config.dev = pdev->dev.parent; if (pdata) idata = pdata->init_data[i]; else idata = match_init_data(i); config.init_data = idata; config.driver_data = pmic; config.regmap = max8907->regmap_gen; config.of_node = match_of_node(i); switch (pmic->desc[i].id) { case MAX8907_MBATT: if (idata && idata->constraints.name) mbatt_rail_name = idata->constraints.name; else mbatt_rail_name = pmic->desc[i].name; break; case MAX8907_BBAT: case MAX8907_SDBY: case MAX8907_VRTC: idata->supply_regulator = mbatt_rail_name; break; } if (pmic->desc[i].ops == &max8907_ldo_ops) { regmap_read(config.regmap, pmic->desc[i].enable_reg, &val); if ((val & MAX8907_MASK_LDO_SEQ) != MAX8907_MASK_LDO_SEQ) pmic->desc[i].ops = &max8907_ldo_hwctl_ops; } else if (pmic->desc[i].ops == &max8907_out5v_ops) { regmap_read(config.regmap, pmic->desc[i].enable_reg, &val); if ((val & (MAX8907_MASK_OUT5V_VINEN | MAX8907_MASK_OUT5V_ENSRC)) != MAX8907_MASK_OUT5V_ENSRC) pmic->desc[i].ops = &max8907_out5v_hwctl_ops; } pmic->rdev[i] = regulator_register(&pmic->desc[i], &config); if (IS_ERR(pmic->rdev[i])) { dev_err(&pdev->dev, "failed to register %s regulator\n", pmic->desc[i].name); ret = PTR_ERR(pmic->rdev[i]); goto err_unregister_regulator; } } return 0; err_unregister_regulator: while (--i >= 0) regulator_unregister(pmic->rdev[i]); return ret; } static int max8907_regulator_remove(struct platform_device *pdev) { struct 
max8907_regulator *pmic = platform_get_drvdata(pdev); int i; for (i = 0; i < MAX8907_NUM_REGULATORS; i++) regulator_unregister(pmic->rdev[i]); return 0; } static struct platform_driver max8907_regulator_driver = { .driver = { .name = "max8907-regulator", .owner = THIS_MODULE, }, .probe = max8907_regulator_probe, .remove = max8907_regulator_remove, }; static int __init max8907_regulator_init(void) { return platform_driver_register(&max8907_regulator_driver); } subsys_initcall(max8907_regulator_init); static void __exit max8907_reg_exit(void) { platform_driver_unregister(&max8907_regulator_driver); } module_exit(max8907_reg_exit); MODULE_DESCRIPTION("MAX8907 regulator driver"); MODULE_AUTHOR("Gyungoh Yoo <jack.yoo@maxim-ic.com>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:max8907-regulator");
gpl-2.0
eugene373/ApexqJB
drivers/net/wireless/ath/ath9k/htc_drv_main.c
2321
45146
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "htc.h" /*************/ /* Utilities */ /*************/ /* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */ static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv, struct ath9k_channel *ichan) { enum htc_phymode mode; mode = -EINVAL; switch (ichan->chanmode) { case CHANNEL_G: case CHANNEL_G_HT20: case CHANNEL_G_HT40PLUS: case CHANNEL_G_HT40MINUS: mode = HTC_MODE_11NG; break; case CHANNEL_A: case CHANNEL_A_HT20: case CHANNEL_A_HT40PLUS: case CHANNEL_A_HT40MINUS: mode = HTC_MODE_11NA; break; default: break; } WARN_ON(mode < 0); return mode; } bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, enum ath9k_power_mode mode) { bool ret; mutex_lock(&priv->htc_pm_lock); ret = ath9k_hw_setpower(priv->ah, mode); mutex_unlock(&priv->htc_pm_lock); return ret; } void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv) { mutex_lock(&priv->htc_pm_lock); if (++priv->ps_usecount != 1) goto unlock; ath9k_hw_setpower(priv->ah, ATH9K_PM_AWAKE); unlock: mutex_unlock(&priv->htc_pm_lock); } void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv) { mutex_lock(&priv->htc_pm_lock); if (--priv->ps_usecount != 0) goto unlock; if (priv->ps_idle) ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP); else if 
(priv->ps_enabled) ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP); unlock: mutex_unlock(&priv->htc_pm_lock); } void ath9k_ps_work(struct work_struct *work) { struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv, ps_work); ath9k_htc_setpower(priv, ATH9K_PM_AWAKE); /* The chip wakes up after receiving the first beacon while network sleep is enabled. For the driver to be in sync with the hw, set the chip to awake and only then set it to sleep. */ ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP); } static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath9k_htc_priv *priv = data; struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon) priv->reconfig_beacon = true; if (bss_conf->assoc) { priv->rearm_ani = true; priv->reconfig_beacon = true; } } static void ath9k_htc_vif_reconfig(struct ath9k_htc_priv *priv) { priv->rearm_ani = false; priv->reconfig_beacon = false; ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_vif_iter, priv); if (priv->rearm_ani) ath9k_htc_start_ani(priv); if (priv->reconfig_beacon) { ath9k_htc_ps_wakeup(priv); ath9k_htc_beacon_reconfig(priv); ath9k_htc_ps_restore(priv); } } static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath9k_vif_iter_data *iter_data = data; int i; for (i = 0; i < ETH_ALEN; i++) iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); } static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, struct ieee80211_vif *vif) { struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_vif_iter_data iter_data; /* * Use the hardware MAC address as reference, the hardware uses it * together with the BSSID mask when matching addresses. 
*/ iter_data.hw_macaddr = common->macaddr; memset(&iter_data.mask, 0xff, ETH_ALEN); if (vif) ath9k_htc_bssid_iter(&iter_data, vif->addr, vif); /* Get list of all active MAC addresses */ ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bssid_iter, &iter_data); memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); ath_hw_setbssidmask(common); } static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv) { if (priv->num_ibss_vif) priv->ah->opmode = NL80211_IFTYPE_ADHOC; else if (priv->num_ap_vif) priv->ah->opmode = NL80211_IFTYPE_AP; else priv->ah->opmode = NL80211_IFTYPE_STATION; ath9k_hw_setopmode(priv->ah); } void ath9k_htc_reset(struct ath9k_htc_priv *priv) { struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_channel *channel = priv->hw->conf.channel; struct ath9k_hw_cal_data *caldata = NULL; enum htc_phymode mode; __be16 htc_mode; u8 cmd_rsp; int ret; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); ath9k_htc_stop_ani(priv); ieee80211_stop_queues(priv->hw); del_timer_sync(&priv->tx.cleanup_timer); ath9k_htc_tx_drain(priv); WMI_CMD(WMI_DISABLE_INTR_CMDID); WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); WMI_CMD(WMI_STOP_RECV_CMDID); ath9k_wmi_event_drain(priv); caldata = &priv->caldata; ret = ath9k_hw_reset(ah, ah->curchan, caldata, false); if (ret) { ath_err(common, "Unable to reset device (%u Mhz) reset status %d\n", channel->center_freq, ret); } ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit, &priv->curtxpow); WMI_CMD(WMI_START_RECV_CMDID); ath9k_host_rx_init(priv); mode = ath9k_htc_get_curmode(priv, ah->curchan); htc_mode = cpu_to_be16(mode); WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode); WMI_CMD(WMI_ENABLE_INTR_CMDID); htc_start(priv->htc); ath9k_htc_vif_reconfig(priv); ieee80211_wake_queues(priv->hw); mod_timer(&priv->tx.cleanup_timer, jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL)); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } static int ath9k_htc_set_channel(struct 
ath9k_htc_priv *priv, struct ieee80211_hw *hw, struct ath9k_channel *hchan) { struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &common->hw->conf; bool fastcc; struct ieee80211_channel *channel = hw->conf.channel; struct ath9k_hw_cal_data *caldata = NULL; enum htc_phymode mode; __be16 htc_mode; u8 cmd_rsp; int ret; if (priv->op_flags & OP_INVALID) return -EIO; fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL); ath9k_htc_ps_wakeup(priv); del_timer_sync(&priv->tx.cleanup_timer); ath9k_htc_tx_drain(priv); WMI_CMD(WMI_DISABLE_INTR_CMDID); WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); WMI_CMD(WMI_STOP_RECV_CMDID); ath9k_wmi_event_drain(priv); ath_dbg(common, ATH_DBG_CONFIG, "(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n", priv->ah->curchan->channel, channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf), fastcc); if (!fastcc) caldata = &priv->caldata; ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); if (ret) { ath_err(common, "Unable to reset channel (%u Mhz) reset status %d\n", channel->center_freq, ret); goto err; } ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit, &priv->curtxpow); WMI_CMD(WMI_START_RECV_CMDID); if (ret) goto err; ath9k_host_rx_init(priv); mode = ath9k_htc_get_curmode(priv, hchan); htc_mode = cpu_to_be16(mode); WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode); if (ret) goto err; WMI_CMD(WMI_ENABLE_INTR_CMDID); if (ret) goto err; htc_start(priv->htc); if (!(priv->op_flags & OP_SCANNING) && !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) ath9k_htc_vif_reconfig(priv); mod_timer(&priv->tx.cleanup_timer, jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL)); err: ath9k_htc_ps_restore(priv); return ret; } /* * Monitor mode handling is a tad complicated because the firmware requires * an interface to be created exclusively, while mac80211 doesn't associate * an interface with the mode. * * So, for now, only one monitor interface can be configured. 
/*
 * Monitor-mode VAP handling and firmware station-table management for the
 * ath9k_htc (USB) driver, plus target capability/rate-table helpers.
 *
 * NOTE(review): WMI_CMD()/WMI_CMD_BUF() are macros defined elsewhere in this
 * driver; they appear to issue a WMI command to the firmware and store the
 * completion status in the local 'ret' (and the response in 'cmd_rsp') --
 * confirm against the macro definitions before relying on that.
 */
/* __ath9k_htc_remove_monitor_interface(): tears down the firmware VAP used
 * for monitor mode (index priv->mon_vif_idx) and releases its vif_slot bit;
 * a firmware error is logged but not propagated.
 * ath9k_htc_add_monitor_interface() (continues on the next source line):
 * creates an exclusive monitor VAP after checking VIF/station limits,
 * records priv->mon_vif_idx for later teardown, and switches the hardware
 * opmode to NL80211_IFTYPE_MONITOR only when no other interface exists. */
*/ static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) { struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_target_vif hvif; int ret = 0; u8 cmd_rsp; memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN); hvif.index = priv->mon_vif_idx; WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); if (ret) { ath_err(common, "Unable to remove monitor interface at idx: %d\n", priv->mon_vif_idx); } priv->nvifs--; priv->vif_slot &= ~(1 << priv->mon_vif_idx); } static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv) { struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_target_vif hvif; struct ath9k_htc_target_sta tsta; int ret = 0, sta_idx; u8 cmd_rsp; if ((priv->nvifs >= ATH9K_HTC_MAX_VIF) || (priv->nstations >= ATH9K_HTC_MAX_STA)) { ret = -ENOBUFS; goto err_vif; } sta_idx = ffz(priv->sta_slot); if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA)) { ret = -ENOBUFS; goto err_vif; } /* * Add an interface. */ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN); hvif.opmode = HTC_M_MONITOR; hvif.index = ffz(priv->vif_slot); WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif); if (ret) goto err_vif; /* * Assign the monitor interface index as a special case here. * This is needed when the interface is brought down. */ priv->mon_vif_idx = hvif.index; priv->vif_slot |= (1 << hvif.index); /* * Set the hardware mode to monitor only if there are no * other interfaces. */ if (!priv->nvifs) priv->ah->opmode = NL80211_IFTYPE_MONITOR; priv->nvifs++; /* * Associate a station with the interface for packet injection.
/* (continued) binds a self-station to the monitor VAP so frames can be
 * injected; on station-create failure the VAP is rolled back via err_sta.
 * ath9k_htc_remove_monitor_interface(): mirror path -- removes the VAP
 * first, then the associated station entry, and clears ah->is_monitoring.
 * ath9k_htc_add_station() (continues on the next source line): allocates a
 * free index from the sta_slot bitmap via ffz(), bounds-checked against
 * ATH9K_HTC_MAX_STA. */
*/ memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta)); memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN); tsta.is_vif_sta = 1; tsta.sta_index = sta_idx; tsta.vif_index = hvif.index; tsta.maxampdu = cpu_to_be16(0xffff); WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta); if (ret) { ath_err(common, "Unable to add station entry for monitor mode\n"); goto err_sta; } priv->sta_slot |= (1 << sta_idx); priv->nstations++; priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx; priv->ah->is_monitoring = true; ath_dbg(common, ATH_DBG_CONFIG, "Attached a monitor interface at idx: %d, sta idx: %d\n", priv->mon_vif_idx, sta_idx); return 0; err_sta: /* * Remove the interface from the target. */ __ath9k_htc_remove_monitor_interface(priv); err_vif: ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n"); return ret; } static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) { struct ath_common *common = ath9k_hw_common(priv->ah); int ret = 0; u8 cmd_rsp, sta_idx; __ath9k_htc_remove_monitor_interface(priv); sta_idx = priv->vif_sta_pos[priv->mon_vif_idx]; WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx); if (ret) { ath_err(common, "Unable to remove station entry for monitor mode\n"); return ret; } priv->sta_slot &= ~(1 << sta_idx); priv->nstations--; priv->ah->is_monitoring = false; ath_dbg(common, ATH_DBG_CONFIG, "Removed a monitor interface at idx: %d, sta idx: %d\n", priv->mon_vif_idx, sta_idx); return 0; } static int ath9k_htc_add_station(struct ath9k_htc_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_target_sta tsta; struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv; struct ath9k_htc_sta *ista; int ret, sta_idx; u8 cmd_rsp; u16 maxampdu; if (priv->nstations >= ATH9K_HTC_MAX_STA) return -ENOBUFS; sta_idx = ffz(priv->sta_slot); if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA)) return -ENOBUFS; memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta)); if
/* (continued) fills the target station entry: a real peer (sta != NULL)
 * gets its MAC/current BSSID and an A-MPDU limit derived from its HT
 * ampdu_factor, while a per-VIF self-station (sta == NULL) gets maxampdu
 * 0xffff; the local sta_slot/nstations bookkeeping is updated only after
 * the firmware accepts the node.
 * ath9k_htc_remove_station(): resolves the index (peer's ista->index or
 * the per-VIF self-station position) and frees it on firmware success.
 * ath9k_htc_update_cap_target() begins at the end of this source line. */
(sta) { ista = (struct ath9k_htc_sta *) sta->drv_priv; memcpy(&tsta.macaddr, sta->addr, ETH_ALEN); memcpy(&tsta.bssid, common->curbssid, ETH_ALEN); tsta.is_vif_sta = 0; ista->index = sta_idx; } else { memcpy(&tsta.macaddr, vif->addr, ETH_ALEN); tsta.is_vif_sta = 1; } tsta.sta_index = sta_idx; tsta.vif_index = avp->index; if (!sta) { tsta.maxampdu = cpu_to_be16(0xffff); } else { maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + sta->ht_cap.ampdu_factor); tsta.maxampdu = cpu_to_be16(maxampdu); } WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta); if (ret) { if (sta) ath_err(common, "Unable to add station entry for: %pM\n", sta->addr); return ret; } if (sta) { ath_dbg(common, ATH_DBG_CONFIG, "Added a station entry for: %pM (idx: %d)\n", sta->addr, tsta.sta_index); } else { ath_dbg(common, ATH_DBG_CONFIG, "Added a station entry for VIF %d (idx: %d)\n", avp->index, tsta.sta_index); } priv->sta_slot |= (1 << sta_idx); priv->nstations++; if (!sta) priv->vif_sta_pos[avp->index] = sta_idx; return 0; } static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv; struct ath9k_htc_sta *ista; int ret; u8 cmd_rsp, sta_idx; if (sta) { ista = (struct ath9k_htc_sta *) sta->drv_priv; sta_idx = ista->index; } else { sta_idx = priv->vif_sta_pos[avp->index]; } WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx); if (ret) { if (sta) ath_err(common, "Unable to remove station entry for: %pM\n", sta->addr); return ret; } if (sta) { ath_dbg(common, ATH_DBG_CONFIG, "Removed a station entry for: %pM (idx: %d)\n", sta->addr, sta_idx); } else { ath_dbg(common, ATH_DBG_CONFIG, "Removed a station entry for VIF %d (idx: %d)\n", avp->index, sta_idx); } priv->sta_slot &= ~(1 << sta_idx); priv->nstations--; return 0; } int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv, u8 enable_coex) { struct ath9k_htc_cap_target tcap; int
/* (continued) pushes A-MPDU limits, the coex flag and the TX chainmask to
 * the firmware.
 * ath9k_htc_setup_rate(): builds the target rate table from the station's
 * legacy supp_rates (bitrate stored in units of 500 kbps: *2/10) and, when
 * HT is supported, from up to 77 MCS bits capped at ATH_HTC_RATE_MAX; then
 * derives capability flags (HT / dual-stream / 40 MHz / short GI).
 * ath9k_htc_send_rate_cmd(): sends the prepared table via WMI, logging on
 * failure; ath9k_htc_init_rate() continues on the next source line. */
ret; u8 cmd_rsp; memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target)); tcap.ampdu_limit = cpu_to_be32(0xffff); tcap.ampdu_subframes = 0xff; tcap.enable_coex = enable_coex; tcap.tx_chainmask = priv->ah->caps.tx_chainmask; WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap); return ret; } static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv, struct ieee80211_sta *sta, struct ath9k_htc_target_rate *trate) { struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv; struct ieee80211_supported_band *sband; u32 caps = 0; int i, j; sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band]; for (i = 0, j = 0; i < sband->n_bitrates; i++) { if (sta->supp_rates[sband->band] & BIT(i)) { trate->rates.legacy_rates.rs_rates[j] = (sband->bitrates[i].bitrate * 2) / 10; j++; } } trate->rates.legacy_rates.rs_nrates = j; if (sta->ht_cap.ht_supported) { for (i = 0, j = 0; i < 77; i++) { if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8))) trate->rates.ht_rates.rs_rates[j++] = i; if (j == ATH_HTC_RATE_MAX) break; } trate->rates.ht_rates.rs_nrates = j; caps = WLAN_RC_HT_FLAG; if (sta->ht_cap.mcs.rx_mask[1]) caps |= WLAN_RC_DS_FLAG; if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && (conf_is_ht40(&priv->hw->conf))) caps |= WLAN_RC_40_FLAG; if (conf_is_ht40(&priv->hw->conf) && (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) caps |= WLAN_RC_SGI_FLAG; else if (conf_is_ht20(&priv->hw->conf) && (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)) caps |= WLAN_RC_SGI_FLAG; } trate->sta_index = ista->index; trate->isnew = 1; trate->capflags = cpu_to_be32(caps); } static int ath9k_htc_send_rate_cmd(struct ath9k_htc_priv *priv, struct ath9k_htc_target_rate *trate) { struct ath_common *common = ath9k_hw_common(priv->ah); int ret; u8 cmd_rsp; WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, trate); if (ret) { ath_err(common, "Unable to initialize Rate information on target\n"); } return ret; } static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv, struct ieee80211_sta *sta) { struct ath_common
/*
 * Rate-control updates, TX A-MPDU aggregation control and the periodic
 * ANI (Adaptive Noise Immunity) / calibration worker.
 */
/* ath9k_htc_init_rate() (body continues here): pushes the initial rate
 * table for a newly added station.
 * ath9k_htc_update_rate(): re-resolves the peer from bss_conf->bssid under
 * rcu_read_lock() (ieee80211_find_sta requires RCU protection) and resends
 * the rate table -- used when HT parameters change.
 * ath9k_htc_tx_aggr_oper(): enables/disables TX aggregation for one TID in
 * firmware; continues on the next source line. */
*common = ath9k_hw_common(priv->ah); struct ath9k_htc_target_rate trate; int ret; memset(&trate, 0, sizeof(struct ath9k_htc_target_rate)); ath9k_htc_setup_rate(priv, sta, &trate); ret = ath9k_htc_send_rate_cmd(priv, &trate); if (!ret) ath_dbg(common, ATH_DBG_CONFIG, "Updated target sta: %pM, rate caps: 0x%X\n", sta->addr, be32_to_cpu(trate.capflags)); } static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf) { struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_target_rate trate; struct ieee80211_sta *sta; int ret; memset(&trate, 0, sizeof(struct ath9k_htc_target_rate)); rcu_read_lock(); sta = ieee80211_find_sta(vif, bss_conf->bssid); if (!sta) { rcu_read_unlock(); return; } ath9k_htc_setup_rate(priv, sta, &trate); rcu_read_unlock(); ret = ath9k_htc_send_rate_cmd(priv, &trate); if (!ret) ath_dbg(common, ATH_DBG_CONFIG, "Updated target sta: %pM, rate caps: 0x%X\n", bss_conf->bssid, be32_to_cpu(trate.capflags)); } static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_ampdu_mlme_action action, u16 tid) { struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_target_aggr aggr; struct ath9k_htc_sta *ista; int ret = 0; u8 cmd_rsp; if (tid >= ATH9K_HTC_MAX_TID) return -EINVAL; memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr)); ista = (struct ath9k_htc_sta *) sta->drv_priv; aggr.sta_index = ista->index; aggr.tidno = tid & 0xf; aggr.aggr_enable = (action == IEEE80211_AMPDU_TX_START) ? true : false; WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr); if (ret) ath_dbg(common, ATH_DBG_CONFIG, "Unable to %s TX aggregation for (%pM, %d)\n", (aggr.aggr_enable) ? "start" : "stop", sta->addr, tid); else ath_dbg(common, ATH_DBG_CONFIG, "%s TX aggregation for (%pM, %d)\n", (aggr.aggr_enable) ?
/* (continued) the per-TID state is updated under tx_lock so the TX path
 * sees a consistent AGGR_START/AGGR_STOP value.
 * ath9k_htc_start_ani(): seeds the three ANI timers with the current time
 * (jiffies converted to ms), sets OP_ANI_RUNNING and queues ani_work.
 * ath9k_htc_stop_ani(): cancels the delayed work synchronously.
 * ath9k_htc_ani_work(): periodic worker that decides which of long-cal,
 * short-cal and ANI checks are due; calibrates only when the chip is awake
 * (ATH9K_PM_AWAKE), otherwise just re-arms the timer. */
"Starting" : "Stopping", sta->addr, tid); spin_lock_bh(&priv->tx.tx_lock); ista->tid_state[tid] = (aggr.aggr_enable && !ret) ? AGGR_START : AGGR_STOP; spin_unlock_bh(&priv->tx.tx_lock); return ret; } /*******/ /* ANI */ /*******/ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv) { struct ath_common *common = ath9k_hw_common(priv->ah); unsigned long timestamp = jiffies_to_msecs(jiffies); common->ani.longcal_timer = timestamp; common->ani.shortcal_timer = timestamp; common->ani.checkani_timer = timestamp; priv->op_flags |= OP_ANI_RUNNING; ieee80211_queue_delayed_work(common->hw, &priv->ani_work, msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); } void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv) { cancel_delayed_work_sync(&priv->ani_work); priv->op_flags &= ~OP_ANI_RUNNING; } void ath9k_htc_ani_work(struct work_struct *work) { struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv, ani_work.work); struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); bool longcal = false; bool shortcal = false; bool aniflag = false; unsigned int timestamp = jiffies_to_msecs(jiffies); u32 cal_interval, short_cal_interval; short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ? ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL; /* Only calibrate if awake */ if (ah->power_mode != ATH9K_PM_AWAKE) goto set_timer; /* Long calibration runs independently of short calibration.
/* (continued) each elapsed-interval test uses unsigned subtraction of the
 * ms timestamps; once caldone becomes true, short-cal stops and a periodic
 * revalidation via ath9k_hw_reset_calvalid() takes over. Actual HW work
 * (ani_monitor / calibrate) is bracketed by ps_wakeup/ps_restore. */
*/ if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) { longcal = true; ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies); common->ani.longcal_timer = timestamp; } /* Short calibration applies only while caldone is false */ if (!common->ani.caldone) { if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) { shortcal = true; ath_dbg(common, ATH_DBG_ANI, "shortcal @%lu\n", jiffies); common->ani.shortcal_timer = timestamp; common->ani.resetcal_timer = timestamp; } } else { if ((timestamp - common->ani.resetcal_timer) >= ATH_RESTART_CALINTERVAL) { common->ani.caldone = ath9k_hw_reset_calvalid(ah); if (common->ani.caldone) common->ani.resetcal_timer = timestamp; } } /* Verify whether we must check ANI */ if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { aniflag = true; common->ani.checkani_timer = timestamp; } /* Skip all processing if there's nothing to do. */ if (longcal || shortcal || aniflag) { ath9k_htc_ps_wakeup(priv); /* Call ANI routine if necessary */ if (aniflag) ath9k_hw_ani_monitor(ah, ah->curchan); /* Perform calibration if necessary */ if (longcal || shortcal) common->ani.caldone = ath9k_hw_calibrate(ah, ah->curchan, common->rx_chainmask, longcal); ath9k_htc_ps_restore(priv); } set_timer: /* * Set timer interval based on previous results. * The interval must be the shortest necessary to satisfy ANI, * short calibration and long calibration.
/* (continued) the re-arm interval is the minimum of the pending needs.
 * ath9k_htc_tx() (mac80211 .tx callback): inserts the 4-byte-alignment pad
 * after the 802.11 header when needed (requires headroom), grabs a TX slot
 * and hands the frame to ath9k_htc_tx_start(); on any failure the skb is
 * freed here since mac80211 has transferred ownership.
 * ath9k_htc_start() (mac80211 .start): begins here and continues on the
 * next source line -- wakes the chip and flushes RX before the reset. */
*/ cal_interval = ATH_LONG_CALINTERVAL; if (priv->ah->config.enable_ani) cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); if (!common->ani.caldone) cal_interval = min(cal_interval, (u32)short_cal_interval); ieee80211_queue_delayed_work(common->hw, &priv->ani_work, msecs_to_jiffies(cal_interval)); } /**********************/ /* mac80211 Callbacks */ /**********************/ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ieee80211_hdr *hdr; struct ath9k_htc_priv *priv = hw->priv; struct ath_common *common = ath9k_hw_common(priv->ah); int padpos, padsize, ret, slot; hdr = (struct ieee80211_hdr *) skb->data; /* Add the padding after the header if this is not already done */ padpos = ath9k_cmn_padpos(hdr->frame_control); padsize = padpos & 3; if (padsize && skb->len > padpos) { if (skb_headroom(skb) < padsize) { ath_dbg(common, ATH_DBG_XMIT, "No room for padding\n"); goto fail_tx; } skb_push(skb, padsize); memmove(skb->data, skb->data + padsize, padpos); } slot = ath9k_htc_tx_get_slot(priv); if (slot < 0) { ath_dbg(common, ATH_DBG_XMIT, "No free TX slot\n"); goto fail_tx; } ret = ath9k_htc_tx_start(priv, skb, slot, false); if (ret != 0) { ath_dbg(common, ATH_DBG_XMIT, "Tx failed\n"); goto clear_slot; } ath9k_htc_check_stop_queues(priv); return; clear_slot: ath9k_htc_tx_clear_slot(priv, slot); fail_tx: dev_kfree_skb_any(skb); } static int ath9k_htc_start(struct ieee80211_hw *hw) { struct ath9k_htc_priv *priv = hw->priv; struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_channel *curchan = hw->conf.channel; struct ath9k_channel *init_channel; int ret = 0; enum htc_phymode mode; __be16 htc_mode; u8 cmd_rsp; mutex_lock(&priv->mutex); ath_dbg(common, ATH_DBG_CONFIG, "Starting driver with initial channel: %d MHz\n", curchan->center_freq); /* Ensure that HW is awake before flushing RX */ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE); WMI_CMD(WMI_FLUSH_RECV_CMDID); /* setup initial channel */
/*
 * Driver start/stop and interface add/remove (mac80211 callbacks).
 * All of these run under priv->mutex; hardware access is bracketed by
 * ath9k_htc_ps_wakeup()/ath9k_htc_ps_restore() where the chip may sleep.
 */
/* ath9k_htc_start() (body continues here): resets the HW on the initial
 * channel, restores TX power, programs the PHY mode, re-enables RX, pushes
 * target capabilities, clears OP_INVALID, starts HTC/queues and the TX
 * cleanup timer, and enables 3-wire BT coexistence when configured.
 * ath9k_htc_stop(): disables interrupts, drains TX/WMI, kills the RX
 * tasklet and timers -- then drops the mutex before cancelling works (they
 * may take the mutex themselves); continues on the next source line. */
init_channel = ath9k_cmn_get_curchannel(hw, ah); ath9k_hw_htc_resetinit(ah); ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false); if (ret) { ath_err(common, "Unable to reset hardware; reset status %d (freq %u MHz)\n", ret, curchan->center_freq); mutex_unlock(&priv->mutex); return ret; } ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit, &priv->curtxpow); mode = ath9k_htc_get_curmode(priv, init_channel); htc_mode = cpu_to_be16(mode); WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode); WMI_CMD(WMI_ATH_INIT_CMDID); WMI_CMD(WMI_START_RECV_CMDID); ath9k_host_rx_init(priv); ret = ath9k_htc_update_cap_target(priv, 0); if (ret) ath_dbg(common, ATH_DBG_CONFIG, "Failed to update capability in target\n"); priv->op_flags &= ~OP_INVALID; htc_start(priv->htc); spin_lock_bh(&priv->tx.tx_lock); priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP; spin_unlock_bh(&priv->tx.tx_lock); ieee80211_wake_queues(hw); mod_timer(&priv->tx.cleanup_timer, jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL)); if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) { ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, AR_STOMP_LOW_WLAN_WGHT); ath9k_hw_btcoex_enable(ah); ath_htc_resume_btcoex_work(priv); } mutex_unlock(&priv->mutex); return ret; } static void ath9k_htc_stop(struct ieee80211_hw *hw) { struct ath9k_htc_priv *priv = hw->priv; struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); int ret __attribute__ ((unused)); u8 cmd_rsp; mutex_lock(&priv->mutex); if (priv->op_flags & OP_INVALID) { ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); mutex_unlock(&priv->mutex); return; } ath9k_htc_ps_wakeup(priv); WMI_CMD(WMI_DISABLE_INTR_CMDID); WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); WMI_CMD(WMI_STOP_RECV_CMDID); tasklet_kill(&priv->rx_tasklet); del_timer_sync(&priv->tx.cleanup_timer); ath9k_htc_tx_drain(priv); ath9k_wmi_event_drain(priv); mutex_unlock(&priv->mutex); /* Cancel all the running timers/work ..
/* (continued) after the works are cancelled the mutex is retaken to disable
 * BT coex, tear down a leftover monitor interface, disable the PHY and put
 * the chip into full sleep; OP_INVALID marks the device as down.
 * ath9k_htc_add_interface(): validates limits (IBSS must be the only mode;
 * AP+IBSS beaconing interfaces capped at ATH9K_HTC_MAX_BCN_VIF) before
 * creating the firmware VAP; continues on the next source line. */
*/ cancel_work_sync(&priv->fatal_work); cancel_work_sync(&priv->ps_work); #ifdef CONFIG_MAC80211_LEDS cancel_work_sync(&priv->led_work); #endif ath9k_htc_stop_ani(priv); mutex_lock(&priv->mutex); if (ah->btcoex_hw.enabled) { ath9k_hw_btcoex_disable(ah); if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) ath_htc_cancel_btcoex_work(priv); } /* Remove a monitor interface if it's present. */ if (priv->ah->is_monitoring) ath9k_htc_remove_monitor_interface(priv); ath9k_hw_phy_disable(ah); ath9k_hw_disable(ah); ath9k_htc_ps_restore(priv); ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP); priv->op_flags |= OP_INVALID; ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); mutex_unlock(&priv->mutex); } static int ath9k_htc_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath9k_htc_priv *priv = hw->priv; struct ath9k_htc_vif *avp = (void *)vif->drv_priv; struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_target_vif hvif; int ret = 0; u8 cmd_rsp; mutex_lock(&priv->mutex); if (priv->nvifs >= ATH9K_HTC_MAX_VIF) { mutex_unlock(&priv->mutex); return -ENOBUFS; } if (priv->num_ibss_vif || (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) { ath_err(common, "IBSS coexistence with other modes is not allowed\n"); mutex_unlock(&priv->mutex); return -ENOBUFS; } if (((vif->type == NL80211_IFTYPE_AP) || (vif->type == NL80211_IFTYPE_ADHOC)) && ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) { ath_err(common, "Max.
/* (continued) maps the mac80211 interface type to the firmware opmode
 * (STA/IBSS/AP), creates the VAP, then adds a per-VIF self-station so mgmt
 * frames can be sent pre-association (VAP is rolled back if that fails);
 * updates bssid mask/slot bookkeeping, assigns a beacon slot for AP/IBSS,
 * and starts ANI with TSF adjust for the first AP interface.
 * ath9k_htc_remove_interface(): removes the firmware VAP (error logged but
 * teardown proceeds); continues on the next source line. */
number of beaconing interfaces reached\n"); mutex_unlock(&priv->mutex); return -ENOBUFS; } ath9k_htc_ps_wakeup(priv); memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); memcpy(&hvif.myaddr, vif->addr, ETH_ALEN); switch (vif->type) { case NL80211_IFTYPE_STATION: hvif.opmode = HTC_M_STA; break; case NL80211_IFTYPE_ADHOC: hvif.opmode = HTC_M_IBSS; break; case NL80211_IFTYPE_AP: hvif.opmode = HTC_M_HOSTAP; break; default: ath_err(common, "Interface type %d not yet supported\n", vif->type); ret = -EOPNOTSUPP; goto out; } /* Index starts from zero on the target */ avp->index = hvif.index = ffz(priv->vif_slot); hvif.rtsthreshold = cpu_to_be16(2304); WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif); if (ret) goto out; /* * We need a node in target to tx mgmt frames * before association. */ ret = ath9k_htc_add_station(priv, vif, NULL); if (ret) { WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); goto out; } ath9k_htc_set_bssid_mask(priv, vif); priv->vif_slot |= (1 << avp->index); priv->nvifs++; INC_VIF(priv, vif->type); if ((vif->type == NL80211_IFTYPE_AP) || (vif->type == NL80211_IFTYPE_ADHOC)) ath9k_htc_assign_bslot(priv, vif); ath9k_htc_set_opmode(priv); if ((priv->ah->opmode == NL80211_IFTYPE_AP) && !(priv->op_flags & OP_ANI_RUNNING)) { ath9k_hw_set_tsfadjust(priv->ah, 1); ath9k_htc_start_ani(priv); } ath_dbg(common, ATH_DBG_CONFIG, "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index); out: ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); return ret; } static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath9k_htc_priv *priv = hw->priv; struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_vif *avp = (void *)vif->drv_priv; struct ath9k_htc_target_vif hvif; int ret = 0; u8 cmd_rsp; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); memcpy(&hvif.myaddr, vif->addr, ETH_ALEN); hvif.index = avp->index; WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); if
/*
 * Runtime configuration (.config), RX filter, station add/remove, TX queue
 * parameters, hardware crypto keys and BSSID tracking helpers.
 */
/* ath9k_htc_config() (body continues here): the MONITOR change is handled
 * before CHANGE_CHANNEL (see the preceding comment); a channel change maps
 * curchan->hw_value into priv->ah->channels[] and calls
 * ath9k_htc_set_channel(); PS and TX-power changes follow (txpowlimit is
 * stored in half-dBm: 2 * power_level); the IDLE case may power the radio
 * down at the end. */
*/ if (changed & IEEE80211_CONF_CHANGE_MONITOR) { if ((conf->flags & IEEE80211_CONF_MONITOR) && !priv->ah->is_monitoring) ath9k_htc_add_monitor_interface(priv); else if (priv->ah->is_monitoring) ath9k_htc_remove_monitor_interface(priv); } if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { struct ieee80211_channel *curchan = hw->conf.channel; int pos = curchan->hw_value; ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n", curchan->center_freq); ath9k_cmn_update_ichannel(&priv->ah->channels[pos], hw->conf.channel, hw->conf.channel_type); if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) { ath_err(common, "Unable to set channel\n"); mutex_unlock(&priv->mutex); return -EINVAL; } } if (changed & IEEE80211_CONF_CHANGE_PS) { if (conf->flags & IEEE80211_CONF_PS) { ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP); priv->ps_enabled = true; } else { priv->ps_enabled = false; cancel_work_sync(&priv->ps_work); ath9k_htc_setpower(priv, ATH9K_PM_AWAKE); } } if (changed & IEEE80211_CONF_CHANGE_POWER) { priv->txpowlimit = 2 * conf->power_level; ath9k_cmn_update_txpow(priv->ah, priv->curtxpow, priv->txpowlimit, &priv->curtxpow); } if (changed & IEEE80211_CONF_CHANGE_IDLE) { mutex_lock(&priv->htc_pm_lock); if (!priv->ps_idle) { mutex_unlock(&priv->htc_pm_lock); goto out; } mutex_unlock(&priv->htc_pm_lock); ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n"); ath9k_htc_radio_disable(hw); } out: mutex_unlock(&priv->mutex); return 0; } #define SUPPORTED_FILTERS \ (FIF_PROMISC_IN_BSS | \ FIF_ALLMULTI | \ FIF_CONTROL | \ FIF_PSPOLL | \ FIF_OTHER_BSS | \ FIF_BCN_PRBRESP_PROMISC | \ FIF_PROBE_REQ | \ FIF_FCSFAIL) static void ath9k_htc_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct ath9k_htc_priv *priv = hw->priv; u32 rfilt; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; priv->rxfilter = *total_flags; rfilt =
/* (continued) programs the computed RX filter into the hardware.
 * ath9k_htc_sta_add()/_sta_remove(): thin wrappers around the station
 * helpers; removal first drains queued frames for the station's index via
 * htc_sta_drain().
 * ath9k_htc_conf_tx(): translates mac80211 WMM parameters into an
 * ath9k_tx_queue_info for the mapped hardware queue; when the BE queue of
 * an IBSS interface changes, the beacon queue is reconfigured too.
 * ath9k_htc_set_key() continues on the next source line. */
ath9k_htc_calcrxfilter(priv); ath9k_hw_setrxfilter(priv->ah, rfilt); ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", rfilt); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } static int ath9k_htc_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath9k_htc_priv *priv = hw->priv; int ret; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); ret = ath9k_htc_add_station(priv, vif, sta); if (!ret) ath9k_htc_init_rate(priv, sta); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); return ret; } static int ath9k_htc_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath9k_htc_priv *priv = hw->priv; struct ath9k_htc_sta *ista; int ret; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); ista = (struct ath9k_htc_sta *) sta->drv_priv; htc_sta_drain(priv->htc, ista->index); ret = ath9k_htc_remove_station(priv, vif, sta); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); return ret; } static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue, const struct ieee80211_tx_queue_params *params) { struct ath9k_htc_priv *priv = hw->priv; struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_tx_queue_info qi; int ret = 0, qnum; if (queue >= WME_NUM_AC) return 0; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); qi.tqi_aifs = params->aifs; qi.tqi_cwmin = params->cw_min; qi.tqi_cwmax = params->cw_max; qi.tqi_burstTime = params->txop; qnum = get_hw_qnum(queue, priv->hwq_map); ath_dbg(common, ATH_DBG_CONFIG, "Configure tx [queue/hwq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", queue, qnum, params->aifs, params->cw_min, params->cw_max, params->txop); ret = ath_htc_txq_update(priv, qnum, &qi); if (ret) { ath_err(common, "TXQ Update failed\n"); goto out; } if ((priv->ah->opmode == NL80211_IFTYPE_ADHOC) && (qnum == priv->hwq_map[WME_AC_BE]))
/* (continued) ath9k_htc_set_key(): rejects HW crypto when the module param
 * disables it (-ENOSPC tells mac80211 to fall back to software); on
 * SET_KEY the returned HW index is stored and IV/MMIC generation is pushed
 * back to the stack, with software mgmt-crypto flagged for CCMP when the
 * hardware needs it.
 * ath9k_htc_set_bssid()/_bss_iter()/_choose_set_bssid(): write the current
 * BSSID/AID to hardware; the iterator picks them from the single
 * associated station VIF (only when exactly one is associated). */
ath9k_htc_beaconq_config(priv); out: ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); return ret; } static int ath9k_htc_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct ath9k_htc_priv *priv = hw->priv; struct ath_common *common = ath9k_hw_common(priv->ah); int ret = 0; if (htc_modparam_nohwcrypt) return -ENOSPC; mutex_lock(&priv->mutex); ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n"); ath9k_htc_ps_wakeup(priv); switch (cmd) { case SET_KEY: ret = ath_key_config(common, vif, sta, key); if (ret >= 0) { key->hw_key_idx = ret; /* push IV and Michael MIC generation to stack */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; if (key->cipher == WLAN_CIPHER_SUITE_TKIP) key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; if (priv->ah->sw_mgmt_crypto && key->cipher == WLAN_CIPHER_SUITE_CCMP) key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; ret = 0; } break; case DISABLE_KEY: ath_key_delete(common, key); break; default: ret = -EINVAL; } ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); return ret; } static void ath9k_htc_set_bssid(struct ath9k_htc_priv *priv) { struct ath_common *common = ath9k_hw_common(priv->ah); ath9k_hw_write_associd(priv->ah); ath_dbg(common, ATH_DBG_CONFIG, "BSSID: %pM aid: 0x%x\n", common->curbssid, common->curaid); } static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; struct ath_common *common = ath9k_hw_common(priv->ah); struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) { common->curaid = bss_conf->aid; memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); } } static void ath9k_htc_choose_set_bssid(struct ath9k_htc_priv *priv) { if (priv->num_sta_assoc_vif == 1) { ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bss_iter, priv); ath9k_htc_set_bssid(priv); } } static void
/* ath9k_htc_bss_info_changed() (mac80211 .bss_info_changed): tracks the
 * association count to start/stop ANI in STA mode, updates the BSSID for
 * ADHOC/STA, and enables/disables beaconing (SWBA is only disabled when no
 * other AP/IBSS interface still needs it); continues on the next line. */
ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changed) { struct ath9k_htc_priv *priv = hw->priv; struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); if (changed & BSS_CHANGED_ASSOC) { ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", bss_conf->assoc); bss_conf->assoc ? priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--; if (priv->ah->opmode == NL80211_IFTYPE_STATION) { if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1)) ath9k_htc_start_ani(priv); else if (priv->num_sta_assoc_vif == 0) ath9k_htc_stop_ani(priv); } } if (changed & BSS_CHANGED_BSSID) { if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) { common->curaid = bss_conf->aid; memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); ath9k_htc_set_bssid(priv); } else if (priv->ah->opmode == NL80211_IFTYPE_STATION) { ath9k_htc_choose_set_bssid(priv); } } if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) { ath_dbg(common, ATH_DBG_CONFIG, "Beacon enabled for BSS: %pM\n", bss_conf->bssid); ath9k_htc_set_tsfadjust(priv, vif); priv->op_flags |= OP_ENABLE_BEACON; ath9k_htc_beacon_config(priv, vif); } if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) { /* * Disable SWBA interrupt only if there are no * AP/IBSS interfaces. */ if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) { ath_dbg(common, ATH_DBG_CONFIG, "Beacon disabled for BSS: %pM\n", bss_conf->bssid); priv->op_flags &= ~OP_ENABLE_BEACON; ath9k_htc_beacon_config(priv, vif); } } if (changed & BSS_CHANGED_BEACON_INT) { /* * Reset the HW TSF for the first AP interface.
/*
 * Remaining mac80211 callbacks (TSF, AMPDU, scan, bitrate mask) and the
 * ieee80211_ops table that registers all of them with mac80211.
 */
/* ath9k_htc_bss_info_changed() (body continues here): flags a TSF reset
 * for the first/only AP interface, applies ERP short-slot (9 vs 20 us) and
 * re-sends the rate table on HT changes.
 * ath9k_htc_get/set/reset_tsf: simple wrappers around the HW TSF register,
 * each taking the mutex and waking the chip.
 * ath9k_htc_ampdu_action(): TX_START reports success to mac80211 via
 * ieee80211_start_tx_ba_cb_irqsafe(); RX aggregation cases are no-ops
 * (handled by firmware, presumably -- confirm against driver docs);
 * continues on the next source line. */
*/ if ((priv->ah->opmode == NL80211_IFTYPE_AP) && (priv->nvifs == 1) && (priv->num_ap_vif == 1) && (vif->type == NL80211_IFTYPE_AP)) { priv->op_flags |= OP_TSF_RESET; } ath_dbg(common, ATH_DBG_CONFIG, "Beacon interval changed for BSS: %pM\n", bss_conf->bssid); ath9k_htc_beacon_config(priv, vif); } if (changed & BSS_CHANGED_ERP_SLOT) { if (bss_conf->use_short_slot) ah->slottime = 9; else ah->slottime = 20; ath9k_hw_init_global_settings(ah); } if (changed & BSS_CHANGED_HT) ath9k_htc_update_rate(priv, vif, bss_conf); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw) { struct ath9k_htc_priv *priv = hw->priv; u64 tsf; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); tsf = ath9k_hw_gettsf64(priv->ah); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); return tsf; } static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf) { struct ath9k_htc_priv *priv = hw->priv; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); ath9k_hw_settsf64(priv->ah, tsf); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw) { struct ath9k_htc_priv *priv = hw->priv; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); ath9k_hw_reset_tsf(priv->ah); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size) { struct ath9k_htc_priv *priv = hw->priv; struct ath9k_htc_sta *ista; int ret = 0; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); switch (action) { case IEEE80211_AMPDU_RX_START: break; case IEEE80211_AMPDU_RX_STOP: break; case IEEE80211_AMPDU_TX_START: ret = ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid); if (!ret) ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_STOP: ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
/* (continued) TX_OPERATIONAL flips the per-TID state under tx_lock.
 * ath9k_htc_sw_scan_start/_complete: toggle OP_SCANNING under beacon_lock
 * and pause/resume ANI; scan completion re-runs VIF reconfiguration.
 * ath9k_htc_set_rts_threshold(): intentionally a no-op (RTS threshold is
 * fixed at VAP creation time).
 * ath9k_htc_set_coverage_class(): stores the class and re-applies global
 * timing settings. */
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: ista = (struct ath9k_htc_sta *) sta->drv_priv; spin_lock_bh(&priv->tx.tx_lock); ista->tid_state[tid] = AGGR_OPERATIONAL; spin_unlock_bh(&priv->tx.tx_lock); break; default: ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n"); } ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); return ret; } static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw) { struct ath9k_htc_priv *priv = hw->priv; mutex_lock(&priv->mutex); spin_lock_bh(&priv->beacon_lock); priv->op_flags |= OP_SCANNING; spin_unlock_bh(&priv->beacon_lock); cancel_work_sync(&priv->ps_work); ath9k_htc_stop_ani(priv); mutex_unlock(&priv->mutex); } static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw) { struct ath9k_htc_priv *priv = hw->priv; mutex_lock(&priv->mutex); spin_lock_bh(&priv->beacon_lock); priv->op_flags &= ~OP_SCANNING; spin_unlock_bh(&priv->beacon_lock); ath9k_htc_ps_wakeup(priv); ath9k_htc_vif_reconfig(priv); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { return 0; } static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) { struct ath9k_htc_priv *priv = hw->priv; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); priv->ah->coverage_class = coverage_class; ath9k_hw_init_global_settings(priv->ah); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } /* * Currently, this is used only for selecting the minimum rate * for management frames, rate selection for data frames remain * unaffected.
/* ath9k_htc_set_bitrate_mask(): pushes the legacy-rate mask for both bands
 * to the firmware (big-endian on the wire), logging and bailing out on the
 * first failure; per the preceding comment this only affects management
 * frames.  The ath9k_htc_ops table below wires every callback above into
 * mac80211. */
*/ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { struct ath9k_htc_priv *priv = hw->priv; struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_target_rate_mask tmask; struct ath9k_htc_vif *avp = (void *)vif->drv_priv; int ret = 0; u8 cmd_rsp; memset(&tmask, 0, sizeof(struct ath9k_htc_target_rate_mask)); tmask.vif_index = avp->index; tmask.band = IEEE80211_BAND_2GHZ; tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_2GHZ].legacy); WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask); if (ret) { ath_err(common, "Unable to set 2G rate mask for " "interface at idx: %d\n", avp->index); goto out; } tmask.band = IEEE80211_BAND_5GHZ; tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_5GHZ].legacy); WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask); if (ret) { ath_err(common, "Unable to set 5G rate mask for " "interface at idx: %d\n", avp->index); goto out; } ath_dbg(common, ATH_DBG_CONFIG, "Set bitrate masks: 0x%x, 0x%x\n", mask->control[IEEE80211_BAND_2GHZ].legacy, mask->control[IEEE80211_BAND_5GHZ].legacy); out: return ret; } struct ieee80211_ops ath9k_htc_ops = { .tx = ath9k_htc_tx, .start = ath9k_htc_start, .stop = ath9k_htc_stop, .add_interface = ath9k_htc_add_interface, .remove_interface = ath9k_htc_remove_interface, .config = ath9k_htc_config, .configure_filter = ath9k_htc_configure_filter, .sta_add = ath9k_htc_sta_add, .sta_remove = ath9k_htc_sta_remove, .conf_tx = ath9k_htc_conf_tx, .bss_info_changed = ath9k_htc_bss_info_changed, .set_key = ath9k_htc_set_key, .get_tsf = ath9k_htc_get_tsf, .set_tsf = ath9k_htc_set_tsf, .reset_tsf = ath9k_htc_reset_tsf, .ampdu_action = ath9k_htc_ampdu_action, .sw_scan_start = ath9k_htc_sw_scan_start, .sw_scan_complete = ath9k_htc_sw_scan_complete, .set_rts_threshold = ath9k_htc_set_rts_threshold, .rfkill_poll = ath9k_htc_rfkill_poll_state, .set_coverage_class = ath9k_htc_set_coverage_class, .set_bitrate_mask =
ath9k_htc_set_bitrate_mask, };
gpl-2.0
visi0nary/android_kernel_alps_k05ts_a
drivers/leds/leds-lp3944.c
2321
11146
/* * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip * * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ /* * I2C driver for National Semiconductor LP3944 Funlight Chip * http://www.national.com/pf/LP/LP3944.html * * This helper chip can drive up to 8 leds, with two programmable DIM modes; * it could even be used as a gpio expander but this driver assumes it is used * as a led controller. * * The DIM modes are used to set _blink_ patterns for leds, the pattern is * specified supplying two parameters: * - period: from 0s to 1.6s * - duty cycle: percentage of the period the led is on, from 0 to 100 * * LP3944 can be found on Motorola A910 smartphone, where it drives the rgb * leds, the camera flash light and the displays backlights. */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/leds.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/leds-lp3944.h> /* Read Only Registers */ #define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */ #define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */ #define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */ #define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */ #define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */ #define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */ #define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */ #define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */ /* These registers are not used to control leds in LP3944, they can store * arbitrary values which the chip will ignore. 
 */
#define LP3944_REG_REGISTER8 0x08
#define LP3944_REG_REGISTER9 0x09

#define LP3944_DIM0 0
#define LP3944_DIM1 1

/* period in ms */
#define LP3944_PERIOD_MIN 0
#define LP3944_PERIOD_MAX 1600

/* duty cycle is a percentage */
#define LP3944_DUTY_CYCLE_MIN 0
#define LP3944_DUTY_CYCLE_MAX 100

/* Map a struct led_classdev back to its containing lp3944_led_data. */
#define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev)

/* Saved data */
struct lp3944_led_data {
	u8 id;				/* LED index, LP3944_LED0..LP3944_LED7 */
	enum lp3944_type type;
	enum lp3944_status status;	/* also abused for blink patterns */
	struct led_classdev ldev;
	struct i2c_client *client;
	struct work_struct work;	/* defers i2c access out of atomic ctx */
};

struct lp3944_data {
	struct mutex lock;		/* serializes read-modify-write of LS regs */
	struct i2c_client *client;
	struct lp3944_led_data leds[LP3944_LEDS_MAX];
};

/*
 * Read one LP3944 register over SMBus.
 * Returns 0 and stores the byte in *value, or a negative errno on failure.
 */
static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
{
	int tmp;

	tmp = i2c_smbus_read_byte_data(client, reg);
	if (tmp < 0)
		return tmp;

	*value = tmp;

	return 0;
}

/*
 * Write one LP3944 register over SMBus.
 * Returns 0 on success or a negative errno.
 */
static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value)
{
	return i2c_smbus_write_byte_data(client, reg, value);
}

/**
 * Set the period for DIM status
 *
 * @client: the i2c client
 * @dim: either LP3944_DIM0 or LP3944_DIM1
 * @period: period of a blink, that is a on/off cycle, expressed in ms.
*/ static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period) { u8 psc_reg; u8 psc_value; int err; if (dim == LP3944_DIM0) psc_reg = LP3944_REG_PSC0; else if (dim == LP3944_DIM1) psc_reg = LP3944_REG_PSC1; else return -EINVAL; /* Convert period to Prescaler value */ if (period > LP3944_PERIOD_MAX) return -EINVAL; psc_value = (period * 255) / LP3944_PERIOD_MAX; err = lp3944_reg_write(client, psc_reg, psc_value); return err; } /** * Set the duty cycle for DIM status * * @client: the i2c client * @dim: either LP3944_DIM0 or LP3944_DIM1 * @duty_cycle: percentage of a period during which a led is ON */ static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim, u8 duty_cycle) { u8 pwm_reg; u8 pwm_value; int err; if (dim == LP3944_DIM0) pwm_reg = LP3944_REG_PWM0; else if (dim == LP3944_DIM1) pwm_reg = LP3944_REG_PWM1; else return -EINVAL; /* Convert duty cycle to PWM value */ if (duty_cycle > LP3944_DUTY_CYCLE_MAX) return -EINVAL; pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX; err = lp3944_reg_write(client, pwm_reg, pwm_value); return err; } /** * Set the led status * * @led: a lp3944_led_data structure * @status: one of LP3944_LED_STATUS_OFF * LP3944_LED_STATUS_ON * LP3944_LED_STATUS_DIM0 * LP3944_LED_STATUS_DIM1 */ static int lp3944_led_set(struct lp3944_led_data *led, u8 status) { struct lp3944_data *data = i2c_get_clientdata(led->client); u8 id = led->id; u8 reg; u8 val = 0; int err; dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n", __func__, led->ldev.name, status); switch (id) { case LP3944_LED0: case LP3944_LED1: case LP3944_LED2: case LP3944_LED3: reg = LP3944_REG_LS0; break; case LP3944_LED4: case LP3944_LED5: case LP3944_LED6: case LP3944_LED7: id -= LP3944_LED4; reg = LP3944_REG_LS1; break; default: return -EINVAL; } if (status > LP3944_LED_STATUS_DIM1) return -EINVAL; /* invert only 0 and 1, leave unchanged the other values, * remember we are abusing status to set blink patterns */ if (led->type == 
LP3944_LED_TYPE_LED_INVERTED && status < 2) status = 1 - status; mutex_lock(&data->lock); lp3944_reg_read(led->client, reg, &val); val &= ~(LP3944_LED_STATUS_MASK << (id << 1)); val |= (status << (id << 1)); dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n", __func__, led->ldev.name, reg, id, status, val); /* set led status */ err = lp3944_reg_write(led->client, reg, val); mutex_unlock(&data->lock); return err; } static int lp3944_led_set_blink(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct lp3944_led_data *led = ldev_to_led(led_cdev); u16 period; u8 duty_cycle; int err; /* units are in ms */ if (*delay_on + *delay_off > LP3944_PERIOD_MAX) return -EINVAL; if (*delay_on == 0 && *delay_off == 0) { /* Special case: the leds subsystem requires a default user * friendly blink pattern for the LED. Let's blink the led * slowly (1Hz). */ *delay_on = 500; *delay_off = 500; } period = (*delay_on) + (*delay_off); /* duty_cycle is the percentage of period during which the led is ON */ duty_cycle = 100 * (*delay_on) / period; /* invert duty cycle for inverted leds, this has the same effect of * swapping delay_on and delay_off */ if (led->type == LP3944_LED_TYPE_LED_INVERTED) duty_cycle = 100 - duty_cycle; /* NOTE: using always the first DIM mode, this means that all leds * will have the same blinking pattern. * * We could find a way later to have two leds blinking in hardware * with different patterns at the same time, falling back to software * control for the other ones. 
*/ err = lp3944_dim_set_period(led->client, LP3944_DIM0, period); if (err) return err; err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle); if (err) return err; dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n", __func__); led->status = LP3944_LED_STATUS_DIM0; schedule_work(&led->work); return 0; } static void lp3944_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { struct lp3944_led_data *led = ldev_to_led(led_cdev); dev_dbg(&led->client->dev, "%s: %s, %d\n", __func__, led_cdev->name, brightness); led->status = brightness; schedule_work(&led->work); } static void lp3944_led_work(struct work_struct *work) { struct lp3944_led_data *led; led = container_of(work, struct lp3944_led_data, work); lp3944_led_set(led, led->status); } static int lp3944_configure(struct i2c_client *client, struct lp3944_data *data, struct lp3944_platform_data *pdata) { int i, err = 0; for (i = 0; i < pdata->leds_size; i++) { struct lp3944_led *pled = &pdata->leds[i]; struct lp3944_led_data *led = &data->leds[i]; led->client = client; led->id = i; switch (pled->type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led->type = pled->type; led->status = pled->status; led->ldev.name = pled->name; led->ldev.max_brightness = 1; led->ldev.brightness_set = lp3944_led_set_brightness; led->ldev.blink_set = lp3944_led_set_blink; led->ldev.flags = LED_CORE_SUSPENDRESUME; INIT_WORK(&led->work, lp3944_led_work); err = led_classdev_register(&client->dev, &led->ldev); if (err < 0) { dev_err(&client->dev, "couldn't register LED %s\n", led->ldev.name); goto exit; } /* to expose the default value to userspace */ led->ldev.brightness = led->status; /* Set the default led status */ err = lp3944_led_set(led, led->status); if (err < 0) { dev_err(&client->dev, "%s couldn't set STATUS %d\n", led->ldev.name, led->status); goto exit; } break; case LP3944_LED_TYPE_NONE: default: break; } } return 0; exit: if (i > 0) for (i = i - 1; i >= 
0; i--) switch (pdata->leds[i].type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led_classdev_unregister(&data->leds[i].ldev); cancel_work_sync(&data->leds[i].work); break; case LP3944_LED_TYPE_NONE: default: break; } return err; } static int lp3944_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data; struct lp3944_data *data; int err; if (lp3944_pdata == NULL) { dev_err(&client->dev, "no platform data\n"); return -EINVAL; } /* Let's see whether this adapter can support what we need. */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "insufficient functionality!\n"); return -ENODEV; } data = devm_kzalloc(&client->dev, sizeof(struct lp3944_data), GFP_KERNEL); if (!data) return -ENOMEM; data->client = client; i2c_set_clientdata(client, data); mutex_init(&data->lock); err = lp3944_configure(client, data, lp3944_pdata); if (err < 0) return err; dev_info(&client->dev, "lp3944 enabled\n"); return 0; } static int lp3944_remove(struct i2c_client *client) { struct lp3944_platform_data *pdata = client->dev.platform_data; struct lp3944_data *data = i2c_get_clientdata(client); int i; for (i = 0; i < pdata->leds_size; i++) switch (data->leds[i].type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led_classdev_unregister(&data->leds[i].ldev); cancel_work_sync(&data->leds[i].work); break; case LP3944_LED_TYPE_NONE: default: break; } return 0; } /* lp3944 i2c driver struct */ static const struct i2c_device_id lp3944_id[] = { {"lp3944", 0}, {} }; MODULE_DEVICE_TABLE(i2c, lp3944_id); static struct i2c_driver lp3944_driver = { .driver = { .name = "lp3944", }, .probe = lp3944_probe, .remove = lp3944_remove, .id_table = lp3944_id, }; module_i2c_driver(lp3944_driver); MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>"); MODULE_DESCRIPTION("LP3944 Fun Light Chip"); MODULE_LICENSE("GPL");
gpl-2.0
NoelMacwan/Kernel-Honami-14.1.N.0.52
drivers/staging/iio/Documentation/iio_event_monitor.c
4881
5546
/* Industrialio event test code. * * Copyright (c) 2011-2012 Lars-Peter Clausen <lars@metafoo.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is primarily intended as an example application. * Reads the current buffer setup from sysfs and starts a short capture * from the specified device, pretty printing the result after appropriate * conversion. * * Usage: * iio_event_monitor <device_name> * */ #define _GNU_SOURCE #include <unistd.h> #include <stdbool.h> #include <stdio.h> #include <errno.h> #include <string.h> #include <poll.h> #include <fcntl.h> #include <sys/ioctl.h> #include "iio_utils.h" #include "../events.h" static const char * const iio_chan_type_name_spec[] = { [IIO_VOLTAGE] = "voltage", [IIO_CURRENT] = "current", [IIO_POWER] = "power", [IIO_ACCEL] = "accel", [IIO_ANGL_VEL] = "anglvel", [IIO_MAGN] = "magn", [IIO_LIGHT] = "illuminance", [IIO_INTENSITY] = "intensity", [IIO_PROXIMITY] = "proximity", [IIO_TEMP] = "temp", [IIO_INCLI] = "incli", [IIO_ROT] = "rot", [IIO_ANGL] = "angl", [IIO_TIMESTAMP] = "timestamp", [IIO_CAPACITANCE] = "capacitance", }; static const char * const iio_ev_type_text[] = { [IIO_EV_TYPE_THRESH] = "thresh", [IIO_EV_TYPE_MAG] = "mag", [IIO_EV_TYPE_ROC] = "roc", [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive", [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive", }; static const char * const iio_ev_dir_text[] = { [IIO_EV_DIR_EITHER] = "either", [IIO_EV_DIR_RISING] = "rising", [IIO_EV_DIR_FALLING] = "falling" }; static const char * const iio_modifier_names[] = { [IIO_MOD_X] = "x", [IIO_MOD_Y] = "y", [IIO_MOD_Z] = "z", [IIO_MOD_LIGHT_BOTH] = "both", [IIO_MOD_LIGHT_IR] = "ir", }; static bool event_is_known(struct iio_event_data *event) { enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id); enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id); enum 
iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id); enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id); switch (type) { case IIO_VOLTAGE: case IIO_CURRENT: case IIO_POWER: case IIO_ACCEL: case IIO_ANGL_VEL: case IIO_MAGN: case IIO_LIGHT: case IIO_INTENSITY: case IIO_PROXIMITY: case IIO_TEMP: case IIO_INCLI: case IIO_ROT: case IIO_ANGL: case IIO_TIMESTAMP: case IIO_CAPACITANCE: break; default: return false; } switch (mod) { case IIO_NO_MOD: case IIO_MOD_X: case IIO_MOD_Y: case IIO_MOD_Z: case IIO_MOD_LIGHT_BOTH: case IIO_MOD_LIGHT_IR: break; default: return false; } switch (ev_type) { case IIO_EV_TYPE_THRESH: case IIO_EV_TYPE_MAG: case IIO_EV_TYPE_ROC: case IIO_EV_TYPE_THRESH_ADAPTIVE: case IIO_EV_TYPE_MAG_ADAPTIVE: break; default: return false; } switch (dir) { case IIO_EV_DIR_EITHER: case IIO_EV_DIR_RISING: case IIO_EV_DIR_FALLING: break; default: return false; } return true; } static void print_event(struct iio_event_data *event) { enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id); enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id); enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id); enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id); int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event->id); int chan2 = IIO_EVENT_CODE_EXTRACT_CHAN2(event->id); bool diff = IIO_EVENT_CODE_EXTRACT_DIFF(event->id); if (!event_is_known(event)) { printf("Unknown event: time: %lld, id: %llx\n", event->timestamp, event->id); return; } printf("Event: time: %lld, ", event->timestamp); if (mod != IIO_NO_MOD) { printf("type: %s(%s), ", iio_chan_type_name_spec[type], iio_modifier_names[mod]); } else { printf("type: %s, ", iio_chan_type_name_spec[type]); } if (diff && chan >= 0 && chan2 >= 0) printf("channel: %d-%d, ", chan, chan2); else if (chan >= 0) printf("channel: %d, ", chan); printf("evtype: %s, direction: %s\n", iio_ev_type_text[ev_type], iio_ev_dir_text[dir]); } int main(int argc, char 
**argv) { struct iio_event_data event; const char *device_name; char *chrdev_name; int ret; int dev_num; int fd, event_fd; if (argc <= 1) { printf("Usage: %s <device_name>\n", argv[0]); return -1; } device_name = argv[1]; dev_num = find_type_by_name(device_name, "iio:device"); if (dev_num >= 0) { printf("Found IIO device with name %s with device number %d\n", device_name, dev_num); ret = asprintf(&chrdev_name, "/dev/iio:device%d", dev_num); if (ret < 0) { ret = -ENOMEM; goto error_ret; } } else { /* If we can't find a IIO device by name assume device_name is a IIO chrdev */ chrdev_name = strdup(device_name); } fd = open(chrdev_name, 0); if (fd == -1) { fprintf(stdout, "Failed to open %s\n", chrdev_name); ret = -errno; goto error_free_chrdev_name; } ret = ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd); close(fd); if (ret == -1 || event_fd == -1) { fprintf(stdout, "Failed to retrieve event fd\n"); ret = -errno; goto error_free_chrdev_name; } while (true) { ret = read(event_fd, &event, sizeof(event)); if (ret == -1) { if (errno == EAGAIN) { printf("nothing available\n"); continue; } else { perror("Failed to read event from device"); ret = -errno; break; } } print_event(&event); } close(event_fd); error_free_chrdev_name: free(chrdev_name); error_ret: return ret; }
gpl-2.0
Butterfly-CM/android_kernel_htc_dlxub1
drivers/net/ethernet/amd/nmclan_cs.c
5137
45839
/* ---------------------------------------------------------------------------- Linux PCMCIA ethernet adapter driver for the New Media Ethernet LAN. nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao The Ethernet LAN uses the Advanced Micro Devices (AMD) Am79C940 Media Access Controller for Ethernet (MACE). It is essentially the Am2150 PCMCIA Ethernet card contained in the Am2150 Demo Kit. Written by Roger C. Pao <rpao@paonet.org> Copyright 1995 Roger C. Pao Linux 2.5 cleanups Copyright Red Hat 2003 This software may be used and distributed according to the terms of the GNU General Public License. Ported to Linux 1.3.* network driver environment by Matti Aarnio <mea@utu.fi> References Am2150 Technical Reference Manual, Revision 1.0, August 17, 1993 Am79C940 (MACE) Data Sheet, 1994 Am79C90 (C-LANCE) Data Sheet, 1994 Linux PCMCIA Programmer's Guide v1.17 /usr/src/linux/net/inet/dev.c, Linux kernel 1.2.8 Eric Mears, New Media Corporation Tom Pollard, New Media Corporation Dean Siasoyco, New Media Corporation Ken Lesniak, Silicon Graphics, Inc. <lesniak@boston.sgi.com> Donald Becker <becker@scyld.com> David Hinds <dahinds@users.sourceforge.net> The Linux client driver is based on the 3c589_cs.c client driver by David Hinds. The Linux network driver outline is based on the 3c589_cs.c driver, the 8390.c driver, and the example skeleton.c kernel code, which are by Donald Becker. The Am2150 network driver hardware interface code is based on the OS/9000 driver for the New Media Ethernet LAN by Eric Mears. Special thanks for testing and help in debugging this driver goes to Ken Lesniak. ------------------------------------------------------------------------------- Driver Notes and Issues ------------------------------------------------------------------------------- 1. Developed on a Dell 320SLi PCMCIA Card Services 2.6.2 Linux dell 1.2.10 #1 Thu Jun 29 20:23:41 PDT 1995 i386 2. 
rc.pcmcia may require loading pcmcia_core with io_speed=300: 'insmod pcmcia_core.o io_speed=300'. This will avoid problems with fast systems which causes rx_framecnt to return random values. 3. If hot extraction does not work for you, use 'ifconfig eth0 down' before extraction. 4. There is a bad slow-down problem in this driver. 5. Future: Multicast processing. In the meantime, do _not_ compile your kernel with multicast ip enabled. ------------------------------------------------------------------------------- History ------------------------------------------------------------------------------- Log: nmclan_cs.c,v * 2.5.75-ac1 2003/07/11 Alan Cox <alan@lxorguk.ukuu.org.uk> * Fixed hang on card eject as we probe it * Cleaned up to use new style locking. * * Revision 0.16 1995/07/01 06:42:17 rpao * Bug fix: nmclan_reset() called CardServices incorrectly. * * Revision 0.15 1995/05/24 08:09:47 rpao * Re-implement MULTI_TX dev->tbusy handling. * * Revision 0.14 1995/05/23 03:19:30 rpao * Added, in nmclan_config(), "tuple.Attributes = 0;". * Modified MACE ID check to ignore chip revision level. * Avoid tx_free_frames race condition between _start_xmit and _interrupt. * * Revision 0.13 1995/05/18 05:56:34 rpao * Statistics changes. * Bug fix: nmclan_reset did not enable TX and RX: call restore_multicast_list. * Bug fix: mace_interrupt checks ~MACE_IMR_DEFAULT. Fixes driver lockup. * * Revision 0.12 1995/05/14 00:12:23 rpao * Statistics overhaul. * 95/05/13 rpao V0.10a Bug fix: MACE statistics counters used wrong I/O ports. Bug fix: mace_interrupt() needed to allow statistics to be processed without RX or TX interrupts pending. 95/05/11 rpao V0.10 Multiple transmit request processing. Modified statistics to use MACE counters where possible. 95/05/10 rpao V0.09 Bug fix: Must use IO_DATA_PATH_WIDTH_AUTO. *Released 95/05/10 rpao V0.08 Bug fix: Make all non-exported functions private by using static keyword. Bug fix: Test IntrCnt _before_ reading MACE_IR. 
95/05/10 rpao V0.07 Statistics. 95/05/09 rpao V0.06 Fix rx_framecnt problem by addition of PCIC wait states. ---------------------------------------------------------------------------- */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "nmclan_cs" #define DRV_VERSION "0.16" /* ---------------------------------------------------------------------------- Conditional Compilation Options ---------------------------------------------------------------------------- */ #define MULTI_TX 0 #define RESET_ON_TIMEOUT 1 #define TX_INTERRUPTABLE 1 #define RESET_XILINX 0 /* ---------------------------------------------------------------------------- Include Files ---------------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/bitops.h> #include <pcmcia/cisreg.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <asm/uaccess.h> #include <asm/io.h> /* ---------------------------------------------------------------------------- Defines ---------------------------------------------------------------------------- */ #define MACE_LADRF_LEN 8 /* 8 bytes in Logical Address Filter */ /* Loop Control Defines */ #define MACE_MAX_IR_ITERATIONS 10 #define MACE_MAX_RX_ITERATIONS 12 /* TBD: Dean brought this up, and I assumed the hardware would handle it: If MACE_MAX_RX_ITERATIONS is > 1, rx_framecnt may still be non-zero when the isr exits. We may not get another interrupt to process the remaining packets for some time. 
*/ /* The Am2150 has a Xilinx XC3042 field programmable gate array (FPGA) which manages the interface between the MACE and the PCMCIA bus. It also includes buffer management for the 32K x 8 SRAM to control up to four transmit and 12 receive frames at a time. */ #define AM2150_MAX_TX_FRAMES 4 #define AM2150_MAX_RX_FRAMES 12 /* Am2150 Ethernet Card I/O Mapping */ #define AM2150_RCV 0x00 #define AM2150_XMT 0x04 #define AM2150_XMT_SKIP 0x09 #define AM2150_RCV_NEXT 0x0A #define AM2150_RCV_FRAME_COUNT 0x0B #define AM2150_MACE_BANK 0x0C #define AM2150_MACE_BASE 0x10 /* MACE Registers */ #define MACE_RCVFIFO 0 #define MACE_XMTFIFO 1 #define MACE_XMTFC 2 #define MACE_XMTFS 3 #define MACE_XMTRC 4 #define MACE_RCVFC 5 #define MACE_RCVFS 6 #define MACE_FIFOFC 7 #define MACE_IR 8 #define MACE_IMR 9 #define MACE_PR 10 #define MACE_BIUCC 11 #define MACE_FIFOCC 12 #define MACE_MACCC 13 #define MACE_PLSCC 14 #define MACE_PHYCC 15 #define MACE_CHIPIDL 16 #define MACE_CHIPIDH 17 #define MACE_IAC 18 /* Reserved */ #define MACE_LADRF 20 #define MACE_PADR 21 /* Reserved */ /* Reserved */ #define MACE_MPC 24 /* Reserved */ #define MACE_RNTPC 26 #define MACE_RCVCC 27 /* Reserved */ #define MACE_UTR 29 #define MACE_RTR1 30 #define MACE_RTR2 31 /* MACE Bit Masks */ #define MACE_XMTRC_EXDEF 0x80 #define MACE_XMTRC_XMTRC 0x0F #define MACE_XMTFS_XMTSV 0x80 #define MACE_XMTFS_UFLO 0x40 #define MACE_XMTFS_LCOL 0x20 #define MACE_XMTFS_MORE 0x10 #define MACE_XMTFS_ONE 0x08 #define MACE_XMTFS_DEFER 0x04 #define MACE_XMTFS_LCAR 0x02 #define MACE_XMTFS_RTRY 0x01 #define MACE_RCVFS_RCVSTS 0xF000 #define MACE_RCVFS_OFLO 0x8000 #define MACE_RCVFS_CLSN 0x4000 #define MACE_RCVFS_FRAM 0x2000 #define MACE_RCVFS_FCS 0x1000 #define MACE_FIFOFC_RCVFC 0xF0 #define MACE_FIFOFC_XMTFC 0x0F #define MACE_IR_JAB 0x80 #define MACE_IR_BABL 0x40 #define MACE_IR_CERR 0x20 #define MACE_IR_RCVCCO 0x10 #define MACE_IR_RNTPCO 0x08 #define MACE_IR_MPCO 0x04 #define MACE_IR_RCVINT 0x02 #define MACE_IR_XMTINT 0x01 #define 
MACE_MACCC_PROM 0x80 #define MACE_MACCC_DXMT2PD 0x40 #define MACE_MACCC_EMBA 0x20 #define MACE_MACCC_RESERVED 0x10 #define MACE_MACCC_DRCVPA 0x08 #define MACE_MACCC_DRCVBC 0x04 #define MACE_MACCC_ENXMT 0x02 #define MACE_MACCC_ENRCV 0x01 #define MACE_PHYCC_LNKFL 0x80 #define MACE_PHYCC_DLNKTST 0x40 #define MACE_PHYCC_REVPOL 0x20 #define MACE_PHYCC_DAPC 0x10 #define MACE_PHYCC_LRT 0x08 #define MACE_PHYCC_ASEL 0x04 #define MACE_PHYCC_RWAKE 0x02 #define MACE_PHYCC_AWAKE 0x01 #define MACE_IAC_ADDRCHG 0x80 #define MACE_IAC_PHYADDR 0x04 #define MACE_IAC_LOGADDR 0x02 #define MACE_UTR_RTRE 0x80 #define MACE_UTR_RTRD 0x40 #define MACE_UTR_RPA 0x20 #define MACE_UTR_FCOLL 0x10 #define MACE_UTR_RCVFCSE 0x08 #define MACE_UTR_LOOP_INCL_MENDEC 0x06 #define MACE_UTR_LOOP_NO_MENDEC 0x04 #define MACE_UTR_LOOP_EXTERNAL 0x02 #define MACE_UTR_LOOP_NONE 0x00 #define MACE_UTR_RESERVED 0x01 /* Switch MACE register bank (only 0 and 1 are valid) */ #define MACEBANK(win_num) outb((win_num), ioaddr + AM2150_MACE_BANK) #define MACE_IMR_DEFAULT \ (0xFF - \ ( \ MACE_IR_CERR | \ MACE_IR_RCVCCO | \ MACE_IR_RNTPCO | \ MACE_IR_MPCO | \ MACE_IR_RCVINT | \ MACE_IR_XMTINT \ ) \ ) #undef MACE_IMR_DEFAULT #define MACE_IMR_DEFAULT 0x00 /* New statistics handling: grab everything */ #define TX_TIMEOUT ((400*HZ)/1000) /* ---------------------------------------------------------------------------- Type Definitions ---------------------------------------------------------------------------- */ typedef struct _mace_statistics { /* MACE_XMTFS */ int xmtsv; int uflo; int lcol; int more; int one; int defer; int lcar; int rtry; /* MACE_XMTRC */ int exdef; int xmtrc; /* RFS1--Receive Status (RCVSTS) */ int oflo; int clsn; int fram; int fcs; /* RFS2--Runt Packet Count (RNTPC) */ int rfs_rntpc; /* RFS3--Receive Collision Count (RCVCC) */ int rfs_rcvcc; /* MACE_IR */ int jab; int babl; int cerr; int rcvcco; int rntpco; int mpco; /* MACE_MPC */ int mpc; /* MACE_RNTPC */ int rntpc; /* MACE_RCVCC */ int rcvcc; } 
mace_statistics; typedef struct _mace_private { struct pcmcia_device *p_dev; struct net_device_stats linux_stats; /* Linux statistics counters */ mace_statistics mace_stats; /* MACE chip statistics counters */ /* restore_multicast_list() state variables */ int multicast_ladrf[MACE_LADRF_LEN]; /* Logical address filter */ int multicast_num_addrs; char tx_free_frames; /* Number of free transmit frame buffers */ char tx_irq_disabled; /* MACE TX interrupt disabled */ spinlock_t bank_lock; /* Must be held if you step off bank 0 */ } mace_private; /* ---------------------------------------------------------------------------- Private Global Variables ---------------------------------------------------------------------------- */ static const char *if_names[]={ "Auto", "10baseT", "BNC", }; /* ---------------------------------------------------------------------------- Parameters These are the parameters that can be set during loading with 'insmod'. ---------------------------------------------------------------------------- */ MODULE_DESCRIPTION("New Media PCMCIA ethernet driver"); MODULE_LICENSE("GPL"); #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) /* 0=auto, 1=10baseT, 2 = 10base2, default=auto */ INT_MODULE_PARM(if_port, 0); /* ---------------------------------------------------------------------------- Function Prototypes ---------------------------------------------------------------------------- */ static int nmclan_config(struct pcmcia_device *link); static void nmclan_release(struct pcmcia_device *link); static void nmclan_reset(struct net_device *dev); static int mace_config(struct net_device *dev, struct ifmap *map); static int mace_open(struct net_device *dev); static int mace_close(struct net_device *dev); static netdev_tx_t mace_start_xmit(struct sk_buff *skb, struct net_device *dev); static void mace_tx_timeout(struct net_device *dev); static irqreturn_t mace_interrupt(int irq, void *dev_id); static struct net_device_stats 
*mace_get_stats(struct net_device *dev); static int mace_rx(struct net_device *dev, unsigned char RxCnt); static void restore_multicast_list(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static const struct ethtool_ops netdev_ethtool_ops; static void nmclan_detach(struct pcmcia_device *p_dev); static const struct net_device_ops mace_netdev_ops = { .ndo_open = mace_open, .ndo_stop = mace_close, .ndo_start_xmit = mace_start_xmit, .ndo_tx_timeout = mace_tx_timeout, .ndo_set_config = mace_config, .ndo_get_stats = mace_get_stats, .ndo_set_rx_mode = set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int nmclan_probe(struct pcmcia_device *link) { mace_private *lp; struct net_device *dev; dev_dbg(&link->dev, "nmclan_attach()\n"); /* Create new ethernet device */ dev = alloc_etherdev(sizeof(mace_private)); if (!dev) return -ENOMEM; lp = netdev_priv(dev); lp->p_dev = link; link->priv = dev; spin_lock_init(&lp->bank_lock); link->resource[0]->end = 32; link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; link->config_flags |= CONF_ENABLE_IRQ; link->config_index = 1; link->config_regs = PRESENT_OPTION; lp->tx_free_frames=AM2150_MAX_TX_FRAMES; dev->netdev_ops = &mace_netdev_ops; SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); dev->watchdog_timeo = TX_TIMEOUT; return nmclan_config(link); } /* nmclan_attach */ static void nmclan_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; dev_dbg(&link->dev, "nmclan_detach\n"); unregister_netdev(dev); nmclan_release(link); free_netdev(dev); } /* nmclan_detach */ /* ---------------------------------------------------------------------------- mace_read Reads a MACE register. This is bank independent; however, the caller must ensure that this call is not interruptable. We are assuming that during normal operation, the MACE is always in bank 0. 
---------------------------------------------------------------------------- */ static int mace_read(mace_private *lp, unsigned int ioaddr, int reg) { int data = 0xFF; unsigned long flags; switch (reg >> 4) { case 0: /* register 0-15 */ data = inb(ioaddr + AM2150_MACE_BASE + reg); break; case 1: /* register 16-31 */ spin_lock_irqsave(&lp->bank_lock, flags); MACEBANK(1); data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F)); MACEBANK(0); spin_unlock_irqrestore(&lp->bank_lock, flags); break; } return data & 0xFF; } /* mace_read */ /* ---------------------------------------------------------------------------- mace_write Writes to a MACE register. This is bank independent; however, the caller must ensure that this call is not interruptable. We are assuming that during normal operation, the MACE is always in bank 0. ---------------------------------------------------------------------------- */ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg, int data) { unsigned long flags; switch (reg >> 4) { case 0: /* register 0-15 */ outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg); break; case 1: /* register 16-31 */ spin_lock_irqsave(&lp->bank_lock, flags); MACEBANK(1); outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F)); MACEBANK(0); spin_unlock_irqrestore(&lp->bank_lock, flags); break; } } /* mace_write */ /* ---------------------------------------------------------------------------- mace_init Resets the MACE chip. 
---------------------------------------------------------------------------- */ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr) { int i; int ct = 0; /* MACE Software reset */ mace_write(lp, ioaddr, MACE_BIUCC, 1); while (mace_read(lp, ioaddr, MACE_BIUCC) & 0x01) { /* Wait for reset bit to be cleared automatically after <= 200ns */; if(++ct > 500) { pr_err("reset failed, card removed?\n"); return -1; } udelay(1); } mace_write(lp, ioaddr, MACE_BIUCC, 0); /* The Am2150 requires that the MACE FIFOs operate in burst mode. */ mace_write(lp, ioaddr, MACE_FIFOCC, 0x0F); mace_write(lp,ioaddr, MACE_RCVFC, 0); /* Disable Auto Strip Receive */ mace_write(lp, ioaddr, MACE_IMR, 0xFF); /* Disable all interrupts until _open */ /* * Bit 2-1 PORTSEL[1-0] Port Select. * 00 AUI/10Base-2 * 01 10Base-T * 10 DAI Port (reserved in Am2150) * 11 GPSI * For this card, only the first two are valid. * So, PLSCC should be set to * 0x00 for 10Base-2 * 0x02 for 10Base-T * Or just set ASEL in PHYCC below! */ switch (if_port) { case 1: mace_write(lp, ioaddr, MACE_PLSCC, 0x02); break; case 2: mace_write(lp, ioaddr, MACE_PLSCC, 0x00); break; default: mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4); /* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden, and the MACE device will automatically select the operating media interface port. */ break; } mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_PHYADDR); /* Poll ADDRCHG bit */ ct = 0; while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG) { if(++ ct > 500) { pr_err("ADDRCHG timeout, card removed?\n"); return -1; } } /* Set PADR register */ for (i = 0; i < ETH_ALEN; i++) mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]); /* MAC Configuration Control Register should be written last */ /* Let set_multicast_list set this. 
*/ /* mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); */ mace_write(lp, ioaddr, MACE_MACCC, 0x00); return 0; } /* mace_init */ static int nmclan_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; mace_private *lp = netdev_priv(dev); u8 *buf; size_t len; int i, ret; unsigned int ioaddr; dev_dbg(&link->dev, "nmclan_config\n"); link->io_lines = 5; ret = pcmcia_request_io(link); if (ret) goto failed; ret = pcmcia_request_exclusive_irq(link, mace_interrupt); if (ret) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; dev->irq = link->irq; dev->base_addr = link->resource[0]->start; ioaddr = dev->base_addr; /* Read the ethernet address from the CIS. */ len = pcmcia_get_tuple(link, 0x80, &buf); if (!buf || len < ETH_ALEN) { kfree(buf); goto failed; } memcpy(dev->dev_addr, buf, ETH_ALEN); kfree(buf); /* Verify configuration by reading the MACE ID. */ { char sig[2]; sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL); sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH); if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) { dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n", sig[0], sig[1]); } else { pr_notice("mace id not found: %x %x should be 0x40 0x?9\n", sig[0], sig[1]); return -ENODEV; } } if(mace_init(lp, ioaddr, dev->dev_addr) == -1) goto failed; /* The if_port symbol can be set when the module is loaded */ if (if_port <= 2) dev->if_port = if_port; else pr_notice("invalid if_port requested\n"); SET_NETDEV_DEV(dev, &link->dev); i = register_netdev(dev); if (i != 0) { pr_notice("register_netdev() failed\n"); goto failed; } netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n", dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr); return 0; failed: nmclan_release(link); return -ENODEV; } /* nmclan_config */ static void nmclan_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "nmclan_release\n"); pcmcia_disable_device(link); } static int nmclan_suspend(struct pcmcia_device 
*link) { struct net_device *dev = link->priv; if (link->open) netif_device_detach(dev); return 0; } static int nmclan_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) { nmclan_reset(dev); netif_device_attach(dev); } return 0; } /* ---------------------------------------------------------------------------- nmclan_reset Reset and restore all of the Xilinx and MACE registers. ---------------------------------------------------------------------------- */ static void nmclan_reset(struct net_device *dev) { mace_private *lp = netdev_priv(dev); #if RESET_XILINX struct pcmcia_device *link = &lp->link; u8 OrigCorValue; /* Save original COR value */ pcmcia_read_config_byte(link, CISREG_COR, &OrigCorValue); /* Reset Xilinx */ dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%x, resetting...\n", OrigCorValue); pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET); /* Need to wait for 20 ms for PCMCIA to finish reset. */ /* Restore original COR configuration index */ pcmcia_write_config_byte(link, CISREG_COR, (COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK))); /* Xilinx is now completely reset along with the MACE chip. */ lp->tx_free_frames=AM2150_MAX_TX_FRAMES; #endif /* #if RESET_XILINX */ /* Xilinx is now completely reset along with the MACE chip. */ lp->tx_free_frames=AM2150_MAX_TX_FRAMES; /* Reinitialize the MACE chip for operation. */ mace_init(lp, dev->base_addr, dev->dev_addr); mace_write(lp, dev->base_addr, MACE_IMR, MACE_IMR_DEFAULT); /* Restore the multicast list and enable TX and RX. */ restore_multicast_list(dev); } /* nmclan_reset */ /* ---------------------------------------------------------------------------- mace_config [Someone tell me what this is supposed to do? Is if_port a defined standard? If so, there should be defines to indicate 1=10Base-T, 2=10Base-2, etc. including limited automatic detection.] 
---------------------------------------------------------------------------- */ static int mace_config(struct net_device *dev, struct ifmap *map) { if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { if (map->port <= 2) { dev->if_port = map->port; netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); } else return -EINVAL; } return 0; } /* mace_config */ /* ---------------------------------------------------------------------------- mace_open Open device driver. ---------------------------------------------------------------------------- */ static int mace_open(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; mace_private *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; if (!pcmcia_dev_present(link)) return -ENODEV; link->open++; MACEBANK(0); netif_start_queue(dev); nmclan_reset(dev); return 0; /* Always succeed */ } /* mace_open */ /* ---------------------------------------------------------------------------- mace_close Closes device driver. ---------------------------------------------------------------------------- */ static int mace_close(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; mace_private *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); /* Mask off all interrupts from the MACE chip. 
*/ outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR); link->open--; netif_stop_queue(dev); return 0; } /* mace_close */ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, }; /* ---------------------------------------------------------------------------- mace_start_xmit This routine begins the packet transmit function. When completed, it will generate a transmit interrupt. According to /usr/src/linux/net/inet/dev.c, if _start_xmit returns 0, the "packet is now solely the responsibility of the driver." If _start_xmit returns non-zero, the "transmission failed, put skb back into a list." ---------------------------------------------------------------------------- */ static void mace_tx_timeout(struct net_device *dev) { mace_private *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; netdev_notice(dev, "transmit timed out -- "); #if RESET_ON_TIMEOUT pr_cont("resetting card\n"); pcmcia_reset_card(link->socket); #else /* #if RESET_ON_TIMEOUT */ pr_cont("NOT resetting card\n"); #endif /* #if RESET_ON_TIMEOUT */ dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); } static netdev_tx_t mace_start_xmit(struct sk_buff *skb, struct net_device *dev) { mace_private *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; netif_stop_queue(dev); pr_debug("%s: mace_start_xmit(length = %ld) called.\n", dev->name, (long)skb->len); #if (!TX_INTERRUPTABLE) /* Disable MACE TX interrupts. */ outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT, ioaddr + AM2150_MACE_BASE + MACE_IMR); lp->tx_irq_disabled=1; #endif /* #if (!TX_INTERRUPTABLE) */ { /* This block must not be interrupted by another transmit request! 
mace_tx_timeout will take care of timer-based retransmissions from the upper layers. The interrupt handler is guaranteed never to service a transmit interrupt while we are in here. */ lp->linux_stats.tx_bytes += skb->len; lp->tx_free_frames--; /* WARNING: Write the _exact_ number of bytes written in the header! */ /* Put out the word header [must be an outw()] . . . */ outw(skb->len, ioaddr + AM2150_XMT); /* . . . and the packet [may be any combination of outw() and outb()] */ outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1); if (skb->len & 1) { /* Odd byte transfer */ outb(skb->data[skb->len-1], ioaddr + AM2150_XMT); } #if MULTI_TX if (lp->tx_free_frames > 0) netif_start_queue(dev); #endif /* #if MULTI_TX */ } #if (!TX_INTERRUPTABLE) /* Re-enable MACE TX interrupts. */ lp->tx_irq_disabled=0; outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR); #endif /* #if (!TX_INTERRUPTABLE) */ dev_kfree_skb(skb); return NETDEV_TX_OK; } /* mace_start_xmit */ /* ---------------------------------------------------------------------------- mace_interrupt The interrupt handler. ---------------------------------------------------------------------------- */ static irqreturn_t mace_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; mace_private *lp = netdev_priv(dev); unsigned int ioaddr; int status; int IntrCnt = MACE_MAX_IR_ITERATIONS; if (dev == NULL) { pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n", irq); return IRQ_NONE; } ioaddr = dev->base_addr; if (lp->tx_irq_disabled) { const char *msg; if (lp->tx_irq_disabled) msg = "Interrupt with tx_irq_disabled"; else msg = "Re-entering the interrupt handler"; netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n", msg, inb(ioaddr + AM2150_MACE_BASE + MACE_IR), inb(ioaddr + AM2150_MACE_BASE + MACE_IMR)); /* WARNING: MACE_IR has been read! 
*/ return IRQ_NONE; } if (!netif_device_present(dev)) { netdev_dbg(dev, "interrupt from dead card\n"); return IRQ_NONE; } do { /* WARNING: MACE_IR is a READ/CLEAR port! */ status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); if (status & MACE_IR_RCVINT) { mace_rx(dev, MACE_MAX_RX_ITERATIONS); } if (status & MACE_IR_XMTINT) { unsigned char fifofc; unsigned char xmtrc; unsigned char xmtfs; fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC); if ((fifofc & MACE_FIFOFC_XMTFC)==0) { lp->linux_stats.tx_errors++; outb(0xFF, ioaddr + AM2150_XMT_SKIP); } /* Transmit Retry Count (XMTRC, reg 4) */ xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC); if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++; lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC); if ( (xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS)) & MACE_XMTFS_XMTSV /* Transmit Status Valid */ ) { lp->mace_stats.xmtsv++; if (xmtfs & ~MACE_XMTFS_XMTSV) { if (xmtfs & MACE_XMTFS_UFLO) { /* Underflow. Indicates that the Transmit FIFO emptied before the end of frame was reached. */ lp->mace_stats.uflo++; } if (xmtfs & MACE_XMTFS_LCOL) { /* Late Collision */ lp->mace_stats.lcol++; } if (xmtfs & MACE_XMTFS_MORE) { /* MORE than one retry was needed */ lp->mace_stats.more++; } if (xmtfs & MACE_XMTFS_ONE) { /* Exactly ONE retry occurred */ lp->mace_stats.one++; } if (xmtfs & MACE_XMTFS_DEFER) { /* Transmission was defered */ lp->mace_stats.defer++; } if (xmtfs & MACE_XMTFS_LCAR) { /* Loss of carrier */ lp->mace_stats.lcar++; } if (xmtfs & MACE_XMTFS_RTRY) { /* Retry error: transmit aborted after 16 attempts */ lp->mace_stats.rtry++; } } /* if (xmtfs & ~MACE_XMTFS_XMTSV) */ } /* if (xmtfs & MACE_XMTFS_XMTSV) */ lp->linux_stats.tx_packets++; lp->tx_free_frames++; netif_wake_queue(dev); } /* if (status & MACE_IR_XMTINT) */ if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) { if (status & MACE_IR_JAB) { /* Jabber Error. 
Excessive transmit duration (20-150ms). */ lp->mace_stats.jab++; } if (status & MACE_IR_BABL) { /* Babble Error. >1518 bytes transmitted. */ lp->mace_stats.babl++; } if (status & MACE_IR_CERR) { /* Collision Error. CERR indicates the absence of the Signal Quality Error Test message after a packet transmission. */ lp->mace_stats.cerr++; } if (status & MACE_IR_RCVCCO) { /* Receive Collision Count Overflow; */ lp->mace_stats.rcvcco++; } if (status & MACE_IR_RNTPCO) { /* Runt Packet Count Overflow */ lp->mace_stats.rntpco++; } if (status & MACE_IR_MPCO) { /* Missed Packet Count Overflow */ lp->mace_stats.mpco++; } } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */ } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt)); return IRQ_HANDLED; } /* mace_interrupt */ /* ---------------------------------------------------------------------------- mace_rx Receives packets. ---------------------------------------------------------------------------- */ static int mace_rx(struct net_device *dev, unsigned char RxCnt) { mace_private *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; unsigned char rx_framecnt; unsigned short rx_status; while ( ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) && (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */ (RxCnt--) ) { rx_status = inw(ioaddr + AM2150_RCV); pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status" " 0x%X.\n", dev->name, rx_framecnt, rx_status); if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. 
*/ lp->linux_stats.rx_errors++; if (rx_status & MACE_RCVFS_OFLO) { lp->mace_stats.oflo++; } if (rx_status & MACE_RCVFS_CLSN) { lp->mace_stats.clsn++; } if (rx_status & MACE_RCVFS_FRAM) { lp->mace_stats.fram++; } if (rx_status & MACE_RCVFS_FCS) { lp->mace_stats.fcs++; } } else { short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4; /* Auto Strip is off, always subtract 4 */ struct sk_buff *skb; lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV); /* runt packet count */ lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV); /* rcv collision count */ pr_debug(" receiving packet size 0x%X rx_status" " 0x%X.\n", pkt_len, rx_status); skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb != NULL) { skb_reserve(skb, 2); insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); if (pkt_len & 1) *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ lp->linux_stats.rx_packets++; lp->linux_stats.rx_bytes += pkt_len; outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ continue; } else { pr_debug("%s: couldn't allocate a sk_buff of size" " %d.\n", dev->name, pkt_len); lp->linux_stats.rx_dropped++; } } outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ } /* while */ return 0; } /* mace_rx */ /* ---------------------------------------------------------------------------- pr_linux_stats ---------------------------------------------------------------------------- */ static void pr_linux_stats(struct net_device_stats *pstats) { pr_debug("pr_linux_stats\n"); pr_debug(" rx_packets=%-7ld tx_packets=%ld\n", (long)pstats->rx_packets, (long)pstats->tx_packets); pr_debug(" rx_errors=%-7ld tx_errors=%ld\n", (long)pstats->rx_errors, (long)pstats->tx_errors); pr_debug(" rx_dropped=%-7ld tx_dropped=%ld\n", (long)pstats->rx_dropped, (long)pstats->tx_dropped); pr_debug(" multicast=%-7ld collisions=%ld\n", (long)pstats->multicast, (long)pstats->collisions); 
pr_debug(" rx_length_errors=%-7ld rx_over_errors=%ld\n", (long)pstats->rx_length_errors, (long)pstats->rx_over_errors); pr_debug(" rx_crc_errors=%-7ld rx_frame_errors=%ld\n", (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors); pr_debug(" rx_fifo_errors=%-7ld rx_missed_errors=%ld\n", (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors); pr_debug(" tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n", (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors); pr_debug(" tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n", (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors); pr_debug(" tx_window_errors=%ld\n", (long)pstats->tx_window_errors); } /* pr_linux_stats */ /* ---------------------------------------------------------------------------- pr_mace_stats ---------------------------------------------------------------------------- */ static void pr_mace_stats(mace_statistics *pstats) { pr_debug("pr_mace_stats\n"); pr_debug(" xmtsv=%-7d uflo=%d\n", pstats->xmtsv, pstats->uflo); pr_debug(" lcol=%-7d more=%d\n", pstats->lcol, pstats->more); pr_debug(" one=%-7d defer=%d\n", pstats->one, pstats->defer); pr_debug(" lcar=%-7d rtry=%d\n", pstats->lcar, pstats->rtry); /* MACE_XMTRC */ pr_debug(" exdef=%-7d xmtrc=%d\n", pstats->exdef, pstats->xmtrc); /* RFS1--Receive Status (RCVSTS) */ pr_debug(" oflo=%-7d clsn=%d\n", pstats->oflo, pstats->clsn); pr_debug(" fram=%-7d fcs=%d\n", pstats->fram, pstats->fcs); /* RFS2--Runt Packet Count (RNTPC) */ /* RFS3--Receive Collision Count (RCVCC) */ pr_debug(" rfs_rntpc=%-7d rfs_rcvcc=%d\n", pstats->rfs_rntpc, pstats->rfs_rcvcc); /* MACE_IR */ pr_debug(" jab=%-7d babl=%d\n", pstats->jab, pstats->babl); pr_debug(" cerr=%-7d rcvcco=%d\n", pstats->cerr, pstats->rcvcco); pr_debug(" rntpco=%-7d mpco=%d\n", pstats->rntpco, pstats->mpco); /* MACE_MPC */ pr_debug(" mpc=%d\n", pstats->mpc); /* MACE_RNTPC */ pr_debug(" rntpc=%d\n", pstats->rntpc); /* MACE_RCVCC */ pr_debug(" rcvcc=%d\n", pstats->rcvcc); } /* 
pr_mace_stats */ /* ---------------------------------------------------------------------------- update_stats Update statistics. We change to register window 1, so this should be run single-threaded if the device is active. This is expected to be a rare operation, and it's simpler for the rest of the driver to assume that window 0 is always valid rather than use a special window-state variable. oflo & uflo should _never_ occur since it would mean the Xilinx was not able to transfer data between the MACE FIFO and the card's SRAM fast enough. If this happens, something is seriously wrong with the hardware. ---------------------------------------------------------------------------- */ static void update_stats(unsigned int ioaddr, struct net_device *dev) { mace_private *lp = netdev_priv(dev); lp->mace_stats.rcvcc += mace_read(lp, ioaddr, MACE_RCVCC); lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC); lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC); /* At this point, mace_stats is fully updated for this call. We may now update the linux_stats. */ /* The MACE has no equivalent for linux_stats field which are commented out. */ /* lp->linux_stats.multicast; */ lp->linux_stats.collisions = lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc; /* Collision: The MACE may retry sending a packet 15 times before giving up. The retry count is in XMTRC. Does each retry constitute a collision? If so, why doesn't the RCVCC record these collisions? 
*/ /* detailed rx_errors: */ lp->linux_stats.rx_length_errors = lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc; /* lp->linux_stats.rx_over_errors */ lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs; lp->linux_stats.rx_frame_errors = lp->mace_stats.fram; lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo; lp->linux_stats.rx_missed_errors = lp->mace_stats.mpco * 256 + lp->mace_stats.mpc; /* detailed tx_errors */ lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry; lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar; /* LCAR usually results from bad cabling. */ lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo; lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr; /* lp->linux_stats.tx_window_errors; */ } /* update_stats */ /* ---------------------------------------------------------------------------- mace_get_stats Gathers ethernet statistics from the MACE chip. ---------------------------------------------------------------------------- */ static struct net_device_stats *mace_get_stats(struct net_device *dev) { mace_private *lp = netdev_priv(dev); update_stats(dev->base_addr, dev); pr_debug("%s: updating the statistics.\n", dev->name); pr_linux_stats(&lp->linux_stats); pr_mace_stats(&lp->mace_stats); return &lp->linux_stats; } /* net_device_stats */ /* ---------------------------------------------------------------------------- updateCRC Modified from Am79C90 data sheet. ---------------------------------------------------------------------------- */ #ifdef BROKEN_MULTICAST static void updateCRC(int *CRC, int bit) { static const int poly[]={ 1,1,1,0, 1,1,0,1, 1,0,1,1, 1,0,0,0, 1,0,0,0, 0,0,1,1, 0,0,1,0, 0,0,0,0 }; /* CRC polynomial. poly[n] = coefficient of the x**n term of the CRC generator polynomial. */ int j; /* shift CRC and control bit (CRC[32]) */ for (j = 32; j > 0; j--) CRC[j] = CRC[j-1]; CRC[0] = 0; /* If bit XOR(control bit) = 1, set CRC = CRC XOR polynomial. 
*/ if (bit ^ CRC[32]) for (j = 0; j < 32; j++) CRC[j] ^= poly[j]; } /* updateCRC */ /* ---------------------------------------------------------------------------- BuildLAF Build logical address filter. Modified from Am79C90 data sheet. Input ladrf: logical address filter (contents initialized to 0) adr: ethernet address ---------------------------------------------------------------------------- */ static void BuildLAF(int *ladrf, int *adr) { int CRC[33]={1}; /* CRC register, 1 word/bit + extra control bit */ int i, byte; /* temporary array indices */ int hashcode; /* the output object */ CRC[32]=0; for (byte = 0; byte < 6; byte++) for (i = 0; i < 8; i++) updateCRC(CRC, (adr[byte] >> i) & 1); hashcode = 0; for (i = 0; i < 6; i++) hashcode = (hashcode << 1) + CRC[i]; byte = hashcode >> 3; ladrf[byte] |= (1 << (hashcode & 7)); #ifdef PCMCIA_DEBUG if (0) printk(KERN_DEBUG " adr =%pM\n", adr); printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode); for (i = 0; i < 8; i++) pr_cont(" %02X", ladrf[i]); pr_cont("\n"); #endif } /* BuildLAF */ /* ---------------------------------------------------------------------------- restore_multicast_list Restores the multicast filter for MACE chip to the last set_multicast_list() call. 
Input multicast_num_addrs multicast_ladrf[] ---------------------------------------------------------------------------- */ static void restore_multicast_list(struct net_device *dev) { mace_private *lp = netdev_priv(dev); int num_addrs = lp->multicast_num_addrs; int *ladrf = lp->multicast_ladrf; unsigned int ioaddr = dev->base_addr; int i; pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name, num_addrs); if (num_addrs > 0) { pr_debug("Attempt to restore multicast list detected.\n"); mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR); /* Poll ADDRCHG bit */ while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG) ; /* Set LADRF register */ for (i = 0; i < MACE_LADRF_LEN; i++) mace_write(lp, ioaddr, MACE_LADRF, ladrf[i]); mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_RCVFCSE | MACE_UTR_LOOP_EXTERNAL); mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); } else if (num_addrs < 0) { /* Promiscuous mode: receive all packets */ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV ); } else { /* Normal mode */ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); } } /* restore_multicast_list */ /* ---------------------------------------------------------------------------- set_multicast_list Set or clear the multicast filter for this adaptor. Input num_addrs == -1 Promiscuous mode, receive all packets num_addrs == 0 Normal mode, clear multicast list num_addrs > 0 Multicast mode, receive normal and MC packets, and do best-effort filtering. 
Output multicast_num_addrs multicast_ladrf[] ---------------------------------------------------------------------------- */ static void set_multicast_list(struct net_device *dev) { mace_private *lp = netdev_priv(dev); int adr[ETH_ALEN] = {0}; /* Ethernet address */ struct netdev_hw_addr *ha; #ifdef PCMCIA_DEBUG { static int old; if (netdev_mc_count(dev) != old) { old = netdev_mc_count(dev); pr_debug("%s: setting Rx mode to %d addresses.\n", dev->name, old); } } #endif /* Set multicast_num_addrs. */ lp->multicast_num_addrs = netdev_mc_count(dev); /* Set multicast_ladrf. */ if (num_addrs > 0) { /* Calculate multicast logical address filter */ memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN); netdev_for_each_mc_addr(ha, dev) { memcpy(adr, ha->addr, ETH_ALEN); BuildLAF(lp->multicast_ladrf, adr); } } restore_multicast_list(dev); } /* set_multicast_list */ #endif /* BROKEN_MULTICAST */ static void restore_multicast_list(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; mace_private *lp = netdev_priv(dev); pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name, lp->multicast_num_addrs); if (dev->flags & IFF_PROMISC) { /* Promiscuous mode: receive all packets */ mace_write(lp,ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV ); } else { /* Normal mode */ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); } } /* restore_multicast_list */ static void set_multicast_list(struct net_device *dev) { mace_private *lp = netdev_priv(dev); #ifdef PCMCIA_DEBUG { static int old; if (netdev_mc_count(dev) != old) { old = netdev_mc_count(dev); pr_debug("%s: setting Rx mode to %d addresses.\n", dev->name, old); } } #endif lp->multicast_num_addrs = netdev_mc_count(dev); restore_multicast_list(dev); } /* set_multicast_list */ static const struct pcmcia_device_id nmclan_ids[] = { PCMCIA_DEVICE_PROD_ID12("New 
Media Corporation", "Ethernet", 0x085a850b, 0x00b2e941), PCMCIA_DEVICE_PROD_ID12("Portable Add-ons", "Ethernet+", 0xebf1d60, 0xad673aaf), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, nmclan_ids); static struct pcmcia_driver nmclan_cs_driver = { .owner = THIS_MODULE, .name = "nmclan_cs", .probe = nmclan_probe, .remove = nmclan_detach, .id_table = nmclan_ids, .suspend = nmclan_suspend, .resume = nmclan_resume, }; static int __init init_nmclan_cs(void) { return pcmcia_register_driver(&nmclan_cs_driver); } static void __exit exit_nmclan_cs(void) { pcmcia_unregister_driver(&nmclan_cs_driver); } module_init(init_nmclan_cs); module_exit(exit_nmclan_cs);
gpl-2.0
geggio84/kernel-beaglebone
drivers/net/wireless/rtlwifi/rtl8192de/led.c
9489
4571
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../pci.h" #include "reg.h" #include "led.h" static void _rtl92ce_init_led(struct ieee80211_hw *hw, struct rtl_led *pled, enum rtl_led_pin ledpin) { pled->hw = hw; pled->ledpin = ledpin; pled->ledon = false; } void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) { u8 ledcfg; struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin); switch (pled->ledpin) { case LED_PIN_GPIO0: break; case LED_PIN_LED0: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); if ((rtlpriv->efuse.eeprom_did == 0x8176) || (rtlpriv->efuse.eeprom_did == 0x8193)) /* BIT7 of REG_LEDCFG2 should be set to * make sure we could emit the led2. 
*/ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) | BIT(7) | BIT(5) | BIT(6)); else rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) | BIT(7) | BIT(5)); break; case LED_PIN_LED1: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5)); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } pled->ledon = true; } void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 ledcfg; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin); ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); switch (pled->ledpin) { case LED_PIN_GPIO0: break; case LED_PIN_LED0: ledcfg &= 0xf0; if (pcipriv->ledctl.led_opendrain) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(1) | BIT(5) | BIT(6))); else rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3) | BIT(5) | BIT(6))); break; case LED_PIN_LED1: ledcfg &= 0x0f; rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3))); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } pled->ledon = false; } void rtl92de_init_sw_leds(struct ieee80211_hw *hw) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0); _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1); } static void _rtl92ce_sw_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: case LED_CTL_NO_LINK: rtl92de_sw_led_on(hw, pLed0); break; case LED_CTL_POWER_OFF: rtl92de_sw_led_off(hw, pLed0); break; default: break; } } void rtl92de_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = 
rtl_psc(rtl_priv(hw)); if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) && (ledaction == LED_CTL_TX || ledaction == LED_CTL_RX || ledaction == LED_CTL_SITE_SURVEY || ledaction == LED_CTL_LINK || ledaction == LED_CTL_NO_LINK || ledaction == LED_CTL_START_TO_LINK || ledaction == LED_CTL_POWER_ON)) { return; } RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction); _rtl92ce_sw_led_control(hw, ledaction); }
gpl-2.0
jamison904/Galaxy_Note_3
drivers/uwb/i1480/i1480-est.c
13073
3167
/* * Intel Wireless UWB Link 1480 * Event Size tables for Wired Adaptors * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: docs */ #include <linux/init.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/uwb.h> #include "dfu/i1480-dfu.h" /** Event size table for wEvents 0x00XX */ static struct uwb_est_entry i1480_est_fd00[] = { /* Anybody expecting this response has to use * neh->extra_size to specify the real size that will * come back. 
*/ [i1480_EVT_CONFIRM] = { .size = sizeof(struct i1480_evt_confirm) }, [i1480_CMD_SET_IP_MAS] = { .size = sizeof(struct i1480_evt_confirm) }, #ifdef i1480_RCEB_EXTENDED [0x09] = { .size = sizeof(struct i1480_rceb), .offset = 1 + offsetof(struct i1480_rceb, wParamLength), }, #endif }; /** Event size table for wEvents 0x01XX */ static struct uwb_est_entry i1480_est_fd01[] = { [0xff & i1480_EVT_RM_INIT_DONE] = { .size = sizeof(struct i1480_rceb) }, [0xff & i1480_EVT_DEV_ADD] = { .size = sizeof(struct i1480_rceb) + 9 }, [0xff & i1480_EVT_DEV_RM] = { .size = sizeof(struct i1480_rceb) + 9 }, [0xff & i1480_EVT_DEV_ID_CHANGE] = { .size = sizeof(struct i1480_rceb) + 2 }, }; static int __init i1480_est_init(void) { int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b, i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00)); if (result < 0) { printk(KERN_ERR "Can't register EST table fd00: %d\n", result); return result; } result = uwb_est_register(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b, i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01)); if (result < 0) { printk(KERN_ERR "Can't register EST table fd01: %d\n", result); return result; } return 0; } module_init(i1480_est_init); static void __exit i1480_est_exit(void) { uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b, i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00)); uwb_est_unregister(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b, i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01)); } module_exit(i1480_est_exit); MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); MODULE_DESCRIPTION("i1480's Vendor Specific Event Size Tables"); MODULE_LICENSE("GPL"); /** * USB device ID's that we handle * * [so we are loaded when this kind device is connected] */ static struct usb_device_id __used i1480_est_id_table[] = { { USB_DEVICE(0x8086, 0xdf3b), }, { USB_DEVICE(0x8086, 0x0c3b), }, { }, }; MODULE_DEVICE_TABLE(usb, i1480_est_id_table);
gpl-2.0
hoxton22/CriseHearth
src/server/scripts/Commands/cs_ban.cpp
18
29534
/* * Copyright (C) 2008-2016 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData Name: ban_commandscript %Complete: 100 Comment: All ban related commands Category: commandscripts EndScriptData */ #include "AccountMgr.h" #include "Chat.h" #include "Language.h" #include "ObjectAccessor.h" #include "ObjectMgr.h" #include "Player.h" #include "ScriptMgr.h" class ban_commandscript : public CommandScript { public: ban_commandscript() : CommandScript("ban_commandscript") { } std::vector<ChatCommand> GetCommands() const override { static std::vector<ChatCommand> unbanCommandTable = { { "account", rbac::RBAC_PERM_COMMAND_UNBAN_ACCOUNT, true, &HandleUnBanAccountCommand, "" }, { "character", rbac::RBAC_PERM_COMMAND_UNBAN_CHARACTER, true, &HandleUnBanCharacterCommand, "" }, { "playeraccount", rbac::RBAC_PERM_COMMAND_UNBAN_PLAYERACCOUNT, true, &HandleUnBanAccountByCharCommand, "" }, { "ip", rbac::RBAC_PERM_COMMAND_UNBAN_IP, true, &HandleUnBanIPCommand, "" }, }; static std::vector<ChatCommand> banlistCommandTable = { { "account", rbac::RBAC_PERM_COMMAND_BANLIST_ACCOUNT, true, &HandleBanListAccountCommand, "" }, { "character", rbac::RBAC_PERM_COMMAND_BANLIST_CHARACTER, true, &HandleBanListCharacterCommand, "" }, { "ip", rbac::RBAC_PERM_COMMAND_BANLIST_IP, true, &HandleBanListIPCommand, "" }, }; static std::vector<ChatCommand> baninfoCommandTable = { { 
"account", rbac::RBAC_PERM_COMMAND_BANINFO_ACCOUNT, true, &HandleBanInfoAccountCommand, "" }, { "character", rbac::RBAC_PERM_COMMAND_BANINFO_CHARACTER, true, &HandleBanInfoCharacterCommand, "" }, { "ip", rbac::RBAC_PERM_COMMAND_BANINFO_IP, true, &HandleBanInfoIPCommand, "" }, }; static std::vector<ChatCommand> banCommandTable = { { "account", rbac::RBAC_PERM_COMMAND_BAN_ACCOUNT, true, &HandleBanAccountCommand, "" }, { "character", rbac::RBAC_PERM_COMMAND_BAN_CHARACTER, true, &HandleBanCharacterCommand, "" }, { "playeraccount", rbac::RBAC_PERM_COMMAND_BAN_PLAYERACCOUNT, true, &HandleBanAccountByCharCommand, "" }, { "ip", rbac::RBAC_PERM_COMMAND_BAN_IP, true, &HandleBanIPCommand, "" }, }; static std::vector<ChatCommand> commandTable = { { "ban", rbac::RBAC_PERM_COMMAND_BAN, true, NULL, "", banCommandTable }, { "baninfo", rbac::RBAC_PERM_COMMAND_BANINFO, true, NULL, "", baninfoCommandTable }, { "banlist", rbac::RBAC_PERM_COMMAND_BANLIST, true, NULL, "", banlistCommandTable }, { "unban", rbac::RBAC_PERM_COMMAND_UNBAN, true, NULL, "", unbanCommandTable }, }; return commandTable; } static bool HandleBanAccountCommand(ChatHandler* handler, char const* args) { return HandleBanHelper(BAN_ACCOUNT, args, handler); } static bool HandleBanCharacterCommand(ChatHandler* handler, char const* args) { if (!*args) return false; char* nameStr = strtok((char*)args, " "); if (!nameStr) return false; std::string name = nameStr; char* durationStr = strtok(NULL, " "); if (!durationStr || !atoi(durationStr)) return false; char* reasonStr = strtok(NULL, ""); if (!reasonStr) return false; if (!normalizePlayerName(name)) { handler->SendSysMessage(LANG_PLAYER_NOT_FOUND); handler->SetSentErrorMessage(true); return false; } std::string author = handler->GetSession() ? 
handler->GetSession()->GetPlayerName() : "Server"; switch (sWorld->BanCharacter(name, durationStr, reasonStr, author)) { case BAN_SUCCESS: { if (atoi(durationStr) > 0) { if (sWorld->getBoolConfig(CONFIG_SHOW_BAN_IN_WORLD)) sWorld->SendWorldText(LANG_BAN_CHARACTER_YOUBANNEDMESSAGE_WORLD, author.c_str(), name.c_str(), secsToTimeString(TimeStringToSecs(durationStr), true).c_str(), reasonStr); else handler->PSendSysMessage(LANG_BAN_YOUBANNED, name.c_str(), secsToTimeString(TimeStringToSecs(durationStr), true).c_str(), reasonStr); } else { if (sWorld->getBoolConfig(CONFIG_SHOW_BAN_IN_WORLD)) sWorld->SendWorldText(LANG_BAN_CHARACTER_YOUPERMBANNEDMESSAGE_WORLD, author.c_str(), name.c_str(), reasonStr); else handler->PSendSysMessage(LANG_BAN_YOUPERMBANNED, name.c_str(), reasonStr); } break; } case BAN_NOTFOUND: { handler->PSendSysMessage(LANG_BAN_NOTFOUND, "character", name.c_str()); handler->SetSentErrorMessage(true); return false; } default: break; } return true; } static bool HandleBanAccountByCharCommand(ChatHandler* handler, char const* args) { return HandleBanHelper(BAN_CHARACTER, args, handler); } static bool HandleBanIPCommand(ChatHandler* handler, char const* args) { return HandleBanHelper(BAN_IP, args, handler); } static bool HandleBanHelper(BanMode mode, char const* args, ChatHandler* handler) { if (!*args) return false; char* cnameOrIP = strtok((char*)args, " "); if (!cnameOrIP) return false; std::string nameOrIP = cnameOrIP; char* durationStr = strtok(NULL, " "); if (!durationStr || !atoi(durationStr)) return false; char* reasonStr = strtok(NULL, ""); if (!reasonStr) return false; switch (mode) { case BAN_ACCOUNT: if (!Utf8ToUpperOnlyLatin(nameOrIP)) { handler->PSendSysMessage(LANG_ACCOUNT_NOT_EXIST, nameOrIP.c_str()); handler->SetSentErrorMessage(true); return false; } break; case BAN_CHARACTER: if (!normalizePlayerName(nameOrIP)) { handler->SendSysMessage(LANG_PLAYER_NOT_FOUND); handler->SetSentErrorMessage(true); return false; } break; case BAN_IP: if 
(!IsIPAddress(nameOrIP.c_str())) return false; break; } std::string author = handler->GetSession() ? handler->GetSession()->GetPlayerName() : "Server"; switch (sWorld->BanAccount(mode, nameOrIP, durationStr, reasonStr, author)) { case BAN_SUCCESS: if (atoi(durationStr) > 0) { if (sWorld->getBoolConfig(CONFIG_SHOW_BAN_IN_WORLD)) sWorld->SendWorldText(LANG_BAN_ACCOUNT_YOUBANNEDMESSAGE_WORLD, author.c_str(), nameOrIP.c_str(), secsToTimeString(TimeStringToSecs(durationStr), true).c_str(), reasonStr); else handler->PSendSysMessage(LANG_BAN_YOUBANNED, nameOrIP.c_str(), secsToTimeString(TimeStringToSecs(durationStr), true).c_str(), reasonStr); } else { if (sWorld->getBoolConfig(CONFIG_SHOW_BAN_IN_WORLD)) sWorld->SendWorldText(LANG_BAN_ACCOUNT_YOUPERMBANNEDMESSAGE_WORLD, author.c_str(), nameOrIP.c_str(), reasonStr); else handler->PSendSysMessage(LANG_BAN_YOUPERMBANNED, nameOrIP.c_str(), reasonStr); } break; case BAN_SYNTAX_ERROR: return false; case BAN_NOTFOUND: switch (mode) { default: handler->PSendSysMessage(LANG_BAN_NOTFOUND, "account", nameOrIP.c_str()); break; case BAN_CHARACTER: handler->PSendSysMessage(LANG_BAN_NOTFOUND, "character", nameOrIP.c_str()); break; case BAN_IP: handler->PSendSysMessage(LANG_BAN_NOTFOUND, "ip", nameOrIP.c_str()); break; } handler->SetSentErrorMessage(true); return false; } return true; } static bool HandleBanInfoAccountCommand(ChatHandler* handler, char const* args) { if (!*args) return false; char* nameStr = strtok((char*)args, ""); if (!nameStr) return false; std::string accountName = nameStr; if (!Utf8ToUpperOnlyLatin(accountName)) { handler->PSendSysMessage(LANG_ACCOUNT_NOT_EXIST, accountName.c_str()); handler->SetSentErrorMessage(true); return false; } uint32 accountId = AccountMgr::GetId(accountName); if (!accountId) { handler->PSendSysMessage(LANG_ACCOUNT_NOT_EXIST, accountName.c_str()); return true; } return HandleBanInfoHelper(accountId, accountName.c_str(), handler); } static bool HandleBanInfoHelper(uint32 accountId, char 
const* accountName, ChatHandler* handler) { QueryResult result = LoginDatabase.PQuery("SELECT FROM_UNIXTIME(bandate), unbandate-bandate, active, unbandate, banreason, bannedby FROM account_banned WHERE id = '%u' ORDER BY bandate ASC", accountId); if (!result) { handler->PSendSysMessage(LANG_BANINFO_NOACCOUNTBAN, accountName); return true; } handler->PSendSysMessage(LANG_BANINFO_BANHISTORY, accountName); do { Field* fields = result->Fetch(); time_t unbanDate = time_t(fields[3].GetUInt32()); bool active = false; if (fields[2].GetBool() && (fields[1].GetUInt64() == uint64(0) || unbanDate >= time(NULL))) active = true; bool permanent = (fields[1].GetUInt64() == uint64(0)); std::string banTime = permanent ? handler->GetTrinityString(LANG_BANINFO_INFINITE) : secsToTimeString(fields[1].GetUInt64(), true); handler->PSendSysMessage(LANG_BANINFO_HISTORYENTRY, fields[0].GetCString(), banTime.c_str(), active ? handler->GetTrinityString(LANG_YES) : handler->GetTrinityString(LANG_NO), fields[4].GetCString(), fields[5].GetCString()); } while (result->NextRow()); return true; } static bool HandleBanInfoCharacterCommand(ChatHandler* handler, char const* args) { if (!*args) return false; Player* target = ObjectAccessor::FindPlayerByName(args); ObjectGuid targetGuid; std::string name(args); if (!target) { targetGuid = ObjectMgr::GetPlayerGUIDByName(name); if (targetGuid.IsEmpty()) { handler->PSendSysMessage(LANG_BANINFO_NOCHARACTER); return false; } } else targetGuid = target->GetGUID(); PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SEL_BANINFO); stmt->setUInt64(0, targetGuid.GetCounter()); PreparedQueryResult result = CharacterDatabase.Query(stmt); if (!result) { handler->PSendSysMessage(LANG_CHAR_NOT_BANNED, name.c_str()); return true; } handler->PSendSysMessage(LANG_BANINFO_BANHISTORY, name.c_str()); do { Field* fields = result->Fetch(); time_t unbanDate = time_t(fields[3].GetUInt32()); bool active = false; if (fields[2].GetUInt8() && 
(!fields[1].GetUInt32() || unbanDate >= time(NULL))) active = true; bool permanent = (fields[1].GetUInt32() == uint32(0)); std::string banTime = permanent ? handler->GetTrinityString(LANG_BANINFO_INFINITE) : secsToTimeString(fields[1].GetUInt32(), true); handler->PSendSysMessage(LANG_BANINFO_HISTORYENTRY, TimeToTimestampStr(fields[0].GetUInt32()).c_str(), banTime.c_str(), active ? handler->GetTrinityString(LANG_YES) : handler->GetTrinityString(LANG_NO), fields[4].GetCString(), fields[5].GetCString()); } while (result->NextRow()); return true; } static bool HandleBanInfoIPCommand(ChatHandler* handler, char const* args) { if (!*args) return false; char* ipStr = strtok((char*)args, ""); if (!ipStr) return false; if (!IsIPAddress(ipStr)) return false; std::string IP = ipStr; LoginDatabase.EscapeString(IP); QueryResult result = LoginDatabase.PQuery("SELECT ip, FROM_UNIXTIME(bandate), FROM_UNIXTIME(unbandate), unbandate-UNIX_TIMESTAMP(), banreason, bannedby, unbandate-bandate FROM ip_banned WHERE ip = '%s'", IP.c_str()); if (!result) { handler->PSendSysMessage(LANG_BANINFO_NOIP); return true; } Field* fields = result->Fetch(); bool permanent = !fields[6].GetUInt64(); handler->PSendSysMessage(LANG_BANINFO_IPENTRY, fields[0].GetCString(), fields[1].GetCString(), permanent ? handler->GetTrinityString(LANG_BANINFO_NEVER) : fields[2].GetCString(), permanent ? handler->GetTrinityString(LANG_BANINFO_INFINITE) : secsToTimeString(fields[3].GetUInt64(), true).c_str(), fields[4].GetCString(), fields[5].GetCString()); return true; } static bool HandleBanListAccountCommand(ChatHandler* handler, char const* args) { PreparedStatement* stmt = NULL; stmt = LoginDatabase.GetPreparedStatement(LOGIN_DEL_EXPIRED_IP_BANS); LoginDatabase.Execute(stmt); char* filterStr = strtok((char*)args, " "); std::string filter = filterStr ? 
filterStr : ""; PreparedQueryResult result; if (filter.empty()) { stmt = LoginDatabase.GetPreparedStatement(LOGIN_SEL_ACCOUNT_BANNED_ALL); result = LoginDatabase.Query(stmt); } else { stmt = LoginDatabase.GetPreparedStatement(LOGIN_SEL_ACCOUNT_BANNED_BY_USERNAME); stmt->setString(0, filter); result = LoginDatabase.Query(stmt); } if (!result) { handler->PSendSysMessage(LANG_BANLIST_NOACCOUNT); return true; } return HandleBanListHelper(result, handler); } static bool HandleBanListHelper(PreparedQueryResult result, ChatHandler* handler) { handler->PSendSysMessage(LANG_BANLIST_MATCHINGACCOUNT); // Chat short output if (handler->GetSession()) { do { Field* fields = result->Fetch(); uint32 accountid = fields[0].GetUInt32(); QueryResult banResult = LoginDatabase.PQuery("SELECT account.username FROM account, account_banned WHERE account_banned.id='%u' AND account_banned.id=account.id", accountid); if (banResult) { Field* fields2 = banResult->Fetch(); handler->PSendSysMessage("%s", fields2[0].GetCString()); } } while (result->NextRow()); } // Console wide output else { handler->SendSysMessage(LANG_BANLIST_ACCOUNTS); handler->SendSysMessage(" ==============================================================================="); handler->SendSysMessage(LANG_BANLIST_ACCOUNTS_HEADER); do { handler->SendSysMessage("-------------------------------------------------------------------------------"); Field* fields = result->Fetch(); uint32 accountId = fields[0].GetUInt32(); std::string accountName; // "account" case, name can be get in same query if (result->GetFieldCount() > 1) accountName = fields[1].GetString(); // "character" case, name need extract from another DB else AccountMgr::GetName(accountId, accountName); // No SQL injection. id is uint32. 
QueryResult banInfo = LoginDatabase.PQuery("SELECT bandate, unbandate, bannedby, banreason FROM account_banned WHERE id = %u ORDER BY unbandate", accountId); if (banInfo) { Field* fields2 = banInfo->Fetch(); do { time_t timeBan = time_t(fields2[0].GetUInt32()); tm tmBan; localtime_r(&timeBan, &tmBan); if (fields2[0].GetUInt32() == fields2[1].GetUInt32()) { handler->PSendSysMessage("|%-15.15s|%02d-%02d-%02d %02d:%02d| permanent |%-15.15s|%-15.15s|", accountName.c_str(), tmBan.tm_year%100, tmBan.tm_mon+1, tmBan.tm_mday, tmBan.tm_hour, tmBan.tm_min, fields2[2].GetCString(), fields2[3].GetCString()); } else { time_t timeUnban = time_t(fields2[1].GetUInt32()); tm tmUnban; localtime_r(&timeUnban, &tmUnban); handler->PSendSysMessage("|%-15.15s|%02d-%02d-%02d %02d:%02d|%02d-%02d-%02d %02d:%02d|%-15.15s|%-15.15s|", accountName.c_str(), tmBan.tm_year%100, tmBan.tm_mon+1, tmBan.tm_mday, tmBan.tm_hour, tmBan.tm_min, tmUnban.tm_year%100, tmUnban.tm_mon+1, tmUnban.tm_mday, tmUnban.tm_hour, tmUnban.tm_min, fields2[2].GetCString(), fields2[3].GetCString()); } } while (banInfo->NextRow()); } } while (result->NextRow()); handler->SendSysMessage(" ==============================================================================="); } return true; } static bool HandleBanListCharacterCommand(ChatHandler* handler, char const* args) { if (!*args) return false; char* filterStr = strtok((char*)args, " "); if (!filterStr) return false; std::string filter(filterStr); PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SEL_GUID_BY_NAME_FILTER); stmt->setString(0, filter); PreparedQueryResult result = CharacterDatabase.Query(stmt); if (!result) { handler->PSendSysMessage(LANG_BANLIST_NOCHARACTER); return true; } handler->PSendSysMessage(LANG_BANLIST_MATCHINGCHARACTER); // Chat short output if (handler->GetSession()) { do { Field* fields = result->Fetch(); PreparedStatement* stmt2 = CharacterDatabase.GetPreparedStatement(CHAR_SEL_BANNED_NAME); stmt2->setUInt64(0, 
fields[0].GetUInt64()); PreparedQueryResult banResult = CharacterDatabase.Query(stmt2); if (banResult) handler->PSendSysMessage("%s", (*banResult)[0].GetCString()); } while (result->NextRow()); } // Console wide output else { handler->SendSysMessage(LANG_BANLIST_CHARACTERS); handler->SendSysMessage(" =============================================================================== "); handler->SendSysMessage(LANG_BANLIST_CHARACTERS_HEADER); do { handler->SendSysMessage("-------------------------------------------------------------------------------"); Field* fields = result->Fetch(); std::string char_name = fields[1].GetString(); PreparedStatement* stmt2 = CharacterDatabase.GetPreparedStatement(CHAR_SEL_BANINFO_LIST); stmt2->setUInt64(0, fields[0].GetUInt64()); PreparedQueryResult banInfo = CharacterDatabase.Query(stmt2); if (banInfo) { Field* banFields = banInfo->Fetch(); do { time_t timeBan = time_t(banFields[0].GetUInt32()); tm tmBan; localtime_r(&timeBan, &tmBan); if (banFields[0].GetUInt32() == banFields[1].GetUInt32()) { handler->PSendSysMessage("|%-15.15s|%02d-%02d-%02d %02d:%02d| permanent |%-15.15s|%-15.15s|", char_name.c_str(), tmBan.tm_year%100, tmBan.tm_mon+1, tmBan.tm_mday, tmBan.tm_hour, tmBan.tm_min, banFields[2].GetCString(), banFields[3].GetCString()); } else { time_t timeUnban = time_t(banFields[1].GetUInt32()); tm tmUnban; localtime_r(&timeUnban, &tmUnban); handler->PSendSysMessage("|%-15.15s|%02d-%02d-%02d %02d:%02d|%02d-%02d-%02d %02d:%02d|%-15.15s|%-15.15s|", char_name.c_str(), tmBan.tm_year%100, tmBan.tm_mon+1, tmBan.tm_mday, tmBan.tm_hour, tmBan.tm_min, tmUnban.tm_year%100, tmUnban.tm_mon+1, tmUnban.tm_mday, tmUnban.tm_hour, tmUnban.tm_min, banFields[2].GetCString(), banFields[3].GetCString()); } } while (banInfo->NextRow()); } } while (result->NextRow()); handler->SendSysMessage(" =============================================================================== "); } return true; } static bool HandleBanListIPCommand(ChatHandler* handler, char 
const* args) { PreparedStatement* stmt = NULL; stmt = LoginDatabase.GetPreparedStatement(LOGIN_DEL_EXPIRED_IP_BANS); LoginDatabase.Execute(stmt); char* filterStr = strtok((char*)args, " "); std::string filter = filterStr ? filterStr : ""; LoginDatabase.EscapeString(filter); PreparedQueryResult result; if (filter.empty()) { stmt = LoginDatabase.GetPreparedStatement(LOGIN_SEL_IP_BANNED_ALL); result = LoginDatabase.Query(stmt); } else { stmt = LoginDatabase.GetPreparedStatement(LOGIN_SEL_IP_BANNED_BY_IP); stmt->setString(0, filter); result = LoginDatabase.Query(stmt); } if (!result) { handler->PSendSysMessage(LANG_BANLIST_NOIP); return true; } handler->PSendSysMessage(LANG_BANLIST_MATCHINGIP); // Chat short output if (handler->GetSession()) { do { Field* fields = result->Fetch(); handler->PSendSysMessage("%s", fields[0].GetCString()); } while (result->NextRow()); } // Console wide output else { handler->SendSysMessage(LANG_BANLIST_IPS); handler->SendSysMessage(" ==============================================================================="); handler->SendSysMessage(LANG_BANLIST_IPS_HEADER); do { handler->SendSysMessage("-------------------------------------------------------------------------------"); Field* fields = result->Fetch(); time_t timeBan = time_t(fields[1].GetUInt32()); tm tmBan; localtime_r(&timeBan, &tmBan); if (fields[1].GetUInt32() == fields[2].GetUInt32()) { handler->PSendSysMessage("|%-15.15s|%02d-%02d-%02d %02d:%02d| permanent |%-15.15s|%-15.15s|", fields[0].GetCString(), tmBan.tm_year%100, tmBan.tm_mon+1, tmBan.tm_mday, tmBan.tm_hour, tmBan.tm_min, fields[3].GetCString(), fields[4].GetCString()); } else { time_t timeUnban = time_t(fields[2].GetUInt32()); tm tmUnban; localtime_r(&timeUnban, &tmUnban); handler->PSendSysMessage("|%-15.15s|%02d-%02d-%02d %02d:%02d|%02d-%02d-%02d %02d:%02d|%-15.15s|%-15.15s|", fields[0].GetCString(), tmBan.tm_year%100, tmBan.tm_mon+1, tmBan.tm_mday, tmBan.tm_hour, tmBan.tm_min, tmUnban.tm_year%100, tmUnban.tm_mon+1, 
tmUnban.tm_mday, tmUnban.tm_hour, tmUnban.tm_min, fields[3].GetCString(), fields[4].GetCString()); } } while (result->NextRow()); handler->SendSysMessage(" ==============================================================================="); } return true; } static bool HandleUnBanAccountCommand(ChatHandler* handler, char const* args) { return HandleUnBanHelper(BAN_ACCOUNT, args, handler); } static bool HandleUnBanCharacterCommand(ChatHandler* handler, char const* args) { if (!*args) return false; char* nameStr = strtok((char*)args, " "); if (!nameStr) return false; std::string name = nameStr; if (!normalizePlayerName(name)) { handler->SendSysMessage(LANG_PLAYER_NOT_FOUND); handler->SetSentErrorMessage(true); return false; } if (!sWorld->RemoveBanCharacter(name)) { handler->SendSysMessage(LANG_PLAYER_NOT_FOUND); handler->SetSentErrorMessage(true); return false; } return true; } static bool HandleUnBanAccountByCharCommand(ChatHandler* handler, char const* args) { return HandleUnBanHelper(BAN_CHARACTER, args, handler); } static bool HandleUnBanIPCommand(ChatHandler* handler, char const* args) { return HandleUnBanHelper(BAN_IP, args, handler); } static bool HandleUnBanHelper(BanMode mode, char const* args, ChatHandler* handler) { if (!*args) return false; char* nameOrIPStr = strtok((char*)args, " "); if (!nameOrIPStr) return false; std::string nameOrIP = nameOrIPStr; switch (mode) { case BAN_ACCOUNT: if (!Utf8ToUpperOnlyLatin(nameOrIP)) { handler->PSendSysMessage(LANG_ACCOUNT_NOT_EXIST, nameOrIP.c_str()); handler->SetSentErrorMessage(true); return false; } break; case BAN_CHARACTER: if (!normalizePlayerName(nameOrIP)) { handler->SendSysMessage(LANG_PLAYER_NOT_FOUND); handler->SetSentErrorMessage(true); return false; } break; case BAN_IP: if (!IsIPAddress(nameOrIP.c_str())) return false; break; } if (sWorld->RemoveBanAccount(mode, nameOrIP)) handler->PSendSysMessage(LANG_UNBAN_UNBANNED, nameOrIP.c_str()); else handler->PSendSysMessage(LANG_UNBAN_ERROR, nameOrIP.c_str()); 
return true; } }; void AddSC_ban_commandscript() { new ban_commandscript(); }
gpl-2.0
eliastor/rt-thread
bsp/lm4f232/Libraries/driverlib/i2s.c
18
40475
//***************************************************************************** // // i2s.c - Driver for the I2S controller. // // Copyright (c) 2008-2011 Texas Instruments Incorporated. All rights reserved. // Software License Agreement // // Texas Instruments (TI) is supplying this software for use solely and // exclusively on TI's microcontroller products. The software is owned by // TI and/or its suppliers, and is protected under applicable copyright // laws. You may not combine this software with "viral" open-source // software in order to form a larger program. // // THIS SOFTWARE IS PROVIDED "AS IS" AND WITH ALL FAULTS. // NO WARRANTIES, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT // NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. TI SHALL NOT, UNDER ANY // CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR CONSEQUENTIAL // DAMAGES, FOR ANY REASON WHATSOEVER. // // This is part of revision 8264 of the Stellaris Peripheral Driver Library. // //***************************************************************************** //***************************************************************************** // //! \addtogroup i2s_api //! @{ // //***************************************************************************** #include "inc/hw_i2s.h" #include "inc/hw_ints.h" #include "inc/hw_memmap.h" #include "inc/hw_types.h" #include "driverlib/debug.h" #include "driverlib/i2s.h" #include "driverlib/interrupt.h" //***************************************************************************** // //! Enables the I2S transmit module for operation. //! //! \param ulBase is the I2S module base address. //! //! This function enables the transmit module for operation. The module //! should be enabled after configuration. When the module is disabled, //! no data or clocks are generated on the I2S signals. //! //! \return None. 
// //***************************************************************************** void I2STxEnable(unsigned long ulBase) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Enable the tx FIFO service request. // HWREG(ulBase + I2S_O_TXISM) = I2S_TXISM_FFM; // // Read-modify-write the enable bit. // HWREG(ulBase + I2S_O_CFG) |= I2S_CFG_TXEN; } //***************************************************************************** // //! Disables the I2S transmit module for operation. //! //! \param ulBase is the I2S module base address. //! //! This function disables the transmit module for operation. The module //! should be disabled before configuration. When the module is disabled, //! no data or clocks are generated on the I2S signals. //! //! \return None. // //***************************************************************************** void I2STxDisable(unsigned long ulBase) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Read-modify-write the enable bit. // HWREG(ulBase + I2S_O_CFG) &= ~I2S_CFG_TXEN; } //***************************************************************************** // //! Writes data samples to the I2S transmit FIFO with blocking. //! //! \param ulBase is the I2S module base address. //! \param ulData is the single- or dual-channel I2S data. //! //! This function writes a single-channel sample or combined left-right //! samples to the I2S transmit FIFO. The format of the sample is determined //! by the configuration that was used with the function I2STxConfigSet(). //! If the transmit mode is \b I2S_MODE_DUAL_STEREO then the \e ulData //! parameter contains either the left or right sample. The left and right //! sample alternate with each write to the FIFO, left sample first. If the //! transmit mode is \b I2S_MODE_COMPACT_STEREO_16 or //! \b I2S_MODE_COMPACT_STEREO_8, then the \e ulData parameter contains both //! the left and right samples. If the transmit mode is //! 
\b I2S_MODE_SINGLE_MONO then the \e ulData parameter contains the single //! channel sample. //! //! For the compact modes, both the left and right samples are written at //! the same time. If 16-bit compact mode is used, then the least significant //! 16 bits contain the left sample, and the most significant 16 bits contain //! the right sample. If 8-bit compact mode is used, then the lower 8 bits //! contain the left sample, and the next 8 bits contain the right sample, //! with the upper 16 bits unused. //! //! If there is no room in the transmit FIFO, then this function waits //! in a polling loop until the data can be written. //! //! \return None. // //***************************************************************************** void I2STxDataPut(unsigned long ulBase, unsigned long ulData) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Wait until there is space. // while(HWREG(ulBase + I2S_O_TXLEV) >= 16) { } // // Write the data to the I2S. // HWREG(ulBase + I2S_O_TXFIFO) = ulData; } //***************************************************************************** // //! Writes data samples to the I2S transmit FIFO without blocking. //! //! \param ulBase is the I2S module base address. //! \param ulData is the single- or dual-channel I2S data. //! //! This function writes a single-channel sample or combined left-right //! samples to the I2S transmit FIFO. The format of the sample is determined //! by the configuration that was used with the function I2STxConfigSet(). //! If the transmit mode is \b I2S_MODE_DUAL_STEREO then the \e ulData //! parameter contains either the left or right sample. The left and right //! sample alternate with each write to the FIFO, left sample first. If the //! transmit mode is \b I2S_MODE_COMPACT_STEREO_16 or //! \b I2S_MODE_COMPACT_STEREO_8, then the \e ulData parameter contains both //! the left and right samples. If the transmit mode is //! 
\b I2S_MODE_SINGLE_MONO then the \e ulData parameter contains the single- //! channel sample. //! //! For the compact modes, both the left and right samples are written at //! the same time. If 16-bit compact mode is used, then the least significant //! 16 bits contain the left sample, and the most significant 16 bits contain //! the right sample. If 8-bit compact mode is used, then the lower 8 bits //! contain the left sample, and the next 8 bits contain the right sample, //! with the upper 16 bits unused. //! //! If there is no room in the transmit FIFO, then this function returns //! immediately without writing any data to the FIFO. //! //! \return The number of elements written to the I2S transmit FIFO (1 or 0). // //***************************************************************************** long I2STxDataPutNonBlocking(unsigned long ulBase, unsigned long ulData) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Check for space to write. // if(HWREG(ulBase + I2S_O_TXLEV) < 16) { HWREG(ulBase + I2S_O_TXFIFO) = ulData; return(1); } else { return(0); } } //***************************************************************************** // //! Configures the I2S transmit module. //! //! \param ulBase is the I2S module base address. //! \param ulConfig is the logical OR of the configuration options. //! //! This function is used to configure the options for the I2S transmit //! channel. The parameter \e ulConfig is the logical OR of the following //! options: //! //! - \b I2S_CONFIG_FORMAT_I2S for standard I2S format, //! \b I2S_CONFIG_FORMAT_LEFT_JUST for left justified format, or //! \b I2S_CONFIG_FORMAT_RIGHT_JUST for right justified format. //! - \b I2S_CONFIG_SCLK_INVERT to invert the polarity of the serial bit clock. //! - \b I2S_CONFIG_MODE_DUAL for dual channel stereo, //! \b I2S_CONFIG_MODE_COMPACT_16 for 16-bit compact stereo mode, //! \b I2S_CONFIG_MODE_COMPACT_8 for 8-bit compact stereo mode, or //! 
\b I2S_CONFIG_MODE_MONO for single channel mono format. //! - \b I2S_CONFIG_CLK_MASTER or \b I2S_CONFIG_CLK_SLAVE to select whether //! the I2S transmitter is the clock master or slave. //! - \b I2S_CONFIG_SAMPLE_SIZE_32, \b _24, \b _20, \b _16, or \b _8 //! to select the number of bits per sample. //! - \b I2S_CONFIG_WIRE_SIZE_32, \b _24, \b _20, \b _16, or \b _8 //! to select the number of bits per word that are transferred on the data //! line. //! - \b I2S_CONFIG_EMPTY_ZERO or \b I2S_CONFIG_EMPTY_REPEAT to select whether //! the module transmits zeroes or repeats the last sample when the FIFO is //! empty. //! //! \return None. // //***************************************************************************** void I2STxConfigSet(unsigned long ulBase, unsigned long ulConfig) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); ASSERT((ulConfig & (I2S_CONFIG_FORMAT_MASK | I2S_CONFIG_MODE_MASK | I2S_CONFIG_EMPTY_MASK | I2S_CONFIG_CLK_MASK | I2S_CONFIG_SAMPLE_SIZE_MASK | I2S_CONFIG_WIRE_SIZE_MASK)) == ulConfig); // // Check to see if a compact mode is used. // if((ulConfig & I2S_CONFIG_MODE_MASK) == I2S_CONFIG_MODE_COMPACT_8) { // // If compact 8 mode is used, then need to adjust some bits // before writing the config register. Also set the FIFO // config register for 8-bit compact samples. // ulConfig &= ~I2S_CONFIG_MODE_MONO; HWREG(ulBase + I2S_O_TXFIFOCFG) = I2S_TXFIFOCFG_CSS; } else { // // If compact 8 mode is not used, then set the FIFO config // register for 16 bit. This setting is okay if a compact // mode is not used. // HWREG(ulBase + I2S_O_TXFIFOCFG) = 0; } // // Write the configuration register. Because all the fields are // specified by the configuration parameter, it is not necessary // to do a read-modify-write. // HWREG(ulBase + I2S_O_TXCFG) = ulConfig; } //***************************************************************************** // //! Sets the FIFO level at which a service request is generated. //! //! 
\param ulBase is the I2S module base address. //! \param ulLevel is the FIFO service request limit. //! //! This function is used to set the transmit FIFO fullness level at which a //! service request occurs. The service request is used to generate an //! interrupt or a DMA transfer request. The transmit FIFO generates a //! service request when the number of items in the FIFO is less than the level //! specified in the \e ulLevel parameter. For example, if \e ulLevel is 8, //! then a service request is generated when there are less than 8 samples //! remaining in the transmit FIFO. //! //! For the purposes of counting the FIFO level, a left-right sample pair //! counts as 2, whether the mode is dual or compact stereo. When mono mode is //! used, internally the mono sample is still treated as a sample pair, so a //! single mono sample counts as 2. Because the FIFO always deals with sample //! pairs, the level must be an even number from 0 to 16. The maximum value is //! 16, which causes a service request when there is any room in the FIFO. //! The minimum value is 0, which disables the service request. //! //! \return None. // //***************************************************************************** void I2STxFIFOLimitSet(unsigned long ulBase, unsigned long ulLevel) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); ASSERT(ulLevel <= 16); // // Write the FIFO limit // HWREG(ulBase + I2S_O_TXLIMIT) = ulLevel; } //***************************************************************************** // //! Gets the current setting of the FIFO service request level. //! //! \param ulBase is the I2S module base address. //! //! This function is used to get the value of the transmit FIFO service //! request level. This value is set using the I2STxFIFOLimitSet() //! function. //! //! \return Returns the current value of the FIFO service request limit. 
// //***************************************************************************** unsigned long I2STxFIFOLimitGet(unsigned long ulBase) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Read and return the FIFO limit // return(HWREG(ulBase + I2S_O_TXLIMIT)); } //***************************************************************************** // //! Gets the number of samples in the transmit FIFO. //! //! \param ulBase is the I2S module base address. //! //! This function is used to get the number of samples in the transmit FIFO. //! For the purposes of measuring the FIFO level, a left-right sample pair //! counts as 2, whether the mode is dual or compact stereo. When mono mode is //! used, internally the mono sample is still treated as a sample pair, so a //! single mono sample counts as 2. Because the FIFO always deals with sample //! pairs, normally the level is an even number from 0 to 16. If dual stereo //! mode is used and only the left sample has been written without the matching //! right sample, then the FIFO level is an odd value. If the FIFO level is //! odd, it indicates a left-right sample mismatch. //! //! \return Returns the number of samples in the transmit FIFO, which is //! normally an even number. // //***************************************************************************** unsigned long I2STxFIFOLevelGet(unsigned long ulBase) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Read and return the transmit FIFO level. // return(HWREG(ulBase + I2S_O_TXLEV)); } //***************************************************************************** // //! Enables the I2S receive module for operation. //! //! \param ulBase is the I2S module base address. //! //! This function enables the receive module for operation. The module should //! be enabled after configuration. When the module is disabled, no data is //! clocked in regardless of the signals on the I2S interface. //! //! \return None. 
// //***************************************************************************** void I2SRxEnable(unsigned long ulBase) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Enable the tx FIFO service request. // HWREG(ulBase + I2S_O_RXISM) = I2S_RXISM_FFM; // // Read-modify-write the enable bit. // HWREG(ulBase + I2S_O_CFG) |= I2S_CFG_RXEN; } //***************************************************************************** // //! Disables the I2S receive module for operation. //! //! \param ulBase is the I2S module base address. //! //! This function disables the receive module for operation. The module should //! be disabled before configuration. When the module is disabled, no data is //! clocked in regardless of the signals on the I2S interface. //! //! \return None. // //***************************************************************************** void I2SRxDisable(unsigned long ulBase) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Read-modify-write the enable bit. // HWREG(ulBase + I2S_O_CFG) &= ~I2S_CFG_RXEN; } //***************************************************************************** // //! Reads data samples from the I2S receive FIFO with blocking. //! //! \param ulBase is the I2S module base address. //! \param pulData points to storage for the returned I2S sample data. //! //! This function reads a single channel sample or combined left-right //! samples from the I2S receive FIFO. The format of the sample is determined //! by the configuration that was used with the function I2SRxConfigSet(). //! If the receive mode is \b I2S_MODE_DUAL_STEREO then the returned value //! contains either the left or right sample. The left and right sample //! alternate with each read from the FIFO, left sample first. If the receive //! mode is \b I2S_MODE_COMPACT_STEREO_16 or \b I2S_MODE_COMPACT_STEREO_8, then //! the returned data contains both the left and right samples. If the //! 
receive mode is \b I2S_MODE_SINGLE_MONO then the returned data //! contains the single channel sample. //! //! For the compact modes, both the left and right samples are read at //! the same time. If 16-bit compact mode is used, then the least significant //! 16 bits contain the left sample, and the most significant 16 bits contain //! the right sample. If 8-bit compact mode is used, then the lower 8 bits //! contain the left sample, and the next 8 bits contain the right sample, //! with the upper 16 bits unused. //! //! If there is no data in the receive FIFO, then this function waits //! in a polling loop until data is available. //! //! \return None. // //***************************************************************************** void I2SRxDataGet(unsigned long ulBase, unsigned long *pulData) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Wait until there is data available. // while(HWREG(ulBase + I2S_O_RXLEV) == 0) { } // // Read data from the I2S receive FIFO. // *pulData = HWREG(ulBase + I2S_O_RXFIFO); } //***************************************************************************** // //! Reads data samples from the I2S receive FIFO without blocking. //! //! \param ulBase is the I2S module base address. //! \param pulData points to storage for the returned I2S sample data. //! //! This function reads a single channel sample or combined left-right //! samples from the I2S receive FIFO. The format of the sample is determined //! by the configuration that was used with the function I2SRxConfigSet(). //! If the receive mode is \b I2S_MODE_DUAL_STEREO then the received data //! contains either the left or right sample. The left and right sample //! alternate with each read from the FIFO, left sample first. If the receive //! mode is \b I2S_MODE_COMPACT_STEREO_16 or \b I2S_MODE_COMPACT_STEREO_8, then //! the received data contains both the left and right samples. If the //! receive mode is \b I2S_MODE_SINGLE_MONO then the received data //! 
contains the single channel sample. //! //! For the compact modes, both the left and right samples are read at //! the same time. If 16-bit compact mode is used, then the least significant //! 16 bits contain the left sample, and the most significant 16 bits contain //! the right sample. If 8-bit compact mode is used, then the lower 8 bits //! contain the left sample, and the next 8 bits contain the right sample, //! with the upper 16 bits unused. //! //! If there is no data in the receive FIFO, then this function returns //! immediately without reading any data from the FIFO. //! //! \return The number of elements read from the I2S receive FIFO (1 or 0). // //***************************************************************************** long I2SRxDataGetNonBlocking(unsigned long ulBase, unsigned long *pulData) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Check for available samples. // if(HWREG(ulBase + I2S_O_RXLEV) != 0) { *pulData = HWREG(ulBase + I2S_O_RXFIFO); return(1); } else { return(0); } } //***************************************************************************** // //! Configures the I2S receive module. //! //! \param ulBase is the I2S module base address. //! \param ulConfig is the logical OR of the configuration options. //! //! This function is used to configure the options for the I2S receive //! channel. The parameter \e ulConfig is the logical OR of the following //! options: //! //! - \b I2S_CONFIG_FORMAT_I2S for standard I2S format, //! \b I2S_CONFIG_FORMAT_LEFT_JUST for left justified format, or //! \b I2S_CONFIG_FORMAT_RIGHT_JUST for right justified format. //! - \b I2S_CONFIG_SCLK_INVERT to invert the polarity of the serial bit clock. //! - \b I2S_CONFIG_MODE_DUAL for dual channel stereo, //! \b I2S_CONFIG_MODE_COMPACT_16 for 16-bit compact stereo mode, //! \b I2S_CONFIG_MODE_COMPACT_8 for 8-bit compact stereo mode, or //! \b I2S_CONFIG_MODE_MONO for single channel mono format. //! 
- \b I2S_CONFIG_CLK_MASTER or \b I2S_CONFIG_CLK_SLAVE to select whether //! the I2S receiver is the clock master or slave. //! - \b I2S_CONFIG_SAMPLE_SIZE_32, \b _24, \b _20, \b _16, or \b _8 //! to select the number of bits per sample. //! - \b I2S_CONFIG_WIRE_SIZE_32, \b _24, \b _20, \b _16, or \b _8 //! to select the number of bits per word that are transferred on the data //! line. //! //! \return None. // //***************************************************************************** void I2SRxConfigSet(unsigned long ulBase, unsigned long ulConfig) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); ASSERT((ulConfig & (I2S_CONFIG_FORMAT_MASK | I2S_CONFIG_MODE_MASK | I2S_CONFIG_CLK_MASK | I2S_CONFIG_SAMPLE_SIZE_MASK | I2S_CONFIG_WIRE_SIZE_MASK)) == ulConfig); // // Clear out any prior config of the RX FIFO config register. // HWREG(ulBase + I2S_O_RXFIFOCFG) = 0; // // If mono mode is used, then the FMM bit needs to be set. // if((ulConfig & I2S_CONFIG_MODE_MASK) == I2S_CONFIG_MODE_MONO) { HWREG(ulBase + I2S_O_RXFIFOCFG) |= I2S_RXFIFOCFG_FMM; } // // If a compact mode is used, then the CSS bit needs to be set. // else if((ulConfig & I2S_CONFIG_MODE_MASK) == I2S_CONFIG_MODE_COMPACT_8) { HWREG(ulBase + I2S_O_RXFIFOCFG) |= I2S_RXFIFOCFG_CSS; } // // The "mono" bits must be removed from the configuration word // prior to writing to hardware, because the RX configuration register // does not actually use these bits. // ulConfig &= ~I2S_CONFIG_MODE_MONO; // // Write the configuration register. Because all the fields are // specified by the configuration parameter, it is not necessary // to do a read-modify-write. // HWREG(ulBase + I2S_O_RXCFG) = ulConfig; } //***************************************************************************** // //! Sets the FIFO level at which a service request is generated. //! //! \param ulBase is the I2S module base address. //! \param ulLevel is the FIFO service request limit. //! //! 
This function is used to set the receive FIFO fullness level at which a //! service request occurs. The service request is used to generate an //! interrupt or a DMA transfer request. The receive FIFO generates a //! service request when the number of items in the FIFO is greater than the //! level specified in the \e ulLevel parameter. For example, if \e ulLevel is //! 4, then a service request is generated when there are more than 4 samples //! available in the receive FIFO. //! //! For the purposes of counting the FIFO level, a left-right sample pair //! counts as 2, whether the mode is dual or compact stereo. When mono mode is //! used, internally the mono sample is still treated as a sample pair, so a //! single mono sample counts as 2. Because the FIFO always deals with sample //! pairs, the level must be an even number from 0 to 16. The minimum value is //! 0, which causes a service request when there is any data available in //! the FIFO. The maximum value is 16, which disables the service request //! (because there cannot be more than 16 items in the FIFO). //! //! \return None. // //***************************************************************************** void I2SRxFIFOLimitSet(unsigned long ulBase, unsigned long ulLevel) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); ASSERT(ulLevel <= 16); // // Write the FIFO limit // HWREG(ulBase + I2S_O_RXLIMIT) = ulLevel; } //***************************************************************************** // //! Gets the current setting of the FIFO service request level. //! //! \param ulBase is the I2S module base address. //! //! This function is used to get the value of the receive FIFO service //! request level. This value is set using the I2SRxFIFOLimitSet() //! function. //! //! \return Returns the current value of the FIFO service request limit. 
// //***************************************************************************** unsigned long I2SRxFIFOLimitGet(unsigned long ulBase) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Read and return the FIFO limit. The lower bit is masked // because it always reads as 1 and has no meaning. // return(HWREG(ulBase + I2S_O_RXLIMIT) & 0xFFFE); } //***************************************************************************** // //! Gets the number of samples in the receive FIFO. //! //! \param ulBase is the I2S module base address. //! //! This function is used to get the number of samples in the receive FIFO. //! For the purposes of measuring the FIFO level, a left-right sample pair //! counts as 2, whether the mode is dual or compact stereo. When mono mode is //! used, internally the mono sample is still treated as a sample pair, so a //! single mono sample counts as 2. Because the FIFO always deals with sample //! pairs, normally the level is an even number from 0 to 16. If dual stereo //! mode is used and only the left sample has been read without reading the //! matching right sample, then the FIFO level is an odd value. If the FIFO //! level is odd, it indicates a left-right sample mismatch. //! //! \return Returns the number of samples in the transmit FIFO, which is //! normally an even number. // //***************************************************************************** unsigned long I2SRxFIFOLevelGet(unsigned long ulBase) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Read and return the receive FIFO level. // return(HWREG(ulBase + I2S_O_RXLEV)); } //***************************************************************************** // //! Enables the I2S transmit and receive modules for operation. //! //! \param ulBase is the I2S module base address. //! //! This function simultaneously enables the transmit and receive modules for //! operation, providing a synchronized SCLK and LRCLK. The module should be //! 
enabled after configuration. When the module is disabled, no data or //! clocks are generated on the I2S signals. //! //! \return None. // //***************************************************************************** void I2STxRxEnable(unsigned long ulBase) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Enable the Tx FIFO service request. // HWREG(ulBase + I2S_O_TXISM) = I2S_TXISM_FFM; // // Enable the Rx FIFO service request. // HWREG(ulBase + I2S_O_RXISM) = I2S_RXISM_FFM; // // Enable the transmit and receive modules. // HWREG(ulBase + I2S_O_CFG) |= I2S_CFG_TXEN | I2S_CFG_RXEN; } //***************************************************************************** // //! Disables the I2S transmit and receive modules. //! //! \param ulBase is the I2S module base address. //! //! This function simultaneously disables the transmit and receive modules. //! When the module is disabled, no data or clocks are generated on the I2S //! signals. //! //! \return None. // //***************************************************************************** void I2STxRxDisable(unsigned long ulBase) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Disable the transmit and receive modules. // HWREG(ulBase + I2S_O_CFG) &= ~(I2S_CFG_TXEN | I2S_CFG_RXEN); } //***************************************************************************** // //! Configures the I2S transmit and receive modules. //! //! \param ulBase is the I2S module base address. //! \param ulConfig is the logical OR of the configuration options. //! //! This function is used to configure the options for the I2S transmit and //! receive channels with identical parameters. The parameter \e ulConfig is //! the logical OR of the following options: //! //! - \b I2S_CONFIG_FORMAT_I2S for standard I2S format, //! \b I2S_CONFIG_FORMAT_LEFT_JUST for left justified format, or //! \b I2S_CONFIG_FORMAT_RIGHT_JUST for right justified format. //! 
- \b I2S_CONFIG_SCLK_INVERT to invert the polarity of the serial bit clock. //! - \b I2S_CONFIG_MODE_DUAL for dual-channel stereo, //! \b I2S_CONFIG_MODE_COMPACT_16 for 16-bit compact stereo mode, //! \b I2S_CONFIG_MODE_COMPACT_8 for 8-bit compact stereo mode, or //! \b I2S_CONFIG_MODE_MONO for single-channel mono format. //! - \b I2S_CONFIG_CLK_MASTER or \b I2S_CONFIG_CLK_SLAVE to select whether //! the I2S transmitter is the clock master or slave. //! - \b I2S_CONFIG_SAMPLE_SIZE_32, \b _24, \b _20, \b _16, or \b _8 //! to select the number of bits per sample. //! - \b I2S_CONFIG_WIRE_SIZE_32, \b _24, \b _20, \b _16, or \b _8 //! to select the number of bits per word that are transferred on the data //! line. //! - \b I2S_CONFIG_EMPTY_ZERO or \b I2S_CONFIG_EMPTY_REPEAT to select whether //! the module transmits zeroes or repeats the last sample when the FIFO is //! empty. //! //! \return None. // //***************************************************************************** void I2STxRxConfigSet(unsigned long ulBase, unsigned long ulConfig) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); ASSERT((ulConfig & (I2S_CONFIG_FORMAT_MASK | I2S_CONFIG_MODE_MASK | I2S_CONFIG_EMPTY_MASK | I2S_CONFIG_CLK_MASK | I2S_CONFIG_SAMPLE_SIZE_MASK | I2S_CONFIG_WIRE_SIZE_MASK)) == ulConfig); // // Clear out any prior configuration of the FIFO config registers. // HWREG(ulBase + I2S_O_TXFIFOCFG) = 0; HWREG(ulBase + I2S_O_RXFIFOCFG) = 0; // // If mono mode is used, then the FMM bit needs to be set. // if((ulConfig & I2S_CONFIG_MODE_MASK) == I2S_CONFIG_MODE_MONO) { HWREG(ulBase + I2S_O_RXFIFOCFG) |= I2S_RXFIFOCFG_FMM; ulConfig &= ~(I2S_CONFIG_MODE_MONO); } // // If a compact mode is used, then the CSS bit needs to be set. // if((ulConfig & I2S_CONFIG_MODE_MASK) == I2S_CONFIG_MODE_COMPACT_8) { HWREG(ulBase + I2S_O_TXFIFOCFG) |= I2S_TXFIFOCFG_CSS; HWREG(ulBase + I2S_O_RXFIFOCFG) |= I2S_RXFIFOCFG_CSS; } // // Write the configuration register. 
Because all the fields are specified // by the configuration parameter, it is not necessary to do a // read-modify-write. // HWREG(ulBase + I2S_O_TXCFG) = ulConfig; HWREG(ulBase + I2S_O_RXCFG) = ulConfig; } //***************************************************************************** // //! Selects the source of the master clock, internal or external. //! //! \param ulBase is the I2S module base address. //! \param ulMClock is the logical OR of the master clock configuration //! choices. //! //! This function selects whether the master clock is sourced from the device //! internal PLL or comes from an external pin. The I2S serial bit clock //! (SCLK) and left-right word clock (LRCLK) are derived from the I2S master //! clock. The transmit and receive modules can be configured independently. //! The \e ulMClock parameter is chosen from the following: //! //! - one of \b I2S_TX_MCLK_EXT or \b I2S_TX_MCLK_INT //! - one of \b I2S_RX_MCLK_EXT or \b I2S_RX_MCLK_INT //! //! \return None. // //***************************************************************************** void I2SMasterClockSelect(unsigned long ulBase, unsigned long ulMClock) { unsigned long ulConfig; // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); ASSERT((ulMClock & (I2S_TX_MCLK_EXT | I2S_RX_MCLK_EXT)) == ulMClock); // // Set the clock selection bits in the configuation word. // ulConfig = HWREG(ulBase + I2S_O_CFG) & ~(I2S_TX_MCLK_EXT | I2S_RX_MCLK_EXT); HWREG(ulBase + I2S_O_CFG) = ulConfig | ulMClock; } //***************************************************************************** // //! Enables I2S interrupt sources. //! //! \param ulBase is the I2S module base address. //! \param ulIntFlags is a bit mask of the interrupt sources to be enabled. //! //! This function enables the specified I2S sources to generate interrupts. //! The \e ulIntFlags parameter can be the logical OR of any of the following //! values: //! //! - \b I2S_INT_RXERR for receive errors //! 
- \b I2S_INT_RXREQ for receive FIFO service requests //! - \b I2S_INT_TXERR for transmit errors //! - \b I2S_INT_TXREQ for transmit FIFO service requests //! //! \return None. // //***************************************************************************** void I2SIntEnable(unsigned long ulBase, unsigned long ulIntFlags) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); ASSERT((ulIntFlags & (I2S_INT_RXERR | I2S_INT_RXREQ | I2S_INT_TXERR | I2S_INT_TXREQ)) == ulIntFlags); // // Enable the specified interrupts. // HWREG(ulBase + I2S_O_IM) |= ulIntFlags; } //***************************************************************************** // //! Disables I2S interrupt sources. //! //! \param ulBase is the I2S module base address. //! \param ulIntFlags is a bit mask of the interrupt sources to be disabled. //! //! This function disables the specified I2S sources for interrupt //! generation. The \e ulIntFlags parameter can be the logical OR //! of any of the following values: \b I2S_INT_RXERR, \b I2S_INT_RXREQ, //! \b I2S_INT_TXERR, or \b I2S_INT_TXREQ. //! //! \return None. // //***************************************************************************** void I2SIntDisable(unsigned long ulBase, unsigned long ulIntFlags) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); ASSERT((ulIntFlags & (I2S_INT_RXERR | I2S_INT_RXREQ | I2S_INT_TXERR | I2S_INT_TXREQ)) == ulIntFlags); // // Enable the specified interrupts. // HWREG(ulBase + I2S_O_IM) &= ~ulIntFlags; } //***************************************************************************** // //! Gets the I2S interrupt status. //! //! \param ulBase is the I2S module base address. //! \param bMasked is set \b true to get the masked interrupt status, or //! \b false to get the raw interrupt status. //! //! This function returns the I2S interrupt status. It can return either //! the raw or masked interrupt status. //! //! \return Returns the masked or raw I2S interrupt status, as a bit field //! 
of any of the following values: \b I2S_INT_RXERR, \b I2S_INT_RXREQ, //! \b I2S_INT_TXERR, or \b I2S_INT_TXREQ // //***************************************************************************** unsigned long I2SIntStatus(unsigned long ulBase, tBoolean bMasked) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Return either the interrupt status or the raw interrupt status as // requested. // if(bMasked) { return(HWREG(ulBase + I2S_O_MIS)); } else { return(HWREG(ulBase + I2S_O_RIS)); } } //***************************************************************************** // //! Clears pending I2S interrupt sources. //! //! \param ulBase is the I2S module base address. //! \param ulIntFlags is a bit mask of the interrupt sources to be cleared. //! //! This function clears the specified pending I2S interrupts. This function //! must be called in the interrupt handler to keep the interrupt from being //! triggered again immediately upon exit. The \e ulIntFlags parameter can be //! the logical OR of any of the following values: \b I2S_INT_RXERR, //! \b I2S_INT_RXREQ, \b I2S_INT_TXERR, or \b I2S_INT_TXREQ. //! //! \note Because there is a write buffer in the Cortex-M processor, it may //! take several clock cycles before the interrupt source is actually cleared. //! Therefore, it is recommended that the interrupt source be cleared early in //! the interrupt handler (as opposed to the very last action) to avoid //! returning from the interrupt handler before the interrupt source is //! actually cleared. Failure to do so may result in the interrupt handler //! being immediately reentered (because the interrupt controller still sees //! the interrupt source asserted). //! //! \return None. // //***************************************************************************** void I2SIntClear(unsigned long ulBase, unsigned long ulIntFlags) { // // Check the arguments. 
// ASSERT(ulBase == I2S0_BASE); ASSERT((ulIntFlags & (I2S_INT_RXERR | I2S_INT_RXREQ | I2S_INT_TXERR | I2S_INT_TXREQ)) == ulIntFlags); // // Clear the requested interrupt sources. // HWREG(ulBase + I2S_O_IC) = ulIntFlags; } //***************************************************************************** // //! Registers an interrupt handler for the I2S controller. //! //! \param ulBase is the I2S module base address. //! \param pfnHandler is a pointer to the function to be called when the //! interrupt is activated. //! //! This function sets and enables the handler to be called when the I2S //! controller generates an interrupt. Specific I2S interrupts must still be //! enabled with the I2SIntEnable() function. It is the responsibility of the //! interrupt handler to clear any pending interrupts with I2SIntClear(). //! //! \sa IntRegister() for important information about registering interrupt //! handlers. //! //! \return None. // //***************************************************************************** void I2SIntRegister(unsigned long ulBase, void (*pfnHandler)(void)) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); ASSERT(pfnHandler); // // Register the interrupt handler. // IntRegister(INT_I2S0, pfnHandler); // // Enable the I2S interface interrupt. // IntEnable(INT_I2S0); } //***************************************************************************** // //! Unregisters an interrupt handler for the I2S controller. //! //! \param ulBase is the I2S module base address. //! //! This function disables and clears the handler to be called when the //! I2S interrupt occurs. //! //! \sa IntRegister() for important information about registering interrupt //! handlers. //! //! \return None. // //***************************************************************************** void I2SIntUnregister(unsigned long ulBase) { // // Check the arguments. // ASSERT(ulBase == I2S0_BASE); // // Disable the I2S interface interrupt. 
// IntDisable(INT_I2S0); // // Unregister the interrupt handler. // IntUnregister(INT_I2S0); } //***************************************************************************** // // Close the Doxygen group. //! @} // //*****************************************************************************
gpl-2.0
mirror3000/jedi-outcast
code/ff/ff_HandleTable.cpp
18
2737
#include "common_headers.h" #ifdef _IMMERSION ////---------------------- /// FFHandleTable::Convert //-------------------------- // // ffHandle_t FFHandleTable::Convert( ChannelCompound &compound, const char *name, qboolean create ) { ffHandle_t ff = FF_HANDLE_NULL; // Reserve a handle for effects that failed to create. // Rerouting channels to other devices may cause an effect to become lost. // This assumes that FF_Register is always called with legitimate effect names. // See CMD_FF_Play on how to handle possibly-bogus user input. // (It does not call this function) if ( compound.GetSet().size() ) ff = Convert( compound ); else { for ( FFHandleTable::RegFail::iterator itRegFail = mRegFail.begin() ; itRegFail != mRegFail.end() && (*itRegFail).second != name ; itRegFail++ ); ff = ( itRegFail != mRegFail.end() ? (*itRegFail).first : FF_HANDLE_NULL ); } if ( ff == FF_HANDLE_NULL ) { mVector.push_back( compound ); ff = mVector.size() - 1; // Remember effect name for future 'ff_restart' calls. if ( create && !compound.GetSet().size() ) mRegFail[ ff ] = name; } return ff; } ////---------------------- /// FFHandleTable::Convert //-------------------------- // Looks for 'compound' in the table. // // Assumes: // * 'compound' is non-empty // // Returns: // ffHandle_t // ffHandle_t FFHandleTable::Convert( ChannelCompound &compound ) { for ( int i = 1 ; i < mVector.size() && mVector[ i ] != compound ; i++ ); return ( i < mVector.size() ? 
i : FF_HANDLE_NULL ); } ////----------------------------- /// FFHandleTable::GetFailedNames //--------------------------------- // // qboolean FFHandleTable::GetFailedNames( TNameTable &NameTable ) { for ( RegFail::iterator itRegFail = mRegFail.begin() ; itRegFail != mRegFail.end() ; itRegFail++ ){ NameTable[ (*itRegFail).first ] = (*itRegFail).second; } return qboolean( mRegFail.size() != 0 ); } ////-------------------------- /// FFHandleTable::GetChannels //------------------------------ // // qboolean FFHandleTable::GetChannels( vector<int> &channel ) { //ASSERT( channel.size() >= mVector.size() ); for ( int i = 1 ; i < mVector.size() ; i++ ){ channel[ i ] = mVector[ i ].GetChannel(); } return qtrue; } const char *FFHandleTable::GetName( ffHandle_t ff ) { const char *result = NULL; if ( !mVector[ ff ].IsEmpty() ) { result = mVector[ ff ].GetName(); } else { RegFail::iterator itRegFail = mRegFail.find( ff ); if ( itRegFail != mRegFail.end() ) result = (*itRegFail).second.c_str(); } return result; } #endif // _IMMERSION
gpl-2.0
Nedj/CG-Blizzlike
src/server/scripts/Kalimdor/CavernsOfTime/BattleForMountHyjal/hyjalAI.cpp
18
51575
/*
 * Copyright (C) 2008-2011 TrinityCore <http://www.trinitycore.org/>
 * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/* ScriptData
SDName: HyjalAI
SD%Complete: 90
SDComment:
SDCategory: Caverns of Time, Mount Hyjal
EndScriptData */

#include "ScriptPCH.h"
#include "hyjalAI.h"
#include "hyjal_trash.h"
#include "MapManager.h"
#include "Language.h"
#include "Chat.h"
#include "Object.h"

// Row indices into SpawnPointSpecial[] below.
#define SPAWN_GARG_GATE 0
#define SPAWN_WYRM_GATE 1
#define SPAWN_NEAR_TOWER 2

// Yelled (MonsterYell) by Jaina/Thrall on reaching the retreat waypoint.
#define YELL_HURRY "Hurry, we don't have much time"

// Locations for summoning gargoyles and frost wyrms in special cases
float SpawnPointSpecial[3][3]=
{
    {5497.08f, -2493.23f, 1535.72f}, // spawn point for the gargoyles near the horde gate
    {5624.53f, -2548.12f, 1551.54f}, // spawn point for the frost wyrm near the horde gate
    {5604.41f, -2811.98f, 1547.77f}  // spawn point for the gargoyles and wyrms near the horde tower
};

// Locations for summoning waves in Alliance base
float AllianceBase[4][3]=
{
    {4928.48f, -1526.38f, 1326.83f},
    {4923.54f, -1514.29f, 1327.98f},
    {4928.41f, -1510.35f, 1327.99f},
    {4938.35f, -1521.00f, 1326.69f}
};

// {x, y, z, orientation} for the Jaina stand-in summoned during the horde retreat.
float JainaDummySpawn[2][4]=
{
    {5497.01f, -2719.03f, 1483.08f, 2.90426f},
    {5484.98f, -2721.69f, 1483.39f, 6.00656f}
};

// Locations for summoning waves in Horde base
float HordeBase[4][3]=
{
    {5458.01f, -2340.27f, 1459.60f},
    {5466.01f, -2334.69f, 1460.06f},
    {5468.45f, -2355.13f, 1459.99f},
    {5479.06f, -2344.16f, 1461.74f}
};

// Lady Jaina's waypoints when retreating
float JainaWPs[2][3]=
{
    {5078.56f, -1789.79f, 1320.73f}, // next to the small stairs
    {5037.38f, -1778.39f, 1322.61f}, // center of alliance base
};

float InfernalPos[8][3]= // spawn points for the infernals in the horde base
{
    {5453.59f, -2764.52f, 1493.50f},
    {5478.4f, -2781.77f, 1497.52f},
    {5506.09f, -2780.53f, 1496.32f},
    {5532.1f, -2763.42f, 1492.37f},
    {5544.16f, -2733.99f, 1487.14f},
    {5536.19f, -2708.18f, 1480.01f},
    {5510.16f, -2691.75f, 1479.66f},
    {5482.39f, -2689.19f, 1481.09f}
};

float InfernalSPWP[26][4]= // spawn points for the infernals in the horde base used in the cleaning wave
{
    {5490.96f, -2718.72f, 1482.96f, 0.49773f},
    {5449.51f, -2723.32f, 1485.69f, 2.69685f},
    {5520.83f, -2624.68f, 1483.82f, 1.20459f},
    {5558.28f, -2629.26f, 1485.76f, 0.37992f},
    {5567.97f, -2758.69f, 1494.10f, 5.04125f},
    {5384.90f, -2793.47f, 1503.90f, 5.55412f},
    {5495.33f, -2814.06f, 1501.56f, 1.12055f},
    {5472.63f, -2929.39f, 1538.31f, 1.95777f},
    {5334.72f, -2952.29f, 1534.34f, 0.50869f},
    {5398.36f, -2872.46f, 1512.38f, 0.76787f},
    {5514.39f, -2768.63f, 1492.30f, 1.55721f},
    {5598.91f, -2703.75f, 1495.64f, 2.56644f},
    {5467.80f, -2655.93f, 1482.27f, 0.85819f},
    {5399.83f, -2985.24f, 1545.04f, 5.92559f},
    {5232.13f, -2967.05f, 1553.09f, 5.41351f},
    {5272.02f, -3082.52f, 1567.09f, 3.40681f},
    {5343.26f, -3120.71f, 1582.92f, 3.16727f},
    {5371.26f, -3175.24f, 1587.41f, 6.10466f},
    {5434.67f, -3177.91f, 1579.25f, 2.77850f},
    {5237.39f, -3149.25f, 1593.59f, 0.83855f},
    {5202.15f, -3016.64f, 1566.28f, 3.31256f},
    {5302.54f, -2914.37f, 1528.53f, 3.37146f},
    {5439.04f, -2834.81f, 1508.80f, 2.14231f},
    {5421.33f, -2771.04f, 1494.28f, 6.06223f},
    {5549.76f, -2692.93f, 1482.68f, 2.19414f},
    {5459.78f, -2755.71f, 1490.68f, 1.05139f}
};

// {x, y, z, o, rot0, rot1, rot2, rot3} — GO spawn transforms for the ancient gem veins.
// Rows 0-6 are used for the alliance base, rows 7-13 for the horde base (see SpawnVeins).
float VeinPos[14][8]= // spawn points of the ancient gem veins
{
    {5184.84f, -1982.59f, 1382.66f, 2.58079f, 0, 0, 0.960944f, 0.276742f},  // alliance
    {5107.66f, -2071.16f, 1368.37f, 2.65148f, 0, 0, 0.970124f, 0.242611f},  // alliance
    {5040.53f, -2227.65f, 1403.17f, 3.35049f, 0, 0, 0.99455f, -0.104257f},  // alliance
    {5187.59f, -2453.12f, 1455.51f, 5.87943f, 0, 0, 0.20051f, -0.979692f},  // alliance
    {5429.43f, -2340.65f, 1465.38f, 4.7681f, 0, 0, 0.687138f, -0.726527f},  // alliance
    {5463.99f, -2315.95f, 1470.29f, 1.52045f, 0, 0, 0.689084f, 0.724682f},  // alliance
    {5624.65f, -2495.09f, 1510.11f, 0.0124869f, 0, 0, 0.00624342f, 0.999981f}, // alliance
    {5285.41f, -3348.32f, 1663.01f, 1.57152f, 0, 0, 0.707362f, 0.706852f},  // horde
    {5417.69f, -3372.52f, 1656.31f, 0.361993f, 0, 0, 0.18001f, 0.983665f},  // horde
    {5315.34f, -3238.32f, 1622.88f, 3.03627f, 0, 0, 0.998614f, 0.0526347f}, // horde
    {5303.4f, -3096.44f, 1596.41f, 1.72073f, 0, 0, 0.758081f, 0.65216f},    // horde
    {5265.13f, -3177.27f, 1616.22f, 0.813604f, 0, 0, 0.395674f, 0.918391f}, // horde
    {5374.3f, -3420.59f, 1653.43f, 1.45762f, 0, 0, 0.665981f, 0.745969f},   // horde
    {5441.54f, -3321.59f, 1651.55f, 0.258306f, 0, 0, 0.128794f, 0.991671f}  // horde
};

float AllianceOverrunGargPos[5][4]= // gargoyle spawn points in the alliance overrun
{
    {5279.94f, -2049.68f, 1311.38f, 0},      // garg1
    {5289.15f, -2219.06f, 1291.12f, 0},      // garg2
    {5202.07f, -2136.10f, 1305.07f, 2.8f},   // garg3
    {5071.52f, -2425.63f, 1454.48f, 5.54f},  // garg4
    {5120.65f, -2467.92f, 1463.93f, 2.54f}   // garg5
};

// {x, y, z, o, rot0, rot1, rot2, rot3} — GO spawn transforms, see SummonGameObject calls below.
float AllianceFirePos[92][8]= // spawn points for the fire visuals (GO) in the alliance base
{
    {5039.9f, -1796.84f, 1323.88f, 2.59222f, 0, 0, 0.962511f, 0.271243f},
    {5087.2f, -1795.2f, 1320.68f, 1.03946f, 0, 0, 0.496644f, 0.867954f},
    {5112.68f, -1806.66f, 1359.93f, 1.37799f, 0, 0, 0.63576f, 0.771887f},
    {5095.61f, -1793.27f, 1359.78f, 0.580806f, 0, 0, 0.286338f, 0.958129f},
    {5090.43f, -1784.45f, 1360.44f, 0.796784f, 0, 0, 0.387937f, 0.921686f},
    {5139.25f, -1783.11f, 1359.39f, 3.30849f, 0, 0, 0.99652f, -0.0833509f},
    {5112.16f, -1763.72f, 1361.35f, 5.10312f, 0, 0, 0.556388f, -0.830922f},
    {4981.18f, -1793.98f, 1335.7f, 3.23072f, 0, 0, 0.999007f, -0.0445498f},
    {4996.57f, -1766.75f, 1341.62f, 3.5331f, 0, 0, 0.980902f, -0.194505f},
    {4983.74f, -1769.25f, 1345.75f, 3.79228f, 0, 0, 0.947541f, -0.319635f},
    {4996.01f, -1774.43f, 1330.71f, 3.07364f, 0, 0, 0.999423f, 0.0339693f},
    {5094.2f, -1726.13f, 1330.55f, 1.56175f, 0, 0, 0.703901f, 0.710298f},
    {5079.82f, -1721.24f, 1336.26f, 1.18868f, 0, 0, 0.559964f, 0.828517f},
    {5077.68f, -1717.15f, 1327.78f, 0.0145145f, 0, 0, 0.00725717f, 0.999974f},
    {5122.27f, -1738.22f, 1341.67f, 0.835256f, 0, 0, 0.405593f, 0.914054f},
    {5131.88f, -1741.15f, 1335.25f, 2.15472f, 0, 0, 0.880712f, 0.473653f},
    {5196.93f, -1772.99f, 1345.2f, 0.128397f, 0, 0, 0.0641544f, 0.99794f},
    {5225.33f, -1756.06f, 1344.17f, 3.04223f, 0, 0, 0.998766f, 0.0496599f},
    {5224.84f, -1767.05f, 1360.06f, 3.19538f, 0, 0, 0.999638f, -0.0268922f},
    {5202.05f, -1763.47f, 1361.68f, 2.59455f, 0, 0, 0.962826f, 0.270122f},
    {5194.74f, -1766.66f, 1356.94f, 0.0734191f, 0, 0, 0.0367013f, 0.999326f},
    {5159.67f, -1832.97f, 1344.5f, 5.17457f, 0, 0, 0.526356f, -0.850264f},
    {5096.17f, -1858.73f, 1332.46f, 5.30021f, 0, 0, 0.471939f, -0.881631f},
    {5110.7f, -1856.59f, 1342.84f, 5.97564f, 0, 0, 0.153167f, -0.9882f},
    {5109.76f, -1855.3f, 1332.38f, 4.89572f, 0, 0, 0.639411f, -0.768865f},
    {5068.95f, -1837.37f, 1328.81f, 2.61569f, 0, 0, 0.965628f, 0.25993f},
    {5064.4f, -1824.77f, 1329.02f, 2.16409f, 0, 0, 0.88292f, 0.469524f},
    {5059.89f, -1848.79f, 1329.59f, 0.0709955f, 0, 0, 0.0354903f, 0.99937f},
    {5014.37f, -1851.39f, 1322.56f, 4.66949f, 0, 0, 0.722111f, -0.691777f},
    {5025.1f, -1848.27f, 1323.39f, 4.44565f, 0, 0, 0.794854f, -0.606801f},
    {4942.63f, -1890.13f, 1326.59f, 3.28719f, 0, 0, 0.997351f, -0.0727343f},
    {4937.95f, -1888.71f, 1352.41f, 3.41678f, 0, 0, 0.990549f, -0.13716f},
    {4922.48f, -1881.92f, 1352.41f, 5.03077f, 0, 0, 0.586075f, -0.810257f},
    {4915.35f, -1894.32f, 1351.24f, 6.22457f, 0, 0, 0.0293048f, -0.999571f},
    {4922.71f, -1904.84f, 1352.56f, 1.37866f, 0, 0, 0.63602f, 0.771672f},
    {4932.89f, -1905.49f, 1352.56f, 1.89702f, 0, 0, 0.812549f, 0.582893f},
    {5011.83f, -1861.05f, 1345.86f, 4.43777f, 0, 0, 0.797239f, -0.603664f},
    {5011.83f, -1861.05f, 1363.26f, 4.748f, 0, 0, 0.694406f, -0.719583f},
    {5021.46f, -1858.35f, 1342.17f, 4.86188f, 0, 0, 0.652329f, -0.757936f},
    {4995.02f, -1698.3f, 1370.38f, 6.15779f, 0, 0, 0.0626579f, -0.998035f},
    {5119.85f, -1728.9f, 1336.04f, 5.87112f, 0, 0, 0.204579f, -0.97885f},
    {5214.75f, -1751.02f, 1342.5f, 5.08965f, 0, 0, 0.561972f, -0.827156f},
    {5075.04f, -1822.43f, 1328.87f, 3.99951f, 0, 0, 0.9094f, -0.415924f},
    {5057.09f, -1823.32f, 1350.35f, 3.88169f, 0, 0, 0.93231f, -0.361659f},
    {4984.6f, -1816.99f, 1329.21f, 3.05308f, 0, 0, 0.999021f, 0.0442417f},
    {4983.35f, -1811.55f, 1356.82f, 3.33975f, 0, 0, 0.995096f, -0.098917f},
    {4984.11f, -1825.73f, 1350.76f, 2.26375f, 0, 0, 0.905211f, 0.424962f},
    {4968.47f, -1786.46f, 1354.09f, 3.07663f, 0, 0, 0.999473f, 0.0324733f},
    {5061.82f, -1751.16f, 1339.07f, 5.94727f, 0, 0, 0.167171f, -0.985928f},
    {5063.75f, -1763, 1351.91f, 0.759707f, 0, 0, 0.370784f, 0.928719f},
    {5078.65f, -1708.26f, 1353.9f, 1.27022f, 0, 0, 0.593264f, 0.805008f},
    {4983.19f, -1755.96f, 1331.13f, 4.28221f, 0, 0, 0.841733f, -0.539894f},
    {4972.76f, -1755.3f, 1332.5f, 4.21938f, 0, 0, 0.858276f, -0.513188f},
    {4961.65f, -1760.82f, 1351.69f, 3.56515f, 0, 0, 0.977659f, -0.210198f},
    {5086.45f, -1779.83f, 1321.62f, 6.23157f, 0, 0, 0.0258051f, -0.999667f},
    {5063.15f, -1756.74f, 1328.56f, 0.886926f, 0, 0, 0.42907f, 0.903271f},
    {5042.45f, -1800.61f, 1323.88f, 2.50093f, 0, 0, 0.949131f, 0.31488f},
    {5084.74f, -1725.35f, 1327.89f, 1.65034f, 0, 0, 0.734663f, 0.678432f},
    {4993.25f, -1758.1f, 1331.07f, 3.49995f, 0, 0, 0.98399f, -0.178223f},
    {5078.53f, -1867.85f, 1348.91f, 5.85612f, 0, 0, 0.211913f, -0.977288f},
    {5080.74f, -1869.73f, 1333.18f, 6.18206f, 0, 0, 0.0505424f, -0.998722f},
    {5089.55f, -1894.13f, 1356.08f, 1.52072f, 0, 0, 0.689181f, 0.724589f},
    {5113.24f, -1899.49f, 1363.77f, 1.50108f, 0, 0, 0.682034f, 0.731321f},
    {4984.18f, -1907.69f, 1325.62f, 3.82193f, 0, 0, 0.942698f, -0.333646f},
    {5094.14f, -2432.08f, 1429.38f, 4.70083f, 0, 0, 0.711182f, -0.703007f},
    {5329.89f, -2113.30f, 1281.06f, 5.60560f, 0, 0, 0.332347f, -0.943157f},
    {5170.87f, -2148.13f, 1278.32f, 1.63540f, 0, 0, 0.729573f, 0.683903f},
    {5132.94f, -1960.25f, 1367.8f, 3.69787f, 0, 0, 0.961568f, -0.274566f},
    {5280.82f, -2351.55f, 1431.57f, 4.46913f, 0, 0, 0.787677f, -0.616088f},
    {5176.78f, -2121.43f, 1295.27f, 3.24153f, 0, 0, 0.998752f, -0.04995f},
    {5332.75f, -2101.41f, 1296.37f, 5.50350f, 0, 0, 0.380043f, -0.924969f},
    {5265.70f, -2050.27f, 1287.57f, 0.50051f, 0, 0, 0.247655f, 0.968848f},
    {5194.21f, -2129.89f, 1274.04f, 3.08053f, 0, 0, 0.999534f, 0.0305272f},
    {5225.81f, -1985.50f, 1364.15f, 0.37247f, 0, 0, 0.185163f, 0.982708f},
    {5339.46f, -2204.47f, 1280.45f, 0.99921f, 0, 0, 0.479081f, 0.877771f},
    {5269.63f, -2020.57f, 1299.62f, 3.00201f, 0, 0, 0.997566f, 0.0697332f},
    {5111.54f, -2445.70f, 1435.31f, 2.70983f, 0, 0, 0.976788f, 0.214207f},
    {5111.24f, -1901.14f, 1355.33f, 1.61028f, 0, 0, 0.720929f, 0.693009f},
    {5310.42f, -2207.82f, 1277.46f, 0.50441f, 0, 0, 0.249544f, 0.968363f},
    {5150.81f, -2042.13f, 1394.3f, 2.21031f, 0, 0, 0.893534f, 0.448995f},
    {5224.84f, -2376.61f, 1366.33f, 5.0621f, 0, 0, 0.573311f, -0.819338f},
    {5105.41f, -2454.86f, 1446.16f, 4.64584f, 0, 0, 0.730239f, -0.683191f},
    {5309.65f, -2188.28f, 1266.84f, 5.56631f, 0, 0, 0.350811f, -0.936446f},
    {5281.46f, -2047.82f, 1287.67f, 2.44909f, 0, 0, 0.940652f, 0.339373f},
    {5325.45f, -2189.41f, 1309.6f, 6.23783f, 0, 0, 0.0226771f, -0.999743f},
    {5190.96f, -2142.54f, 1293.03f, 6.25668f, 0, 0, 0.0132544f, -0.999912f},
    {5089.99f, -2467.49f, 1441.8f, 0.77381f, 0, 0, 0.377326f, 0.92608f},
    {5195.08f, -2129.01f, 1285.36f, 3.55727f, 0, 0, 0.978480f, -0.206344f},
    {5353.76f, -2116.28f, 1299.27f, 6.17894f, 0, 0, 0.0521006f, -0.998642f},
    {5271.14f, -2037.38f, 1299.24f, 4.07879f, 0, 0, 0.892201f, -0.451638f},
    {5332.5f, -2181.28f, 1279.95f, 4.6906f, 0, 0, 0.714768f, -0.699362f},
    {5108.2f, -2429.84f, 1427.73f, 4.5194f, 0, 0, 0.771943f, -0.635691f}
};

float HordeFirePos[65][8]= // spawn points for the fire visuals (GO) in the horde base
{
    {5524.11f, -2612.73f, 1483.38f, 1.96198f, 0, 0, 0.831047f, 0.556202f},
    {5514.42f, -2617.19f, 1505.77f, 1.82453f, 0, 0, 0.790892f, 0.611956f},
    {5510.21f, -2624.77f, 1485.34f, 1.71065f, 0, 0, 0.754783f, 0.655974f},
    {5570.72f, -2619.04f, 1487.62f, 0.728898f, 0, 0, 0.356435f, 0.93432f},
    {5570.29f, -2639.37f, 1487.31f, 1.49308f, 0, 0, 0.679104f, 0.734042f},
    {5583.56f, -2637.2f, 1503.78f, 1.46559f, 0, 0, 0.668951f, 0.743307f},
    {5571.53f, -2626.81f, 1510.99f, 0.362107f, 0, 0, 0.180066f, 0.983654f},
    {5545.97f, -2659.62f, 1489.64f, 5.07055f, 0, 0, 0.569845f, -0.821752f},
    {5557.44f, -2675.91f, 1482.58f, 1.70118f, 0, 0, 0.751671f, 0.659539f},
    {5594.98f, -2742.31f, 1495.51f, 4.5993f, 0, 0, 0.74594f, -0.666013f},
    {5599.65f, -2755.6f, 1505.05f, 1.66896f, 0, 0, 0.740947f, 0.671564f},
    {5565.95f, -2774.75f, 1499.48f, 6.22425f, 0, 0, 0.0294611f, -0.999566f},
    {5567.1f, -2769.7f, 1511.17f, 5.99257f, 0, 0, 0.144799f, -0.989461f},
    {5572.84f, -2774.16f, 1527.06f, 0.836428f, 0, 0, 0.406129f, 0.913816f},
    {5538.32f, -2805.94f, 1498.87f, 4.30082f, 0, 0, 0.836674f, -0.547701f},
    {5515.66f, -2801.74f, 1503.53f, 5.57316f, 0, 0, 0.347602f, -0.937642f},
    {5516.76f, -2827.14f, 1501.15f, 0.35026f, 0, 0, 0.174236f, 0.984704f},
    {5536.13f, -2813.51f, 1537.21f, 4.51681f, 0, 0, 0.772765f, -0.634692f},
    {5525.05f, -2825.16f, 1538.53f, 0.489275f, 0, 0, 0.242205f, 0.970225f},
    {5534.42f, -2815.45f, 1562.84f, 4.62834f, 0, 0, 0.736191f, -0.676774f},
    {5519.64f, -2831.12f, 1526.46f, 0.611008f, 0, 0, 0.300774f, 0.953696f},
    {5551.04f, -2827.55f, 1523.5f, 3.35206f, 0, 0, 0.994468f, -0.10504f},
    {5469.22f, -2802.87f, 1503.5f, 4.99509f, 0, 0, 0.600436f, -0.799673f},
    {5427.8f, -2737.26f, 1487.12f, 1.78673f, 0, 0, 0.779186f, 0.626793f},
    {5454.1f, -2709.1f, 1485.92f, 3.03552f, 0, 0, 0.998594f, 0.0530137f},
    {5436.3f, -2718.2f, 1506.02f, 2.7567f, 0, 0, 0.981539f, 0.191261f},
    {5412.6f, -2740.55f, 1510.79f, 2.98446f, 0, 0, 0.996915f, 0.0784832f},
    {5406.12f, -2752.48f, 1521.01f, 2.05769f, 0, 0, 0.856705f, 0.515807f},
    {5445.24f, -2676.35f, 1521.89f, 2.91378f, 0, 0, 0.99352f, 0.113661f},
    {5481.4f, -2665.08f, 1482.23f, 4.30001f, 0, 0, 0.836895f, -0.547363f},
    {5443.51f, -2675.44f, 1487.12f, 2.90986f, 0, 0, 0.993295f, 0.115606f},
    {5391.72f, -2647.3f, 1528.9f, 3.76987f, 0, 0, 0.951063f, -0.308997f},
    {5421.09f, -2734.12f, 1521.01f, 2.70567f, 0, 0, 0.97634f, 0.216242f},
    {5405.39f, -2710.33f, 1533.77f, 2.51324f, 0, 0, 0.951052f, 0.309032f},
    {5423.96f, -2703.76f, 1516.34f, 2.79206f, 0, 0, 0.984767f, 0.173879f},
    {5444.75f, -2735.23f, 1486.37f, 2.22657f, 0, 0, 0.897155f, 0.441715f},
    {5570.98f, -2747.91f, 1495.7f, 5.14433f, 0, 0, 0.53915f, -0.84221f},
    {5567.79f, -2673.9f, 1484.66f, 2.72529f, 0, 0, 0.978415f, 0.20665f},
    {5600.71f, -2696.8f, 1500.42f, 0.443704f, 0, 0, 0.220036f, 0.975492f},
    {5600.7f, -2693.04f, 1515.2f, 5.16003f, 0, 0, 0.532522f, -0.846416f},
    {5627.56f, -2839.66f, 1510.53f, 5.41527f, 0, 0, 0.420463f, -0.907309f},
    {5622.02f, -2868.71f, 1516.22f, 2.25482f, 0, 0, 0.903303f, 0.429002f},
    {5586.61f, -2878.97f, 1510.34f, 4.55604f, 0, 0, 0.76017f, -0.649724f},
    {5583.78f, -2843.71f, 1509.54f, 5.35715f, 0, 0, 0.44665f, -0.894709f},
    {5580.95f, -2811.3f, 1513.3f, 3.57587f, 0, 0, 0.976518f, -0.215434f},
    {5542.52f, -2869.31f, 1523.13f, 5.23304f, 0, 0, 0.501275f, -0.865288f},
    {5557.35f, -2866.36f, 1518.76f, 4.48299f, 0, 0, 0.783388f, -0.621533f},
    {5380.91f, -2849.36f, 1512.81f, 3.90962f, 0, 0, 0.927168f, -0.374646f},
    {5395.76f, -2881.41f, 1521.11f, 4.28426f, 0, 0, 0.84118f, -0.540755f},
    {5374.87f, -2859.63f, 1528.98f, 3.30252f, 0, 0, 0.996765f, -0.0803745f},
    {5356.07f, -2854.66f, 1520.34f, 5.83933f, 0, 0, 0.220108f, -0.975475f},
    {5363.01f, -2975.72f, 1539.02f, 4.13738f, 0, 0, 0.87859f, -0.477576f},
    {5336.85f, -2980.74f, 1561.24f, 5.11126f, 0, 0, 0.553001f, -0.83318f},
    {5335.23f, -2974.62f, 1540.05f, 5.04451f, 0, 0, 0.580496f, -0.814263f},
    {5422.37f, -2998.87f, 1549.98f, 4.51831f, 0, 0, 0.772288f, -0.635272f},
    {5405.54f, -3014.6f, 1562.16f, 5.86761f, 0, 0, 0.206298f, -0.978489f},
    {5427.96f, -3019.4f, 1561.58f, 3.53498f, 0, 0, 0.980718f, -0.19543f},
    {5348.12f, -2977.84f, 1582.47f, 3.94025f, 0, 0, 0.921323f, -0.388799f},
    {5331.12f, -2993.71f, 1576.14f, 0.0642734f, 0, 0, 0.0321311f, 0.999484f},
    {5321.63f, -2986.55f, 1552.2f, 5.29503f, 0, 0, 0.474219f, -0.880407f},
    {5292.1f, -2914.36f, 1529.52f, 2.9742f, 0, 0, 0.996499f, 0.083601f},
    {5281.77f, -2926.5f, 1530.62f, 1.67829f, 0, 0, 0.744071f, 0.6681f},
    {5287.19f, -2909.94f, 1543.49f, 3.31192f, 0, 0, 0.996376f, -0.0850591f},
    {5534.15f, -2679.35f, 1483.61f, 0.428685f, 0, 0, 0.212705f, 0.977116f},
    {5545.43f, -2647.82f, 1483.05f, 5.38848f, 0, 0, 0.432578f, -0.901596f}
};

// Constructor: initializes all overrun/retreat/teleport state to its idle defaults
// and caches the instance script pointer.
hyjalAI::hyjalAI(Creature* c) : npc_escortAI(c), Summons(me)
{
    instance = c->GetInstanceScript();
    VeinsSpawned[0] = false;
    VeinsSpawned[1] = false;
    for (uint8 i=0; i<14; ++i)
        VeinGUID[i] = 0;
    InfernalCount = 0;
    TeleportTimer = 1000;
    Overrun = false;
    Teleported = false;
    WaitForTeleport = false;
    OverrunCounter = 0;
    OverrunCounter2 = 0;
    InfernalPoint = 0;
    RespawnTimer = 10000;
    DoRespawn = false;
    DoHide = false;
    MassTeleportTimer = 0;
    DoMassTeleport = false;
}

// Track every creature we summon so Summons.DespawnAll() can clean up on death.
void hyjalAI::JustSummoned(Creature* summoned)
{
    Summons.Summon(summoned);
}

void hyjalAI::SummonedCreatureDespawn(Creature* summoned)
{
    Summons.Despawn(summoned);
}

// Resets the whole event state machine: GUIDs, timers, wave counters, faction
// (derived from the creature entry), flags, spell table and — for the "active"
// base leader only — the wave-related world states and instance trash count.
void hyjalAI::Reset()
{
    IsDummy = false;
    me->setActive(true);

    // GUIDs
    PlayerGUID = 0;
    BossGUID[0] = 0;
    BossGUID[1] = 0;

    // Timers
    NextWaveTimer = 10000;
    CheckTimer = 0;
    RetreatTimer = 1000;

    // Misc
    WaveCount = 0;
    EnemyCount = 0;

    // Set faction properly based on Creature entry
    switch (me->GetEntry())
    {
        case JAINA:
            Faction = 0;
            DoCast(me, SPELL_BRILLIANCE_AURA, true);
            break;
        case THRALL:
            Faction = 1;
            break;
        case TYRANDE:
            Faction = 2;
            break;
    }

    // Bools
    EventBegun = false;
    FirstBossDead = false;
    SecondBossDead = false;
    Summon = false;
    bRetreat = false;
    Debug = false;

    // Flags
    me->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP);

    // Initialize spells
    memset(Spells, 0, sizeof(Spell) * HYJAL_AI_MAX_SPELLS);

    // Reset instance data for trash count, but only on the leader whose base is
    // currently active (Jaina before the alliance retreat, Thrall after it).
    if (instance)
    {
        if ((!instance->GetData(DATA_ALLIANCE_RETREAT) && me->GetEntry() == JAINA) ||
            (instance->GetData(DATA_ALLIANCE_RETREAT) && me->GetEntry() == THRALL))
        {
            // Reset world states
            instance->DoUpdateWorldState(WORLD_STATE_WAVES, 0);
            instance->DoUpdateWorldState(WORLD_STATE_ENEMY, 0);
            instance->DoUpdateWorldState(WORLD_STATE_ENEMYCOUNT, 0);
            instance->SetData(DATA_RESET_TRASH_COUNT, 0);
        }
    }
    else
        sLog->outError(ERROR_INST_DATA);

    // Visibility: deferred to UpdateAI, which hides the leader if his base was overrun.
    DoHide = true;
}

// Leave combat without dropping Jaina's auras (she keeps Brilliance Aura up).
void hyjalAI::EnterEvadeMode()
{
    if (me->GetEntry() != JAINA)
        me->RemoveAllAuras();

    me->DeleteThreatList();
    me->CombatStop(true);
    me->LoadCreaturesAddon();

    if (me->isAlive())
        me->GetMotionMaster()->MoveTargetedHome();

    me->SetLootRecipient(NULL);
}

// Arm the per-spell cooldown timers and yell; the dummy Jaina clone never fights.
void hyjalAI::EnterCombat(Unit* /*who*/)
{
    if (IsDummy) return;
    for (uint8 i = 0; i < HYJAL_AI_MAX_SPELLS; ++i)
        if (Spells[i].Cooldown)
            SpellTimer[i] = Spells[i].Cooldown;
    Talk(ATTACKED);
}

void hyjalAI::MoveInLineOfSight(Unit* who)
{
    if (IsDummy)
        return;
    npc_escortAI::MoveInLineOfSight(who);
}

// Summons one wave member of the given entry. Base is the 4-slot spawn area of
// the defending base; gargoyles, frost wyrms and giant infernals override it
// with their special spawn points. Bosses get their GUID recorded for the
// death-watch in UpdateAI.
void hyjalAI::SummonCreature(uint32 entry, float Base[4][3])
{
    uint32 random = rand()%4;
    float SpawnLoc[3];

    for (uint8 i = 0; i < 3; ++i)
    {
        SpawnLoc[i] = Base[random][i];
    }

    Creature* creature = NULL;
    switch (entry)
    {
        case 17906:    // GARGOYLE
            if (!FirstBossDead && (WaveCount == 1 || WaveCount == 3))
            {
                // summon at tower
                creature = me->SummonCreature(entry, SpawnPointSpecial[SPAWN_NEAR_TOWER][0]+irand(-20, 20), SpawnPointSpecial[SPAWN_NEAR_TOWER][1]+irand(-20, 20), SpawnPointSpecial[SPAWN_NEAR_TOWER][2]+irand(-10, 10), 0, TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 120000);
                if (creature)
                    CAST_AI(hyjal_trashAI, creature->AI())->useFlyPath = true;
            }
            else
            {
                // summon at gate
                creature = me->SummonCreature(entry, SpawnPointSpecial[SPAWN_GARG_GATE][0]+irand(-10, 10), SpawnPointSpecial[SPAWN_GARG_GATE][1]+irand(-10, 10), SpawnPointSpecial[SPAWN_GARG_GATE][2]+irand(-10, 10), 0, TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 120000);
            }
            break;
        case 17907:    // FROST_WYRM
            if (FirstBossDead && WaveCount == 1) // summon at gate
                creature = me->SummonCreature(entry, SpawnPointSpecial[SPAWN_WYRM_GATE][0], SpawnPointSpecial[SPAWN_WYRM_GATE][1], SpawnPointSpecial[SPAWN_WYRM_GATE][2], 0, TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 120000);
            else
            {
                creature = me->SummonCreature(entry, SpawnPointSpecial[SPAWN_NEAR_TOWER][0], SpawnPointSpecial[SPAWN_NEAR_TOWER][1], SpawnPointSpecial[SPAWN_NEAR_TOWER][2], 0, TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 120000);
                if (creature)
                    CAST_AI(hyjal_trashAI, creature->AI())->useFlyPath = true;
            }
            break;
        case 17908:    // GIANT_INFERNAL
            // Cycle through the 8 fixed infernal spawn points.
            ++InfernalCount;
            if (InfernalCount > 7)
                InfernalCount = 0;
            creature = me->SummonCreature(entry, InfernalPos[InfernalCount][0], InfernalPos[InfernalCount][1], InfernalPos[InfernalCount][2], 0, TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 120000);
            break;
        default:
            creature = me->SummonCreature(entry, SpawnLoc[0], SpawnLoc[1], SpawnLoc[2], 0, TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 120000);
            break;
    }

    if (creature)
    {
        // Increment Enemy Count to be used in World States and instance script
        ++EnemyCount;

        creature->RemoveUnitMovementFlag(MOVEMENTFLAG_WALKING);
        creature->setActive(true);

        // Mark all known wave/boss entries as event members for the trash AI.
        switch (entry)
        {
            case NECROMANCER:
            case ABOMINATION:
            case GHOUL:
            case BANSHEE:
            case CRYPT_FIEND:
            case GARGOYLE:
            case FROST_WYRM:
            case GIANT_INFERNAL:
            case FEL_STALKER:
            case RAGE_WINTERCHILL:
            case ANETHERON:
            case KAZROGAL:
            case AZGALOR:
                CAST_AI(hyjal_trashAI, creature->AI())->IsEvent = true;
                break;
        }

        if (instance)
        {
            if (instance->GetData(DATA_RAIDDAMAGE) < MINRAIDDAMAGE)
                creature->SetDisableReputationGain(true); // no reputation for solo farming
        }

        // Check if Creature is a boss.
        if (creature->isWorldBoss())
        {
            if (!FirstBossDead)
                BossGUID[0] = creature->GetGUID();
            else
                BossGUID[1] = creature->GetGUID();
            CheckTimer = 5000;
        }
    }
}

// Summons wave number Count from the given wave table into the given base, then
// updates the wave/enemy world states and schedules the next wave (or, for a
// boss wave, stops auto-summoning until the boss dies).
void hyjalAI::SummonNextWave(const Wave wave[18], uint32 Count, float Base[4][3])
{
    // 1 in 4 chance we give a rally yell. Not sure if the chance is offlike.
    if (rand()%4 == 0)
        Talk(RALLY);

    if (!instance)
    {
        sLog->outError(ERROR_INST_DATA);
        return;
    }

    InfernalCount = 0; // reset infernal count every new wave
    EnemyCount = instance->GetData(DATA_TRASH);

    for (uint8 i = 0; i < 18; ++i)
    {
        if (wave[Count].Mob[i])
            SummonCreature(wave[Count].Mob[i], Base);
    }

    if (!wave[Count].IsBoss)
    {
        uint32 stateValue = Count+1;
        if (FirstBossDead)
            stateValue -= 9; // Subtract 9 from it to give the proper wave number if we are greater than 8

        instance->DoUpdateWorldState(WORLD_STATE_WAVES, stateValue); // Set world state to our current wave number
        instance->DoUpdateWorldState(WORLD_STATE_ENEMY, 1); // Enable world state
        instance->SetData(DATA_TRASH, EnemyCount); // Send data for instance script to update count

        if (!Debug)
            NextWaveTimer = wave[Count].WaveTimer;
        else
        {
            NextWaveTimer = 15000;
            sLog->outDebug(LOG_FILTER_TSCR, "TSCR: HyjalAI: debug mode is enabled. Next Wave in 15 seconds");
        }
    }
    else
    {
        // Set world state for waves to 0 to disable it.
        instance->DoUpdateWorldState(WORLD_STATE_WAVES, 0);
        instance->DoUpdateWorldState(WORLD_STATE_ENEMY, 1); // Set World State for enemies invading to 1.
        instance->DoUpdateWorldState(WORLD_STATE_ENEMYCOUNT, 1);

        Summon = false; // boss wave: wait for the boss kill before resuming
    }
    CheckTimer = 5000;
}

// Gossip entry point: starts the wave event for this base, strips the gossip
// flag and zeroes the event world states.
void hyjalAI::StartEvent(Player* player)
{
    if (!player || IsDummy || !instance)
        return;

    Talk(BEGIN);

    EventBegun = true;
    Summon = true;

    NextWaveTimer = 15000;
    CheckTimer = 5000;
    PlayerGUID = player->GetGUID();

    me->RemoveFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP);

    instance->DoUpdateWorldState(WORLD_STATE_WAVES, 0);
    instance->DoUpdateWorldState(WORLD_STATE_ENEMY, 0);
    instance->DoUpdateWorldState(WORLD_STATE_ENEMYCOUNT, 0);

    DeSpawnVeins();
}

// Thin accessor for instance data; returns 0 (and logs) when no instance script is bound.
uint32 hyjalAI::GetInstanceData(uint32 Event)
{
    if (instance)
        return instance->GetData(Event);
    else
        sLog->outError(ERROR_INST_DATA);

    return 0;
}

// Yells a random quote matching the given quote id, from Jaina's or Thrall's
// table depending on Faction (Tyrande/other factions say nothing).
void hyjalAI::Talk(uint32 id)
{
    // Collect the indices of all quotes whose id matches.
    std::list<uint8> index;
    for (uint8 i = 0; i < 9; ++i)
    {
        if (Faction == 0) // Alliance
        {
            if (JainaQuotes[i].id == id)
                index.push_back(i);
        }
        else if (Faction == 1) // Horde
        {
            if (ThrallQuotes[i].id == id)
                index.push_back(i);
        }
    }

    if (index.empty())
        return; // No quotes found, no use to continue

    // NOTE(review): picks first-match + random offset; this only selects a
    // matching quote if entries with the same id are contiguous in the table —
    // verify the quote tables in hyjalAI.h keep same-id entries adjacent.
    uint8 ind = *(index.begin()) + rand()%index.size();

    int32 YellId = 0;
    if (Faction == 0) // Alliance
    {
        YellId = JainaQuotes[ind].textid;
    }
    else if (Faction == 1) // Horde
    {
        YellId = ThrallQuotes[ind].textid;
    }
    if (YellId)
        DoScriptText(YellId, me);
}

// Starts the retreat after the second boss of a base: flags the instance,
// walks the leader out along escort waypoints (Thrall additionally summons a
// Jaina stand-in for the mass teleport), spawns the gem veins and arms the
// overrun phase.
void hyjalAI::Retreat()
{
    if (instance)
    {
        instance->SetData(TYPE_RETREAT, SPECIAL);

        if (Faction == 0)
        {
            instance->SetData(DATA_ALLIANCE_RETREAT, 1);
            AddWaypoint(0, JainaWPs[0][0], JainaWPs[0][1], JainaWPs[0][2]);
            AddWaypoint(1, JainaWPs[1][0], JainaWPs[1][1], JainaWPs[1][2]);
            Start(false, false);
            SetDespawnAtEnd(false); // move to center of alliance base
        }
        if (Faction == 1)
        {
            instance->SetData(DATA_HORDE_RETREAT, 1);

            // Summon a non-interactive Jaina clone that will cast the mass teleport.
            Creature* JainaDummy = me->SummonCreature(JAINA, JainaDummySpawn[0][0], JainaDummySpawn[0][1], JainaDummySpawn[0][2], JainaDummySpawn[0][3], TEMPSUMMON_TIMED_DESPAWN, 60000);
            if (JainaDummy)
            {
                JainaDummy->RemoveFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP);
                CAST_AI(hyjalAI, JainaDummy->AI())->IsDummy = true;
                DummyGuid = JainaDummy->GetGUID();
            }
            AddWaypoint(0, JainaDummySpawn[1][0], JainaDummySpawn[1][1], JainaDummySpawn[1][2]);
            Start(false, false);
            SetDespawnAtEnd(false); // move to center of alliance base
        }
    }
    SpawnVeins();
    Overrun = true;
    me->RemoveFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); // can't talk after overrun event started
}

// Spawns this base's seven ancient gem veins (VeinPos rows 0-6 for alliance,
// 7-13 for horde), at most once per base.
void hyjalAI::SpawnVeins()
{
    if (Faction == 0)
    {
        if (VeinsSpawned[0]) // prevent any buggers
            return;
        for (uint8 i = 0; i<7; ++i)
        {
            GameObject* gem = me->SummonGameObject(ANCIENT_VEIN, VeinPos[i][0], VeinPos[i][1], VeinPos[i][2], VeinPos[i][3], VeinPos[i][4], VeinPos[i][5], VeinPos[i][6], VeinPos[i][7], 0);
            if (gem)
                VeinGUID[i]=gem->GetGUID();
        }
        VeinsSpawned[0] = true;
    }
    else
    {
        if (VeinsSpawned[1])
            return;
        for (uint8 i = 7; i<14; ++i)
        {
            GameObject* gem = me->SummonGameObject(ANCIENT_VEIN, VeinPos[i][0], VeinPos[i][1], VeinPos[i][2], VeinPos[i][3], VeinPos[i][4], VeinPos[i][5], VeinPos[i][6], VeinPos[i][7], 0);
            if (gem)
                VeinGUID[i]=gem->GetGUID();
        }
        VeinsSpawned[1] = true;
    }
}

// Deletes the *other* base's gem veins when this leader's event starts:
// Thrall (Faction == 1) removes the veins tracked by Jaina's AI.
void hyjalAI::DeSpawnVeins()
{
    if (!instance)
        return;

    if (Faction == 1)
    {
        Creature* unit=Unit::GetCreature((*me), instance->GetData64(DATA_JAINAPROUDMOORE));
        if (!unit) return;
        hyjalAI* ai = CAST_AI(hyjalAI, unit->AI());
        if (!ai) return;
        for (uint8 i = 0; i<7; ++i)
        {
            if (GameObject* gem = instance->instance->GetGameObject(ai->VeinGUID[i]))
                gem->Delete();
        }
    }
    else if (Faction)
    {
        // NOTE(review): this branch is unreachable — when Faction == 0 the
        // condition `Faction` is false, so the horde veins (7-13) are never
        // despawned here. Looks like it was meant to be `else` or
        // `else if (Faction == 0)`; confirm intended behavior before changing.
        Creature* unit=Unit::GetCreature((*me), instance->GetData64(DATA_THRALL));
        if (!unit) return;
        hyjalAI* ai = CAST_AI(hyjalAI, unit->AI());
        if (!ai) return;
        for (uint8 i = 7; i<14; ++i)
        {
            if (GameObject* gem = instance->instance->GetGameObject(ai->VeinGUID[i]))
                gem->Delete();
        }
    }
}

// Per-tick driver: handles the dummy clone's mass teleport, deferred hiding of
// an already-overrun base, the leader respawn delay, overrun/retreat phases,
// wave scheduling, boss death-watch, and finally normal combat spell rotation.
void hyjalAI::UpdateAI(const uint32 diff)
{
    if (IsDummy)
    {
        // The Jaina stand-in only exists to cast the mass teleport on schedule.
        // NOTE(review): when DoMassTeleport is false the timer still decrements
        // and can wrap below zero (unsigned) — harmless only because the cast
        // is gated on DoMassTeleport; confirm before relying on the timer value.
        if (MassTeleportTimer < diff && DoMassTeleport)
        {
            DoCast(me, SPELL_MASS_TELEPORT, false);
            DoMassTeleport = false;
        }
        else
            MassTeleportTimer -= diff;
        return;
    }

    if (DoHide)
    {
        // Deferred from Reset: if this base was already overrun, hide the
        // leader and his guards and re-summon the fire visuals.
        DoHide = false;
        switch (me->GetEntry())
        {
            case JAINA:
                if (instance && instance->GetData(DATA_ALLIANCE_RETREAT))
                {
                    me->SetVisible(false);
                    HideNearPos(me->GetPositionX(), me->GetPositionY());
                    HideNearPos(5037.76f, -1889.71f);
                    for (uint8 i = 0; i < 92; ++i) // summon fires
                        me->SummonGameObject(FLAMEOBJECT, AllianceFirePos[i][0], AllianceFirePos[i][1], AllianceFirePos[i][2], AllianceFirePos[i][3], AllianceFirePos[i][4], AllianceFirePos[i][5], AllianceFirePos[i][6], AllianceFirePos[i][7], 0);
                }
                else
                    me->SetVisible(true);
                break;
            case THRALL: // thrall
                if (instance && instance->GetData(DATA_HORDE_RETREAT))
                {
                    me->SetVisible(false);
                    HideNearPos(me->GetPositionX(), me->GetPositionY());
                    HideNearPos(5563, -2763.19f);
                    HideNearPos(5542.2f, -2629.36f);
                    for (uint8 i = 0; i < 65; ++i) // summon fires
                        me->SummonGameObject(FLAMEOBJECT, HordeFirePos[i][0], HordeFirePos[i][1], HordeFirePos[i][2], HordeFirePos[i][3], HordeFirePos[i][4], HordeFirePos[i][5], HordeFirePos[i][6], HordeFirePos[i][7], 0);
                }
                else
                    me->SetVisible(true);
                break;
        }
    }

    if (DoRespawn)
    {
        // After a wipe (JustDied) stay invisible for RespawnTimer, then
        // respawn the base guards and reappear.
        if (RespawnTimer <= diff)
        {
            DoRespawn = false;
            RespawnNearPos(me->GetPositionX(), me->GetPositionY());
            if (Faction == 0)
            {
                RespawnNearPos(5037.76f, -1889.71f);
            }
            else if (Faction == 1)
            {
                RespawnNearPos(5563, -2763.19f);
                RespawnNearPos(5542.2f, -2629.36f);
            }
            me->SetVisible(true);
        }
        else
        {
            RespawnTimer -= diff;
            me->SetVisible(false);
        }
        return;
    }

    if (Overrun)
        DoOverrun(Faction, diff);

    if (bRetreat)
    {
        if (RetreatTimer <= diff)
        {
            IsDummy = true;
            bRetreat = false;
            HideNearPos(me->GetPositionX(), me->GetPositionY());
            switch (me->GetEntry())
            {
                case JAINA: // jaina
                    HideNearPos(5037.76f, -1889.71f);
                    break;
                case THRALL: // thrall
                    HideNearPos(5563, -2763.19f);
                    HideNearPos(5542.2f, -2629.36f);
                    HideNearPos(5603.75f, -2853.12f);
                    break;
            }
            me->SetVisible(false);
        }
        else
            RetreatTimer -= diff;
    }

    if (!EventBegun)
        return;

    if (Summon)
    {
        // When the current wave's trash is fully cleared, pull the next wave in early.
        if (instance && EnemyCount)
        {
            EnemyCount = instance->GetData(DATA_TRASH);
            if (!EnemyCount)
                NextWaveTimer = 5000;
        }

        if (NextWaveTimer <= diff)
        {
            if (Faction == 0)
                SummonNextWave(AllianceWaves, WaveCount, AllianceBase);
            else if (Faction == 1)
                SummonNextWave(HordeWaves, WaveCount, HordeBase);
            ++WaveCount;
        }
        else
            NextWaveTimer -= diff;
    }

    // Boss death-watch: every 5s, check whether a tracked boss has died and,
    // if so, end the current event segment and re-enable gossip.
    if (CheckTimer <= diff)
    {
        for (uint8 i = 0; i < 2; ++i)
        {
            if (BossGUID[i])
            {
                Unit* unit = Unit::GetUnit((*me), BossGUID[i]);
                if (unit && (!unit->isAlive()))
                {
                    if (BossGUID[i] == BossGUID[0])
                    {
                        Talk(INCOMING);
                        FirstBossDead = true;
                    }
                    else if (BossGUID[i] == BossGUID[1])
                    {
                        Talk(SUCCESS);
                        SecondBossDead = true;
                    }
                    EventBegun = false;
                    CheckTimer = 0;
                    me->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP);
                    BossGUID[i] = 0;
                    if (instance)
                        instance->DoUpdateWorldState(WORLD_STATE_ENEMY, 0); // Reset world state for enemies to disable it
                }
            }
        }
        CheckTimer = 5000;
    }
    else
        CheckTimer -= diff;

    if (!UpdateVictim())
        return;

    // Normal combat: run the configured spell rotation.
    for (uint8 i = 0; i < HYJAL_AI_MAX_SPELLS; ++i)
    {
        if (Spells[i].SpellId)
        {
            if (SpellTimer[i] <= diff)
            {
                if (me->IsNonMeleeSpellCasted(false))
                    me->InterruptNonMeleeSpells(false);

                Unit* target = NULL;

                switch (Spells[i].TargetType)
                {
                    case TARGETTYPE_SELF:
                        target = me;
                        break;
                    case TARGETTYPE_RANDOM:
                        target = SelectTarget(SELECT_TARGET_RANDOM, 0);
                        break;
                    case TARGETTYPE_VICTIM:
                        target = me->getVictim();
                        break;
                }

                if (target && target->isAlive())
                {
                    DoCast(target, Spells[i].SpellId);
                    SpellTimer[i] = Spells[i].Cooldown;
                }
            }
            else
                SpellTimer[i] -= diff;
        }
    }

    DoMeleeAttackIfReady();
}

// On leader death: immediately respawn but stay hidden for 2 minutes, despawn
// all wave summons, and roll back any in-progress boss encounter so the event
// does not get stuck.
void hyjalAI::JustDied(Unit* /*killer*/)
{
    if (IsDummy) return;
    me->Respawn();
    me->SetVisible(false);
    DoRespawn = true;
    RespawnTimer = 120000;
    Talk(DEATH);
    Summons.DespawnAll(); // despawn all wave's summons
    if (instance)
    {
        // reset encounter if boss is despawned (ex: thrall is killed, boss despawns, event gets stuck in progress)
        if (instance->GetData(DATA_RAGEWINTERCHILLEVENT) == IN_PROGRESS)
            instance->SetData(DATA_RAGEWINTERCHILLEVENT, NOT_STARTED);
        if (instance->GetData(DATA_ANETHERONEVENT) == IN_PROGRESS)
            instance->SetData(DATA_ANETHERONEVENT, NOT_STARTED);
        if (instance->GetData(DATA_KAZROGALEVENT) == IN_PROGRESS)
            instance->SetData(DATA_KAZROGALEVENT, NOT_STARTED);
        if (instance->GetData(DATA_AZGALOREVENT) == IN_PROGRESS)
            instance->SetData(DATA_AZGALOREVENT, NOT_STARTED);
        instance->SetData(DATA_RESET_RAIDDAMAGE, 0); // reset damage on die
    }
}

// Hides every friendly creature in the grid cell around (x, y) and flips it to
// faction 35 so overrun mobs ignore it.
void hyjalAI::HideNearPos(float x, float y)
{
    CellCoord pair(Trinity::ComputeCellCoord(x, y));
    Cell cell(pair);
    cell.SetNoCreate();

    // First get all creatures.
    std::list<Creature*> creatures;
    Trinity::AllFriendlyCreaturesInGrid creature_check(me);
    Trinity::CreatureListSearcher<Trinity::AllFriendlyCreaturesInGrid> creature_searcher(me, creatures, creature_check);
    TypeContainerVisitor <Trinity::CreatureListSearcher<Trinity::AllFriendlyCreaturesInGrid>, GridTypeMapContainer> creature_visitor(creature_searcher);

    cell.Visit(pair, creature_visitor, *(me->GetMap()), *me, me->GetGridActivationRange());

    if (!creatures.empty())
    {
        for (std::list<Creature*>::const_iterator itr = creatures.begin(); itr != creatures.end(); ++itr)
        {
            (*itr)->SetVisible(false);
            (*itr)->setFaction(35); // make them friendly so mobs won't attack them
        }
    }
}

// Respawns every world object in the grid cell around (x, y) (used to restore
// the base guards after the leader's death timeout).
void hyjalAI::RespawnNearPos(float x, float y)
{
    CellCoord p(Trinity::ComputeCellCoord(x, y));
    Cell cell(p);
    cell.SetNoCreate();

    Trinity::RespawnDo u_do;
    Trinity::WorldObjectWorker<Trinity::RespawnDo> worker(me, u_do);
    TypeContainerVisitor<Trinity::WorldObjectWorker<Trinity::RespawnDo>, GridTypeMapContainer > obj_worker(worker);
    cell.Visit(p, obj_worker, *me->GetMap(), *me, me->GetGridActivationRange());
}

// Escort-path hook: at the retreat end point (WP 1 for Jaina, WP 0 for
// Thrall), yell, start the 20s mass teleport (Thrall delegates the cast to the
// Jaina dummy) and order all nearby friendly guards to gather around.
void hyjalAI::WaypointReached(uint32 i)
{
    if (i == 1 || (i == 0 && me->GetEntry() == THRALL))
    {
        me->MonsterYell(YELL_HURRY, 0, 0);
        WaitForTeleport = true;
        TeleportTimer = 20000;
        if (me->GetEntry() == JAINA)
            DoCast(me, SPELL_MASS_TELEPORT, false);
        if (me->GetEntry() == THRALL && DummyGuid)
        {
            Unit* Dummy = Unit::GetUnit((*me), DummyGuid);
            if (Dummy)
            {
                CAST_AI(hyjalAI, CAST_CRE(Dummy)->AI())->DoMassTeleport = true;
                CAST_AI(hyjalAI, CAST_CRE(Dummy)->AI())->MassTeleportTimer = 20000;
                Dummy->CastSpell(me, SPELL_MASS_TELEPORT, false);
            }
        }
        // do some talking
        // all alive guards walk near here
        CellCoord pair(Trinity::ComputeCellCoord(me->GetPositionX(), me->GetPositionY()));
        Cell cell(pair);
        cell.SetNoCreate();

        std::list<Creature*> creatures;
        Trinity::AllFriendlyCreaturesInGrid creature_check(me);
        Trinity::CreatureListSearcher<Trinity::AllFriendlyCreaturesInGrid> creature_searcher(me, creatures, creature_check);
        TypeContainerVisitor <Trinity::CreatureListSearcher<Trinity::AllFriendlyCreaturesInGrid>, GridTypeMapContainer> creature_visitor(creature_searcher);

        cell.Visit(pair, creature_visitor, *(me->GetMap()), *me, me->GetGridActivationRange());
        if (!creatures.empty())
        {
            for (std::list<Creature*>::const_iterator itr = creatures.begin(); itr != creatures.end(); ++itr)
            {
                if ((*itr) && (*itr)->isAlive() && (*itr) != me && (*itr)->GetEntry() != JAINA)
                {
                    // Far-away guards run instead of walk.
                    if (!(*itr)->IsWithinDist(me, 60))
                        (*itr)->RemoveUnitMovementFlag(MOVEMENTFLAG_WALKING);
                    float x, y, z;
                    (*itr)->SetDefaultMovementType(IDLE_MOTION_TYPE);
                    (*itr)->GetMotionMaster()->Initialize();
                    float range = 10;
                    if (me->GetEntry() == THRALL) range = 20;
                    me->GetNearPoint(me, x, y, z, range, 0, me->GetAngle((*itr)));
                    (*itr)->GetMotionMaster()->MovePoint(0, x+irand(-5, 5), y+irand(-5, 5), me->GetPositionZ());
                }
            }
        }
    }
}

// Overrun phase driver, ticked from UpdateAI while Overrun is set: after the
// teleport delay elapses it teleports away (visual + friendly faction +
// non-attackable on nearby friendlies), then — exactly once — floods the
// abandoned base with fires and undead/infernal "cleaning wave" summons.
void hyjalAI::DoOverrun(uint32 faction, const uint32 diff)
{
    npc_escortAI::UpdateAI(diff);

    if (WaitForTeleport)
    {
        if (TeleportTimer <= diff)
        {
            CellCoord pair(Trinity::ComputeCellCoord(me->GetPositionX(), me->GetPositionY()));
            Cell cell(pair);
            cell.SetNoCreate();

            std::list<Creature*> creatures;
            Trinity::AllFriendlyCreaturesInGrid creature_check(me);
            Trinity::CreatureListSearcher<Trinity::AllFriendlyCreaturesInGrid> creature_searcher(me, creatures, creature_check);
            TypeContainerVisitor <Trinity::CreatureListSearcher<Trinity::AllFriendlyCreaturesInGrid>, GridTypeMapContainer> creature_visitor(creature_searcher);

            cell.Visit(pair, creature_visitor, *(me->GetMap()), *me, me->GetGridActivationRange());
            if (!creatures.empty())
            {
                for (std::list<Creature*>::const_iterator itr = creatures.begin(); itr != creatures.end(); ++itr)
                {
                    if ((*itr) && (*itr)->isAlive())
                    {
                        (*itr)->CastSpell(*itr, SPELL_TELEPORT_VISUAL, true);
                        (*itr)->setFaction(35); // make them friendly so mobs won't attack them
                        (*itr)->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE);
                    }
                }
                DoCast(me, SPELL_TELEPORT_VISUAL);
                bRetreat = true;
                RetreatTimer = 1000;
            }
            WaitForTeleport = false;
            Teleported = true;
        }
        TeleportTimer -= diff;
    }

    if (!Teleported)
        return;

    Overrun = false; // execute once
    switch (faction)
    {
        case 0: // alliance
            for (uint8 i = 0; i < 92; ++i) // summon fires
                me->SummonGameObject(FLAMEOBJECT, AllianceFirePos[i][0], AllianceFirePos[i][1], AllianceFirePos[i][2], AllianceFirePos[i][3], AllianceFirePos[i][4], AllianceFirePos[i][5], AllianceFirePos[i][6], AllianceFirePos[i][7], 0);
            for (uint8 i = 0; i < 25; ++i) // summon 25 ghouls
            {
                uint8 r = rand()%4;
                Creature* unit = me->SummonCreature(GHOUL, AllianceBase[r][0]+irand(-15, 15), AllianceBase[r][1]+irand(-15, 15), AllianceBase[r][2], 0, TEMPSUMMON_MANUAL_DESPAWN, 2*60*1000);
                if (unit)
                {
                    CAST_AI(hyjal_trashAI, unit->AI())->faction = Faction;
                    CAST_AI(hyjal_trashAI, unit->AI())->IsOverrun = true;
                    CAST_AI(hyjal_trashAI, unit->AI())->OverrunType = i;
                    unit->setActive(true);
                }
            }
            for (uint8 i = 0; i < 3; ++i) // summon 3 abominations
            {
                uint8 r = rand()%4;
                Creature* unit = me->SummonCreature(ABOMINATION, AllianceBase[r][0]+irand(-15, 15), AllianceBase[r][1]+irand(-15, 15), AllianceBase[r][2], 0, TEMPSUMMON_MANUAL_DESPAWN, 2*60*1000);
                if (unit)
                {
                    CAST_AI(hyjal_trashAI, unit->AI())->faction = Faction;
                    CAST_AI(hyjal_trashAI, unit->AI())->IsOverrun = true;
                    CAST_AI(hyjal_trashAI, unit->AI())->OverrunType = i;
                    unit->setActive(true);
                }
            }
            for (uint8 i = 0; i < 5; ++i) // summon 5 gargoyles
            {
                Creature* unit = me->SummonCreature(GARGOYLE, AllianceOverrunGargPos[i][0], AllianceOverrunGargPos[i][1], AllianceOverrunGargPos[i][2], AllianceOverrunGargPos[i][3], TEMPSUMMON_MANUAL_DESPAWN, 2*60*1000);
                if (unit)
                {
                    unit->SetHomePosition(AllianceOverrunGargPos[i][0], AllianceOverrunGargPos[i][1], AllianceOverrunGargPos[i][2], AllianceOverrunGargPos[i][3]);
                    CAST_AI(hyjal_trashAI, unit->AI())->faction = Faction;
                    CAST_AI(hyjal_trashAI, unit->AI())->IsOverrun = true;
                    CAST_AI(hyjal_trashAI, unit->AI())->OverrunType = i;
                    unit->setActive(true);
                }
            }
            break;
        case 1: // horde
            for (uint8 i = 0; i < 65; ++i) // summon fires
                me->SummonGameObject(FLAMEOBJECT, HordeFirePos[i][0], HordeFirePos[i][1], HordeFirePos[i][2], HordeFirePos[i][3], HordeFirePos[i][4], HordeFirePos[i][5], HordeFirePos[i][6], HordeFirePos[i][7], 0);
            for (uint8 i = 0; i < 26; ++i) // summon infernals
            {
                Creature* unit = me->SummonCreature(GIANT_INFERNAL, InfernalSPWP[i][0], InfernalSPWP[i][1], InfernalSPWP[i][2], InfernalSPWP[i][3], TEMPSUMMON_MANUAL_DESPAWN, 2*60*1000);
                if (unit)
                {
                    unit->SetHomePosition(InfernalSPWP[i][0], InfernalSPWP[i][1], InfernalSPWP[i][2], InfernalSPWP[i][3]);
                    CAST_AI(hyjal_trashAI, unit->AI())->faction = Faction;
                    CAST_AI(hyjal_trashAI, unit->AI())->IsOverrun = true;
                    CAST_AI(hyjal_trashAI, unit->AI())->OverrunType = i;
                    unit->setActive(true);
                }
            }
            for (uint8 i = 0; i < 25; ++i) // summon 25 ghouls
            {
                uint8 r = rand()%4;
                Creature* unit = me->SummonCreature(GHOUL, HordeBase[r][0]+irand(-15, 15), HordeBase[r][1]+irand(-15, 15), HordeBase[r][2], 0, TEMPSUMMON_MANUAL_DESPAWN, 2*60*1000);
                if (unit)
                {
                    CAST_AI(hyjal_trashAI, unit->AI())->faction = Faction;
                    CAST_AI(hyjal_trashAI, unit->AI())->IsOverrun = true;
                    CAST_AI(hyjal_trashAI, unit->AI())->OverrunType = i;
                    unit->setActive(true);
                }
            }
            for (uint8 i = 0; i < 5; ++i) // summon 5 abominations
            {
                uint8 r = rand()%4;
                Creature* unit = me->SummonCreature(ABOMINATION, HordeBase[r][0]+irand(-15, 15), HordeBase[r][1]+irand(-15, 15), HordeBase[r][2], 0, TEMPSUMMON_MANUAL_DESPAWN, 2*60*1000);
                if (unit)
                {
                    CAST_AI(hyjal_trashAI, unit->AI())->faction = Faction;
                    CAST_AI(hyjal_trashAI, unit->AI())->IsOverrun = true;
                    CAST_AI(hyjal_trashAI, unit->AI())->OverrunType = i;
                    unit->setActive(true);
                }
            }
            break;
    }
}
gpl-2.0
tangyiyong/collectd
src/amqp.c
18
35401
/** * collectd - src/amqp.c * Copyright (C) 2009 Sebastien Pahl * Copyright (C) 2010-2012 Florian Forster * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Sebastien Pahl <sebastien.pahl at dotcloud.com> * Florian Forster <octo at collectd.org> **/ #include "collectd.h" #include "common.h" #include "plugin.h" #include "utils_cmd_putval.h" #include "utils_format_json.h" #include "utils_format_graphite.h" #include <pthread.h> #include <amqp.h> #include <amqp_framing.h> #ifdef HAVE_AMQP_TCP_SOCKET_H # include <amqp_tcp_socket.h> #endif #ifdef HAVE_AMQP_SOCKET_H # include <amqp_socket.h> #endif #ifdef HAVE_AMQP_TCP_SOCKET #if defined HAVE_DECL_AMQP_SOCKET_CLOSE && !HAVE_DECL_AMQP_SOCKET_CLOSE /* rabbitmq-c does not currently ship amqp_socket.h * and, thus, does not define this function. */ int amqp_socket_close(amqp_socket_t *); #endif #endif /* Defines for the delivery mode. I have no idea why they're not defined by the * library.. 
*/ #define CAMQP_DM_VOLATILE 1 #define CAMQP_DM_PERSISTENT 2 #define CAMQP_FORMAT_COMMAND 1 #define CAMQP_FORMAT_JSON 2 #define CAMQP_FORMAT_GRAPHITE 3 #define CAMQP_CHANNEL 1 /* * Data types */ struct camqp_config_s { _Bool publish; char *name; char *host; int port; char *vhost; char *user; char *password; char *exchange; char *routing_key; /* Number of seconds to wait before connection is retried */ int connection_retry_delay; /* publish only */ uint8_t delivery_mode; _Bool store_rates; int format; /* publish & graphite format only */ char *prefix; char *postfix; char escape_char; unsigned int graphite_flags; /* subscribe only */ char *exchange_type; char *queue; _Bool queue_durable; _Bool queue_auto_delete; amqp_connection_state_t connection; pthread_mutex_t lock; }; typedef struct camqp_config_s camqp_config_t; /* * Global variables */ static const char *def_host = "localhost"; static const char *def_vhost = "/"; static const char *def_user = "guest"; static const char *def_password = "guest"; static const char *def_exchange = "amq.fanout"; static pthread_t *subscriber_threads = NULL; static size_t subscriber_threads_num = 0; static _Bool subscriber_threads_running = 1; #define CONF(c,f) (((c)->f != NULL) ? 
(c)->f : def_##f) /* * Functions */ static void camqp_close_connection (camqp_config_t *conf) /* {{{ */ { int sockfd; if ((conf == NULL) || (conf->connection == NULL)) return; sockfd = amqp_get_sockfd (conf->connection); amqp_channel_close (conf->connection, CAMQP_CHANNEL, AMQP_REPLY_SUCCESS); amqp_connection_close (conf->connection, AMQP_REPLY_SUCCESS); amqp_destroy_connection (conf->connection); close (sockfd); conf->connection = NULL; } /* }}} void camqp_close_connection */ static void camqp_config_free (void *ptr) /* {{{ */ { camqp_config_t *conf = ptr; if (conf == NULL) return; camqp_close_connection (conf); sfree (conf->name); sfree (conf->host); sfree (conf->vhost); sfree (conf->user); sfree (conf->password); sfree (conf->exchange); sfree (conf->exchange_type); sfree (conf->queue); sfree (conf->routing_key); sfree (conf->prefix); sfree (conf->postfix); sfree (conf); } /* }}} void camqp_config_free */ static char *camqp_bytes_cstring (amqp_bytes_t *in) /* {{{ */ { char *ret; if ((in == NULL) || (in->bytes == NULL)) return (NULL); ret = malloc (in->len + 1); if (ret == NULL) return (NULL); memcpy (ret, in->bytes, in->len); ret[in->len] = 0; return (ret); } /* }}} char *camqp_bytes_cstring */ static _Bool camqp_is_error (camqp_config_t *conf) /* {{{ */ { amqp_rpc_reply_t r; r = amqp_get_rpc_reply (conf->connection); if (r.reply_type == AMQP_RESPONSE_NORMAL) return (0); return (1); } /* }}} _Bool camqp_is_error */ static char *camqp_strerror (camqp_config_t *conf, /* {{{ */ char *buffer, size_t buffer_size) { amqp_rpc_reply_t r; r = amqp_get_rpc_reply (conf->connection); switch (r.reply_type) { case AMQP_RESPONSE_NORMAL: sstrncpy (buffer, "Success", sizeof (buffer)); break; case AMQP_RESPONSE_NONE: sstrncpy (buffer, "Missing RPC reply type", sizeof (buffer)); break; case AMQP_RESPONSE_LIBRARY_EXCEPTION: #if HAVE_AMQP_RPC_REPLY_T_LIBRARY_ERRNO if (r.library_errno) return (sstrerror (r.library_errno, buffer, buffer_size)); #else if (r.library_error) return 
(sstrerror (r.library_error, buffer, buffer_size)); #endif else sstrncpy (buffer, "End of stream", sizeof (buffer)); break; case AMQP_RESPONSE_SERVER_EXCEPTION: if (r.reply.id == AMQP_CONNECTION_CLOSE_METHOD) { amqp_connection_close_t *m = r.reply.decoded; char *tmp = camqp_bytes_cstring (&m->reply_text); ssnprintf (buffer, buffer_size, "Server connection error %d: %s", m->reply_code, tmp); sfree (tmp); } else if (r.reply.id == AMQP_CHANNEL_CLOSE_METHOD) { amqp_channel_close_t *m = r.reply.decoded; char *tmp = camqp_bytes_cstring (&m->reply_text); ssnprintf (buffer, buffer_size, "Server channel error %d: %s", m->reply_code, tmp); sfree (tmp); } else { ssnprintf (buffer, buffer_size, "Server error method %#"PRIx32, r.reply.id); } break; default: ssnprintf (buffer, buffer_size, "Unknown reply type %i", (int) r.reply_type); } return (buffer); } /* }}} char *camqp_strerror */ #if HAVE_AMQP_RPC_REPLY_T_LIBRARY_ERRNO static int camqp_create_exchange (camqp_config_t *conf) /* {{{ */ { amqp_exchange_declare_ok_t *ed_ret; if (conf->exchange_type == NULL) return (0); ed_ret = amqp_exchange_declare (conf->connection, /* channel = */ CAMQP_CHANNEL, /* exchange = */ amqp_cstring_bytes (conf->exchange), /* type = */ amqp_cstring_bytes (conf->exchange_type), /* passive = */ 0, /* durable = */ 0, /* auto_delete = */ 1, /* arguments = */ AMQP_EMPTY_TABLE); if ((ed_ret == NULL) && camqp_is_error (conf)) { char errbuf[1024]; ERROR ("amqp plugin: amqp_exchange_declare failed: %s", camqp_strerror (conf, errbuf, sizeof (errbuf))); camqp_close_connection (conf); return (-1); } INFO ("amqp plugin: Successfully created exchange \"%s\" " "with type \"%s\".", conf->exchange, conf->exchange_type); return (0); } /* }}} int camqp_create_exchange */ #else static int camqp_create_exchange (camqp_config_t *conf) /* {{{ */ { amqp_exchange_declare_ok_t *ed_ret; amqp_table_t argument_table; struct amqp_table_entry_t_ argument_table_entries[1]; if (conf->exchange_type == NULL) return (0); /* Valid 
arguments: "auto_delete", "internal" */ argument_table.num_entries = STATIC_ARRAY_SIZE (argument_table_entries); argument_table.entries = argument_table_entries; argument_table_entries[0].key = amqp_cstring_bytes ("auto_delete"); argument_table_entries[0].value.kind = AMQP_FIELD_KIND_BOOLEAN; argument_table_entries[0].value.value.boolean = 1; ed_ret = amqp_exchange_declare (conf->connection, /* channel = */ CAMQP_CHANNEL, /* exchange = */ amqp_cstring_bytes (conf->exchange), /* type = */ amqp_cstring_bytes (conf->exchange_type), /* passive = */ 0, /* durable = */ 0, #if defined(AMQP_VERSION) && AMQP_VERSION >= 0x00060000 /* auto delete = */ 0, /* internal = */ 0, #endif /* arguments = */ argument_table); if ((ed_ret == NULL) && camqp_is_error (conf)) { char errbuf[1024]; ERROR ("amqp plugin: amqp_exchange_declare failed: %s", camqp_strerror (conf, errbuf, sizeof (errbuf))); camqp_close_connection (conf); return (-1); } INFO ("amqp plugin: Successfully created exchange \"%s\" " "with type \"%s\".", conf->exchange, conf->exchange_type); return (0); } /* }}} int camqp_create_exchange */ #endif static int camqp_setup_queue (camqp_config_t *conf) /* {{{ */ { amqp_queue_declare_ok_t *qd_ret; amqp_basic_consume_ok_t *cm_ret; qd_ret = amqp_queue_declare (conf->connection, /* channel = */ CAMQP_CHANNEL, /* queue = */ (conf->queue != NULL) ? 
amqp_cstring_bytes (conf->queue) : AMQP_EMPTY_BYTES, /* passive = */ 0, /* durable = */ conf->queue_durable, /* exclusive = */ 0, /* auto_delete = */ conf->queue_auto_delete, /* arguments = */ AMQP_EMPTY_TABLE); if (qd_ret == NULL) { ERROR ("amqp plugin: amqp_queue_declare failed."); camqp_close_connection (conf); return (-1); } if (conf->queue == NULL) { conf->queue = camqp_bytes_cstring (&qd_ret->queue); if (conf->queue == NULL) { ERROR ("amqp plugin: camqp_bytes_cstring failed."); camqp_close_connection (conf); return (-1); } INFO ("amqp plugin: Created queue \"%s\".", conf->queue); } DEBUG ("amqp plugin: Successfully created queue \"%s\".", conf->queue); /* bind to an exchange */ if (conf->exchange != NULL) { amqp_queue_bind_ok_t *qb_ret; assert (conf->queue != NULL); qb_ret = amqp_queue_bind (conf->connection, /* channel = */ CAMQP_CHANNEL, /* queue = */ amqp_cstring_bytes (conf->queue), /* exchange = */ amqp_cstring_bytes (conf->exchange), /* routing_key = */ (conf->routing_key != NULL) ? 
amqp_cstring_bytes (conf->routing_key) : AMQP_EMPTY_BYTES, /* arguments = */ AMQP_EMPTY_TABLE); if ((qb_ret == NULL) && camqp_is_error (conf)) { char errbuf[1024]; ERROR ("amqp plugin: amqp_queue_bind failed: %s", camqp_strerror (conf, errbuf, sizeof (errbuf))); camqp_close_connection (conf); return (-1); } DEBUG ("amqp plugin: Successfully bound queue \"%s\" to exchange \"%s\".", conf->queue, conf->exchange); } /* if (conf->exchange != NULL) */ cm_ret = amqp_basic_consume (conf->connection, /* channel = */ CAMQP_CHANNEL, /* queue = */ amqp_cstring_bytes (conf->queue), /* consumer_tag = */ AMQP_EMPTY_BYTES, /* no_local = */ 0, /* no_ack = */ 1, /* exclusive = */ 0, /* arguments = */ AMQP_EMPTY_TABLE ); if ((cm_ret == NULL) && camqp_is_error (conf)) { char errbuf[1024]; ERROR ("amqp plugin: amqp_basic_consume failed: %s", camqp_strerror (conf, errbuf, sizeof (errbuf))); camqp_close_connection (conf); return (-1); } return (0); } /* }}} int camqp_setup_queue */ static int camqp_connect (camqp_config_t *conf) /* {{{ */ { static time_t last_connect_time = 0; amqp_rpc_reply_t reply; int status; #ifdef HAVE_AMQP_TCP_SOCKET amqp_socket_t *socket; #else int sockfd; #endif if (conf->connection != NULL) return (0); time_t now = time(NULL); if (now < (last_connect_time + conf->connection_retry_delay)) { DEBUG("amqp plugin: skipping connection retry, " "ConnectionRetryDelay: %d", conf->connection_retry_delay); return(1); } else { DEBUG ("amqp plugin: retrying connection"); last_connect_time = now; } conf->connection = amqp_new_connection (); if (conf->connection == NULL) { ERROR ("amqp plugin: amqp_new_connection failed."); return (ENOMEM); } #ifdef HAVE_AMQP_TCP_SOCKET # define CLOSE_SOCKET() /* amqp_destroy_connection() closes the socket for us */ /* TODO: add support for SSL using amqp_ssl_socket_new * and related functions */ socket = amqp_tcp_socket_new (conf->connection); if (! 
socket) { ERROR ("amqp plugin: amqp_tcp_socket_new failed."); amqp_destroy_connection (conf->connection); conf->connection = NULL; return (ENOMEM); } status = amqp_socket_open (socket, CONF(conf, host), conf->port); if (status < 0) { char errbuf[1024]; status *= -1; ERROR ("amqp plugin: amqp_socket_open failed: %s", sstrerror (status, errbuf, sizeof (errbuf))); amqp_destroy_connection (conf->connection); conf->connection = NULL; return (status); } #else /* HAVE_AMQP_TCP_SOCKET */ # define CLOSE_SOCKET() close(sockfd) /* this interface is deprecated as of rabbitmq-c 0.4 */ sockfd = amqp_open_socket (CONF(conf, host), conf->port); if (sockfd < 0) { char errbuf[1024]; status = (-1) * sockfd; ERROR ("amqp plugin: amqp_open_socket failed: %s", sstrerror (status, errbuf, sizeof (errbuf))); amqp_destroy_connection (conf->connection); conf->connection = NULL; return (status); } amqp_set_sockfd (conf->connection, sockfd); #endif reply = amqp_login (conf->connection, CONF(conf, vhost), /* channel max = */ 0, /* frame max = */ 131072, /* heartbeat = */ 0, /* authentication = */ AMQP_SASL_METHOD_PLAIN, CONF(conf, user), CONF(conf, password)); if (reply.reply_type != AMQP_RESPONSE_NORMAL) { ERROR ("amqp plugin: amqp_login (vhost = %s, user = %s) failed.", CONF(conf, vhost), CONF(conf, user)); amqp_destroy_connection (conf->connection); CLOSE_SOCKET (); conf->connection = NULL; return (1); } amqp_channel_open (conf->connection, /* channel = */ 1); /* FIXME: Is checking "reply.reply_type" really correct here? How does * it get set? 
--octo */ if (reply.reply_type != AMQP_RESPONSE_NORMAL) { ERROR ("amqp plugin: amqp_channel_open failed."); amqp_connection_close (conf->connection, AMQP_REPLY_SUCCESS); amqp_destroy_connection (conf->connection); CLOSE_SOCKET (); conf->connection = NULL; return (1); } INFO ("amqp plugin: Successfully opened connection to vhost \"%s\" " "on %s:%i.", CONF(conf, vhost), CONF(conf, host), conf->port); status = camqp_create_exchange (conf); if (status != 0) return (status); if (!conf->publish) return (camqp_setup_queue (conf)); return (0); } /* }}} int camqp_connect */ static int camqp_shutdown (void) /* {{{ */ { size_t i; DEBUG ("amqp plugin: Shutting down %zu subscriber threads.", subscriber_threads_num); subscriber_threads_running = 0; for (i = 0; i < subscriber_threads_num; i++) { /* FIXME: Sending a signal is not very elegant here. Maybe find out how * to use a timeout in the thread and check for the variable in regular * intervals. */ pthread_kill (subscriber_threads[i], SIGTERM); pthread_join (subscriber_threads[i], /* retval = */ NULL); } subscriber_threads_num = 0; sfree (subscriber_threads); DEBUG ("amqp plugin: All subscriber threads exited."); return (0); } /* }}} int camqp_shutdown */ /* * Subscribing code */ static int camqp_read_body (camqp_config_t *conf, /* {{{ */ size_t body_size, const char *content_type) { char body[body_size + 1]; char *body_ptr; size_t received; amqp_frame_t frame; int status; memset (body, 0, sizeof (body)); body_ptr = &body[0]; received = 0; while (received < body_size) { status = amqp_simple_wait_frame (conf->connection, &frame); if (status < 0) { char errbuf[1024]; status = (-1) * status; ERROR ("amqp plugin: amqp_simple_wait_frame failed: %s", sstrerror (status, errbuf, sizeof (errbuf))); camqp_close_connection (conf); return (status); } if (frame.frame_type != AMQP_FRAME_BODY) { NOTICE ("amqp plugin: Unexpected frame type: %#"PRIx8, frame.frame_type); return (-1); } if ((body_size - received) < 
frame.payload.body_fragment.len) { WARNING ("amqp plugin: Body is larger than indicated by header."); return (-1); } memcpy (body_ptr, frame.payload.body_fragment.bytes, frame.payload.body_fragment.len); body_ptr += frame.payload.body_fragment.len; received += frame.payload.body_fragment.len; } /* while (received < body_size) */ if (strcasecmp ("text/collectd", content_type) == 0) { status = handle_putval (stderr, body); if (status != 0) ERROR ("amqp plugin: handle_putval failed with status %i.", status); return (status); } else if (strcasecmp ("application/json", content_type) == 0) { ERROR ("amqp plugin: camqp_read_body: Parsing JSON data has not " "been implemented yet. FIXME!"); return (0); } else { ERROR ("amqp plugin: camqp_read_body: Unknown content type \"%s\".", content_type); return (EINVAL); } /* not reached */ return (0); } /* }}} int camqp_read_body */ static int camqp_read_header (camqp_config_t *conf) /* {{{ */ { int status; amqp_frame_t frame; amqp_basic_properties_t *properties; char *content_type; status = amqp_simple_wait_frame (conf->connection, &frame); if (status < 0) { char errbuf[1024]; status = (-1) * status; ERROR ("amqp plugin: amqp_simple_wait_frame failed: %s", sstrerror (status, errbuf, sizeof (errbuf))); camqp_close_connection (conf); return (status); } if (frame.frame_type != AMQP_FRAME_HEADER) { NOTICE ("amqp plugin: Unexpected frame type: %#"PRIx8, frame.frame_type); return (-1); } properties = frame.payload.properties.decoded; content_type = camqp_bytes_cstring (&properties->content_type); if (content_type == NULL) { ERROR ("amqp plugin: Unable to determine content type."); return (-1); } status = camqp_read_body (conf, (size_t) frame.payload.properties.body_size, content_type); sfree (content_type); return (status); } /* }}} int camqp_read_header */ static void *camqp_subscribe_thread (void *user_data) /* {{{ */ { camqp_config_t *conf = user_data; int status; cdtime_t interval = plugin_get_interval (); while 
(subscriber_threads_running) { amqp_frame_t frame; status = camqp_connect (conf); if (status != 0) { struct timespec ts_interval; ERROR ("amqp plugin: camqp_connect failed. " "Will sleep for %.3f seconds.", CDTIME_T_TO_DOUBLE (interval)); CDTIME_T_TO_TIMESPEC (interval, &ts_interval); nanosleep (&ts_interval, /* remaining = */ NULL); continue; } status = amqp_simple_wait_frame (conf->connection, &frame); if (status < 0) { struct timespec ts_interval; ERROR ("amqp plugin: amqp_simple_wait_frame failed. " "Will sleep for %.3f seconds.", CDTIME_T_TO_DOUBLE (interval)); camqp_close_connection (conf); CDTIME_T_TO_TIMESPEC (interval, &ts_interval); nanosleep (&ts_interval, /* remaining = */ NULL); continue; } if (frame.frame_type != AMQP_FRAME_METHOD) { DEBUG ("amqp plugin: Unexpected frame type: %#"PRIx8, frame.frame_type); continue; } if (frame.payload.method.id != AMQP_BASIC_DELIVER_METHOD) { DEBUG ("amqp plugin: Unexpected method id: %#"PRIx32, frame.payload.method.id); continue; } camqp_read_header (conf); amqp_maybe_release_buffers (conf->connection); } /* while (subscriber_threads_running) */ camqp_config_free (conf); pthread_exit (NULL); return (NULL); } /* }}} void *camqp_subscribe_thread */ static int camqp_subscribe_init (camqp_config_t *conf) /* {{{ */ { int status; pthread_t *tmp; tmp = realloc (subscriber_threads, sizeof (*subscriber_threads) * (subscriber_threads_num + 1)); if (tmp == NULL) { ERROR ("amqp plugin: realloc failed."); camqp_config_free (conf); return (ENOMEM); } subscriber_threads = tmp; tmp = subscriber_threads + subscriber_threads_num; memset (tmp, 0, sizeof (*tmp)); status = plugin_thread_create (tmp, /* attr = */ NULL, camqp_subscribe_thread, conf); if (status != 0) { char errbuf[1024]; ERROR ("amqp plugin: pthread_create failed: %s", sstrerror (status, errbuf, sizeof (errbuf))); camqp_config_free (conf); return (status); } subscriber_threads_num++; return (0); } /* }}} int camqp_subscribe_init */ /* * Publishing code */ /* XXX: You must 
hold "conf->lock" when calling this function! */ static int camqp_write_locked (camqp_config_t *conf, /* {{{ */ const char *buffer, const char *routing_key) { amqp_basic_properties_t props; int status; status = camqp_connect (conf); if (status != 0) return (status); memset (&props, 0, sizeof (props)); props._flags = AMQP_BASIC_CONTENT_TYPE_FLAG | AMQP_BASIC_DELIVERY_MODE_FLAG | AMQP_BASIC_APP_ID_FLAG; if (conf->format == CAMQP_FORMAT_COMMAND) props.content_type = amqp_cstring_bytes("text/collectd"); else if (conf->format == CAMQP_FORMAT_JSON) props.content_type = amqp_cstring_bytes("application/json"); else if (conf->format == CAMQP_FORMAT_GRAPHITE) props.content_type = amqp_cstring_bytes("text/graphite"); else assert (23 == 42); props.delivery_mode = conf->delivery_mode; props.app_id = amqp_cstring_bytes("collectd"); status = amqp_basic_publish(conf->connection, /* channel = */ 1, amqp_cstring_bytes(CONF(conf, exchange)), amqp_cstring_bytes (routing_key), /* mandatory = */ 0, /* immediate = */ 0, &props, amqp_cstring_bytes(buffer)); if (status != 0) { ERROR ("amqp plugin: amqp_basic_publish failed with status %i.", status); camqp_close_connection (conf); } return (status); } /* }}} int camqp_write_locked */ static int camqp_write (const data_set_t *ds, const value_list_t *vl, /* {{{ */ user_data_t *user_data) { camqp_config_t *conf = user_data->data; char routing_key[6 * DATA_MAX_NAME_LEN]; char buffer[8192]; int status; if ((ds == NULL) || (vl == NULL) || (conf == NULL)) return (EINVAL); memset (buffer, 0, sizeof (buffer)); if (conf->routing_key != NULL) { sstrncpy (routing_key, conf->routing_key, sizeof (routing_key)); } else { size_t i; ssnprintf (routing_key, sizeof (routing_key), "collectd/%s/%s/%s/%s/%s", vl->host, vl->plugin, vl->plugin_instance, vl->type, vl->type_instance); /* Switch slashes (the only character forbidden by collectd) and dots * (the separation character used by AMQP). 
*/ for (i = 0; routing_key[i] != 0; i++) { if (routing_key[i] == '.') routing_key[i] = '/'; else if (routing_key[i] == '/') routing_key[i] = '.'; } } if (conf->format == CAMQP_FORMAT_COMMAND) { status = create_putval (buffer, sizeof (buffer), ds, vl); if (status != 0) { ERROR ("amqp plugin: create_putval failed with status %i.", status); return (status); } } else if (conf->format == CAMQP_FORMAT_JSON) { size_t bfree = sizeof (buffer); size_t bfill = 0; format_json_initialize (buffer, &bfill, &bfree); format_json_value_list (buffer, &bfill, &bfree, ds, vl, conf->store_rates); format_json_finalize (buffer, &bfill, &bfree); } else if (conf->format == CAMQP_FORMAT_GRAPHITE) { status = format_graphite (buffer, sizeof (buffer), ds, vl, conf->prefix, conf->postfix, conf->escape_char, conf->graphite_flags); if (status != 0) { ERROR ("amqp plugin: format_graphite failed with status %i.", status); return (status); } } else { ERROR ("amqp plugin: Invalid format (%i).", conf->format); return (-1); } pthread_mutex_lock (&conf->lock); status = camqp_write_locked (conf, buffer, routing_key); pthread_mutex_unlock (&conf->lock); return (status); } /* }}} int camqp_write */ /* * Config handling */ static int camqp_config_set_format (oconfig_item_t *ci, /* {{{ */ camqp_config_t *conf) { char *string; int status; string = NULL; status = cf_util_get_string (ci, &string); if (status != 0) return (status); assert (string != NULL); if (strcasecmp ("Command", string) == 0) conf->format = CAMQP_FORMAT_COMMAND; else if (strcasecmp ("JSON", string) == 0) conf->format = CAMQP_FORMAT_JSON; else if (strcasecmp ("Graphite", string) == 0) conf->format = CAMQP_FORMAT_GRAPHITE; else { WARNING ("amqp plugin: Invalid format string: %s", string); } free (string); return (0); } /* }}} int config_set_string */ static int camqp_config_connection (oconfig_item_t *ci, /* {{{ */ _Bool publish) { camqp_config_t *conf; int status; int i; conf = malloc (sizeof (*conf)); if (conf == NULL) { ERROR ("amqp plugin: 
malloc failed."); return (ENOMEM); } /* Initialize "conf" {{{ */ memset (conf, 0, sizeof (*conf)); conf->publish = publish; conf->name = NULL; conf->format = CAMQP_FORMAT_COMMAND; conf->host = NULL; conf->port = 5672; conf->vhost = NULL; conf->user = NULL; conf->password = NULL; conf->exchange = NULL; conf->routing_key = NULL; conf->connection_retry_delay = 0; /* publish only */ conf->delivery_mode = CAMQP_DM_VOLATILE; conf->store_rates = 0; conf->graphite_flags = 0; /* publish & graphite only */ conf->prefix = NULL; conf->postfix = NULL; conf->escape_char = '_'; /* subscribe only */ conf->exchange_type = NULL; conf->queue = NULL; conf->queue_durable = 0; conf->queue_auto_delete = 1; /* general */ conf->connection = NULL; pthread_mutex_init (&conf->lock, /* attr = */ NULL); /* }}} */ status = cf_util_get_string (ci, &conf->name); if (status != 0) { sfree (conf); return (status); } for (i = 0; i < ci->children_num; i++) { oconfig_item_t *child = ci->children + i; if (strcasecmp ("Host", child->key) == 0) status = cf_util_get_string (child, &conf->host); else if (strcasecmp ("Port", child->key) == 0) { status = cf_util_get_port_number (child); if (status > 0) { conf->port = status; status = 0; } } else if (strcasecmp ("VHost", child->key) == 0) status = cf_util_get_string (child, &conf->vhost); else if (strcasecmp ("User", child->key) == 0) status = cf_util_get_string (child, &conf->user); else if (strcasecmp ("Password", child->key) == 0) status = cf_util_get_string (child, &conf->password); else if (strcasecmp ("Exchange", child->key) == 0) status = cf_util_get_string (child, &conf->exchange); else if ((strcasecmp ("ExchangeType", child->key) == 0) && !publish) status = cf_util_get_string (child, &conf->exchange_type); else if ((strcasecmp ("Queue", child->key) == 0) && !publish) status = cf_util_get_string (child, &conf->queue); else if ((strcasecmp ("QueueDurable", child->key) == 0) && !publish) status = cf_util_get_boolean (child, &conf->queue_durable); else if 
((strcasecmp ("QueueAutoDelete", child->key) == 0) && !publish) status = cf_util_get_boolean (child, &conf->queue_auto_delete); else if (strcasecmp ("RoutingKey", child->key) == 0) status = cf_util_get_string (child, &conf->routing_key); else if ((strcasecmp ("Persistent", child->key) == 0) && publish) { _Bool tmp = 0; status = cf_util_get_boolean (child, &tmp); if (tmp) conf->delivery_mode = CAMQP_DM_PERSISTENT; else conf->delivery_mode = CAMQP_DM_VOLATILE; } else if ((strcasecmp ("StoreRates", child->key) == 0) && publish) { status = cf_util_get_boolean (child, &conf->store_rates); (void) cf_util_get_flag (child, &conf->graphite_flags, GRAPHITE_STORE_RATES); } else if ((strcasecmp ("Format", child->key) == 0) && publish) status = camqp_config_set_format (child, conf); else if ((strcasecmp ("GraphiteSeparateInstances", child->key) == 0) && publish) status = cf_util_get_flag (child, &conf->graphite_flags, GRAPHITE_SEPARATE_INSTANCES); else if ((strcasecmp ("GraphiteAlwaysAppendDS", child->key) == 0) && publish) status = cf_util_get_flag (child, &conf->graphite_flags, GRAPHITE_ALWAYS_APPEND_DS); else if ((strcasecmp ("GraphitePrefix", child->key) == 0) && publish) status = cf_util_get_string (child, &conf->prefix); else if ((strcasecmp ("GraphitePostfix", child->key) == 0) && publish) status = cf_util_get_string (child, &conf->postfix); else if ((strcasecmp ("GraphiteEscapeChar", child->key) == 0) && publish) { char *tmp_buff = NULL; status = cf_util_get_string (child, &tmp_buff); if (strlen (tmp_buff) > 1) WARNING ("amqp plugin: The option \"GraphiteEscapeChar\" handles " "only one character. 
Others will be ignored."); conf->escape_char = tmp_buff[0]; sfree (tmp_buff); } else if (strcasecmp ("ConnectionRetryDelay", child->key) == 0) status = cf_util_get_int (child, &conf->connection_retry_delay); else WARNING ("amqp plugin: Ignoring unknown " "configuration option \"%s\".", child->key); if (status != 0) break; } /* for (i = 0; i < ci->children_num; i++) */ if ((status == 0) && (conf->exchange == NULL)) { if (conf->exchange_type != NULL) WARNING ("amqp plugin: The option \"ExchangeType\" was given " "without the \"Exchange\" option. It will be ignored."); if (!publish && (conf->routing_key != NULL)) WARNING ("amqp plugin: The option \"RoutingKey\" was given " "without the \"Exchange\" option. It will be ignored."); } if (status != 0) { camqp_config_free (conf); return (status); } if (conf->exchange != NULL) { DEBUG ("amqp plugin: camqp_config_connection: exchange = %s;", conf->exchange); } if (publish) { char cbname[128]; user_data_t ud = { conf, camqp_config_free }; ssnprintf (cbname, sizeof (cbname), "amqp/%s", conf->name); status = plugin_register_write (cbname, camqp_write, &ud); if (status != 0) { camqp_config_free (conf); return (status); } } else { status = camqp_subscribe_init (conf); if (status != 0) { camqp_config_free (conf); return (status); } } return (0); } /* }}} int camqp_config_connection */ static int camqp_config (oconfig_item_t *ci) /* {{{ */ { int i; for (i = 0; i < ci->children_num; i++) { oconfig_item_t *child = ci->children + i; if (strcasecmp ("Publish", child->key) == 0) camqp_config_connection (child, /* publish = */ 1); else if (strcasecmp ("Subscribe", child->key) == 0) camqp_config_connection (child, /* publish = */ 0); else WARNING ("amqp plugin: Ignoring unknown config option \"%s\".", child->key); } /* for (ci->children_num) */ return (0); } /* }}} int camqp_config */ void module_register (void) { plugin_register_complex_config ("amqp", camqp_config); plugin_register_shutdown ("amqp", camqp_shutdown); } /* void 
module_register */ /* vim: set sw=4 sts=4 et fdm=marker : */
gpl-2.0
houzhenggang/bcm63xx-next
net/netfilter/xt_hashlimit.c
274
25307
/* * xt_hashlimit - Netfilter module to limit the number of packets per time * separately for each hashbucket (sourceip/sourceport/dstip/dstport) * * (C) 2003-2004 by Harald Welte <laforge@netfilter.org> * (C) 2006-2012 Patrick McHardy <kaber@trash.net> * Copyright © CC Computer Consultants GmbH, 2007 - 2008 * * Development of this code was funded by Astaro AG, http://www.astaro.com/ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/spinlock.h> #include <linux/random.h> #include <linux/jhash.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/list.h> #include <linux/skbuff.h> #include <linux/mm.h> #include <linux/in.h> #include <linux/ip.h> #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) #include <linux/ipv6.h> #include <net/ipv6.h> #endif #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/netfilter/xt_hashlimit.h> #include <linux/mutex.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match"); MODULE_ALIAS("ipt_hashlimit"); MODULE_ALIAS("ip6t_hashlimit"); struct hashlimit_net { struct hlist_head htables; struct proc_dir_entry *ipt_hashlimit; struct proc_dir_entry *ip6t_hashlimit; }; static int hashlimit_net_id; static inline struct hashlimit_net *hashlimit_pernet(struct net *net) { return net_generic(net, hashlimit_net_id); } /* need to declare this at the top */ static const struct file_operations dl_file_ops; /* hash table crap */ struct dsthash_dst { union { struct { __be32 src; __be32 dst; } ip; #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) struct { __be32 src[4]; __be32 dst[4]; } ip6; #endif }; __be16 src_port; __be16 dst_port; }; struct dsthash_ent { /* static / read-only 
parts in the beginning */ struct hlist_node node; struct dsthash_dst dst; /* modified structure members in the end */ spinlock_t lock; unsigned long expires; /* precalculated expiry time */ struct { unsigned long prev; /* last modification */ u_int32_t credit; u_int32_t credit_cap, cost; } rateinfo; struct rcu_head rcu; }; struct xt_hashlimit_htable { struct hlist_node node; /* global list of all htables */ int use; u_int8_t family; bool rnd_initialized; struct hashlimit_cfg1 cfg; /* config */ /* used internally */ spinlock_t lock; /* lock for list_head */ u_int32_t rnd; /* random seed for hash */ unsigned int count; /* number entries in table */ struct timer_list timer; /* timer for gc */ /* seq_file stuff */ struct proc_dir_entry *pde; const char *name; struct net *net; struct hlist_head hash[0]; /* hashtable itself */ }; static DEFINE_MUTEX(hashlimit_mutex); /* protects htables list */ static struct kmem_cache *hashlimit_cachep __read_mostly; static inline bool dst_cmp(const struct dsthash_ent *ent, const struct dsthash_dst *b) { return !memcmp(&ent->dst, b, sizeof(ent->dst)); } static u_int32_t hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst) { u_int32_t hash = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), ht->rnd); /* * Instead of returning hash % ht->cfg.size (implying a divide) * we return the high 32 bits of the (hash * ht->cfg.size) that will * give results between [0 and cfg.size-1] and same hash distribution, * but using a multiply, less expensive than a divide */ return ((u64)hash * ht->cfg.size) >> 32; } static struct dsthash_ent * dsthash_find(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst) { struct dsthash_ent *ent; u_int32_t hash = hash_dst(ht, dst); if (!hlist_empty(&ht->hash[hash])) { hlist_for_each_entry_rcu(ent, &ht->hash[hash], node) if (dst_cmp(ent, dst)) { spin_lock(&ent->lock); return ent; } } return NULL; } /* allocate dsthash_ent, initialize dst, put in htable and lock it */ static 
struct dsthash_ent * dsthash_alloc_init(struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst, bool *race) { struct dsthash_ent *ent; spin_lock(&ht->lock); /* Two or more packets may race to create the same entry in the * hashtable, double check if this packet lost race. */ ent = dsthash_find(ht, dst); if (ent != NULL) { spin_unlock(&ht->lock); *race = true; return ent; } /* initialize hash with random val at the time we allocate * the first hashtable entry */ if (unlikely(!ht->rnd_initialized)) { get_random_bytes(&ht->rnd, sizeof(ht->rnd)); ht->rnd_initialized = true; } if (ht->cfg.max && ht->count >= ht->cfg.max) { /* FIXME: do something. question is what.. */ net_err_ratelimited("max count of %u reached\n", ht->cfg.max); ent = NULL; } else ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC); if (ent) { memcpy(&ent->dst, dst, sizeof(ent->dst)); spin_lock_init(&ent->lock); spin_lock(&ent->lock); hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]); ht->count++; } spin_unlock(&ht->lock); return ent; } static void dsthash_free_rcu(struct rcu_head *head) { struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu); kmem_cache_free(hashlimit_cachep, ent); } static inline void dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent) { hlist_del_rcu(&ent->node); call_rcu_bh(&ent->rcu, dsthash_free_rcu); ht->count--; } static void htable_gc(unsigned long htlong); static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); struct xt_hashlimit_htable *hinfo; unsigned int size; unsigned int i; if (minfo->cfg.size) { size = minfo->cfg.size; } else { size = (totalram_pages << PAGE_SHIFT) / 16384 / sizeof(struct list_head); if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE) size = 8192; if (size < 16) size = 16; } /* FIXME: don't use vmalloc() here or anywhere else -HW */ hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) + sizeof(struct 
list_head) * size); if (hinfo == NULL) return -ENOMEM; minfo->hinfo = hinfo; /* copy match config into hashtable config */ memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg)); hinfo->cfg.size = size; if (hinfo->cfg.max == 0) hinfo->cfg.max = 8 * hinfo->cfg.size; else if (hinfo->cfg.max < hinfo->cfg.size) hinfo->cfg.max = hinfo->cfg.size; for (i = 0; i < hinfo->cfg.size; i++) INIT_HLIST_HEAD(&hinfo->hash[i]); hinfo->use = 1; hinfo->count = 0; hinfo->family = family; hinfo->rnd_initialized = false; hinfo->name = kstrdup(minfo->name, GFP_KERNEL); if (!hinfo->name) { vfree(hinfo); return -ENOMEM; } spin_lock_init(&hinfo->lock); hinfo->pde = proc_create_data(minfo->name, 0, (family == NFPROTO_IPV4) ? hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit, &dl_file_ops, hinfo); if (hinfo->pde == NULL) { kfree(hinfo->name); vfree(hinfo); return -ENOMEM; } hinfo->net = net; setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo); hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); add_timer(&hinfo->timer); hlist_add_head(&hinfo->node, &hashlimit_net->htables); return 0; } static bool select_all(const struct xt_hashlimit_htable *ht, const struct dsthash_ent *he) { return 1; } static bool select_gc(const struct xt_hashlimit_htable *ht, const struct dsthash_ent *he) { return time_after_eq(jiffies, he->expires); } static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool (*select)(const struct xt_hashlimit_htable *ht, const struct dsthash_ent *he)) { unsigned int i; /* lock hash table and iterate over it */ spin_lock_bh(&ht->lock); for (i = 0; i < ht->cfg.size; i++) { struct dsthash_ent *dh; struct hlist_node *n; hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) { if ((*select)(ht, dh)) dsthash_free(ht, dh); } } spin_unlock_bh(&ht->lock); } /* hash table garbage collector, run by timer */ static void htable_gc(unsigned long htlong) { struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong; 
htable_selective_cleanup(ht, select_gc); /* re-add the timer accordingly */ ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval); add_timer(&ht->timer); } static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net); struct proc_dir_entry *parent; if (hinfo->family == NFPROTO_IPV4) parent = hashlimit_net->ipt_hashlimit; else parent = hashlimit_net->ip6t_hashlimit; if (parent != NULL) remove_proc_entry(hinfo->name, parent); } static void htable_destroy(struct xt_hashlimit_htable *hinfo) { del_timer_sync(&hinfo->timer); htable_remove_proc_entry(hinfo); htable_selective_cleanup(hinfo, select_all); kfree(hinfo->name); vfree(hinfo); } static struct xt_hashlimit_htable *htable_find_get(struct net *net, const char *name, u_int8_t family) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); struct xt_hashlimit_htable *hinfo; hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) { if (!strcmp(name, hinfo->name) && hinfo->family == family) { hinfo->use++; return hinfo; } } return NULL; } static void htable_put(struct xt_hashlimit_htable *hinfo) { mutex_lock(&hashlimit_mutex); if (--hinfo->use == 0) { hlist_del(&hinfo->node); htable_destroy(hinfo); } mutex_unlock(&hashlimit_mutex); } /* The algorithm used is the Simple Token Bucket Filter (TBF) * see net/sched/sch_tbf.c in the linux source tree */ /* Rusty: This is my (non-mathematically-inclined) understanding of this algorithm. The `average rate' in jiffies becomes your initial amount of credit `credit' and the most credit you can ever have `credit_cap'. The `peak rate' becomes the cost of passing the test, `cost'. `prev' tracks the last packet hit: you gain one credit per jiffy. If you get credit balance more than this, the extra credit is discarded. Every time the match passes, you lose `cost' credits; if you don't have that many, the test fails. See Alexey's formal explanation in net/sched/sch_tbf.c. 
To get the maximum range, we multiply by this factor (ie. you get N credits per jiffy). We want to allow a rate as low as 1 per day (slowest userspace tool allows), which means CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie. */ #define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24)) /* Repeated shift and or gives us all 1s, final shift and add 1 gives * us the power of 2 below the theoretical max, so GCC simply does a * shift. */ #define _POW2_BELOW2(x) ((x)|((x)>>1)) #define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2)) #define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4)) #define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8)) #define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16)) #define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1) #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) /* in byte mode, the lowest possible rate is one packet/second. * credit_cap is used as a counter that tells us how many times we can * refill the "credits available" counter when it becomes empty. */ #define MAX_CPJ_BYTES (0xFFFFFFFF / HZ) #define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES) static u32 xt_hashlimit_len_to_chunks(u32 len) { return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1; } /* Precision saver. */ static u32 user2credits(u32 user) { /* If multiplying would overflow... */ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) /* Divide first. 
*/ return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY; return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE; } static u32 user2credits_byte(u32 user) { u64 us = user; us *= HZ * CREDITS_PER_JIFFY_BYTES; return (u32) (us >> 32); } static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode) { unsigned long delta = now - dh->rateinfo.prev; u32 cap; if (delta == 0) return; dh->rateinfo.prev = now; if (mode & XT_HASHLIMIT_BYTES) { u32 tmp = dh->rateinfo.credit; dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta; cap = CREDITS_PER_JIFFY_BYTES * HZ; if (tmp >= dh->rateinfo.credit) {/* overflow */ dh->rateinfo.credit = cap; return; } } else { dh->rateinfo.credit += delta * CREDITS_PER_JIFFY; cap = dh->rateinfo.credit_cap; } if (dh->rateinfo.credit > cap) dh->rateinfo.credit = cap; } static void rateinfo_init(struct dsthash_ent *dh, struct xt_hashlimit_htable *hinfo) { dh->rateinfo.prev = jiffies; if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) { dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ; dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg); dh->rateinfo.credit_cap = hinfo->cfg.burst; } else { dh->rateinfo.credit = user2credits(hinfo->cfg.avg * hinfo->cfg.burst); dh->rateinfo.cost = user2credits(hinfo->cfg.avg); dh->rateinfo.credit_cap = dh->rateinfo.credit; } } static inline __be32 maskl(__be32 a, unsigned int l) { return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0; } #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) static void hashlimit_ipv6_mask(__be32 *i, unsigned int p) { switch (p) { case 0 ... 31: i[0] = maskl(i[0], p); i[1] = i[2] = i[3] = 0; break; case 32 ... 63: i[1] = maskl(i[1], p - 32); i[2] = i[3] = 0; break; case 64 ... 95: i[2] = maskl(i[2], p - 64); i[3] = 0; break; case 96 ... 
127: i[3] = maskl(i[3], p - 96); break; case 128: break; } } #endif static int hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst, const struct sk_buff *skb, unsigned int protoff) { __be16 _ports[2], *ports; u8 nexthdr; int poff; memset(dst, 0, sizeof(*dst)); switch (hinfo->family) { case NFPROTO_IPV4: if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) dst->ip.dst = maskl(ip_hdr(skb)->daddr, hinfo->cfg.dstmask); if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) dst->ip.src = maskl(ip_hdr(skb)->saddr, hinfo->cfg.srcmask); if (!(hinfo->cfg.mode & (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT))) return 0; nexthdr = ip_hdr(skb)->protocol; break; #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) case NFPROTO_IPV6: { __be16 frag_off; if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) { memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr, sizeof(dst->ip6.dst)); hashlimit_ipv6_mask(dst->ip6.dst, hinfo->cfg.dstmask); } if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) { memcpy(&dst->ip6.src, &ipv6_hdr(skb)->saddr, sizeof(dst->ip6.src)); hashlimit_ipv6_mask(dst->ip6.src, hinfo->cfg.srcmask); } if (!(hinfo->cfg.mode & (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT))) return 0; nexthdr = ipv6_hdr(skb)->nexthdr; protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off); if ((int)protoff < 0) return -1; break; } #endif default: BUG(); return 0; } poff = proto_ports_offset(nexthdr); if (poff >= 0) { ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports), &_ports); } else { _ports[0] = _ports[1] = 0; ports = _ports; } if (!ports) return -1; if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT) dst->src_port = ports[0]; if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT) dst->dst_port = ports[1]; return 0; } static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh) { u64 tmp = xt_hashlimit_len_to_chunks(len); tmp = tmp * dh->rateinfo.cost; if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ)) tmp = CREDITS_PER_JIFFY_BYTES * HZ; if (dh->rateinfo.credit < tmp && 
dh->rateinfo.credit_cap) { dh->rateinfo.credit_cap--; dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ; } return (u32) tmp; } static bool hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_hashlimit_mtinfo1 *info = par->matchinfo; struct xt_hashlimit_htable *hinfo = info->hinfo; unsigned long now = jiffies; struct dsthash_ent *dh; struct dsthash_dst dst; bool race = false; u32 cost; if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0) goto hotdrop; rcu_read_lock_bh(); dh = dsthash_find(hinfo, &dst); if (dh == NULL) { dh = dsthash_alloc_init(hinfo, &dst, &race); if (dh == NULL) { rcu_read_unlock_bh(); goto hotdrop; } else if (race) { /* Already got an entry, update expiration timeout */ dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); rateinfo_recalc(dh, now, hinfo->cfg.mode); } else { dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire); rateinfo_init(dh, hinfo); } } else { /* update expiration timeout */ dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); rateinfo_recalc(dh, now, hinfo->cfg.mode); } if (info->cfg.mode & XT_HASHLIMIT_BYTES) cost = hashlimit_byte_cost(skb->len, dh); else cost = dh->rateinfo.cost; if (dh->rateinfo.credit >= cost) { /* below the limit */ dh->rateinfo.credit -= cost; spin_unlock(&dh->lock); rcu_read_unlock_bh(); return !(info->cfg.mode & XT_HASHLIMIT_INVERT); } spin_unlock(&dh->lock); rcu_read_unlock_bh(); /* default match is underlimit - so over the limit, we need to invert */ return info->cfg.mode & XT_HASHLIMIT_INVERT; hotdrop: par->hotdrop = true; return false; } static int hashlimit_mt_check(const struct xt_mtchk_param *par) { struct net *net = par->net; struct xt_hashlimit_mtinfo1 *info = par->matchinfo; int ret; if (info->cfg.gc_interval == 0 || info->cfg.expire == 0) return -EINVAL; if (info->name[sizeof(info->name)-1] != '\0') return -EINVAL; if (par->family == NFPROTO_IPV4) { if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32) return -EINVAL; } else { if 
(info->cfg.srcmask > 128 || info->cfg.dstmask > 128) return -EINVAL; } if (info->cfg.mode & ~XT_HASHLIMIT_ALL) { pr_info("Unknown mode mask %X, kernel too old?\n", info->cfg.mode); return -EINVAL; } /* Check for overflow. */ if (info->cfg.mode & XT_HASHLIMIT_BYTES) { if (user2credits_byte(info->cfg.avg) == 0) { pr_info("overflow, rate too high: %u\n", info->cfg.avg); return -EINVAL; } } else if (info->cfg.burst == 0 || user2credits(info->cfg.avg * info->cfg.burst) < user2credits(info->cfg.avg)) { pr_info("overflow, try lower: %u/%u\n", info->cfg.avg, info->cfg.burst); return -ERANGE; } mutex_lock(&hashlimit_mutex); info->hinfo = htable_find_get(net, info->name, par->family); if (info->hinfo == NULL) { ret = htable_create(net, info, par->family); if (ret < 0) { mutex_unlock(&hashlimit_mutex); return ret; } } mutex_unlock(&hashlimit_mutex); return 0; } static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_hashlimit_mtinfo1 *info = par->matchinfo; htable_put(info->hinfo); } static struct xt_match hashlimit_mt_reg[] __read_mostly = { { .name = "hashlimit", .revision = 1, .family = NFPROTO_IPV4, .match = hashlimit_mt, .matchsize = sizeof(struct xt_hashlimit_mtinfo1), .checkentry = hashlimit_mt_check, .destroy = hashlimit_mt_destroy, .me = THIS_MODULE, }, #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) { .name = "hashlimit", .revision = 1, .family = NFPROTO_IPV6, .match = hashlimit_mt, .matchsize = sizeof(struct xt_hashlimit_mtinfo1), .checkentry = hashlimit_mt_check, .destroy = hashlimit_mt_destroy, .me = THIS_MODULE, }, #endif }; /* PROC stuff */ static void *dl_seq_start(struct seq_file *s, loff_t *pos) __acquires(htable->lock) { struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket; spin_lock_bh(&htable->lock); if (*pos >= htable->cfg.size) return NULL; bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC); if (!bucket) return ERR_PTR(-ENOMEM); *bucket = *pos; return bucket; } static void *dl_seq_next(struct seq_file *s, void *v, 
loff_t *pos) { struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket = (unsigned int *)v; *pos = ++(*bucket); if (*pos >= htable->cfg.size) { kfree(v); return NULL; } return bucket; } static void dl_seq_stop(struct seq_file *s, void *v) __releases(htable->lock) { struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket = (unsigned int *)v; if (!IS_ERR(bucket)) kfree(bucket); spin_unlock_bh(&htable->lock); } static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, struct seq_file *s) { int res; const struct xt_hashlimit_htable *ht = s->private; spin_lock(&ent->lock); /* recalculate to show accurate numbers */ rateinfo_recalc(ent, jiffies, ht->cfg.mode); switch (family) { case NFPROTO_IPV4: res = seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n", (long)(ent->expires - jiffies)/HZ, &ent->dst.ip.src, ntohs(ent->dst.src_port), &ent->dst.ip.dst, ntohs(ent->dst.dst_port), ent->rateinfo.credit, ent->rateinfo.credit_cap, ent->rateinfo.cost); break; #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) case NFPROTO_IPV6: res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n", (long)(ent->expires - jiffies)/HZ, &ent->dst.ip6.src, ntohs(ent->dst.src_port), &ent->dst.ip6.dst, ntohs(ent->dst.dst_port), ent->rateinfo.credit, ent->rateinfo.credit_cap, ent->rateinfo.cost); break; #endif default: BUG(); res = 0; } spin_unlock(&ent->lock); return res; } static int dl_seq_show(struct seq_file *s, void *v) { struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket = (unsigned int *)v; struct dsthash_ent *ent; if (!hlist_empty(&htable->hash[*bucket])) { hlist_for_each_entry(ent, &htable->hash[*bucket], node) if (dl_seq_real_show(ent, htable->family, s)) return -1; } return 0; } static const struct seq_operations dl_seq_ops = { .start = dl_seq_start, .next = dl_seq_next, .stop = dl_seq_stop, .show = dl_seq_show }; static int dl_proc_open(struct inode *inode, struct file *file) { int ret = seq_open(file, &dl_seq_ops); if (!ret) { struct seq_file *sf = 
file->private_data; sf->private = PDE_DATA(inode); } return ret; } static const struct file_operations dl_file_ops = { .owner = THIS_MODULE, .open = dl_proc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release }; static int __net_init hashlimit_proc_net_init(struct net *net) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net); if (!hashlimit_net->ipt_hashlimit) return -ENOMEM; #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net); if (!hashlimit_net->ip6t_hashlimit) { remove_proc_entry("ipt_hashlimit", net->proc_net); return -ENOMEM; } #endif return 0; } static void __net_exit hashlimit_proc_net_exit(struct net *net) { struct xt_hashlimit_htable *hinfo; struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); /* hashlimit_net_exit() is called before hashlimit_mt_destroy(). * Make sure that the parent ipt_hashlimit and ip6t_hashlimit proc * entries is empty before trying to remove it. 
*/ mutex_lock(&hashlimit_mutex); hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) htable_remove_proc_entry(hinfo); hashlimit_net->ipt_hashlimit = NULL; hashlimit_net->ip6t_hashlimit = NULL; mutex_unlock(&hashlimit_mutex); remove_proc_entry("ipt_hashlimit", net->proc_net); #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) remove_proc_entry("ip6t_hashlimit", net->proc_net); #endif } static int __net_init hashlimit_net_init(struct net *net) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); INIT_HLIST_HEAD(&hashlimit_net->htables); return hashlimit_proc_net_init(net); } static void __net_exit hashlimit_net_exit(struct net *net) { hashlimit_proc_net_exit(net); } static struct pernet_operations hashlimit_net_ops = { .init = hashlimit_net_init, .exit = hashlimit_net_exit, .id = &hashlimit_net_id, .size = sizeof(struct hashlimit_net), }; static int __init hashlimit_mt_init(void) { int err; err = register_pernet_subsys(&hashlimit_net_ops); if (err < 0) return err; err = xt_register_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); if (err < 0) goto err1; err = -ENOMEM; hashlimit_cachep = kmem_cache_create("xt_hashlimit", sizeof(struct dsthash_ent), 0, 0, NULL); if (!hashlimit_cachep) { pr_warning("unable to create slab cache\n"); goto err2; } return 0; err2: xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); err1: unregister_pernet_subsys(&hashlimit_net_ops); return err; } static void __exit hashlimit_mt_exit(void) { xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); unregister_pernet_subsys(&hashlimit_net_ops); rcu_barrier_bh(); kmem_cache_destroy(hashlimit_cachep); } module_init(hashlimit_mt_init); module_exit(hashlimit_mt_exit);
gpl-2.0
y10g/elini_kernel
drivers/ide/ide-disk.c
530
19819
/* * Copyright (C) 1994-1998 Linus Torvalds & authors (see below) * Copyright (C) 1998-2002 Linux ATA Development * Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2003 Red Hat * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz */ /* * Mostly written by Mark Lord <mlord@pobox.com> * and Gadi Oxman <gadio@netvision.net.il> * and Andre Hedrick <andre@linux-ide.org> * * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c. */ #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/genhd.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/leds.h> #include <linux/ide.h> #include <asm/byteorder.h> #include <asm/irq.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/div64.h> #include "ide-disk.h" static const u8 ide_rw_cmds[] = { ATA_CMD_READ_MULTI, ATA_CMD_WRITE_MULTI, ATA_CMD_READ_MULTI_EXT, ATA_CMD_WRITE_MULTI_EXT, ATA_CMD_PIO_READ, ATA_CMD_PIO_WRITE, ATA_CMD_PIO_READ_EXT, ATA_CMD_PIO_WRITE_EXT, ATA_CMD_READ, ATA_CMD_WRITE, ATA_CMD_READ_EXT, ATA_CMD_WRITE_EXT, }; static void ide_tf_set_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 dma) { u8 index, lba48, write; lba48 = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0; write = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0; if (dma) { cmd->protocol = ATA_PROT_DMA; index = 8; } else { cmd->protocol = ATA_PROT_PIO; if (drive->mult_count) { cmd->tf_flags |= IDE_TFLAG_MULTI_PIO; index = 0; } else index = 4; } cmd->tf.command = ide_rw_cmds[index + lba48 + write]; } /* * __ide_do_rw_disk() issues READ and WRITE commands to a disk, * using LBA if supported, or CHS otherwise, to address sectors. 
*/ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, sector_t block) { ide_hwif_t *hwif = drive->hwif; u16 nsectors = (u16)blk_rq_sectors(rq); u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48); u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); struct ide_cmd cmd; struct ide_taskfile *tf = &cmd.tf; ide_startstop_t rc; if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) { if (block + blk_rq_sectors(rq) > 1ULL << 28) dma = 0; else lba48 = 0; } memset(&cmd, 0, sizeof(cmd)); cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; if (drive->dev_flags & IDE_DFLAG_LBA) { if (lba48) { pr_debug("%s: LBA=0x%012llx\n", drive->name, (unsigned long long)block); tf->nsect = nsectors & 0xff; tf->lbal = (u8) block; tf->lbam = (u8)(block >> 8); tf->lbah = (u8)(block >> 16); tf->device = ATA_LBA; tf = &cmd.hob; tf->nsect = (nsectors >> 8) & 0xff; tf->lbal = (u8)(block >> 24); if (sizeof(block) != 4) { tf->lbam = (u8)((u64)block >> 32); tf->lbah = (u8)((u64)block >> 40); } cmd.valid.out.hob = IDE_VALID_OUT_HOB; cmd.valid.in.hob = IDE_VALID_IN_HOB; cmd.tf_flags |= IDE_TFLAG_LBA48; } else { tf->nsect = nsectors & 0xff; tf->lbal = block; tf->lbam = block >>= 8; tf->lbah = block >>= 8; tf->device = ((block >> 8) & 0xf) | ATA_LBA; } } else { unsigned int sect, head, cyl, track; track = (int)block / drive->sect; sect = (int)block % drive->sect + 1; head = track % drive->head; cyl = track / drive->head; pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect); tf->nsect = nsectors & 0xff; tf->lbal = sect; tf->lbam = cyl; tf->lbah = cyl >> 8; tf->device = head; } cmd.tf_flags |= IDE_TFLAG_FS; if (rq_data_dir(rq)) cmd.tf_flags |= IDE_TFLAG_WRITE; ide_tf_set_cmd(drive, &cmd, dma); cmd.rq = rq; if (dma == 0) { ide_init_sg_cmd(&cmd, nsectors << 9); ide_map_sg(drive, &cmd); } rc = do_rw_taskfile(drive, &cmd); if (rc == ide_stopped && dma) { /* fallback to PIO */ cmd.tf_flags |= 
IDE_TFLAG_DMA_PIO_FALLBACK; ide_tf_set_cmd(drive, &cmd, 0); ide_init_sg_cmd(&cmd, nsectors << 9); rc = do_rw_taskfile(drive, &cmd); } return rc; } /* * 268435455 == 137439 MB or 28bit limit * 320173056 == 163929 MB or 48bit addressing * 1073741822 == 549756 MB or 48bit addressing fake drive */ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, sector_t block) { ide_hwif_t *hwif = drive->hwif; BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED); BUG_ON(!blk_fs_request(rq)); ledtrig_ide_activity(); pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n", drive->name, rq_data_dir(rq) == READ ? "read" : "writ", (unsigned long long)block, blk_rq_sectors(rq), (unsigned long)rq->buffer); if (hwif->rw_disk) hwif->rw_disk(drive, rq); return __ide_do_rw_disk(drive, rq, block); } /* * Queries for true maximum capacity of the drive. * Returns maximum LBA address (> 0) of the drive, 0 if failed. */ static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48) { struct ide_cmd cmd; struct ide_taskfile *tf = &cmd.tf; u64 addr = 0; memset(&cmd, 0, sizeof(cmd)); if (lba48) tf->command = ATA_CMD_READ_NATIVE_MAX_EXT; else tf->command = ATA_CMD_READ_NATIVE_MAX; tf->device = ATA_LBA; cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; if (lba48) { cmd.valid.out.hob = IDE_VALID_OUT_HOB; cmd.valid.in.hob = IDE_VALID_IN_HOB; cmd.tf_flags = IDE_TFLAG_LBA48; } ide_no_data_taskfile(drive, &cmd); /* if OK, compute maximum address value */ if (!(tf->status & ATA_ERR)) addr = ide_get_lba_addr(&cmd, lba48) + 1; return addr; } /* * Sets maximum virtual LBA address of the drive. * Returns new maximum virtual LBA address (> 0) or 0 on failure. 
*/ static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48) { struct ide_cmd cmd; struct ide_taskfile *tf = &cmd.tf; u64 addr_set = 0; addr_req--; memset(&cmd, 0, sizeof(cmd)); tf->lbal = (addr_req >> 0) & 0xff; tf->lbam = (addr_req >>= 8) & 0xff; tf->lbah = (addr_req >>= 8) & 0xff; if (lba48) { cmd.hob.lbal = (addr_req >>= 8) & 0xff; cmd.hob.lbam = (addr_req >>= 8) & 0xff; cmd.hob.lbah = (addr_req >>= 8) & 0xff; tf->command = ATA_CMD_SET_MAX_EXT; } else { tf->device = (addr_req >>= 8) & 0x0f; tf->command = ATA_CMD_SET_MAX; } tf->device |= ATA_LBA; cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; if (lba48) { cmd.valid.out.hob = IDE_VALID_OUT_HOB; cmd.valid.in.hob = IDE_VALID_IN_HOB; cmd.tf_flags = IDE_TFLAG_LBA48; } ide_no_data_taskfile(drive, &cmd); /* if OK, compute maximum address value */ if (!(tf->status & ATA_ERR)) addr_set = ide_get_lba_addr(&cmd, lba48) + 1; return addr_set; } static unsigned long long sectors_to_MB(unsigned long long n) { n <<= 9; /* make it bytes */ do_div(n, 1000000); /* make it MB */ return n; } /* * Some disks report total number of sectors instead of * maximum sector address. We list them here. */ static const struct drive_list_entry hpa_list[] = { { "ST340823A", NULL }, { "ST320413A", NULL }, { "ST310211A", NULL }, { NULL, NULL } }; static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48) { u64 capacity, set_max; capacity = drive->capacity64; set_max = idedisk_read_native_max_address(drive, lba48); if (ide_in_drive_list(drive->id, hpa_list)) { /* * Since we are inclusive wrt to firmware revisions do this * extra check and apply the workaround only when needed. 
*/ if (set_max == capacity + 1) set_max--; } return set_max; } static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48) { set_max = idedisk_set_max_address(drive, set_max, lba48); if (set_max) drive->capacity64 = set_max; return set_max; } static void idedisk_check_hpa(ide_drive_t *drive) { u64 capacity, set_max; int lba48 = ata_id_lba48_enabled(drive->id); capacity = drive->capacity64; set_max = ide_disk_hpa_get_native_capacity(drive, lba48); if (set_max <= capacity) return; drive->probed_capacity = set_max; printk(KERN_INFO "%s: Host Protected Area detected.\n" "\tcurrent capacity is %llu sectors (%llu MB)\n" "\tnative capacity is %llu sectors (%llu MB)\n", drive->name, capacity, sectors_to_MB(capacity), set_max, sectors_to_MB(set_max)); if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0) return; set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48); if (set_max) printk(KERN_INFO "%s: Host Protected Area disabled.\n", drive->name); } static int ide_disk_get_capacity(ide_drive_t *drive) { u16 *id = drive->id; int lba; if (ata_id_lba48_enabled(id)) { /* drive speaks 48-bit LBA */ lba = 1; drive->capacity64 = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); } else if (ata_id_has_lba(id) && ata_id_is_lba_capacity_ok(id)) { /* drive speaks 28-bit LBA */ lba = 1; drive->capacity64 = ata_id_u32(id, ATA_ID_LBA_CAPACITY); } else { /* drive speaks boring old 28-bit CHS */ lba = 0; drive->capacity64 = drive->cyl * drive->head * drive->sect; } drive->probed_capacity = drive->capacity64; if (lba) { drive->dev_flags |= IDE_DFLAG_LBA; /* * If this device supports the Host Protected Area feature set, * then we may need to change our opinion about its capacity. 
*/ if (ata_id_hpa_enabled(id)) idedisk_check_hpa(drive); } /* limit drive capacity to 137GB if LBA48 cannot be used */ if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 && drive->capacity64 > 1ULL << 28) { printk(KERN_WARNING "%s: cannot use LBA48 - full capacity " "%llu sectors (%llu MB)\n", drive->name, (unsigned long long)drive->capacity64, sectors_to_MB(drive->capacity64)); drive->probed_capacity = drive->capacity64 = 1ULL << 28; } if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && (drive->dev_flags & IDE_DFLAG_LBA48)) { if (drive->capacity64 > 1ULL << 28) { printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode" " will be used for accessing sectors " "> %u\n", drive->name, 1 << 28); } else drive->dev_flags &= ~IDE_DFLAG_LBA48; } return 0; } static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity) { u64 set = min(capacity, drive->probed_capacity); u16 *id = drive->id; int lba48 = ata_id_lba48_enabled(id); if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 || ata_id_hpa_enabled(id) == 0) goto out; /* * according to the spec the SET MAX ADDRESS command shall be * immediately preceded by a READ NATIVE MAX ADDRESS command */ capacity = ide_disk_hpa_get_native_capacity(drive, lba48); if (capacity == 0) goto out; set = ide_disk_hpa_set_capacity(drive, set, lba48); if (set) { /* needed for ->resume to disable HPA */ drive->dev_flags |= IDE_DFLAG_NOHPA; return set; } out: return drive->capacity64; } static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) { ide_drive_t *drive = q->queuedata; struct ide_cmd *cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); /* FIXME: map struct ide_taskfile on rq->cmd[] */ BUG_ON(cmd == NULL); memset(cmd, 0, sizeof(*cmd)); if (ata_id_flush_ext_enabled(drive->id) && (drive->capacity64 >= (1UL << 28))) cmd->tf.command = ATA_CMD_FLUSH_EXT; else cmd->tf.command = ATA_CMD_FLUSH; cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd->tf_flags = IDE_TFLAG_DYN; cmd->protocol = ATA_PROT_NODATA; rq->cmd_type = 
REQ_TYPE_ATA_TASKFILE; rq->special = cmd; cmd->rq = rq; } ide_devset_get(multcount, mult_count); /* * This is tightly woven into the driver->do_special can not touch. * DON'T do it again until a total personality rewrite is committed. */ static int set_multcount(ide_drive_t *drive, int arg) { struct request *rq; int error; if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff)) return -EINVAL; if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) return -EBUSY; rq = blk_get_request(drive->queue, READ, __GFP_WAIT); rq->cmd_type = REQ_TYPE_ATA_TASKFILE; drive->mult_req = arg; drive->special_flags |= IDE_SFLAG_SET_MULTMODE; error = blk_execute_rq(drive->queue, NULL, rq, 0); blk_put_request(rq); return (drive->mult_count == arg) ? 0 : -EIO; } ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR); static int set_nowerr(ide_drive_t *drive, int arg) { if (arg < 0 || arg > 1) return -EINVAL; if (arg) drive->dev_flags |= IDE_DFLAG_NOWERR; else drive->dev_flags &= ~IDE_DFLAG_NOWERR; drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT; return 0; } static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect) { struct ide_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.tf.feature = feature; cmd.tf.nsect = nsect; cmd.tf.command = ATA_CMD_SET_FEATURES; cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; return ide_no_data_taskfile(drive, &cmd); } static void update_ordered(ide_drive_t *drive) { u16 *id = drive->id; unsigned ordered = QUEUE_ORDERED_NONE; prepare_flush_fn *prep_fn = NULL; if (drive->dev_flags & IDE_DFLAG_WCACHE) { unsigned long long capacity; int barrier; /* * We must avoid issuing commands a drive does not * understand or we may crash it. We check flush cache * is supported. We also check we have the LBA48 flush * cache if the drive capacity is too large. By this * time we have trimmed the drive capacity if LBA48 is * not available so we don't need to recheck that. 
*/ capacity = ide_gd_capacity(drive); barrier = ata_id_flush_enabled(id) && (drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 && ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 || capacity <= (1ULL << 28) || ata_id_flush_ext_enabled(id)); printk(KERN_INFO "%s: cache flushes %ssupported\n", drive->name, barrier ? "" : "not "); if (barrier) { ordered = QUEUE_ORDERED_DRAIN_FLUSH; prep_fn = idedisk_prepare_flush; } } else ordered = QUEUE_ORDERED_DRAIN; blk_queue_ordered(drive->queue, ordered, prep_fn); } ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE); static int set_wcache(ide_drive_t *drive, int arg) { int err = 1; if (arg < 0 || arg > 1) return -EINVAL; if (ata_id_flush_enabled(drive->id)) { err = ide_do_setfeature(drive, arg ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF, 0); if (err == 0) { if (arg) drive->dev_flags |= IDE_DFLAG_WCACHE; else drive->dev_flags &= ~IDE_DFLAG_WCACHE; } } update_ordered(drive); return err; } static int do_idedisk_flushcache(ide_drive_t *drive) { struct ide_cmd cmd; memset(&cmd, 0, sizeof(cmd)); if (ata_id_flush_ext_enabled(drive->id)) cmd.tf.command = ATA_CMD_FLUSH_EXT; else cmd.tf.command = ATA_CMD_FLUSH; cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; return ide_no_data_taskfile(drive, &cmd); } ide_devset_get(acoustic, acoustic); static int set_acoustic(ide_drive_t *drive, int arg) { if (arg < 0 || arg > 254) return -EINVAL; ide_do_setfeature(drive, arg ? 
SETFEATURES_AAM_ON : SETFEATURES_AAM_OFF, arg); drive->acoustic = arg; return 0; } ide_devset_get_flag(addressing, IDE_DFLAG_LBA48); /* * drive->addressing: * 0: 28-bit * 1: 48-bit * 2: 48-bit capable doing 28-bit */ static int set_addressing(ide_drive_t *drive, int arg) { if (arg < 0 || arg > 2) return -EINVAL; if (arg && ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48) || ata_id_lba48_enabled(drive->id) == 0)) return -EIO; if (arg == 2) arg = 0; if (arg) drive->dev_flags |= IDE_DFLAG_LBA48; else drive->dev_flags &= ~IDE_DFLAG_LBA48; return 0; } ide_ext_devset_rw(acoustic, acoustic); ide_ext_devset_rw(address, addressing); ide_ext_devset_rw(multcount, multcount); ide_ext_devset_rw(wcache, wcache); ide_ext_devset_rw_sync(nowerr, nowerr); static int ide_disk_check(ide_drive_t *drive, const char *s) { return 1; } static void ide_disk_setup(ide_drive_t *drive) { struct ide_disk_obj *idkp = drive->driver_data; struct request_queue *q = drive->queue; ide_hwif_t *hwif = drive->hwif; u16 *id = drive->id; char *m = (char *)&id[ATA_ID_PROD]; unsigned long long capacity; ide_proc_register_driver(drive, idkp->driver); if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) return; if (drive->dev_flags & IDE_DFLAG_REMOVABLE) { /* * Removable disks (eg. 
SYQUEST); ignore 'WD' drives */ if (m[0] != 'W' || m[1] != 'D') drive->dev_flags |= IDE_DFLAG_DOORLOCKING; } (void)set_addressing(drive, 1); if (drive->dev_flags & IDE_DFLAG_LBA48) { int max_s = 2048; if (max_s > hwif->rqsize) max_s = hwif->rqsize; blk_queue_max_sectors(q, max_s); } printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name, queue_max_sectors(q) / 2); if (ata_id_is_ssd(id)) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); /* calculate drive capacity, and select LBA if possible */ ide_disk_get_capacity(drive); /* * if possible, give fdisk access to more of the drive, * by correcting bios_cyls: */ capacity = ide_gd_capacity(drive); if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) { if (ata_id_lba48_enabled(drive->id)) { /* compatibility */ drive->bios_sect = 63; drive->bios_head = 255; } if (drive->bios_sect && drive->bios_head) { unsigned int cap0 = capacity; /* truncate to 32 bits */ unsigned int cylsz, cyl; if (cap0 != capacity) drive->bios_cyl = 65535; else { cylsz = drive->bios_sect * drive->bios_head; cyl = cap0 / cylsz; if (cyl > 65535) cyl = 65535; if (cyl > drive->bios_cyl) drive->bios_cyl = cyl; } } } printk(KERN_INFO "%s: %llu sectors (%llu MB)", drive->name, capacity, sectors_to_MB(capacity)); /* Only print cache size when it was specified */ if (id[ATA_ID_BUF_SIZE]) printk(KERN_CONT " w/%dKiB Cache", id[ATA_ID_BUF_SIZE] / 2); printk(KERN_CONT ", CHS=%d/%d/%d\n", drive->bios_cyl, drive->bios_head, drive->bios_sect); /* write cache enabled? 
*/ if ((id[ATA_ID_CSFO] & 1) || ata_id_wcache_enabled(id)) drive->dev_flags |= IDE_DFLAG_WCACHE; set_wcache(drive, 1); if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 && (drive->head == 0 || drive->head > 16)) { printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n", drive->name, drive->head); drive->dev_flags &= ~IDE_DFLAG_ATTACH; } else drive->dev_flags |= IDE_DFLAG_ATTACH; } static void ide_disk_flush(ide_drive_t *drive) { if (ata_id_flush_enabled(drive->id) == 0 || (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) return; if (do_idedisk_flushcache(drive)) printk(KERN_INFO "%s: wcache flush failed!\n", drive->name); } static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk) { return 0; } static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk, int on) { struct ide_cmd cmd; int ret; if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0) return 0; memset(&cmd, 0, sizeof(cmd)); cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK; cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; ret = ide_no_data_taskfile(drive, &cmd); if (ret) drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING; return ret; } const struct ide_disk_ops ide_ata_disk_ops = { .check = ide_disk_check, .set_capacity = ide_disk_set_capacity, .get_capacity = ide_disk_get_capacity, .setup = ide_disk_setup, .flush = ide_disk_flush, .init_media = ide_disk_init_media, .set_doorlock = ide_disk_set_doorlock, .do_request = ide_do_rw_disk, .ioctl = ide_disk_ioctl, };
gpl-2.0
jamesjjliao/linux
drivers/staging/lustre/lustre/llite/vvp_io.c
530
34465
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2011, 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. * * Implementation of cl_io for VVP layer. * * Author: Nikita Danilov <nikita.danilov@sun.com> * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com> */ #define DEBUG_SUBSYSTEM S_LLITE #include "../include/obd.h" #include "../include/lustre_lite.h" #include "vvp_internal.h" static struct vvp_io *cl2vvp_io(const struct lu_env *env, const struct cl_io_slice *slice); /** * True, if \a io is a normal io, False for splice_{read,write} */ int cl_is_normalio(const struct lu_env *env, const struct cl_io *io) { struct vvp_io *vio = vvp_env_io(env); LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); return vio->cui_io_subtype == IO_NORMAL; } /** * For swapping layout. The file's layout may have changed. 
* To avoid populating pages to a wrong stripe, we have to verify the * correctness of layout. It works because swapping layout processes * have to acquire group lock. */ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, struct inode *inode) { struct ll_inode_info *lli = ll_i2info(inode); struct ccc_io *cio = ccc_env_io(env); bool rc = true; switch (io->ci_type) { case CIT_READ: case CIT_WRITE: /* don't need lock here to check lli_layout_gen as we have held * extent lock and GROUP lock has to hold to swap layout */ if (ll_layout_version_get(lli) != cio->cui_layout_gen) { io->ci_need_restart = 1; /* this will return application a short read/write */ io->ci_continue = 0; rc = false; } case CIT_FAULT: /* fault is okay because we've already had a page. */ default: break; } return rc; } /***************************************************************************** * * io operations. * */ static int vvp_io_fault_iter_init(const struct lu_env *env, const struct cl_io_slice *ios) { struct vvp_io *vio = cl2vvp_io(env, ios); struct inode *inode = ccc_object_inode(ios->cis_obj); LASSERT(inode == file_inode(cl2ccc_io(env, ios)->cui_fd->fd_file)); vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime); return 0; } static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; struct cl_object *obj = io->ci_obj; struct ccc_io *cio = cl2ccc_io(env, ios); CLOBINVRNT(env, obj, ccc_object_invariant(obj)); CDEBUG(D_VFSTRACE, DFID " ignore/verify layout %d/%d, layout version %d restore needed %d\n", PFID(lu_object_fid(&obj->co_lu)), io->ci_ignore_layout, io->ci_verify_layout, cio->cui_layout_gen, io->ci_restore_needed); if (io->ci_restore_needed == 1) { int rc; /* file was detected release, we need to restore it * before finishing the io */ rc = ll_layout_restore(ccc_object_inode(obj)); /* if restore registration failed, no restart, * we will return -ENODATA */ /* The layout will change after restore, so we need to * 
block on layout lock hold by the MDT * as MDT will not send new layout in lvb (see LU-3124) * we have to explicitly fetch it, all this will be done * by ll_layout_refresh() */ if (rc == 0) { io->ci_restore_needed = 0; io->ci_need_restart = 1; io->ci_verify_layout = 1; } else { io->ci_restore_needed = 1; io->ci_need_restart = 0; io->ci_verify_layout = 0; io->ci_result = rc; } } if (!io->ci_ignore_layout && io->ci_verify_layout) { __u32 gen = 0; /* check layout version */ ll_layout_refresh(ccc_object_inode(obj), &gen); io->ci_need_restart = cio->cui_layout_gen != gen; if (io->ci_need_restart) { CDEBUG(D_VFSTRACE, DFID" layout changed from %d to %d.\n", PFID(lu_object_fid(&obj->co_lu)), cio->cui_layout_gen, gen); /* today successful restore is the only possible * case */ /* restore was done, clear restoring state */ ll_i2info(ccc_object_inode(obj))->lli_flags &= ~LLIF_FILE_RESTORING; } } } static void vvp_io_fault_fini(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; struct cl_page *page = io->u.ci_fault.ft_page; CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj)); if (page != NULL) { lu_ref_del(&page->cp_reference, "fault", io); cl_page_put(env, page); io->u.ci_fault.ft_page = NULL; } vvp_io_fini(env, ios); } static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma) { /* * we only want to hold PW locks if the mmap() can generate * writes back to the file and that only happens in shared * writable vmas */ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) return CLM_WRITE; return CLM_READ; } static int vvp_mmap_locks(const struct lu_env *env, struct ccc_io *vio, struct cl_io *io) { struct ccc_thread_info *cti = ccc_env_info(env); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct cl_lock_descr *descr = &cti->cti_descr; ldlm_policy_data_t policy; unsigned long addr; ssize_t count; int result; struct iov_iter i; struct iovec iov; LASSERT(io->ci_type == CIT_READ || 
io->ci_type == CIT_WRITE); if (!cl_is_normalio(env, io)) return 0; if (vio->cui_iter == NULL) /* nfs or loop back device write */ return 0; /* No MM (e.g. NFS)? No vmas too. */ if (mm == NULL) return 0; iov_for_each(iov, i, *(vio->cui_iter)) { addr = (unsigned long)iov.iov_base; count = iov.iov_len; if (count == 0) continue; count += addr & (~CFS_PAGE_MASK); addr &= CFS_PAGE_MASK; down_read(&mm->mmap_sem); while ((vma = our_vma(mm, addr, count)) != NULL) { struct inode *inode = file_inode(vma->vm_file); int flags = CEF_MUST; if (ll_file_nolock(vma->vm_file)) { /* * For no lock case, a lockless lock will be * generated. */ flags = CEF_NEVER; } /* * XXX: Required lock mode can be weakened: CIT_WRITE * io only ever reads user level buffer, and CIT_READ * only writes on it. */ policy_from_vma(&policy, vma, addr, count); descr->cld_mode = vvp_mode_from_vma(vma); descr->cld_obj = ll_i2info(inode)->lli_clob; descr->cld_start = cl_index(descr->cld_obj, policy.l_extent.start); descr->cld_end = cl_index(descr->cld_obj, policy.l_extent.end); descr->cld_enq_flags = flags; result = cl_io_lock_alloc_add(env, io, descr); CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", descr->cld_mode, descr->cld_start, descr->cld_end); if (result < 0) { up_read(&mm->mmap_sem); return result; } if (vma->vm_end - addr >= count) break; count -= vma->vm_end - addr; addr = vma->vm_end; } up_read(&mm->mmap_sem); } return 0; } static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io, enum cl_lock_mode mode, loff_t start, loff_t end) { struct ccc_io *cio = ccc_env_io(env); int result; int ast_flags = 0; LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); ccc_io_update_iov(env, cio, io); if (io->u.ci_rw.crw_nonblock) ast_flags |= CEF_NONBLOCK; result = vvp_mmap_locks(env, cio, io); if (result == 0) result = ccc_io_one_lock(env, io, ast_flags, mode, start, end); return result; } static int vvp_io_read_lock(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = 
ios->cis_io; struct cl_io_rw_common *rd = &io->u.ci_rd.rd; int result; result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos, rd->crw_pos + rd->crw_count - 1); return result; } static int vvp_io_fault_lock(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; struct vvp_io *vio = cl2vvp_io(env, ios); /* * XXX LDLM_FL_CBPENDING */ return ccc_io_one_lock_index (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma), io->u.ci_fault.ft_index, io->u.ci_fault.ft_index); } static int vvp_io_write_lock(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; loff_t start; loff_t end; if (io->u.ci_wr.wr_append) { start = 0; end = OBD_OBJECT_EOF; } else { start = io->u.ci_wr.wr.crw_pos; end = start + io->u.ci_wr.wr.crw_count - 1; } return vvp_io_rw_lock(env, io, CLM_WRITE, start, end); } static int vvp_io_setattr_iter_init(const struct lu_env *env, const struct cl_io_slice *ios) { return 0; } /** * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io. * * Handles "lockless io" mode when extent locking is done by server. */ static int vvp_io_setattr_lock(const struct lu_env *env, const struct cl_io_slice *ios) { struct ccc_io *cio = ccc_env_io(env); struct cl_io *io = ios->cis_io; __u64 new_size; __u32 enqflags = 0; if (cl_io_is_trunc(io)) { new_size = io->u.ci_setattr.sa_attr.lvb_size; if (new_size == 0) enqflags = CEF_DISCARD_DATA; } else { if ((io->u.ci_setattr.sa_attr.lvb_mtime >= io->u.ci_setattr.sa_attr.lvb_ctime) || (io->u.ci_setattr.sa_attr.lvb_atime >= io->u.ci_setattr.sa_attr.lvb_ctime)) return 0; new_size = 0; } cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK; return ccc_io_one_lock(env, io, enqflags, CLM_WRITE, new_size, OBD_OBJECT_EOF); } static int vvp_do_vmtruncate(struct inode *inode, size_t size) { int result; /* * Only ll_inode_size_lock is taken at this level. 
*/ ll_inode_size_lock(inode); result = inode_newsize_ok(inode, size); if (result < 0) { ll_inode_size_unlock(inode); return result; } truncate_setsize(inode, size); ll_inode_size_unlock(inode); return result; } static int vvp_io_setattr_trunc(const struct lu_env *env, const struct cl_io_slice *ios, struct inode *inode, loff_t size) { inode_dio_wait(inode); return 0; } static int vvp_io_setattr_time(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; struct cl_object *obj = io->ci_obj; struct cl_attr *attr = ccc_env_thread_attr(env); int result; unsigned valid = CAT_CTIME; cl_object_attr_lock(obj); attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime; if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) { attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime; valid |= CAT_ATIME; } if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) { attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime; valid |= CAT_MTIME; } result = cl_object_attr_set(env, obj, attr, valid); cl_object_attr_unlock(obj); return result; } static int vvp_io_setattr_start(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; struct inode *inode = ccc_object_inode(io->ci_obj); int result = 0; mutex_lock(&inode->i_mutex); if (cl_io_is_trunc(io)) result = vvp_io_setattr_trunc(env, ios, inode, io->u.ci_setattr.sa_attr.lvb_size); if (result == 0) result = vvp_io_setattr_time(env, ios); return result; } static void vvp_io_setattr_end(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; struct inode *inode = ccc_object_inode(io->ci_obj); if (cl_io_is_trunc(io)) { /* Truncate in memory pages - they must be clean pages * because osc has already notified to destroy osc_extents. 
*/ vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size); inode_dio_write_done(inode); } mutex_unlock(&inode->i_mutex); } static void vvp_io_setattr_fini(const struct lu_env *env, const struct cl_io_slice *ios) { vvp_io_fini(env, ios); } static int vvp_io_read_start(const struct lu_env *env, const struct cl_io_slice *ios) { struct vvp_io *vio = cl2vvp_io(env, ios); struct ccc_io *cio = cl2ccc_io(env, ios); struct cl_io *io = ios->cis_io; struct cl_object *obj = io->ci_obj; struct inode *inode = ccc_object_inode(obj); struct ll_ra_read *bead = &vio->cui_bead; struct file *file = cio->cui_fd->fd_file; int result; loff_t pos = io->u.ci_rd.rd.crw_pos; long cnt = io->u.ci_rd.rd.crw_count; long tot = cio->cui_tot_count; int exceed = 0; CLOBINVRNT(env, obj, ccc_object_invariant(obj)); CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt); if (!can_populate_pages(env, io, inode)) return 0; result = ccc_prep_size(env, obj, io, pos, tot, &exceed); if (result != 0) return result; else if (exceed != 0) goto out; LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "Read ino %lu, %lu bytes, offset %lld, size %llu\n", inode->i_ino, cnt, pos, i_size_read(inode)); /* turn off the kernel's read-ahead */ cio->cui_fd->fd_file->f_ra.ra_pages = 0; /* initialize read-ahead window once per syscall */ if (!vio->cui_ra_window_set) { vio->cui_ra_window_set = 1; bead->lrr_start = cl_index(obj, pos); /* * XXX: explicit PAGE_CACHE_SIZE */ bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1); ll_ra_read_in(file, bead); } /* BUG: 5972 */ file_accessed(file); switch (vio->cui_io_subtype) { case IO_NORMAL: LASSERT(cio->cui_iocb->ki_pos == pos); result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter); break; case IO_SPLICE: result = generic_file_splice_read(file, &pos, vio->u.splice.cui_pipe, cnt, vio->u.splice.cui_flags); /* LU-1109: do splice read stripe by stripe otherwise if it * may make nfsd stuck if this read occupied all internal pipe * buffers. 
*/ io->ci_continue = 0; break; default: CERROR("Wrong IO type %u\n", vio->cui_io_subtype); LBUG(); } out: if (result >= 0) { if (result < cnt) io->ci_continue = 0; io->ci_nob += result; ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd, pos, result, READ); result = 0; } return result; } static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios) { struct vvp_io *vio = cl2vvp_io(env, ios); struct ccc_io *cio = cl2ccc_io(env, ios); if (vio->cui_ra_window_set) ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead); vvp_io_fini(env, ios); } static int vvp_io_write_start(const struct lu_env *env, const struct cl_io_slice *ios) { struct ccc_io *cio = cl2ccc_io(env, ios); struct cl_io *io = ios->cis_io; struct cl_object *obj = io->ci_obj; struct inode *inode = ccc_object_inode(obj); ssize_t result = 0; loff_t pos = io->u.ci_wr.wr.crw_pos; size_t cnt = io->u.ci_wr.wr.crw_count; if (!can_populate_pages(env, io, inode)) return 0; if (cl_io_is_append(io)) { /* * PARALLEL IO This has to be changed for parallel IO doing * out-of-order writes. */ pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode); cio->cui_iocb->ki_pos = pos; } else { LASSERT(cio->cui_iocb->ki_pos == pos); } CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt); if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). 
*/ result = 0; else result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter); if (result > 0) { if (result < cnt) io->ci_continue = 0; io->ci_nob += result; ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd, pos, result, WRITE); result = 0; } return result; } static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) { struct vm_fault *vmf = cfio->fault.ft_vmf; cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf); cfio->fault.ft_flags_valid = 1; if (vmf->page) { CDEBUG(D_PAGE, "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n", vmf->page, vmf->page->mapping, vmf->page->index, (long)vmf->page->flags, page_count(vmf->page), page_private(vmf->page), vmf->virtual_address); if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) { lock_page(vmf->page); cfio->fault.ft_flags |= VM_FAULT_LOCKED; } cfio->ft_vmpage = vmf->page; return 0; } if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); return -EFAULT; } if (cfio->fault.ft_flags & VM_FAULT_OOM) { CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address); return -ENOMEM; } if (cfio->fault.ft_flags & VM_FAULT_RETRY) return -EAGAIN; CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags); return -EINVAL; } static int vvp_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios) { struct vvp_io *vio = cl2vvp_io(env, ios); struct cl_io *io = ios->cis_io; struct cl_object *obj = io->ci_obj; struct inode *inode = ccc_object_inode(obj); struct cl_fault_io *fio = &io->u.ci_fault; struct vvp_fault_io *cfio = &vio->u.fault; loff_t offset; int result = 0; struct page *vmpage = NULL; struct cl_page *page; loff_t size; pgoff_t last; /* last page in a file data region */ if (fio->ft_executable && LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime) CWARN("binary "DFID " changed while waiting for the page fault lock\n", PFID(lu_object_fid(&obj->co_lu))); /* offset of the last byte on 
the page */ offset = cl_offset(obj, fio->ft_index + 1) - 1; LASSERT(cl_index(obj, offset) == fio->ft_index); result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL); if (result != 0) return result; /* must return locked page */ if (fio->ft_mkwrite) { LASSERT(cfio->ft_vmpage != NULL); lock_page(cfio->ft_vmpage); } else { result = vvp_io_kernel_fault(cfio); if (result != 0) return result; } vmpage = cfio->ft_vmpage; LASSERT(PageLocked(vmpage)); if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE)) ll_invalidate_page(vmpage); size = i_size_read(inode); /* Though we have already held a cl_lock upon this page, but * it still can be truncated locally. */ if (unlikely((vmpage->mapping != inode->i_mapping) || (page_offset(vmpage) > size))) { CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n"); /* return +1 to stop cl_io_loop() and ll_fault() will catch * and retry. */ result = +1; goto out; } if (fio->ft_mkwrite) { pgoff_t last_index; /* * Capture the size while holding the lli_trunc_sem from above * we want to make sure that we complete the mkwrite action * while holding this lock. We need to make sure that we are * not past the end of the file. */ last_index = cl_index(obj, size - 1); if (last_index < fio->ft_index) { CDEBUG(D_PAGE, "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n", vmpage->mapping, fio->ft_index, last_index); /* * We need to return if we are * passed the end of the file. This will propagate * up the call stack to ll_page_mkwrite where * we will return VM_FAULT_NOPAGE. Any non-negative * value returned here will be silently * converted to 0. If the vmpage->mapping is null * the error code would be converted back to ENODATA * in ll_page_mkwrite0. Thus we return -ENODATA * to handle both cases */ result = -ENODATA; goto out; } } page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE); if (IS_ERR(page)) { result = PTR_ERR(page); goto out; } /* if page is going to be written, we should add this page into cache * earlier. 
*/ if (fio->ft_mkwrite) { wait_on_page_writeback(vmpage); if (set_page_dirty(vmpage)) { struct ccc_page *cp; /* vvp_page_assume() calls wait_on_page_writeback(). */ cl_page_assume(env, io, page); cp = cl2ccc_page(cl_page_at(page, &vvp_device_type)); vvp_write_pending(cl2ccc(obj), cp); /* Do not set Dirty bit here so that in case IO is * started before the page is really made dirty, we * still have chance to detect it. */ result = cl_page_cache_add(env, io, page, CRT_WRITE); LASSERT(cl_page_is_owned(page, io)); vmpage = NULL; if (result < 0) { cl_page_unmap(env, io, page); cl_page_discard(env, io, page); cl_page_disown(env, io, page); cl_page_put(env, page); /* we're in big trouble, what can we do now? */ if (result == -EDQUOT) result = -ENOSPC; goto out; } else cl_page_disown(env, io, page); } } last = cl_index(obj, size - 1); /* * The ft_index is only used in the case of * a mkwrite action. We need to check * our assertions are correct, since * we should have caught this above */ LASSERT(!fio->ft_mkwrite || fio->ft_index <= last); if (fio->ft_index == last) /* * Last page is mapped partially. */ fio->ft_nob = size - cl_offset(obj, fio->ft_index); else fio->ft_nob = cl_page_size(obj); lu_ref_add(&page->cp_reference, "fault", io); fio->ft_page = page; out: /* return unlocked vmpage to avoid deadlocking */ if (vmpage != NULL) unlock_page(vmpage); cfio->fault.ft_flags &= ~VM_FAULT_LOCKED; return result; } static int vvp_io_fsync_start(const struct lu_env *env, const struct cl_io_slice *ios) { /* we should mark TOWRITE bit to each dirty page in radix tree to * verify pages have been written, but this is difficult because of * race. 
*/ return 0; } static int vvp_io_read_page(const struct lu_env *env, const struct cl_io_slice *ios, const struct cl_page_slice *slice) { struct cl_io *io = ios->cis_io; struct cl_object *obj = slice->cpl_obj; struct ccc_page *cp = cl2ccc_page(slice); struct cl_page *page = slice->cpl_page; struct inode *inode = ccc_object_inode(obj); struct ll_sb_info *sbi = ll_i2sbi(inode); struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd; struct ll_readahead_state *ras = &fd->fd_ras; struct page *vmpage = cp->cpg_page; struct cl_2queue *queue = &io->ci_queue; int rc; CLOBINVRNT(env, obj, ccc_object_invariant(obj)); LASSERT(slice->cpl_obj == obj); if (sbi->ll_ra_info.ra_max_pages_per_file && sbi->ll_ra_info.ra_max_pages) ras_update(sbi, inode, ras, page->cp_index, cp->cpg_defer_uptodate); /* Sanity check whether the page is protected by a lock. */ rc = cl_page_is_under_lock(env, io, page); if (rc != -EBUSY) { CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n", rc == -ENODATA ? "without a lock" : "match failed", rc); if (rc != -ENODATA) return rc; } if (cp->cpg_defer_uptodate) { cp->cpg_ra_used = 1; cl_page_export(env, page, 1); } /* * Add page into the queue even when it is marked uptodate above. * this will unlock it automatically as part of cl_page_list_disown(). */ cl_2queue_add(queue, page); if (sbi->ll_ra_info.ra_max_pages_per_file && sbi->ll_ra_info.ra_max_pages) ll_readahead(env, io, ras, vmpage->mapping, &queue->c2_qin, fd->fd_flags); return 0; } static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io, struct cl_page *page, struct ccc_page *cp, enum cl_req_type crt) { struct cl_2queue *queue; int result; LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); queue = &io->ci_queue; cl_2queue_init_page(queue, page); result = cl_io_submit_sync(env, io, crt, queue, 0); LASSERT(cl_page_is_owned(page, io)); if (crt == CRT_READ) /* * in CRT_WRITE case page is left locked even in case of * error. 
*/ cl_page_list_disown(env, io, &queue->c2_qin); cl_2queue_fini(env, queue); return result; } /** * Prepare partially written-to page for a write. */ static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io, struct cl_object *obj, struct cl_page *pg, struct ccc_page *cp, unsigned from, unsigned to) { struct cl_attr *attr = ccc_env_thread_attr(env); loff_t offset = cl_offset(obj, pg->cp_index); int result; cl_object_attr_lock(obj); result = cl_object_attr_get(env, obj, attr); cl_object_attr_unlock(obj); if (result == 0) { /* * If are writing to a new page, no need to read old data. * The extent locking will have updated the KMS, and for our * purposes here we can treat it like i_size. */ if (attr->cat_kms <= offset) { char *kaddr = kmap_atomic(cp->cpg_page); memset(kaddr, 0, cl_page_size(obj)); kunmap_atomic(kaddr); } else if (cp->cpg_defer_uptodate) cp->cpg_ra_used = 1; else result = vvp_page_sync_io(env, io, pg, cp, CRT_READ); /* * In older implementations, obdo_refresh_inode is called here * to update the inode because the write might modify the * object info at OST. However, this has been proven useless, * since LVB functions will be called when user space program * tries to retrieve inode attribute. Also, see bug 15909 for * details. 
-jay */ if (result == 0) cl_page_export(env, pg, 1); } return result; } static int vvp_io_prepare_write(const struct lu_env *env, const struct cl_io_slice *ios, const struct cl_page_slice *slice, unsigned from, unsigned to) { struct cl_object *obj = slice->cpl_obj; struct ccc_page *cp = cl2ccc_page(slice); struct cl_page *pg = slice->cpl_page; struct page *vmpage = cp->cpg_page; int result; LINVRNT(cl_page_is_vmlocked(env, pg)); LASSERT(vmpage->mapping->host == ccc_object_inode(obj)); result = 0; CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to); if (!PageUptodate(vmpage)) { /* * We're completely overwriting an existing page, so _don't_ * set it up to date until commit_write */ if (from == 0 && to == PAGE_CACHE_SIZE) { CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n"); POISON_PAGE(page, 0x11); } else result = vvp_io_prepare_partial(env, ios->cis_io, obj, pg, cp, from, to); } else CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n"); return result; } static int vvp_io_commit_write(const struct lu_env *env, const struct cl_io_slice *ios, const struct cl_page_slice *slice, unsigned from, unsigned to) { struct cl_object *obj = slice->cpl_obj; struct cl_io *io = ios->cis_io; struct ccc_page *cp = cl2ccc_page(slice); struct cl_page *pg = slice->cpl_page; struct inode *inode = ccc_object_inode(obj); struct ll_sb_info *sbi = ll_i2sbi(inode); struct ll_inode_info *lli = ll_i2info(inode); struct page *vmpage = cp->cpg_page; int result; int tallyop; loff_t size; LINVRNT(cl_page_is_vmlocked(env, pg)); LASSERT(vmpage->mapping->host == inode); LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n"); CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to); /* * queue a write for some time in the future the first time we * dirty the page. * * This is different from what other file systems do: they usually * just mark page (and some of its buffers) dirty and rely on * balance_dirty_pages() to start a write-back. 
Lustre wants write-back * to be started earlier for the following reasons: * * (1) with a large number of clients we need to limit the amount * of cached data on the clients a lot; * * (2) large compute jobs generally want compute-only then io-only * and the IO should complete as quickly as possible; * * (3) IO is batched up to the RPC size and is async until the * client max cache is hit * (/proc/fs/lustre/osc/OSC.../max_dirty_mb) * */ if (!PageDirty(vmpage)) { tallyop = LPROC_LL_DIRTY_MISSES; result = cl_page_cache_add(env, io, pg, CRT_WRITE); if (result == 0) { /* page was added into cache successfully. */ set_page_dirty(vmpage); vvp_write_pending(cl2ccc(obj), cp); } else if (result == -EDQUOT) { pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT; bool need_clip = true; /* * Client ran out of disk space grant. Possible * strategies are: * * (a) do a sync write, renewing grant; * * (b) stop writing on this stripe, switch to the * next one. * * (b) is a part of "parallel io" design that is the * ultimate goal. (a) is what "old" client did, and * what the new code continues to do for the time * being. */ if (last_index > pg->cp_index) { to = PAGE_CACHE_SIZE; need_clip = false; } else if (last_index == pg->cp_index) { int size_to = i_size_read(inode) & ~CFS_PAGE_MASK; if (to < size_to) to = size_to; } if (need_clip) cl_page_clip(env, pg, 0, to); result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE); if (result) CERROR("Write page %lu of inode %p failed %d\n", pg->cp_index, inode, result); } } else { tallyop = LPROC_LL_DIRTY_HITS; result = 0; } ll_stats_ops_tally(sbi, tallyop, 1); /* Inode should be marked DIRTY even if no new page was marked DIRTY * because page could have been not flushed between 2 modifications. * It is important the file is marked DIRTY as soon as the I/O is done * Indeed, when cache is flushed, file could be already closed and it * is too late to warn the MDT. 
* It is acceptable that file is marked DIRTY even if I/O is dropped * for some reasons before being flushed to OST. */ if (result == 0) { spin_lock(&lli->lli_lock); lli->lli_flags |= LLIF_DATA_MODIFIED; spin_unlock(&lli->lli_lock); } size = cl_offset(obj, pg->cp_index) + to; ll_inode_size_lock(inode); if (result == 0) { if (size > i_size_read(inode)) { cl_isize_write_nolock(inode, size); CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n", PFID(lu_object_fid(&obj->co_lu)), (unsigned long)size); } cl_page_export(env, pg, 1); } else { if (size > i_size_read(inode)) cl_page_discard(env, io, pg); } ll_inode_size_unlock(inode); return result; } static const struct cl_io_operations vvp_io_ops = { .op = { [CIT_READ] = { .cio_fini = vvp_io_read_fini, .cio_lock = vvp_io_read_lock, .cio_start = vvp_io_read_start, .cio_advance = ccc_io_advance }, [CIT_WRITE] = { .cio_fini = vvp_io_fini, .cio_lock = vvp_io_write_lock, .cio_start = vvp_io_write_start, .cio_advance = ccc_io_advance }, [CIT_SETATTR] = { .cio_fini = vvp_io_setattr_fini, .cio_iter_init = vvp_io_setattr_iter_init, .cio_lock = vvp_io_setattr_lock, .cio_start = vvp_io_setattr_start, .cio_end = vvp_io_setattr_end }, [CIT_FAULT] = { .cio_fini = vvp_io_fault_fini, .cio_iter_init = vvp_io_fault_iter_init, .cio_lock = vvp_io_fault_lock, .cio_start = vvp_io_fault_start, .cio_end = ccc_io_end }, [CIT_FSYNC] = { .cio_start = vvp_io_fsync_start, .cio_fini = vvp_io_fini }, [CIT_MISC] = { .cio_fini = vvp_io_fini } }, .cio_read_page = vvp_io_read_page, .cio_prepare_write = vvp_io_prepare_write, .cio_commit_write = vvp_io_commit_write }; int vvp_io_init(const struct lu_env *env, struct cl_object *obj, struct cl_io *io) { struct vvp_io *vio = vvp_env_io(env); struct ccc_io *cio = ccc_env_io(env); struct inode *inode = ccc_object_inode(obj); int result; CLOBINVRNT(env, obj, ccc_object_invariant(obj)); CDEBUG(D_VFSTRACE, DFID " ignore/verify layout %d/%d, layout version %d restore needed %d\n", PFID(lu_object_fid(&obj->co_lu)), 
io->ci_ignore_layout, io->ci_verify_layout, cio->cui_layout_gen, io->ci_restore_needed); CL_IO_SLICE_CLEAN(cio, cui_cl); cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops); vio->cui_ra_window_set = 0; result = 0; if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) { size_t count; struct ll_inode_info *lli = ll_i2info(inode); count = io->u.ci_rw.crw_count; /* "If nbyte is 0, read() will return 0 and have no other * results." -- Single Unix Spec */ if (count == 0) result = 1; else cio->cui_tot_count = count; /* for read/write, we store the jobid in the inode, and * it'll be fetched by osc when building RPC. * * it's not accurate if the file is shared by different * jobs. */ lustre_get_jobid(lli->lli_jobid); } else if (io->ci_type == CIT_SETATTR) { if (!cl_io_is_trunc(io)) io->ci_lockreq = CILR_MANDATORY; } /* ignore layout change for generic CIT_MISC but not for glimpse. * io context for glimpse must set ci_verify_layout to true, * see cl_glimpse_size0() for details. */ if (io->ci_type == CIT_MISC && !io->ci_verify_layout) io->ci_ignore_layout = 1; /* Enqueue layout lock and get layout version. We need to do this * even for operations requiring to open file, such as read and write, * because it might not grant layout lock in IT_OPEN. */ if (result == 0 && !io->ci_ignore_layout) { result = ll_layout_refresh(inode, &cio->cui_layout_gen); if (result == -ENOENT) /* If the inode on MDS has been removed, but the objects * on OSTs haven't been destroyed (async unlink), layout * fetch will return -ENOENT, we'd ignore this error * and continue with dirty flush. LU-3230. */ result = 0; if (result < 0) CERROR("%s: refresh file layout " DFID " error %d.\n", ll_get_fsname(inode->i_sb, NULL, 0), PFID(lu_object_fid(&obj->co_lu)), result); } return result; } static struct vvp_io *cl2vvp_io(const struct lu_env *env, const struct cl_io_slice *slice) { /* Calling just for assertion */ cl2ccc_io(env, slice); return vvp_env_io(env); }
gpl-2.0
OMAP4-AOSP/android_kernel_omap4_common
security/selinux/ss/conditional.c
530
14837
/* Authors: Karl MacMillan <kmacmillan@tresys.com>
 *          Frank Mayer <mayerf@tresys.com>
 *
 * Copyright (C) 2003 - 2004 Tresys Technology, LLC
 *	This program is free software; you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License as published by
 *	the Free Software Foundation, version 2.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include "security.h"
#include "conditional.h"
#include "services.h"

/*
 * cond_evaluate_expr evaluates a conditional expr
 * in reverse polish notation. It returns true (1), false (0),
 * or undefined (-1). Undefined occurs when the expression
 * exceeds the stack depth of COND_EXPR_MAXDEPTH.
 *
 * The expression is a linked list of operators/operands in RPN order;
 * s[] is the evaluation stack and sp the index of its current top
 * (-1 == empty).  Every operator validates the stack depth before
 * touching it, so a malformed expression yields -1 (undefined) rather
 * than an out-of-bounds access.
 */
static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr)
{
	struct cond_expr *cur;
	int s[COND_EXPR_MAXDEPTH];
	int sp = -1;

	for (cur = expr; cur; cur = cur->next) {
		switch (cur->expr_type) {
		case COND_BOOL:
			/* Operand: push the current state of the referenced
			 * boolean (values are 1-based, the array 0-based). */
			if (sp == (COND_EXPR_MAXDEPTH - 1))
				return -1;
			sp++;
			s[sp] = p->bool_val_to_struct[cur->bool - 1]->state;
			break;
		case COND_NOT:
			/* Unary: negate the top of stack in place. */
			if (sp < 0)
				return -1;
			s[sp] = !s[sp];
			break;
		case COND_OR:
			/* Binary operators pop two operands and push one
			 * result; 0/1 operands make bitwise ops equivalent
			 * to logical ones here. */
			if (sp < 1)
				return -1;
			sp--;
			s[sp] |= s[sp + 1];
			break;
		case COND_AND:
			if (sp < 1)
				return -1;
			sp--;
			s[sp] &= s[sp + 1];
			break;
		case COND_XOR:
			if (sp < 1)
				return -1;
			sp--;
			s[sp] ^= s[sp + 1];
			break;
		case COND_EQ:
			if (sp < 1)
				return -1;
			sp--;
			s[sp] = (s[sp] == s[sp + 1]);
			break;
		case COND_NEQ:
			if (sp < 1)
				return -1;
			sp--;
			s[sp] = (s[sp] != s[sp + 1]);
			break;
		default:
			/* Unknown operator in the policy -> undefined. */
			return -1;
		}
	}
	/* NOTE(review): if expr is NULL/empty, sp stays -1 and s[0] is read
	 * uninitialized; callers appear to always pass a parsed, non-empty
	 * expression — verify against cond_read_node(). */
	return s[0];
}

/*
 * evaluate_cond_node evaluates the conditional stored in
 * a struct cond_node and if the result is different than the
 * current state of the node it sets the rules in the true/false
 * list appropriately. If the result of the expression is undefined
 * all of the rules are disabled for safety.
 */
int evaluate_cond_node(struct policydb *p, struct cond_node *node)
{
	int new_state;
	struct cond_av_list *cur;

	/* -1 (undefined) is deliberately distinct from both 0 and 1:
	 * it disables the true list (new_state <= 0) AND the false list
	 * (new_state != 0), so no rule fires on a bad expression. */
	new_state = cond_evaluate_expr(p, node->expr);
	if (new_state != node->cur_state) {
		node->cur_state = new_state;
		if (new_state == -1)
			printk(KERN_ERR "SELinux: expression result was undefined - disabling all rules.\n");
		/* turn the rules on or off */
		for (cur = node->true_list; cur; cur = cur->next) {
			if (new_state <= 0)
				cur->node->key.specified &= ~AVTAB_ENABLED;
			else
				cur->node->key.specified |= AVTAB_ENABLED;
		}
		for (cur = node->false_list; cur; cur = cur->next) {
			/* -1 or 1 */
			if (new_state)
				cur->node->key.specified &= ~AVTAB_ENABLED;
			else
				cur->node->key.specified |= AVTAB_ENABLED;
		}
	}
	return 0;
}

/*
 * Initialize the conditional parts of a policydb: no booleans, no
 * conditional nodes, and an empty conditional access-vector table.
 */
int cond_policydb_init(struct policydb *p)
{
	int rc;

	p->bool_val_to_struct = NULL;
	p->cond_list = NULL;

	rc = avtab_init(&p->te_cond_avtab);
	if (rc)
		return rc;

	return 0;
}

/* Free the list wrappers only; the avtab nodes they point at are owned
 * (and freed) by the avtab itself. */
static void cond_av_list_destroy(struct cond_av_list *list)
{
	struct cond_av_list *cur, *next;

	for (cur = list; cur; cur = next) {
		next = cur->next;
		/* the avtab_ptr_t node is destroy by the avtab */
		kfree(cur);
	}
}

/* Free one conditional node: its expression chain, both rule lists,
 * and the node itself. */
static void cond_node_destroy(struct cond_node *node)
{
	struct cond_expr *cur_expr, *next_expr;

	for (cur_expr = node->expr; cur_expr; cur_expr = next_expr) {
		next_expr = cur_expr->next;
		kfree(cur_expr);
	}
	cond_av_list_destroy(node->true_list);
	cond_av_list_destroy(node->false_list);
	kfree(node);
}

/* Destroy an entire list of conditional nodes (NULL is a no-op). */
static void cond_list_destroy(struct cond_node *list)
{
	struct cond_node *next, *cur;

	if (list == NULL)
		return;

	for (cur = list; cur; cur = next) {
		next = cur->next;
		cond_node_destroy(cur);
	}
}

/* Tear down everything cond_policydb_init()/cond_read_list() built. */
void cond_policydb_destroy(struct policydb *p)
{
	kfree(p->bool_val_to_struct);
	avtab_destroy(&p->te_cond_avtab);
	cond_list_destroy(p->cond_list);
}

/*
 * (Re)allocate the boolean value -> datum lookup table sized to the
 * number of booleans in the policy; kfree first in case this is a
 * re-index of an existing policydb.
 */
int cond_init_bool_indexes(struct policydb *p)
{
	kfree(p->bool_val_to_struct);
	p->bool_val_to_struct = kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL);
	if (!p->bool_val_to_struct)
		return -ENOMEM;
	return 0;
}

int
cond_destroy_bool(void *key, void *datum, void *p) { kfree(key); kfree(datum); return 0; } int cond_index_bool(void *key, void *datum, void *datap) { struct policydb *p; struct cond_bool_datum *booldatum; struct flex_array *fa; booldatum = datum; p = datap; if (!booldatum->value || booldatum->value > p->p_bools.nprim) return -EINVAL; fa = p->sym_val_to_name[SYM_BOOLS]; if (flex_array_put_ptr(fa, booldatum->value - 1, key, GFP_KERNEL | __GFP_ZERO)) BUG(); p->bool_val_to_struct[booldatum->value - 1] = booldatum; return 0; } static int bool_isvalid(struct cond_bool_datum *b) { if (!(b->state == 0 || b->state == 1)) return 0; return 1; } int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp) { char *key = NULL; struct cond_bool_datum *booldatum; __le32 buf[3]; u32 len; int rc; booldatum = kzalloc(sizeof(struct cond_bool_datum), GFP_KERNEL); if (!booldatum) return -ENOMEM; rc = next_entry(buf, fp, sizeof buf); if (rc) goto err; booldatum->value = le32_to_cpu(buf[0]); booldatum->state = le32_to_cpu(buf[1]); rc = -EINVAL; if (!bool_isvalid(booldatum)) goto err; len = le32_to_cpu(buf[2]); rc = -ENOMEM; key = kmalloc(len + 1, GFP_KERNEL); if (!key) goto err; rc = next_entry(key, fp, len); if (rc) goto err; key[len] = '\0'; rc = hashtab_insert(h, key, booldatum); if (rc) goto err; return 0; err: cond_destroy_bool(key, booldatum, NULL); return rc; } struct cond_insertf_data { struct policydb *p; struct cond_av_list *other; struct cond_av_list *head; struct cond_av_list *tail; }; static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum *d, void *ptr) { struct cond_insertf_data *data = ptr; struct policydb *p = data->p; struct cond_av_list *other = data->other, *list, *cur; struct avtab_node *node_ptr; u8 found; int rc = -EINVAL; /* * For type rules we have to make certain there aren't any * conflicting rules by searching the te_avtab and the * cond_te_avtab. 
*/ if (k->specified & AVTAB_TYPE) { if (avtab_search(&p->te_avtab, k)) { printk(KERN_ERR "SELinux: type rule already exists outside of a conditional.\n"); goto err; } /* * If we are reading the false list other will be a pointer to * the true list. We can have duplicate entries if there is only * 1 other entry and it is in our true list. * * If we are reading the true list (other == NULL) there shouldn't * be any other entries. */ if (other) { node_ptr = avtab_search_node(&p->te_cond_avtab, k); if (node_ptr) { if (avtab_search_node_next(node_ptr, k->specified)) { printk(KERN_ERR "SELinux: too many conflicting type rules.\n"); goto err; } found = 0; for (cur = other; cur; cur = cur->next) { if (cur->node == node_ptr) { found = 1; break; } } if (!found) { printk(KERN_ERR "SELinux: conflicting type rules.\n"); goto err; } } } else { if (avtab_search(&p->te_cond_avtab, k)) { printk(KERN_ERR "SELinux: conflicting type rules when adding type rule for true.\n"); goto err; } } } node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d); if (!node_ptr) { printk(KERN_ERR "SELinux: could not insert rule.\n"); rc = -ENOMEM; goto err; } list = kzalloc(sizeof(struct cond_av_list), GFP_KERNEL); if (!list) { rc = -ENOMEM; goto err; } list->node = node_ptr; if (!data->head) data->head = list; else data->tail->next = list; data->tail = list; return 0; err: cond_av_list_destroy(data->head); data->head = NULL; return rc; } static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list **ret_list, struct cond_av_list *other) { int i, rc; __le32 buf[1]; u32 len; struct cond_insertf_data data; *ret_list = NULL; len = 0; rc = next_entry(buf, fp, sizeof(u32)); if (rc) return rc; len = le32_to_cpu(buf[0]); if (len == 0) return 0; data.p = p; data.other = other; data.head = NULL; data.tail = NULL; for (i = 0; i < len; i++) { rc = avtab_read_item(&p->te_cond_avtab, fp, p, cond_insertf, &data); if (rc) return rc; } *ret_list = data.head; return 0; } static int 
expr_isvalid(struct policydb *p, struct cond_expr *expr) { if (expr->expr_type <= 0 || expr->expr_type > COND_LAST) { printk(KERN_ERR "SELinux: conditional expressions uses unknown operator.\n"); return 0; } if (expr->bool > p->p_bools.nprim) { printk(KERN_ERR "SELinux: conditional expressions uses unknown bool.\n"); return 0; } return 1; } static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp) { __le32 buf[2]; u32 len, i; int rc; struct cond_expr *expr = NULL, *last = NULL; rc = next_entry(buf, fp, sizeof(u32)); if (rc) return rc; node->cur_state = le32_to_cpu(buf[0]); len = 0; rc = next_entry(buf, fp, sizeof(u32)); if (rc) return rc; /* expr */ len = le32_to_cpu(buf[0]); for (i = 0; i < len; i++) { rc = next_entry(buf, fp, sizeof(u32) * 2); if (rc) goto err; rc = -ENOMEM; expr = kzalloc(sizeof(struct cond_expr), GFP_KERNEL); if (!expr) goto err; expr->expr_type = le32_to_cpu(buf[0]); expr->bool = le32_to_cpu(buf[1]); if (!expr_isvalid(p, expr)) { rc = -EINVAL; kfree(expr); goto err; } if (i == 0) node->expr = expr; else last->next = expr; last = expr; } rc = cond_read_av_list(p, fp, &node->true_list, NULL); if (rc) goto err; rc = cond_read_av_list(p, fp, &node->false_list, node->true_list); if (rc) goto err; return 0; err: cond_node_destroy(node); return rc; } int cond_read_list(struct policydb *p, void *fp) { struct cond_node *node, *last = NULL; __le32 buf[1]; u32 i, len; int rc; rc = next_entry(buf, fp, sizeof buf); if (rc) return rc; len = le32_to_cpu(buf[0]); rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel); if (rc) goto err; for (i = 0; i < len; i++) { rc = -ENOMEM; node = kzalloc(sizeof(struct cond_node), GFP_KERNEL); if (!node) goto err; rc = cond_read_node(p, node, fp); if (rc) goto err; if (i == 0) p->cond_list = node; else last->next = node; last = node; } return 0; err: cond_list_destroy(p->cond_list); p->cond_list = NULL; return rc; } int cond_write_bool(void *vkey, void *datum, void *ptr) { char *key = vkey; struct 
cond_bool_datum *booldatum = datum; struct policy_data *pd = ptr; void *fp = pd->fp; __le32 buf[3]; u32 len; int rc; len = strlen(key); buf[0] = cpu_to_le32(booldatum->value); buf[1] = cpu_to_le32(booldatum->state); buf[2] = cpu_to_le32(len); rc = put_entry(buf, sizeof(u32), 3, fp); if (rc) return rc; rc = put_entry(key, 1, len, fp); if (rc) return rc; return 0; } /* * cond_write_cond_av_list doesn't write out the av_list nodes. * Instead it writes out the key/value pairs from the avtab. This * is necessary because there is no way to uniquely identifying rules * in the avtab so it is not possible to associate individual rules * in the avtab with a conditional without saving them as part of * the conditional. This means that the avtab with the conditional * rules will not be saved but will be rebuilt on policy load. */ static int cond_write_av_list(struct policydb *p, struct cond_av_list *list, struct policy_file *fp) { __le32 buf[1]; struct cond_av_list *cur_list; u32 len; int rc; len = 0; for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) len++; buf[0] = cpu_to_le32(len); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; if (len == 0) return 0; for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) { rc = avtab_write_item(p, cur_list->node, fp); if (rc) return rc; } return 0; } static int cond_write_node(struct policydb *p, struct cond_node *node, struct policy_file *fp) { struct cond_expr *cur_expr; __le32 buf[2]; int rc; u32 len = 0; buf[0] = cpu_to_le32(node->cur_state); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) len++; buf[0] = cpu_to_le32(len); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) { buf[0] = cpu_to_le32(cur_expr->expr_type); buf[1] = cpu_to_le32(cur_expr->bool); rc = put_entry(buf, sizeof(u32), 2, fp); if (rc) return rc; } rc = 
cond_write_av_list(p, node->true_list, fp); if (rc) return rc; rc = cond_write_av_list(p, node->false_list, fp); if (rc) return rc; return 0; } int cond_write_list(struct policydb *p, struct cond_node *list, void *fp) { struct cond_node *cur; u32 len; __le32 buf[1]; int rc; len = 0; for (cur = list; cur != NULL; cur = cur->next) len++; buf[0] = cpu_to_le32(len); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; for (cur = list; cur != NULL; cur = cur->next) { rc = cond_write_node(p, cur, fp); if (rc) return rc; } return 0; } void cond_compute_xperms(struct avtab *ctab, struct avtab_key *key, struct extended_perms_decision *xpermd) { struct avtab_node *node; if (!ctab || !key || !xpermd) return; for (node = avtab_search_node(ctab, key); node; node = avtab_search_node_next(node, key->specified)) { if (node->key.specified & AVTAB_ENABLED) services_compute_xperms_decision(xpermd, node); } return; } /* Determine whether additional permissions are granted by the conditional * av table, and if so, add them to the result */ void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd, struct extended_perms *xperms) { struct avtab_node *node; if (!ctab || !key || !avd) return; for (node = avtab_search_node(ctab, key); node; node = avtab_search_node_next(node, key->specified)) { if ((u16)(AVTAB_ALLOWED|AVTAB_ENABLED) == (node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED))) avd->allowed |= node->datum.u.data; if ((u16)(AVTAB_AUDITDENY|AVTAB_ENABLED) == (node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED))) /* Since a '0' in an auditdeny mask represents a * permission we do NOT want to audit (dontaudit), we use * the '&' operand to ensure that all '0's in the mask * are retained (much unlike the allow and auditallow cases). 
			 */
			avd->auditdeny &= node->datum.u.data;
		if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) ==
		    (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)))
			avd->auditallow |= node->datum.u.data;
		/* Collect extended-permission driver info only for enabled
		 * entries that actually carry xperms data. */
		if (xperms && (node->key.specified & AVTAB_ENABLED) &&
		    (node->key.specified & AVTAB_XPERMS))
			services_compute_xperms_drivers(xperms, node);
	}
	return;
}
gpl-2.0
sysexits/cs530
drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
786
1923
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "ram.h"
#include "regsnv04.h"

/*
 * nv04_fb_memtype_valid - check whether a memory type is usable on NV04
 * @fb: framebuffer subdev (unused on this generation)
 * @tile_flags: requested memory-type/tiling flags
 *
 * NV04-class hardware accepts only memtypes with no flags set in bits
 * 8-15; collapse the original if/return-true/return-false into a direct
 * boolean expression.
 */
bool
nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
{
	return !(tile_flags & 0xff00);
}

/* One-time hardware init for the NV04 framebuffer subdev. */
static void
nv04_fb_init(struct nvkm_fb *fb)
{
	struct nvkm_device *device = fb->subdev.device;

	/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
	 * nvidia reading PFB_CFG_0, then writing back its original value.
	 * (which was 0x701114 in this case)
	 */
	nvkm_wr32(device, NV04_PFB_CFG0, 0x1114);
}

/* Function table wiring the NV04 implementation into the common fb core. */
static const struct nvkm_fb_func
nv04_fb = {
	.init = nv04_fb_init,
	.ram_new = nv04_ram_new,
	.memtype_valid = nv04_fb_memtype_valid,
};

/* Constructor: instantiate the common fb object with the NV04 func table. */
int
nv04_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
	return nvkm_fb_new_(&nv04_fb, device, index, pfb);
}
gpl-2.0
MAKO-MM/android_kernel_lge_mako
drivers/staging/prima/CORE/WDI/WPAL/src/wlan_qct_pal_trace.c
1298
12366
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /**========================================================================= \file wlan_qct_pal_trace.c \brief Implementation trace/logging APIs PAL exports. 
wpt = (Wlan Pal Type) wpal = (Wlan PAL) Definitions for Linux/Android platform Copyright 2010-2011 (c) Qualcomm, Incorporated. All Rights Reserved. Qualcomm Confidential and Proprietary. ========================================================================*/ #include "wlan_qct_pal_trace.h" #include "i_vos_types.h" #ifdef WLAN_DEBUG /*-------------------------------------------------------------------------- Preprocessor definitions and constants ------------------------------------------------------------------------*/ #define WPAL_TRACE_BUFFER_SIZE ( 512 ) // macro to map wpal trace levels into the bitmask #define WPAL_TRACE_LEVEL_TO_MODULE_BITMASK( _level ) ( ( 1 << (_level) ) ) typedef struct { // Trace level for a module, as a bitmask. The bits in this mask // are ordered by wpt_tracelevel. For example, each bit represents // one of the bits in wpt_tracelevel that may be turned on to have // traces at that level logged, i.e. if eWLAN_PAL_TRACE_LEVEL_ERROR is // == 2, then if bit 2 (low order) is turned ON, then ERROR traces // will be printed to the trace log. // // Note that all bits turned OFF means no traces. wpt_uint16 moduleTraceLevel; // 3 character string name for the module wpt_uint8 moduleNameStr[ 4 ]; // 3 chars plus the NULL } moduleTraceInfo; // Array of static data that contains all of the per module trace // information. This includes the trace level for the module and // the 3 character 'name' of the module for marking the trace logs. moduleTraceInfo gTraceInfo[ eWLAN_MODULE_COUNT ] = { { (1<<eWLAN_PAL_TRACE_LEVEL_FATAL)|(1<<eWLAN_PAL_TRACE_LEVEL_ERROR), "DAL" }, { (1<<eWLAN_PAL_TRACE_LEVEL_FATAL)|(1<<eWLAN_PAL_TRACE_LEVEL_ERROR), "CTL" }, { (1<<eWLAN_PAL_TRACE_LEVEL_FATAL)|(1<<eWLAN_PAL_TRACE_LEVEL_ERROR), "DAT" }, { (1<<eWLAN_PAL_TRACE_LEVEL_FATAL)|(1<<eWLAN_PAL_TRACE_LEVEL_ERROR), "PAL" }, }; // the trace level strings in an array. 
these are ordered in the same order // as the trace levels are defined in the enum (see wpt_tracelevel) so we // can index into this array with the level and get the right string. The // trace levels are... // none, Fatal, Error, Warning, Info, InfoHigh, InfoMed, InfoLow static const char * TRACE_LEVEL_STR[] = { " ", "F ", "E ", "W ", "I ", "IH", "IM", "IL" }; /*------------------------------------------------------------------------- Functions ------------------------------------------------------------------------*/ static void wpalOutput(wpt_tracelevel level, char *strBuffer) { switch(level) { default: printk(KERN_CRIT "%s: Unknown trace level passed in!\n", __func__); // fall thru and use FATAL case eWLAN_PAL_TRACE_LEVEL_FATAL: printk(KERN_CRIT "%s\n", strBuffer); break; case eWLAN_PAL_TRACE_LEVEL_ERROR: printk(KERN_ERR "%s\n", strBuffer); break; case eWLAN_PAL_TRACE_LEVEL_WARN: printk(KERN_WARNING "%s\n", strBuffer); break; case eWLAN_PAL_TRACE_LEVEL_INFO: printk(KERN_INFO "%s\n", strBuffer); break; case eWLAN_PAL_TRACE_LEVEL_INFO_HIGH: printk(KERN_NOTICE "%s\n", strBuffer); break; case eWLAN_PAL_TRACE_LEVEL_INFO_MED: printk(KERN_NOTICE "%s\n", strBuffer); break; case eWLAN_PAL_TRACE_LEVEL_INFO_LOW: printk(KERN_INFO "%s\n", strBuffer); break; } } void wpalTraceSetLevel( wpt_moduleid module, wpt_tracelevel level, wpt_boolean on ) { // Make sure the caller is passing in a valid LEVEL and MODULE. if ( (eWLAN_PAL_TRACE_LEVEL_COUNT <= level) || (eWLAN_MODULE_COUNT <= module) ) { return; } if ( eWLAN_PAL_TRACE_LEVEL_NONE == level ) { // Treat 'none' differently. NONE means we have to turn off all // the bits in the bit mask so none of the traces appear. gTraceInfo[ module ].moduleTraceLevel = 0; } else if ( eWLAN_PAL_TRACE_LEVEL_ALL == level ) { // Treat 'all' differently. ALL means we have to turn on all // the bits in the bit mask so all of the traces appear. 
gTraceInfo[ module ].moduleTraceLevel = 0xFFFF; } else { // We are turning a particular trace level on or off if (on) { // Set the desired bit in the bit mask for the module trace level. gTraceInfo[ module ].moduleTraceLevel |= WPAL_TRACE_LEVEL_TO_MODULE_BITMASK( level ); } else { // Clear the desired bit in the bit mask for the module trace level. gTraceInfo[ module ].moduleTraceLevel &= ~(WPAL_TRACE_LEVEL_TO_MODULE_BITMASK( level )); } } } wpt_boolean wpalTraceCheckLevel( wpt_moduleid module, wpt_tracelevel level ) { wpt_boolean traceOn = eWLAN_PAL_FALSE; if ( ( eWLAN_PAL_TRACE_LEVEL_NONE == level ) || ( level >= eWLAN_PAL_TRACE_LEVEL_COUNT )) { traceOn = eWLAN_PAL_FALSE; } else { traceOn = ( level & gTraceInfo[ module ].moduleTraceLevel ) ? eWLAN_PAL_TRUE : eWLAN_PAL_FALSE; } return( traceOn ); } void wpalTraceDisplay(void) { wpt_moduleid moduleId; printk(KERN_CRIT " 1)FATAL 2)ERROR 3)WARN 4)INFO " "5)INFO_H 6)INFO_M 7)INFO_L\n"); for (moduleId = 0; moduleId < eWLAN_MODULE_COUNT; ++moduleId) { printk(KERN_CRIT "%2d)%s %s %s %s " "%s %s %s %s\n", (int)moduleId, gTraceInfo[moduleId].moduleNameStr, (gTraceInfo[moduleId].moduleTraceLevel & (1 << eWLAN_PAL_TRACE_LEVEL_FATAL)) ? "X":" ", (gTraceInfo[moduleId].moduleTraceLevel & (1 << eWLAN_PAL_TRACE_LEVEL_ERROR)) ? "X":" ", (gTraceInfo[moduleId].moduleTraceLevel & (1 << eWLAN_PAL_TRACE_LEVEL_WARN)) ? "X":" ", (gTraceInfo[moduleId].moduleTraceLevel & (1 << eWLAN_PAL_TRACE_LEVEL_INFO)) ? "X":" ", (gTraceInfo[moduleId].moduleTraceLevel & (1 << eWLAN_PAL_TRACE_LEVEL_INFO_HIGH)) ? "X":" ", (gTraceInfo[moduleId].moduleTraceLevel & (1 << eWLAN_PAL_TRACE_LEVEL_INFO_MED)) ? "X":" ", (gTraceInfo[moduleId].moduleTraceLevel & (1 << eWLAN_PAL_TRACE_LEVEL_INFO_LOW)) ? "X":" " ); } } /*---------------------------------------------------------------------------- \brief wpalTrace() - Externally called trace function Checks the level of severity and accordingly prints the trace messages \param module - module identifier. 
A member of the wpt_moduleid enumeration that identifies the module issuing the trace message. \param level - trace level. A member of the wpt_tracelevel enumeration indicating the severity of the condition causing the trace message to be issued. More severe conditions are more likely to be logged. \param strFormat - format string. The message to be logged. This format string contains printf-like replacement parameters, which follow this parameter in the variable argument list. \return nothing \sa --------------------------------------------------------------------------*/ void wpalTrace( wpt_moduleid module, wpt_tracelevel level, char *strFormat, ... ) { wpt_uint8 strBuffer[ WPAL_TRACE_BUFFER_SIZE ]; int n; // Print the trace message when the desired level bit is set in the module // tracel level mask. if ( gTraceInfo[ module ].moduleTraceLevel & WPAL_TRACE_LEVEL_TO_MODULE_BITMASK( level ) ) { va_list val; va_start(val, strFormat); // print the prefix string into the string buffer... n = snprintf(strBuffer, WPAL_TRACE_BUFFER_SIZE, "wlan: [%d:%2s:%3s] ", in_interrupt() ? 0 : current->pid, (char *) TRACE_LEVEL_STR[ level ], (char *) gTraceInfo[ module ].moduleNameStr); // print the formatted log message after the prefix string. // note we reserve space for the terminating NUL if ((n >= 0) && (n < WPAL_TRACE_BUFFER_SIZE)) { vsnprintf(strBuffer + n, WPAL_TRACE_BUFFER_SIZE - n - 1, strFormat, val); wpalOutput(level, strBuffer); } va_end(val); } } /**---------------------------------------------------------------------------- \brief WPAL_DUMP() / wpalDump() - Trace / logging API Users wishing to add tracing memory dumps to their code should use WPAL_DUMP. WPAL_DUMP() will compile into a call to wpalDump() when tracing is enabled. \param module - module identifier. A member of the wpt_moduleid enumeration that identifies the module performing the dump \param level - trace level. 
A member of the wpt_tracelevel enumeration indicating the severity of the condition causing the memory to be dumped. More severe conditions are more likely to be logged. \param pMemory - memory. A pointer to the memory to be dumped \param length - length. How many bytes of memory to be dumped \return nothing --------------------------------------------------------------------------*/ // how many bytes do we output per line #define BYTES_PER_LINE 16 // each byte takes 2 characters plus a space, plus need room for NUL #define CHARS_PER_LINE ((BYTES_PER_LINE * 3) + 1) void wpalDump( wpt_moduleid module, wpt_tracelevel level, wpt_uint8 *pMemory, wpt_uint32 length) { char strBuffer[CHARS_PER_LINE]; int n, num, offset; // Dump the memory when the desired level bit is set in the module // tracel level mask. if ( gTraceInfo[ module ].moduleTraceLevel & WPAL_TRACE_LEVEL_TO_MODULE_BITMASK( level ) ) { num = 0; offset = 0; while (length > 0) { n = snprintf(strBuffer + offset, CHARS_PER_LINE - offset - 1, "%02X ", *pMemory); offset += n; num++; length--; pMemory++; if (BYTES_PER_LINE == num) { wpalOutput(level, strBuffer); num = 0; offset = 0; } } if (offset > 0) { // partial line remains wpalOutput(level, strBuffer); } } } #endif //WLAN_DEBUG
gpl-2.0
vathpela/linux-esrt
arch/s390/crypto/sha1_s390.c
1554
2813
/*
 * Cryptographic API.
 *
 * s390 implementation of the SHA1 Secure Hash Algorithm.
 *
 * Derived from cryptoapi implementation, adapted for in-place
 * scatterlist interface.  Originally based on the public domain
 * implementation written by Steve Reid.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2003, 2007
 *   Author(s): Thomas Spatzier
 *		Jan Glauber (jan.glauber@de.ibm.com)
 *
 * Derived from "crypto/sha1_generic.c"
 *   Copyright (c) Alan Smithee.
 *   Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 *   Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <crypto/sha.h>

#include "crypt_s390.h"
#include "sha.h"

/*
 * Initialize the per-request context: load the standard SHA-1 initial
 * hash values, reset the byte counter, and select the KIMD SHA-1
 * function code used by the shared s390 update/final helpers.
 */
static int sha1_init(struct shash_desc *desc)
{
	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);

	sctx->state[0] = SHA1_H0;
	sctx->state[1] = SHA1_H1;
	sctx->state[2] = SHA1_H2;
	sctx->state[3] = SHA1_H3;
	sctx->state[4] = SHA1_H4;
	sctx->count = 0;
	sctx->func = KIMD_SHA_1;

	return 0;
}

/*
 * Serialize the s390-specific context into the generic struct sha1_state
 * layout so the state can be resumed by any SHA-1 implementation.
 */
static int sha1_export(struct shash_desc *desc, void *out)
{
	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
	struct sha1_state *octx = out;

	octx->count = sctx->count;
	memcpy(octx->state, sctx->state, sizeof(octx->state));
	memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer));
	return 0;
}

/*
 * Inverse of sha1_export(): restore count/state/buffer from a generic
 * struct sha1_state and re-select the KIMD SHA-1 function code (the
 * generic state does not carry it).
 */
static int sha1_import(struct shash_desc *desc, const void *in)
{
	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
	const struct sha1_state *ictx = in;

	sctx->count = ictx->count;
	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
	memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
	sctx->func = KIMD_SHA_1;
	return 0;
}

/*
 * shash algorithm descriptor: init/export/import are local, while
 * update/final are the shared s390 CPACF helpers from sha.h.
 * statesize uses the generic sha1_state so export/import interoperate
 * with other SHA-1 providers.
 */
static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_init,
	.update		=	s390_sha_update,
	.final		=	s390_sha_final,
	.export		=	sha1_export,
	.import		=	sha1_import,
	.descsize	=	sizeof(struct s390_sha_ctx),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name	=	"sha1",
		.cra_driver_name=	"sha1-s390",
		.cra_priority	=	CRYPT_S390_PRIORITY,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA1_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

/*
 * Module init: register the algorithm only if the CPU actually provides
 * the KIMD SHA-1 facility; otherwise report the op as unsupported.
 */
static int __init sha1_s390_init(void)
{
	if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA))
		return -EOPNOTSUPP;
	return crypto_register_shash(&alg);
}

/* Module exit: unregister the algorithm from the crypto API. */
static void __exit sha1_s390_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_s390_init);
module_exit(sha1_s390_fini);

MODULE_ALIAS_CRYPTO("sha1");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
gpl-2.0
jiankangshiye/linux-2.6.32.63-mini2440
drivers/scsi/aacraid/rkt.c
1810
3069
/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  rkt.c
 *
 * Abstract: Hardware miniport for Drawbridge specific hardware functions.
 */

#include <linux/blkdev.h>

#include <scsi/scsi_host.h>

#include "aacraid.h"

/* Rocket adapters can service 246 I/O FIBs plus the management FIBs. */
#define AAC_NUM_IO_FIB_RKT      (246 - AAC_NUM_MGT_FIB)

/**
 *	aac_rkt_select_comm	-	Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 *
 *	Delegates to the common Rx selection code, then clamps the host
 *	queue depth for the message-based interface.
 */
static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
{
	int rc = aac_rx_select_comm(dev, comm);

	if (comm == AAC_COMM_MESSAGE) {
		/*
		 * FIB Setup has already been done, but we can minimize the
		 * damage by at least ensuring the OS never issues more
		 * commands than we can handle.  The Rocket adapters currently
		 * can only handle 246 commands and 8 AIFs at the same time,
		 * and in fact do notify us accordingly if we negotiate the
		 * FIB size.  The problem that causes us to add this check is
		 * to ensure that we do not overdo it with the adapter when a
		 * hard coded FIB override is being utilized.  This special
		 * case warrants this half baked, but convenient, check here.
		 */
		if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) {
			dev->init->MaxIoCommands =
				cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB);
			dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT;
		}
	}

	return rc;
}

/**
 *	aac_rkt_ioremap
 *	@size: mapping resize request
 *
 *	Size 0 tears the existing mapping down; otherwise (re)map the
 *	adapter registers and cache the IndexRegs pointer.
 */
static int aac_rkt_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.rkt);
		return 0;
	}

	dev->base = dev->regs.rkt = ioremap(dev->scsi_host_ptr->base, size);
	if (!dev->base)
		return -1;
	dev->IndexRegs = &dev->regs.rkt->IndexRegs;

	return 0;
}

/**
 *	aac_rkt_init	-	initialize an i960 based AAC card
 *	@dev: device to configure
 *
 *	Allocate and set up resources for the i960 based AAC variants. The
 *	device_interface in the commregion will be allocated and linked
 *	to the comm region.
 */
int aac_rkt_init(struct aac_dev *dev)
{
	/*
	 * Fill in the function dispatch table, then hand off to the
	 * shared Rx initialization path.
	 */
	dev->a_ops.adapter_ioremap = aac_rkt_ioremap;
	dev->a_ops.adapter_comm = aac_rkt_select_comm;

	return _aac_rx_init(dev);
}
gpl-2.0
garwynn/L900_NE2_Kernel
drivers/net/wireless/libertas/ethtool.c
2322
3255
#include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/delay.h> #include "decl.h" #include "cmd.h" static void lbs_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct lbs_private *priv = dev->ml_priv; snprintf(info->fw_version, 32, "%u.%u.%u.p%u", priv->fwrelease >> 24 & 0xff, priv->fwrelease >> 16 & 0xff, priv->fwrelease >> 8 & 0xff, priv->fwrelease & 0xff); strcpy(info->driver, "libertas"); strcpy(info->version, lbs_driver_version); } /* * All 8388 parts have 16KiB EEPROM size at the time of writing. * In case that changes this needs fixing. */ #define LBS_EEPROM_LEN 16384 static int lbs_ethtool_get_eeprom_len(struct net_device *dev) { return LBS_EEPROM_LEN; } static int lbs_ethtool_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 * bytes) { struct lbs_private *priv = dev->ml_priv; struct cmd_ds_802_11_eeprom_access cmd; int ret; lbs_deb_enter(LBS_DEB_ETHTOOL); if (eeprom->offset + eeprom->len > LBS_EEPROM_LEN || eeprom->len > LBS_EEPROM_READ_LEN) { ret = -EINVAL; goto out; } cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_ds_802_11_eeprom_access) - LBS_EEPROM_READ_LEN + eeprom->len); cmd.action = cpu_to_le16(CMD_ACT_GET); cmd.offset = cpu_to_le16(eeprom->offset); cmd.len = cpu_to_le16(eeprom->len); ret = lbs_cmd_with_response(priv, CMD_802_11_EEPROM_ACCESS, &cmd); if (!ret) memcpy(bytes, cmd.value, eeprom->len); out: lbs_deb_leave_args(LBS_DEB_ETHTOOL, "ret %d", ret); return ret; } static void lbs_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct lbs_private *priv = dev->ml_priv; wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY; if (priv->wol_criteria == EHS_REMOVE_WAKEUP) return; if (priv->wol_criteria & EHS_WAKE_ON_UNICAST_DATA) wol->wolopts |= WAKE_UCAST; if (priv->wol_criteria & EHS_WAKE_ON_MULTICAST_DATA) wol->wolopts |= WAKE_MCAST; if (priv->wol_criteria & EHS_WAKE_ON_BROADCAST_DATA) wol->wolopts |= WAKE_BCAST; if (priv->wol_criteria & 
EHS_WAKE_ON_MAC_EVENT) wol->wolopts |= WAKE_PHY; } static int lbs_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct lbs_private *priv = dev->ml_priv; if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY)) return -EOPNOTSUPP; priv->wol_criteria = 0; if (wol->wolopts & WAKE_UCAST) priv->wol_criteria |= EHS_WAKE_ON_UNICAST_DATA; if (wol->wolopts & WAKE_MCAST) priv->wol_criteria |= EHS_WAKE_ON_MULTICAST_DATA; if (wol->wolopts & WAKE_BCAST) priv->wol_criteria |= EHS_WAKE_ON_BROADCAST_DATA; if (wol->wolopts & WAKE_PHY) priv->wol_criteria |= EHS_WAKE_ON_MAC_EVENT; if (wol->wolopts == 0) priv->wol_criteria |= EHS_REMOVE_WAKEUP; return 0; } const struct ethtool_ops lbs_ethtool_ops = { .get_drvinfo = lbs_ethtool_get_drvinfo, .get_eeprom = lbs_ethtool_get_eeprom, .get_eeprom_len = lbs_ethtool_get_eeprom_len, #ifdef CONFIG_LIBERTAS_MESH .get_sset_count = lbs_mesh_ethtool_get_sset_count, .get_ethtool_stats = lbs_mesh_ethtool_get_stats, .get_strings = lbs_mesh_ethtool_get_strings, #endif .get_wol = lbs_ethtool_get_wol, .set_wol = lbs_ethtool_set_wol, };
gpl-2.0
os2sd/android_kernel_lge_msm7x27-3.0.x
arch/sparc/kernel/traps_64.c
2834
76556
/* arch/sparc64/kernel/traps.c * * Copyright (C) 1995,1997,2008,2009 David S. Miller (davem@davemloft.net) * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com) */ /* * I like traps on v9, :)))) */ #include <linux/module.h> #include <linux/sched.h> #include <linux/linkage.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/kdebug.h> #include <linux/ftrace.h> #include <linux/gfp.h> #include <asm/smp.h> #include <asm/delay.h> #include <asm/system.h> #include <asm/ptrace.h> #include <asm/oplib.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/unistd.h> #include <asm/uaccess.h> #include <asm/fpumacro.h> #include <asm/lsu.h> #include <asm/dcu.h> #include <asm/estate.h> #include <asm/chafsr.h> #include <asm/sfafsr.h> #include <asm/psrcompat.h> #include <asm/processor.h> #include <asm/timer.h> #include <asm/head.h> #include <asm/prom.h> #include <asm/memctrl.h> #include "entry.h" #include "kstack.h" /* When an irrecoverable trap occurs at tl > 0, the trap entry * code logs the trap state registers at every level in the trap * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout * is as follows: */ struct tl1_traplog { struct { unsigned long tstate; unsigned long tpc; unsigned long tnpc; unsigned long tt; } trapstack[4]; unsigned long tl; }; static void dump_tl1_traplog(struct tl1_traplog *p) { int i, limit; printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, " "dumping track stack.\n", p->tl); limit = (tlb_type == hypervisor) ? 
2 : 4; for (i = 0; i < limit; i++) { printk(KERN_EMERG "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " "TNPC[%016lx] TT[%lx]\n", i + 1, p->trapstack[i].tstate, p->trapstack[i].tpc, p->trapstack[i].tnpc, p->trapstack[i].tt); printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc); } } void bad_trap(struct pt_regs *regs, long lvl) { char buffer[32]; siginfo_t info; if (notify_die(DIE_TRAP, "bad trap", regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) return; if (lvl < 0x100) { sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl); die_if_kernel(buffer, regs); } lvl -= 0x100; if (regs->tstate & TSTATE_PRIV) { sprintf(buffer, "Kernel bad sw trap %lx", lvl); die_if_kernel(buffer, regs); } if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_ILLTRP; info.si_addr = (void __user *)regs->tpc; info.si_trapno = lvl; force_sig_info(SIGILL, &info, current); } void bad_trap_tl1(struct pt_regs *regs, long lvl) { char buffer[32]; if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) return; dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); sprintf (buffer, "Bad trap %lx at tl>0", lvl); die_if_kernel (buffer, regs); } #ifdef CONFIG_DEBUG_BUGVERBOSE void do_BUG(const char *file, int line) { bust_spinlocks(1); printk("kernel BUG at %s:%d!\n", file, line); } EXPORT_SYMBOL(do_BUG); #endif static DEFINE_SPINLOCK(dimm_handler_lock); static dimm_printer_t dimm_handler; static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen) { unsigned long flags; int ret = -ENODEV; spin_lock_irqsave(&dimm_handler_lock, flags); if (dimm_handler) { ret = dimm_handler(synd_code, paddr, buf, buflen); } else if (tlb_type == spitfire) { if (prom_getunumber(synd_code, paddr, buf, buflen) == -1) ret = -EINVAL; else ret = 0; } else ret = -ENODEV; spin_unlock_irqrestore(&dimm_handler_lock, flags); return ret; } int register_dimm_printer(dimm_printer_t func) { unsigned 
long flags; int ret = 0; spin_lock_irqsave(&dimm_handler_lock, flags); if (!dimm_handler) dimm_handler = func; else ret = -EEXIST; spin_unlock_irqrestore(&dimm_handler_lock, flags); return ret; } EXPORT_SYMBOL_GPL(register_dimm_printer); void unregister_dimm_printer(dimm_printer_t func) { unsigned long flags; spin_lock_irqsave(&dimm_handler_lock, flags); if (dimm_handler == func) dimm_handler = NULL; spin_unlock_irqrestore(&dimm_handler_lock, flags); } EXPORT_SYMBOL_GPL(unregister_dimm_printer); void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { siginfo_t info; if (notify_die(DIE_TRAP, "instruction access exception", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { printk("spitfire_insn_access_exception: SFSR[%016lx] " "SFAR[%016lx], going.\n", sfsr, sfar); die_if_kernel("Iax", regs); } if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); } void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); spitfire_insn_access_exception(regs, sfsr, sfar); } void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { unsigned short type = (type_ctx >> 16); unsigned short ctx = (type_ctx & 0xffff); siginfo_t info; if (notify_die(DIE_TRAP, "instruction access exception", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { printk("sun4v_insn_access_exception: ADDR[%016lx] " "CTX[%04x] TYPE[%04x], going.\n", addr, ctx, type); die_if_kernel("Iax", regs); } if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; 
regs->tnpc &= 0xffffffff; } info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; info.si_addr = (void __user *) addr; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); } void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); sun4v_insn_access_exception(regs, addr, type_ctx); } void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { siginfo_t info; if (notify_die(DIE_TRAP, "data access exception", regs, 0, 0x30, SIGTRAP) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { /* Test if this comes from uaccess places. */ const struct exception_table_entry *entry; entry = search_exception_tables(regs->tpc); if (entry) { /* Ouch, somebody is trying VM hole tricks on us... */ #ifdef DEBUG_EXCEPTIONS printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc); printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n", regs->tpc, entry->fixup); #endif regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; return; } /* Shit... 
*/ printk("spitfire_data_access_exception: SFSR[%016lx] " "SFAR[%016lx], going.\n", sfsr, sfar); die_if_kernel("Dax", regs); } info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; info.si_addr = (void __user *)sfar; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); } void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs, 0, 0x30, SIGTRAP) == NOTIFY_STOP) return; dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); spitfire_data_access_exception(regs, sfsr, sfar); } void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { unsigned short type = (type_ctx >> 16); unsigned short ctx = (type_ctx & 0xffff); siginfo_t info; if (notify_die(DIE_TRAP, "data access exception", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { /* Test if this comes from uaccess places. */ const struct exception_table_entry *entry; entry = search_exception_tables(regs->tpc); if (entry) { /* Ouch, somebody is trying VM hole tricks on us... 
*/ #ifdef DEBUG_EXCEPTIONS printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc); printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n", regs->tpc, entry->fixup); #endif regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; return; } printk("sun4v_data_access_exception: ADDR[%016lx] " "CTX[%04x] TYPE[%04x], going.\n", addr, ctx, type); die_if_kernel("Dax", regs); } if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; info.si_addr = (void __user *) addr; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); } void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); sun4v_data_access_exception(regs, addr, type_ctx); } #ifdef CONFIG_PCI #include "pci_impl.h" #endif /* When access exceptions happen, we must do this. */ static void spitfire_clean_and_reenable_l1_caches(void) { unsigned long va; if (tlb_type != spitfire) BUG(); /* Clean 'em. */ for (va = 0; va < (PAGE_SIZE << 1); va += 32) { spitfire_put_icache_tag(va, 0x0); spitfire_put_dcache_tag(va, 0x0); } /* Re-enable in LSU. 
*/ __asm__ __volatile__("flush %%g6\n\t" "membar #Sync\n\t" "stxa %0, [%%g0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), "i" (ASI_LSU_CONTROL) : "memory"); } static void spitfire_enable_estate_errors(void) { __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (ESTATE_ERR_ALL), "i" (ASI_ESTATE_ERROR_EN)); } static char ecc_syndrome_table[] = { 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49, 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a, 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48, 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c, 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48, 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29, 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b, 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48, 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48, 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e, 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b, 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48, 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36, 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48, 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48, 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b, 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48, 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b, 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32, 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48, 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b, 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48, 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b, 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49, 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48, 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b, 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48, 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b, 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b, 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a }; static 
char *syndrome_unknown = "<Unknown>"; static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit) { unsigned short scode; char memmod_str[64], *p; if (udbl & bit) { scode = ecc_syndrome_table[udbl & 0xff]; if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0) p = syndrome_unknown; else p = memmod_str; printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] " "Memory Module \"%s\"\n", smp_processor_id(), scode, p); } if (udbh & bit) { scode = ecc_syndrome_table[udbh & 0xff]; if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0) p = syndrome_unknown; else p = memmod_str; printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] " "Memory Module \"%s\"\n", smp_processor_id(), scode, p); } } static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs) { printk(KERN_WARNING "CPU[%d]: Correctable ECC Error " "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n", smp_processor_id(), afsr, afar, udbl, udbh, tl1); spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE); /* We always log it, even if someone is listening for this * trap. */ notify_die(DIE_TRAP, "Correctable ECC Error", regs, 0, TRAP_TYPE_CEE, SIGTRAP); /* The Correctable ECC Error trap does not disable I/D caches. So * we only have to restore the ESTATE Error Enable register. */ spitfire_enable_estate_errors(); } static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs) { siginfo_t info; printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] " "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n", smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1); /* XXX add more human friendly logging of the error status * XXX as is implemented for cheetah */ spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE); /* We always log it, even if someone is listening for this * trap. 
*/ notify_die(DIE_TRAP, "Uncorrectable Error", regs, 0, tt, SIGTRAP); if (regs->tstate & TSTATE_PRIV) { if (tl1) dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("UE", regs); } /* XXX need more intelligent processing here, such as is implemented * XXX for cheetah errors, in fact if the E-cache still holds the * XXX line with bad parity this will loop */ spitfire_clean_and_reenable_l1_caches(); spitfire_enable_estate_errors(); if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_OBJERR; info.si_addr = (void *)0; info.si_trapno = 0; force_sig_info(SIGBUS, &info, current); } void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar) { unsigned long afsr, tt, udbh, udbl; int tl1; afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT; tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT; tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0; udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT; udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT; #ifdef CONFIG_PCI if (tt == TRAP_TYPE_DAE && pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { spitfire_clean_and_reenable_l1_caches(); spitfire_enable_estate_errors(); pci_poke_faulted = 1; regs->tnpc = regs->tpc + 4; return; } #endif if (afsr & SFAFSR_UE) spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs); if (tt == TRAP_TYPE_CEE) { /* Handle the case where we took a CEE trap, but ACK'd * only the UE state in the UDB error registers. 
*/ if (afsr & SFAFSR_UE) { if (udbh & UDBE_CE) { __asm__ __volatile__( "stxa %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ : "r" (udbh & UDBE_CE), "r" (0x0), "i" (ASI_UDB_ERROR_W)); } if (udbl & UDBE_CE) { __asm__ __volatile__( "stxa %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ : "r" (udbl & UDBE_CE), "r" (0x18), "i" (ASI_UDB_ERROR_W)); } } spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs); } } int cheetah_pcache_forced_on; void cheetah_enable_pcache(void) { unsigned long dcr; printk("CHEETAH: Enabling P-Cache on cpu %d.\n", smp_processor_id()); __asm__ __volatile__("ldxa [%%g0] %1, %0" : "=r" (dcr) : "i" (ASI_DCU_CONTROL_REG)); dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL); __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (dcr), "i" (ASI_DCU_CONTROL_REG)); } /* Cheetah error trap handling. */ static unsigned long ecache_flush_physbase; static unsigned long ecache_flush_linesize; static unsigned long ecache_flush_size; /* This table is ordered in priority of errors and matches the * AFAR overwrite policy as well. 
*/ struct afsr_error_table { unsigned long mask; const char *name; }; static const char CHAFSR_PERR_msg[] = "System interface protocol error"; static const char CHAFSR_IERR_msg[] = "Internal processor error"; static const char CHAFSR_ISAP_msg[] = "System request parity error on incoming address"; static const char CHAFSR_UCU_msg[] = "Uncorrectable E-cache ECC error for ifetch/data"; static const char CHAFSR_UCC_msg[] = "SW Correctable E-cache ECC error for ifetch/data"; static const char CHAFSR_UE_msg[] = "Uncorrectable system bus data ECC error for read"; static const char CHAFSR_EDU_msg[] = "Uncorrectable E-cache ECC error for stmerge/blkld"; static const char CHAFSR_EMU_msg[] = "Uncorrectable system bus MTAG error"; static const char CHAFSR_WDU_msg[] = "Uncorrectable E-cache ECC error for writeback"; static const char CHAFSR_CPU_msg[] = "Uncorrectable ECC error for copyout"; static const char CHAFSR_CE_msg[] = "HW corrected system bus data ECC error for read"; static const char CHAFSR_EDC_msg[] = "HW corrected E-cache ECC error for stmerge/blkld"; static const char CHAFSR_EMC_msg[] = "HW corrected system bus MTAG ECC error"; static const char CHAFSR_WDC_msg[] = "HW corrected E-cache ECC error for writeback"; static const char CHAFSR_CPC_msg[] = "HW corrected ECC error for copyout"; static const char CHAFSR_TO_msg[] = "Unmapped error from system bus"; static const char CHAFSR_BERR_msg[] = "Bus error response from system bus"; static const char CHAFSR_IVC_msg[] = "HW corrected system bus data ECC error for ivec read"; static const char CHAFSR_IVU_msg[] = "Uncorrectable system bus data ECC error for ivec read"; static struct afsr_error_table __cheetah_error_table[] = { { CHAFSR_PERR, CHAFSR_PERR_msg }, { CHAFSR_IERR, CHAFSR_IERR_msg }, { CHAFSR_ISAP, CHAFSR_ISAP_msg }, { CHAFSR_UCU, CHAFSR_UCU_msg }, { CHAFSR_UCC, CHAFSR_UCC_msg }, { CHAFSR_UE, CHAFSR_UE_msg }, { CHAFSR_EDU, CHAFSR_EDU_msg }, { CHAFSR_EMU, CHAFSR_EMU_msg }, { CHAFSR_WDU, CHAFSR_WDU_msg }, { 
CHAFSR_CPU, CHAFSR_CPU_msg }, { CHAFSR_CE, CHAFSR_CE_msg }, { CHAFSR_EDC, CHAFSR_EDC_msg }, { CHAFSR_EMC, CHAFSR_EMC_msg }, { CHAFSR_WDC, CHAFSR_WDC_msg }, { CHAFSR_CPC, CHAFSR_CPC_msg }, { CHAFSR_TO, CHAFSR_TO_msg }, { CHAFSR_BERR, CHAFSR_BERR_msg }, /* These two do not update the AFAR. */ { CHAFSR_IVC, CHAFSR_IVC_msg }, { CHAFSR_IVU, CHAFSR_IVU_msg }, { 0, NULL }, }; static const char CHPAFSR_DTO_msg[] = "System bus unmapped error for prefetch/storequeue-read"; static const char CHPAFSR_DBERR_msg[] = "System bus error for prefetch/storequeue-read"; static const char CHPAFSR_THCE_msg[] = "Hardware corrected E-cache Tag ECC error"; static const char CHPAFSR_TSCE_msg[] = "SW handled correctable E-cache Tag ECC error"; static const char CHPAFSR_TUE_msg[] = "Uncorrectable E-cache Tag ECC error"; static const char CHPAFSR_DUE_msg[] = "System bus uncorrectable data ECC error due to prefetch/store-fill"; static struct afsr_error_table __cheetah_plus_error_table[] = { { CHAFSR_PERR, CHAFSR_PERR_msg }, { CHAFSR_IERR, CHAFSR_IERR_msg }, { CHAFSR_ISAP, CHAFSR_ISAP_msg }, { CHAFSR_UCU, CHAFSR_UCU_msg }, { CHAFSR_UCC, CHAFSR_UCC_msg }, { CHAFSR_UE, CHAFSR_UE_msg }, { CHAFSR_EDU, CHAFSR_EDU_msg }, { CHAFSR_EMU, CHAFSR_EMU_msg }, { CHAFSR_WDU, CHAFSR_WDU_msg }, { CHAFSR_CPU, CHAFSR_CPU_msg }, { CHAFSR_CE, CHAFSR_CE_msg }, { CHAFSR_EDC, CHAFSR_EDC_msg }, { CHAFSR_EMC, CHAFSR_EMC_msg }, { CHAFSR_WDC, CHAFSR_WDC_msg }, { CHAFSR_CPC, CHAFSR_CPC_msg }, { CHAFSR_TO, CHAFSR_TO_msg }, { CHAFSR_BERR, CHAFSR_BERR_msg }, { CHPAFSR_DTO, CHPAFSR_DTO_msg }, { CHPAFSR_DBERR, CHPAFSR_DBERR_msg }, { CHPAFSR_THCE, CHPAFSR_THCE_msg }, { CHPAFSR_TSCE, CHPAFSR_TSCE_msg }, { CHPAFSR_TUE, CHPAFSR_TUE_msg }, { CHPAFSR_DUE, CHPAFSR_DUE_msg }, /* These two do not update the AFAR. 
*/ { CHAFSR_IVC, CHAFSR_IVC_msg }, { CHAFSR_IVU, CHAFSR_IVU_msg }, { 0, NULL }, }; static const char JPAFSR_JETO_msg[] = "System interface protocol error, hw timeout caused"; static const char JPAFSR_SCE_msg[] = "Parity error on system snoop results"; static const char JPAFSR_JEIC_msg[] = "System interface protocol error, illegal command detected"; static const char JPAFSR_JEIT_msg[] = "System interface protocol error, illegal ADTYPE detected"; static const char JPAFSR_OM_msg[] = "Out of range memory error has occurred"; static const char JPAFSR_ETP_msg[] = "Parity error on L2 cache tag SRAM"; static const char JPAFSR_UMS_msg[] = "Error due to unsupported store"; static const char JPAFSR_RUE_msg[] = "Uncorrectable ECC error from remote cache/memory"; static const char JPAFSR_RCE_msg[] = "Correctable ECC error from remote cache/memory"; static const char JPAFSR_BP_msg[] = "JBUS parity error on returned read data"; static const char JPAFSR_WBP_msg[] = "JBUS parity error on data for writeback or block store"; static const char JPAFSR_FRC_msg[] = "Foreign read to DRAM incurring correctable ECC error"; static const char JPAFSR_FRU_msg[] = "Foreign read to DRAM incurring uncorrectable ECC error"; static struct afsr_error_table __jalapeno_error_table[] = { { JPAFSR_JETO, JPAFSR_JETO_msg }, { JPAFSR_SCE, JPAFSR_SCE_msg }, { JPAFSR_JEIC, JPAFSR_JEIC_msg }, { JPAFSR_JEIT, JPAFSR_JEIT_msg }, { CHAFSR_PERR, CHAFSR_PERR_msg }, { CHAFSR_IERR, CHAFSR_IERR_msg }, { CHAFSR_ISAP, CHAFSR_ISAP_msg }, { CHAFSR_UCU, CHAFSR_UCU_msg }, { CHAFSR_UCC, CHAFSR_UCC_msg }, { CHAFSR_UE, CHAFSR_UE_msg }, { CHAFSR_EDU, CHAFSR_EDU_msg }, { JPAFSR_OM, JPAFSR_OM_msg }, { CHAFSR_WDU, CHAFSR_WDU_msg }, { CHAFSR_CPU, CHAFSR_CPU_msg }, { CHAFSR_CE, CHAFSR_CE_msg }, { CHAFSR_EDC, CHAFSR_EDC_msg }, { JPAFSR_ETP, JPAFSR_ETP_msg }, { CHAFSR_WDC, CHAFSR_WDC_msg }, { CHAFSR_CPC, CHAFSR_CPC_msg }, { CHAFSR_TO, CHAFSR_TO_msg }, { CHAFSR_BERR, CHAFSR_BERR_msg }, { JPAFSR_UMS, JPAFSR_UMS_msg }, { JPAFSR_RUE, 
JPAFSR_RUE_msg }, { JPAFSR_RCE, JPAFSR_RCE_msg }, { JPAFSR_BP, JPAFSR_BP_msg }, { JPAFSR_WBP, JPAFSR_WBP_msg }, { JPAFSR_FRC, JPAFSR_FRC_msg }, { JPAFSR_FRU, JPAFSR_FRU_msg }, /* These two do not update the AFAR. */ { CHAFSR_IVU, CHAFSR_IVU_msg }, { 0, NULL }, }; static struct afsr_error_table *cheetah_error_table; static unsigned long cheetah_afsr_errors; struct cheetah_err_info *cheetah_error_log; static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr) { struct cheetah_err_info *p; int cpu = smp_processor_id(); if (!cheetah_error_log) return NULL; p = cheetah_error_log + (cpu * 2); if ((afsr & CHAFSR_TL1) != 0UL) p++; return p; } extern unsigned int tl0_icpe[], tl1_icpe[]; extern unsigned int tl0_dcpe[], tl1_dcpe[]; extern unsigned int tl0_fecc[], tl1_fecc[]; extern unsigned int tl0_cee[], tl1_cee[]; extern unsigned int tl0_iae[], tl1_iae[]; extern unsigned int tl0_dae[], tl1_dae[]; extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[]; extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[]; extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[]; extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[]; extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[]; void __init cheetah_ecache_flush_init(void) { unsigned long largest_size, smallest_linesize, order, ver; int i, sz; /* Scan all cpu device tree nodes, note two values: * 1) largest E-cache size * 2) smallest E-cache line size */ largest_size = 0UL; smallest_linesize = ~0UL; for (i = 0; i < NR_CPUS; i++) { unsigned long val; val = cpu_data(i).ecache_size; if (!val) continue; if (val > largest_size) largest_size = val; val = cpu_data(i).ecache_line_size; if (val < smallest_linesize) smallest_linesize = val; } if (largest_size == 0UL || smallest_linesize == ~0UL) { prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache " 
"parameters.\n"); prom_halt(); } ecache_flush_size = (2 * largest_size); ecache_flush_linesize = smallest_linesize; ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size); if (ecache_flush_physbase == ~0UL) { prom_printf("cheetah_ecache_flush_init: Cannot find %d byte " "contiguous physical memory.\n", ecache_flush_size); prom_halt(); } /* Now allocate error trap reporting scoreboard. */ sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info)); for (order = 0; order < MAX_ORDER; order++) { if ((PAGE_SIZE << order) >= sz) break; } cheetah_error_log = (struct cheetah_err_info *) __get_free_pages(GFP_KERNEL, order); if (!cheetah_error_log) { prom_printf("cheetah_ecache_flush_init: Failed to allocate " "error logging scoreboard (%d bytes).\n", sz); prom_halt(); } memset(cheetah_error_log, 0, PAGE_SIZE << order); /* Mark all AFSRs as invalid so that the trap handler will * log new new information there. */ for (i = 0; i < 2 * NR_CPUS; i++) cheetah_error_log[i].afsr = CHAFSR_INVALID; __asm__ ("rdpr %%ver, %0" : "=r" (ver)); if ((ver >> 32) == __JALAPENO_ID || (ver >> 32) == __SERRANO_ID) { cheetah_error_table = &__jalapeno_error_table[0]; cheetah_afsr_errors = JPAFSR_ERRORS; } else if ((ver >> 32) == 0x003e0015) { cheetah_error_table = &__cheetah_plus_error_table[0]; cheetah_afsr_errors = CHPAFSR_ERRORS; } else { cheetah_error_table = &__cheetah_error_table[0]; cheetah_afsr_errors = CHAFSR_ERRORS; } /* Now patch trap tables. 
*/ memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4)); memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4)); memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4)); memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4)); memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4)); memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4)); memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4)); memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4)); if (tlb_type == cheetah_plus) { memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4)); memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4)); memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4)); memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4)); } flushi(PAGE_OFFSET); } static void cheetah_flush_ecache(void) { unsigned long flush_base = ecache_flush_physbase; unsigned long flush_linesize = ecache_flush_linesize; unsigned long flush_size = ecache_flush_size; __asm__ __volatile__("1: subcc %0, %4, %0\n\t" " bne,pt %%xcc, 1b\n\t" " ldxa [%2 + %0] %3, %%g0\n\t" : "=&r" (flush_size) : "0" (flush_size), "r" (flush_base), "i" (ASI_PHYS_USE_EC), "r" (flush_linesize)); } static void cheetah_flush_ecache_line(unsigned long physaddr) { unsigned long alias; physaddr &= ~(8UL - 1UL); physaddr = (ecache_flush_physbase + (physaddr & ((ecache_flush_size>>1UL) - 1UL))); alias = physaddr + (ecache_flush_size >> 1UL); __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t" "ldxa [%1] %2, %%g0\n\t" "membar #Sync" : /* no outputs */ : "r" (physaddr), "r" (alias), "i" (ASI_PHYS_USE_EC)); } /* Unfortunately, the diagnostic access to the I-cache tags we need to * use to clear the thing interferes with I-cache coherency transactions. * * So we must only flush the I-cache when it is disabled. 
*/
/* Invalidate every I-cache tag on this cpu.
 *
 * Walks the I-cache in line-size steps and clears each tag's valid
 * bits through a diagnostic ASI_IC_TAG store.  Per the comment above,
 * this diagnostic access interferes with I-cache coherency, so this
 * helper must only run while the I-cache is disabled (callers handle
 * that via DCU_IC).
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags.
	 * The (2 << 3) in the address selects the tag-valid field of the
	 * diagnostic access -- NOTE(review): field encoding assumed from
	 * usage here; confirm against the UltraSPARC-III manual.
	 */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}

/* Flush the I-cache, temporarily disabling it around the tag walk.
 *
 * Saves the DCU control register, sets DCU_IC (note: the or/stxa below
 * writes dcu_save | DCU_IC back), invalidates all tags with the helper
 * above, then restores the original DCU value.
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

/* Invalidate every D-cache tag on this cpu via ASI_DCACHE_TAG stores,
 * one per cache line.
 */
static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line. Data of
 * all-zero + tag parity value of zero == correct parity.
*/ static void cheetah_plus_zap_dcache_parity(void) { unsigned int dcache_size, dcache_line_size; unsigned long addr; dcache_size = local_cpu_data().dcache_size; dcache_line_size = local_cpu_data().dcache_line_size; for (addr = 0; addr < dcache_size; addr += dcache_line_size) { unsigned long tag = (addr >> 14); unsigned long line; __asm__ __volatile__("membar #Sync\n\t" "stxa %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ : "r" (tag), "r" (addr), "i" (ASI_DCACHE_UTAG)); for (line = addr; line < addr + dcache_line_size; line += 8) __asm__ __volatile__("membar #Sync\n\t" "stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (line), "i" (ASI_DCACHE_DATA)); } } /* Conversion tables used to frob Cheetah AFSR syndrome values into * something palatable to the memory controller driver get_unumber * routine. */ #define MT0 137 #define MT1 138 #define MT2 139 #define NONE 254 #define MTC0 140 #define MTC1 141 #define MTC2 142 #define MTC3 143 #define C0 128 #define C1 129 #define C2 130 #define C3 131 #define C4 132 #define C5 133 #define C6 134 #define C7 135 #define C8 136 #define M2 144 #define M3 145 #define M4 146 #define M 147 static unsigned char cheetah_ecc_syntab[] = { /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M, /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16, /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10, /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M, /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6, /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4, /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4, /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3, /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5, /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M, /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2, /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, 
M2, M, M3, /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M, /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3, /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M, /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M, /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4, /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M, /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2, /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M, /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4, /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3, /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3, /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2, /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4, /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M, /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3, /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M, /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3, /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M, /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M, /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M }; static unsigned char cheetah_mtag_syntab[] = { NONE, MTC0, MTC1, NONE, MTC2, NONE, NONE, MT0, MTC3, NONE, NONE, MT1, NONE, MT2, NONE, NONE }; /* Return the highest priority error conditon mentioned. 
*/
/* Return the highest-priority error bit(s) set in AFSR, or 0 if none.
 *
 * cheetah_error_table is priority-ordered (see the comment preceding
 * this function), so the first mask that intersects the AFSR wins.
 */
static inline unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	/* tmp is 0 here: no table entry matched. */
	return tmp;
}

/* Map an AFSR error bit to its human-readable name from the per-chip
 * error table; "???" if the bit is unknown.
 */
static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

/* Log a cheetah error trap in full detail: register state, syndrome
 * fields, highest-priority error decode, and (further below) cache
 * snapshots from the error log entry.
 *
 * @regs:        trap-time register state
 * @info:        snapshot of the logged error (AFSR/AFAR + cache dumps)
 * @afsr, @afar: values passed along by the trap handler
 * @recoverable: selects KERN_WARNING vs KERN_CRIT severity
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant.
*/ #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \ CHAFSR_CPC | CHAFSR_CPU | \ CHAFSR_UE | CHAFSR_CE | \ CHAFSR_EDC | CHAFSR_EDU | \ CHAFSR_UCC | CHAFSR_UCU | \ CHAFSR_WDU | CHAFSR_WDC) #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU) if (afsr & ESYND_ERRORS) { int syndrome; int ret; syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT; syndrome = cheetah_ecc_syntab[syndrome]; ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum)); if (ret != -1) printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), unum); } else if (afsr & MSYND_ERRORS) { int syndrome; int ret; syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT; syndrome = cheetah_mtag_syntab[syndrome]; ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum)); if (ret != -1) printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), unum); } /* Now dump the cache snapshots. */ printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), (int) info->dcache_index, info->dcache_tag, info->dcache_utag, info->dcache_stag); printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), info->dcache_data[0], info->dcache_data[1], info->dcache_data[2], info->dcache_data[3]); printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] " "u[%016llx] l[%016llx]\n", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), (int) info->icache_index, info->icache_tag, info->icache_utag, info->icache_stag, info->icache_upper, info->icache_lower); printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n", (recoverable ? 
KERN_WARNING : KERN_CRIT), smp_processor_id(), info->icache_data[0], info->icache_data[1], info->icache_data[2], info->icache_data[3]); printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), info->icache_data[4], info->icache_data[5], info->icache_data[6], info->icache_data[7]); printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), (int) info->ecache_index, info->ecache_tag); printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), info->ecache_data[0], info->ecache_data[1], info->ecache_data[2], info->ecache_data[3]); afsr = (afsr & ~hipri) & cheetah_afsr_errors; while (afsr != 0UL) { unsigned long bit = cheetah_get_hipri(afsr); printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n", (recoverable ? KERN_WARNING : KERN_CRIT), bit, cheetah_get_string(bit)); afsr &= ~bit; } if (!recoverable) printk(KERN_CRIT "ERROR: This condition is not recoverable.\n"); } static int cheetah_recheck_errors(struct cheetah_err_info *logp) { unsigned long afsr, afar; int ret = 0; __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t" : "=r" (afsr) : "i" (ASI_AFSR)); if ((afsr & cheetah_afsr_errors) != 0) { if (logp != NULL) { __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t" : "=r" (afar) : "i" (ASI_AFAR)); logp->afsr = afsr; logp->afar = afar; } ret = 1; } __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" "membar #Sync\n\t" : : "r" (afsr), "i" (ASI_AFSR)); return ret; } void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) { struct cheetah_err_info local_snapshot, *p; int recoverable; /* Flush E-cache */ cheetah_flush_ecache(); p = cheetah_get_error_log(afsr); if (!p) { prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n", afsr, afar); prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] 
TSTATE[%016lx]\n", smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate); prom_halt(); } /* Grab snapshot of logged error. */ memcpy(&local_snapshot, p, sizeof(local_snapshot)); /* If the current trap snapshot does not match what the * trap handler passed along into our args, big trouble. * In such a case, mark the local copy as invalid. * * Else, it matches and we mark the afsr in the non-local * copy as invalid so we may log new error traps there. */ if (p->afsr != afsr || p->afar != afar) local_snapshot.afsr = CHAFSR_INVALID; else p->afsr = CHAFSR_INVALID; cheetah_flush_icache(); cheetah_flush_dcache(); /* Re-enable I-cache/D-cache */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_DC | DCU_IC) : "g1"); /* Re-enable error reporting */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_ESTATE_ERROR_EN), "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN) : "g1"); /* Decide if we can continue after handling this trap and * logging the error. */ recoverable = 1; if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP)) recoverable = 0; /* Re-check AFSR/AFAR. What we are looking for here is whether a new * error was logged while we had error reporting traps disabled. */ if (cheetah_recheck_errors(&local_snapshot)) { unsigned long new_afsr = local_snapshot.afsr; /* If we got a new asynchronous error, die... */ if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU | CHAFSR_WDU | CHAFSR_CPU | CHAFSR_IVU | CHAFSR_UE | CHAFSR_BERR | CHAFSR_TO)) recoverable = 0; } /* Log errors. */ cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable); if (!recoverable) panic("Irrecoverable Fast-ECC error trap.\n"); /* Flush E-cache to kick the error trap handlers out. */ cheetah_flush_ecache(); } /* Try to fix a correctable error by pushing the line out from * the E-cache. 
Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Return value (grounded in the ret assignments below):
 *   0 - no new error after displacement; the problem was intermittent
 *   1 - the first re-read errored but a retry came back clean
 *   2 - the error persisted through the retry as well
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled.
	 * orig_estate keeps the full E-state enable register so we can
	 * restore CEEN exactly as it was on exit.
	 */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);	/* 8-byte align */
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}

/* Return non-zero if PADDR is a valid physical memory address.
*/ static int cheetah_check_main_memory(unsigned long paddr) { unsigned long vaddr = PAGE_OFFSET + paddr; if (vaddr > (unsigned long) high_memory) return 0; return kern_addr_valid(vaddr); } void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) { struct cheetah_err_info local_snapshot, *p; int recoverable, is_memory; p = cheetah_get_error_log(afsr); if (!p) { prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n", afsr, afar); prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n", smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate); prom_halt(); } /* Grab snapshot of logged error. */ memcpy(&local_snapshot, p, sizeof(local_snapshot)); /* If the current trap snapshot does not match what the * trap handler passed along into our args, big trouble. * In such a case, mark the local copy as invalid. * * Else, it matches and we mark the afsr in the non-local * copy as invalid so we may log new error traps there. */ if (p->afsr != afsr || p->afar != afar) local_snapshot.afsr = CHAFSR_INVALID; else p->afsr = CHAFSR_INVALID; is_memory = cheetah_check_main_memory(afar); if (is_memory && (afsr & CHAFSR_CE) != 0UL) { /* XXX Might want to log the results of this operation * XXX somewhere... -DaveM */ cheetah_fix_ce(afar); } { int flush_all, flush_line; flush_all = flush_line = 0; if ((afsr & CHAFSR_EDC) != 0UL) { if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC) flush_line = 1; else flush_all = 1; } else if ((afsr & CHAFSR_CPC) != 0UL) { if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC) flush_line = 1; else flush_all = 1; } /* Trap handler only disabled I-cache, flush it. 
*/ cheetah_flush_icache(); /* Re-enable I-cache */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC) : "g1"); if (flush_all) cheetah_flush_ecache(); else if (flush_line) cheetah_flush_ecache_line(afar); } /* Re-enable error reporting */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_ESTATE_ERROR_EN), "i" (ESTATE_ERROR_CEEN) : "g1"); /* Decide if we can continue after handling this trap and * logging the error. */ recoverable = 1; if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP)) recoverable = 0; /* Re-check AFSR/AFAR */ (void) cheetah_recheck_errors(&local_snapshot); /* Log errors. */ cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable); if (!recoverable) panic("Irrecoverable Correctable-ECC error trap.\n"); } void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) { struct cheetah_err_info local_snapshot, *p; int recoverable, is_memory; #ifdef CONFIG_PCI /* Check for the special PCI poke sequence. 
*/ if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { cheetah_flush_icache(); cheetah_flush_dcache(); /* Re-enable I-cache/D-cache */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_DC | DCU_IC) : "g1"); /* Re-enable error reporting */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_ESTATE_ERROR_EN), "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN) : "g1"); (void) cheetah_recheck_errors(NULL); pci_poke_faulted = 1; regs->tpc += 4; regs->tnpc = regs->tpc + 4; return; } #endif p = cheetah_get_error_log(afsr); if (!p) { prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n", afsr, afar); prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n", smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate); prom_halt(); } /* Grab snapshot of logged error. */ memcpy(&local_snapshot, p, sizeof(local_snapshot)); /* If the current trap snapshot does not match what the * trap handler passed along into our args, big trouble. * In such a case, mark the local copy as invalid. * * Else, it matches and we mark the afsr in the non-local * copy as invalid so we may log new error traps there. 
*/ if (p->afsr != afsr || p->afar != afar) local_snapshot.afsr = CHAFSR_INVALID; else p->afsr = CHAFSR_INVALID; is_memory = cheetah_check_main_memory(afar); { int flush_all, flush_line; flush_all = flush_line = 0; if ((afsr & CHAFSR_EDU) != 0UL) { if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU) flush_line = 1; else flush_all = 1; } else if ((afsr & CHAFSR_BERR) != 0UL) { if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR) flush_line = 1; else flush_all = 1; } cheetah_flush_icache(); cheetah_flush_dcache(); /* Re-enable I/D caches */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC | DCU_DC) : "g1"); if (flush_all) cheetah_flush_ecache(); else if (flush_line) cheetah_flush_ecache_line(afar); } /* Re-enable error reporting */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_ESTATE_ERROR_EN), "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN) : "g1"); /* Decide if we can continue after handling this trap and * logging the error. */ recoverable = 1; if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP)) recoverable = 0; /* Re-check AFSR/AFAR. What we are looking for here is whether a new * error was logged while we had error reporting traps disabled. */ if (cheetah_recheck_errors(&local_snapshot)) { unsigned long new_afsr = local_snapshot.afsr; /* If we got a new asynchronous error, die... */ if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU | CHAFSR_WDU | CHAFSR_CPU | CHAFSR_IVU | CHAFSR_UE | CHAFSR_BERR | CHAFSR_TO)) recoverable = 0; } /* Log errors. */ cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable); /* "Recoverable" here means we try to yank the page from ever * being newly used again. This depends upon a few things: * 1) Must be main memory, and AFAR must be valid. * 2) If we trapped from user, OK. 
* 3) Else, if we trapped from kernel we must find exception * table entry (ie. we have to have been accessing user * space). * * If AFAR is not in main memory, or we trapped from kernel * and cannot find an exception table entry, it is unacceptable * to try and continue. */ if (recoverable && is_memory) { if ((regs->tstate & TSTATE_PRIV) == 0UL) { /* OK, usermode access. */ recoverable = 1; } else { const struct exception_table_entry *entry; entry = search_exception_tables(regs->tpc); if (entry) { /* OK, kernel access to userspace. */ recoverable = 1; } else { /* BAD, privileged state is corrupted. */ recoverable = 0; } if (recoverable) { if (pfn_valid(afar >> PAGE_SHIFT)) get_page(pfn_to_page(afar >> PAGE_SHIFT)); else recoverable = 0; /* Only perform fixup if we still have a * recoverable condition. */ if (recoverable) { regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; } } } } else { recoverable = 0; } if (!recoverable) panic("Irrecoverable deferred error trap.\n"); } /* Handle a D/I cache parity error trap. TYPE is encoded as: * * Bit0: 0=dcache,1=icache * Bit1: 0=recoverable,1=unrecoverable * * The hardware has disabled both the I-cache and D-cache in * the %dcr register. */ void cheetah_plus_parity_error(int type, struct pt_regs *regs) { if (type & 0x1) __cheetah_flush_icache(); else cheetah_plus_zap_dcache_parity(); cheetah_flush_dcache(); /* Re-enable I-cache/D-cache */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_DC | DCU_IC) : "g1"); if (type & 0x2) { printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n", smp_processor_id(), (type & 0x1) ? 'I' : 'D', regs->tpc); printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc); panic("Irrecoverable Cheetah+ parity error."); } printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n", smp_processor_id(), (type & 0x1) ? 
'I' : 'D', regs->tpc); printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc); } struct sun4v_error_entry { u64 err_handle; u64 err_stick; u32 err_type; #define SUN4V_ERR_TYPE_UNDEFINED 0 #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1 #define SUN4V_ERR_TYPE_PRECISE_NONRES 2 #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3 #define SUN4V_ERR_TYPE_WARNING_RES 4 u32 err_attrs; #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001 #define SUN4V_ERR_ATTRS_MEMORY 0x00000002 #define SUN4V_ERR_ATTRS_PIO 0x00000004 #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008 #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010 #define SUN4V_ERR_ATTRS_USER_MODE 0x01000000 #define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000 u64 err_raddr; u32 err_size; u16 err_cpu; u16 err_pad; }; static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0); static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0); static const char *sun4v_err_type_to_str(u32 type) { switch (type) { case SUN4V_ERR_TYPE_UNDEFINED: return "undefined"; case SUN4V_ERR_TYPE_UNCORRECTED_RES: return "uncorrected resumable"; case SUN4V_ERR_TYPE_PRECISE_NONRES: return "precise nonresumable"; case SUN4V_ERR_TYPE_DEFERRED_NONRES: return "deferred nonresumable"; case SUN4V_ERR_TYPE_WARNING_RES: return "warning resumable"; default: return "unknown"; } } static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt) { int cnt; printk("%s: Reporting on cpu %d\n", pfx, cpu); printk("%s: err_handle[%llx] err_stick[%llx] err_type[%08x:%s]\n", pfx, ent->err_handle, ent->err_stick, ent->err_type, sun4v_err_type_to_str(ent->err_type)); printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n", pfx, ent->err_attrs, ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ? "processor" : ""), ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ? "memory" : ""), ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ? "pio" : ""), ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ? 
"integer-regs" : ""), ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ? "fpu-regs" : ""), ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ? "user" : ""), ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ? "privileged" : ""), ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ? "queue-full" : "")); printk("%s: err_raddr[%016llx] err_size[%u] err_cpu[%u]\n", pfx, ent->err_raddr, ent->err_size, ent->err_cpu); show_regs(regs); if ((cnt = atomic_read(ocnt)) != 0) { atomic_set(ocnt, 0); wmb(); printk("%s: Queue overflowed %d times.\n", pfx, cnt); } } /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. * Log the event and clear the first word of the entry. */ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset) { struct sun4v_error_entry *ent, local_copy; struct trap_per_cpu *tb; unsigned long paddr; int cpu; cpu = get_cpu(); tb = &trap_block[cpu]; paddr = tb->resum_kernel_buf_pa + offset; ent = __va(paddr); memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry)); /* We have a local copy now, so release the entry. */ ent->err_handle = 0; wmb(); put_cpu(); if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) { /* If err_type is 0x4, it's a powerdown request. Do * not do the usual resumable error log because that * makes it look like some abnormal error. */ printk(KERN_INFO "Power down request...\n"); kill_cad_pid(SIGINT, 1); return; } sun4v_log_error(regs, &local_copy, cpu, KERN_ERR "RESUMABLE ERROR", &sun4v_resum_oflow_cnt); } /* If we try to printk() we'll probably make matters worse, by trying * to retake locks this cpu already holds or causing more errors. So * just bump a counter, and we'll report these counter bumps above. */ void sun4v_resum_overflow(struct pt_regs *regs) { atomic_inc(&sun4v_resum_oflow_cnt); } /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. * Log the event, clear the first word of the entry, and die. 
*/ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset) { struct sun4v_error_entry *ent, local_copy; struct trap_per_cpu *tb; unsigned long paddr; int cpu; cpu = get_cpu(); tb = &trap_block[cpu]; paddr = tb->nonresum_kernel_buf_pa + offset; ent = __va(paddr); memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry)); /* We have a local copy now, so release the entry. */ ent->err_handle = 0; wmb(); put_cpu(); #ifdef CONFIG_PCI /* Check for the special PCI poke sequence. */ if (pci_poke_in_progress && pci_poke_cpu == cpu) { pci_poke_faulted = 1; regs->tpc += 4; regs->tnpc = regs->tpc + 4; return; } #endif sun4v_log_error(regs, &local_copy, cpu, KERN_EMERG "NON-RESUMABLE ERROR", &sun4v_nonresum_oflow_cnt); panic("Non-resumable error."); } /* If we try to printk() we'll probably make matters worse, by trying * to retake locks this cpu already holds or causing more errors. So * just bump a counter, and we'll report these counter bumps above. */ void sun4v_nonresum_overflow(struct pt_regs *regs) { /* XXX Actually even this can make not that much sense. Perhaps * XXX we should just pull the plug and panic directly from here? 
*/ atomic_inc(&sun4v_nonresum_oflow_cnt); } unsigned long sun4v_err_itlb_vaddr; unsigned long sun4v_err_itlb_ctx; unsigned long sun4v_err_itlb_pte; unsigned long sun4v_err_itlb_error; void sun4v_itlb_error_report(struct pt_regs *regs, int tl) { if (tl > 1) dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc); printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n", (void *) regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " "pte[%lx] error[%lx]\n", sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx, sun4v_err_itlb_pte, sun4v_err_itlb_error); prom_halt(); } unsigned long sun4v_err_dtlb_vaddr; unsigned long sun4v_err_dtlb_ctx; unsigned long sun4v_err_dtlb_pte; unsigned long sun4v_err_dtlb_error; void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) { if (tl > 1) dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc); printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n", (void *) regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " "pte[%lx] error[%lx]\n", sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx, sun4v_err_dtlb_pte, sun4v_err_dtlb_error); prom_halt(); } void hypervisor_tlbop_error(unsigned long err, unsigned long op) { printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n", err, op); } void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op) { printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n", err, op); } void do_fpe_common(struct pt_regs *regs) { if (regs->tstate & TSTATE_PRIV) { regs->tpc = regs->tnpc; regs->tnpc += 4; } else { unsigned long fsr = current_thread_info()->xfsr[0]; siginfo_t info; if 
(test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; info.si_code = __SI_FAULT; if ((fsr & 0x1c000) == (1 << 14)) { if (fsr & 0x10) info.si_code = FPE_FLTINV; else if (fsr & 0x08) info.si_code = FPE_FLTOVF; else if (fsr & 0x04) info.si_code = FPE_FLTUND; else if (fsr & 0x02) info.si_code = FPE_FLTDIV; else if (fsr & 0x01) info.si_code = FPE_FLTRES; } force_sig_info(SIGFPE, &info, current); } } void do_fpieee(struct pt_regs *regs) { if (notify_die(DIE_TRAP, "fpu exception ieee", regs, 0, 0x24, SIGFPE) == NOTIFY_STOP) return; do_fpe_common(regs); } extern int do_mathemu(struct pt_regs *, struct fpustate *); void do_fpother(struct pt_regs *regs) { struct fpustate *f = FPUSTATE; int ret = 0; if (notify_die(DIE_TRAP, "fpu exception other", regs, 0, 0x25, SIGFPE) == NOTIFY_STOP) return; switch ((current_thread_info()->xfsr[0] & 0x1c000)) { case (2 << 14): /* unfinished_FPop */ case (3 << 14): /* unimplemented_FPop */ ret = do_mathemu(regs, f); break; } if (ret) return; do_fpe_common(regs); } void do_tof(struct pt_regs *regs) { siginfo_t info; if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs, 0, 0x26, SIGEMT) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) die_if_kernel("Penguin overflow trap from kernel mode", regs); if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGEMT; info.si_errno = 0; info.si_code = EMT_TAGOVF; info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGEMT, &info, current); } void do_div0(struct pt_regs *regs) { siginfo_t info; if (notify_die(DIE_TRAP, "integer division by zero", regs, 0, 0x28, SIGFPE) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) die_if_kernel("TL0: Kernel divide by zero.", regs); if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGFPE; 
info.si_errno = 0; info.si_code = FPE_INTDIV; info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGFPE, &info, current); } static void instruction_dump(unsigned int *pc) { int i; if ((((unsigned long) pc) & 3)) return; printk("Instruction DUMP:"); for (i = -3; i < 6; i++) printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>'); printk("\n"); } static void user_instruction_dump(unsigned int __user *pc) { int i; unsigned int buf[9]; if ((((unsigned long) pc) & 3)) return; if (copy_from_user(buf, pc - 3, sizeof(buf))) return; printk("Instruction DUMP:"); for (i = 0; i < 9; i++) printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>'); printk("\n"); } void show_stack(struct task_struct *tsk, unsigned long *_ksp) { unsigned long fp, ksp; struct thread_info *tp; int count = 0; #ifdef CONFIG_FUNCTION_GRAPH_TRACER int graph = 0; #endif ksp = (unsigned long) _ksp; if (!tsk) tsk = current; tp = task_thread_info(tsk); if (ksp == 0UL) { if (tsk == current) asm("mov %%fp, %0" : "=r" (ksp)); else ksp = tp->ksp; } if (tp == current_thread_info()) flushw_all(); fp = ksp + STACK_BIAS; printk("Call Trace:\n"); do { struct sparc_stackf *sf; struct pt_regs *regs; unsigned long pc; if (!kstack_valid(tp, fp)) break; sf = (struct sparc_stackf *) fp; regs = (struct pt_regs *) (sf + 1); if (kstack_is_trap_frame(tp, regs)) { if (!(regs->tstate & TSTATE_PRIV)) break; pc = regs->tpc; fp = regs->u_regs[UREG_I6] + STACK_BIAS; } else { pc = sf->callers_pc; fp = (unsigned long)sf->fp + STACK_BIAS; } printk(" [%016lx] %pS\n", pc, (void *) pc); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if ((pc + 8UL) == (unsigned long) &return_to_handler) { int index = tsk->curr_ret_stack; if (tsk->ret_stack && index >= graph) { pc = tsk->ret_stack[index - graph].ret; printk(" [%016lx] %pS\n", pc, (void *) pc); graph++; } } #endif } while (++count < 16); } void dump_stack(void) { show_stack(current, NULL); } EXPORT_SYMBOL(dump_stack); static inline struct reg_window *kernel_stack_up(struct reg_window *rw) { 
unsigned long fp = rw->ins[6]; if (!fp) return NULL; return (struct reg_window *) (fp + STACK_BIAS); } void die_if_kernel(char *str, struct pt_regs *regs) { static int die_counter; int count = 0; /* Amuse the user. */ printk( " \\|/ ____ \\|/\n" " \"@'/ .. \\`@\"\n" " /_| \\__/ |_\\\n" " \\__U_/\n"); printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV); __asm__ __volatile__("flushw"); show_regs(regs); add_taint(TAINT_DIE); if (regs->tstate & TSTATE_PRIV) { struct thread_info *tp = current_thread_info(); struct reg_window *rw = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS); /* Stop the back trace when we hit userland or we * find some badly aligned kernel stack. */ while (rw && count++ < 30 && kstack_valid(tp, (unsigned long) rw)) { printk("Caller[%016lx]: %pS\n", rw->ins[7], (void *) rw->ins[7]); rw = kernel_stack_up(rw); } instruction_dump ((unsigned int *) regs->tpc); } else { if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } user_instruction_dump ((unsigned int __user *) regs->tpc); } if (regs->tstate & TSTATE_PRIV) do_exit(SIGKILL); do_exit(SIGSEGV); } EXPORT_SYMBOL(die_if_kernel); #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19)) #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19)) extern int handle_popc(u32 insn, struct pt_regs *regs); extern int handle_ldf_stq(u32 insn, struct pt_regs *regs); void do_illegal_instruction(struct pt_regs *regs) { unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; siginfo_t info; if (notify_die(DIE_TRAP, "illegal instruction", regs, 0, 0x10, SIGILL) == NOTIFY_STOP) return; if (tstate & TSTATE_PRIV) die_if_kernel("Kernel illegal instruction", regs); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ { if (handle_popc(insn, regs)) return; } else if ((insn & 0xc1580000) == 
0xc1100000) /* LDQ/STQ */ { if (handle_ldf_stq(insn, regs)) return; } else if (tlb_type == hypervisor) { if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) { if (!vis_emul(regs, insn)) return; } else { struct fpustate *f = FPUSTATE; /* XXX maybe verify XFSR bits like * XXX do_fpother() does? */ if (do_mathemu(regs, f)) return; } } } info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_ILLOPC; info.si_addr = (void __user *)pc; info.si_trapno = 0; force_sig_info(SIGILL, &info, current); } extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { siginfo_t info; if (notify_die(DIE_TRAP, "memory address unaligned", regs, 0, 0x34, SIGSEGV) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); return; } info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = (void __user *)sfar; info.si_trapno = 0; force_sig_info(SIGBUS, &info, current); } void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { siginfo_t info; if (notify_die(DIE_TRAP, "memory address unaligned", regs, 0, 0x34, SIGSEGV) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); return; } info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = (void __user *) addr; info.si_trapno = 0; force_sig_info(SIGBUS, &info, current); } void do_privop(struct pt_regs *regs) { siginfo_t info; if (notify_die(DIE_TRAP, "privileged operation", regs, 0, 0x11, SIGILL) == NOTIFY_STOP) return; if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_PRVOPC; info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGILL, &info, current); } void do_privact(struct pt_regs *regs) { do_privop(regs); } 
/* Trap level 1 stuff or other traps we should never see... */ void do_cee(struct pt_regs *regs) { die_if_kernel("TL0: Cache Error Exception", regs); } void do_cee_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Cache Error Exception", regs); } void do_dae_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Data Access Exception", regs); } void do_iae_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Instruction Access Exception", regs); } void do_div0_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: DIV0 Exception", regs); } void do_fpdis_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU Disabled", regs); } void do_fpieee_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU IEEE Exception", regs); } void do_fpother_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU Other Exception", regs); } void do_ill_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Illegal Instruction Exception", regs); } void do_irq_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: IRQ Exception", regs); } void do_lddfmna_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: LDDF Exception", regs); } void do_stdfmna_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: STDF Exception", regs); } void do_paw(struct pt_regs *regs) { die_if_kernel("TL0: Phys Watchpoint Exception", regs); } void do_paw_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Phys Watchpoint Exception", regs); } void do_vaw(struct 
pt_regs *regs) { die_if_kernel("TL0: Virt Watchpoint Exception", regs); } void do_vaw_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Virt Watchpoint Exception", regs); } void do_tof_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Tag Overflow Exception", regs); } void do_getpsr(struct pt_regs *regs) { regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate); regs->tpc = regs->tnpc; regs->tnpc += 4; if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } } struct trap_per_cpu trap_block[NR_CPUS]; EXPORT_SYMBOL(trap_block); /* This can get invoked before sched_init() so play it super safe * and use hard_smp_processor_id(). */ void notrace init_cur_cpu_trap(struct thread_info *t) { int cpu = hard_smp_processor_id(); struct trap_per_cpu *p = &trap_block[cpu]; p->thread = t; p->pgd_paddr = 0; } extern void thread_info_offsets_are_bolixed_dave(void); extern void trap_per_cpu_offsets_are_bolixed_dave(void); extern void tsb_config_offsets_are_bolixed_dave(void); /* Only invoked on boot processor. */ void __init trap_init(void) { /* Compile time sanity check. 
*/ BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) || TI_FLAGS != offsetof(struct thread_info, flags) || TI_CPU != offsetof(struct thread_info, cpu) || TI_FPSAVED != offsetof(struct thread_info, fpsaved) || TI_KSP != offsetof(struct thread_info, ksp) || TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) || TI_KREGS != offsetof(struct thread_info, kregs) || TI_UTRAPS != offsetof(struct thread_info, utraps) || TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) || TI_REG_WINDOW != offsetof(struct thread_info, reg_window) || TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) || TI_GSR != offsetof(struct thread_info, gsr) || TI_XFSR != offsetof(struct thread_info, xfsr) || TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || TI_NEW_CHILD != offsetof(struct thread_info, new_child) || TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) || TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) || TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) || TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) || TI_FPREGS != offsetof(struct thread_info, fpregs) || (TI_FPREGS & (64 - 1))); BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) || (TRAP_PER_CPU_PGD_PADDR != offsetof(struct trap_per_cpu, pgd_paddr)) || (TRAP_PER_CPU_CPU_MONDO_PA != offsetof(struct trap_per_cpu, cpu_mondo_pa)) || (TRAP_PER_CPU_DEV_MONDO_PA != offsetof(struct trap_per_cpu, dev_mondo_pa)) || (TRAP_PER_CPU_RESUM_MONDO_PA != offsetof(struct trap_per_cpu, resum_mondo_pa)) || (TRAP_PER_CPU_RESUM_KBUF_PA != offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) || (TRAP_PER_CPU_NONRESUM_MONDO_PA != offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || (TRAP_PER_CPU_NONRESUM_KBUF_PA != offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || (TRAP_PER_CPU_FAULT_INFO != offsetof(struct trap_per_cpu, fault_info)) || (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA != offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) || 
(TRAP_PER_CPU_CPU_LIST_PA != offsetof(struct trap_per_cpu, cpu_list_pa)) || (TRAP_PER_CPU_TSB_HUGE != offsetof(struct trap_per_cpu, tsb_huge)) || (TRAP_PER_CPU_TSB_HUGE_TEMP != offsetof(struct trap_per_cpu, tsb_huge_temp)) || (TRAP_PER_CPU_IRQ_WORKLIST_PA != offsetof(struct trap_per_cpu, irq_worklist_pa)) || (TRAP_PER_CPU_CPU_MONDO_QMASK != offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || (TRAP_PER_CPU_DEV_MONDO_QMASK != offsetof(struct trap_per_cpu, dev_mondo_qmask)) || (TRAP_PER_CPU_RESUM_QMASK != offsetof(struct trap_per_cpu, resum_qmask)) || (TRAP_PER_CPU_NONRESUM_QMASK != offsetof(struct trap_per_cpu, nonresum_qmask)) || (TRAP_PER_CPU_PER_CPU_BASE != offsetof(struct trap_per_cpu, __per_cpu_base))); BUILD_BUG_ON((TSB_CONFIG_TSB != offsetof(struct tsb_config, tsb)) || (TSB_CONFIG_RSS_LIMIT != offsetof(struct tsb_config, tsb_rss_limit)) || (TSB_CONFIG_NENTRIES != offsetof(struct tsb_config, tsb_nentries)) || (TSB_CONFIG_REG_VAL != offsetof(struct tsb_config, tsb_reg_val)) || (TSB_CONFIG_MAP_VADDR != offsetof(struct tsb_config, tsb_map_vaddr)) || (TSB_CONFIG_MAP_PTE != offsetof(struct tsb_config, tsb_map_pte))); /* Attach to the address space of init_task. On SMP we * do this in smp.c:smp_callin for other cpus. */ atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; }
gpl-2.0
sunrunning/ok6410_linux
arch/powerpc/sysdev/fsl_85xx_cache_sram.c
2834
4368
/* * Copyright 2009-2010 Freescale Semiconductor, Inc. * * Simple memory allocator abstraction for QorIQ (P1/P2) based Cache-SRAM * * Author: Vivek Mahajan <vivek.mahajan@freescale.com> * * This file is derived from the original work done * by Sylvain Munaut for the Bestcomm SRAM allocator. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/of_platform.h> #include <asm/pgtable.h> #include <asm/fsl_85xx_cache_sram.h> #include "fsl_85xx_cache_ctlr.h" struct mpc85xx_cache_sram *cache_sram; void *mpc85xx_cache_sram_alloc(unsigned int size, phys_addr_t *phys, unsigned int align) { unsigned long offset; unsigned long flags; if (unlikely(cache_sram == NULL)) return NULL; if (!size || (size > cache_sram->size) || (align > cache_sram->size)) { pr_err("%s(): size(=%x) or align(=%x) zero or too big\n", __func__, size, align); return NULL; } if ((align & (align - 1)) || align <= 1) { pr_err("%s(): align(=%x) must be power of two and >1\n", __func__, align); return NULL; } spin_lock_irqsave(&cache_sram->lock, flags); offset = rh_alloc_align(cache_sram->rh, size, align, NULL); spin_unlock_irqrestore(&cache_sram->lock, flags); if (IS_ERR_VALUE(offset)) return NULL; *phys = cache_sram->base_phys + offset; return (unsigned char *)cache_sram->base_virt + offset; } 
EXPORT_SYMBOL(mpc85xx_cache_sram_alloc); void mpc85xx_cache_sram_free(void *ptr) { unsigned long flags; BUG_ON(!ptr); spin_lock_irqsave(&cache_sram->lock, flags); rh_free(cache_sram->rh, ptr - cache_sram->base_virt); spin_unlock_irqrestore(&cache_sram->lock, flags); } EXPORT_SYMBOL(mpc85xx_cache_sram_free); int __init instantiate_cache_sram(struct platform_device *dev, struct sram_parameters sram_params) { int ret = 0; if (cache_sram) { dev_err(&dev->dev, "Already initialized cache-sram\n"); return -EBUSY; } cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL); if (!cache_sram) { dev_err(&dev->dev, "Out of memory for cache_sram structure\n"); return -ENOMEM; } cache_sram->base_phys = sram_params.sram_offset; cache_sram->size = sram_params.sram_size; if (!request_mem_region(cache_sram->base_phys, cache_sram->size, "fsl_85xx_cache_sram")) { dev_err(&dev->dev, "%s: request memory failed\n", dev->dev.of_node->full_name); ret = -ENXIO; goto out_free; } cache_sram->base_virt = ioremap_prot(cache_sram->base_phys, cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL); if (!cache_sram->base_virt) { dev_err(&dev->dev, "%s: ioremap_prot failed\n", dev->dev.of_node->full_name); ret = -ENOMEM; goto out_release; } cache_sram->rh = rh_create(sizeof(unsigned int)); if (IS_ERR(cache_sram->rh)) { dev_err(&dev->dev, "%s: Unable to create remote heap\n", dev->dev.of_node->full_name); ret = PTR_ERR(cache_sram->rh); goto out_unmap; } rh_attach_region(cache_sram->rh, 0, cache_sram->size); spin_lock_init(&cache_sram->lock); dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n", (unsigned long long)cache_sram->base_phys, cache_sram->size); return 0; out_unmap: iounmap(cache_sram->base_virt); out_release: release_mem_region(cache_sram->base_phys, cache_sram->size); out_free: kfree(cache_sram); return ret; } void remove_cache_sram(struct platform_device *dev) { BUG_ON(!cache_sram); rh_detach_region(cache_sram->rh, 0, cache_sram->size); rh_destroy(cache_sram->rh); 
iounmap(cache_sram->base_virt); release_mem_region(cache_sram->base_phys, cache_sram->size); kfree(cache_sram); cache_sram = NULL; dev_info(&dev->dev, "MPC85xx Cache-SRAM driver unloaded\n"); }
gpl-2.0
Krabappel2548/u8500_kernel_sources
sound/soc/au1x/psc-i2s.c
3090
10669
/* * Au12x0/Au1550 PSC ALSA ASoC audio support. * * (c) 2007-2008 MSC Vertriebsges.m.b.H., * Manuel Lauss <manuel.lauss@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Au1xxx-PSC I2S glue. * * NOTE: so far only PSC slave mode (bit- and frameclock) is supported. */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/suspend.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/soc.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_psc.h> #include "psc.h" /* supported I2S DAI hardware formats */ #define AU1XPSC_I2S_DAIFMT \ (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_LEFT_J | \ SND_SOC_DAIFMT_NB_NF) /* supported I2S direction */ #define AU1XPSC_I2S_DIR \ (SND_SOC_DAIDIR_PLAYBACK | SND_SOC_DAIDIR_CAPTURE) #define AU1XPSC_I2S_RATES \ SNDRV_PCM_RATE_8000_192000 #define AU1XPSC_I2S_FMTS \ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE) #define I2SSTAT_BUSY(stype) \ ((stype) == PCM_TX ? PSC_I2SSTAT_TB : PSC_I2SSTAT_RB) #define I2SPCR_START(stype) \ ((stype) == PCM_TX ? PSC_I2SPCR_TS : PSC_I2SPCR_RS) #define I2SPCR_STOP(stype) \ ((stype) == PCM_TX ? PSC_I2SPCR_TP : PSC_I2SPCR_RP) #define I2SPCR_CLRFIFO(stype) \ ((stype) == PCM_TX ? 
PSC_I2SPCR_TC : PSC_I2SPCR_RC) static int au1xpsc_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(cpu_dai); unsigned long ct; int ret; ret = -EINVAL; ct = pscdata->cfg; ct &= ~(PSC_I2SCFG_XM | PSC_I2SCFG_MLJ); /* left-justified */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: ct |= PSC_I2SCFG_XM; /* enable I2S mode */ break; case SND_SOC_DAIFMT_MSB: break; case SND_SOC_DAIFMT_LSB: ct |= PSC_I2SCFG_MLJ; /* LSB (right-) justified */ break; default: goto out; } ct &= ~(PSC_I2SCFG_BI | PSC_I2SCFG_WI); /* IB-IF */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: ct |= PSC_I2SCFG_BI | PSC_I2SCFG_WI; break; case SND_SOC_DAIFMT_NB_IF: ct |= PSC_I2SCFG_BI; break; case SND_SOC_DAIFMT_IB_NF: ct |= PSC_I2SCFG_WI; break; case SND_SOC_DAIFMT_IB_IF: break; default: goto out; } switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: /* CODEC master */ ct |= PSC_I2SCFG_MS; /* PSC I2S slave mode */ break; case SND_SOC_DAIFMT_CBS_CFS: /* CODEC slave */ ct &= ~PSC_I2SCFG_MS; /* PSC I2S Master mode */ break; default: goto out; } pscdata->cfg = ct; ret = 0; out: return ret; } static int au1xpsc_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(dai); int cfgbits; unsigned long stat; /* check if the PSC is already streaming data */ stat = au_readl(I2S_STAT(pscdata)); if (stat & (PSC_I2SSTAT_TB | PSC_I2SSTAT_RB)) { /* reject parameters not currently set up in hardware */ cfgbits = au_readl(I2S_CFG(pscdata)); if ((PSC_I2SCFG_GET_LEN(cfgbits) != params->msbits) || (params_rate(params) != pscdata->rate)) return -EINVAL; } else { /* set sample bitdepth */ pscdata->cfg &= ~(0x1f << 4); pscdata->cfg |= PSC_I2SCFG_SET_LEN(params->msbits); /* remember current rate for other stream */ pscdata->rate = params_rate(params); } return 0; } /* Configure 
PSC late: on my devel systems the codec is I2S master and * supplies the i2sbitclock __AND__ i2sMclk (!) to the PSC unit. ASoC * uses aggressive PM and switches the codec off when it is not in use * which also means the PSC unit doesn't get any clocks and is therefore * dead. That's why this chunk here gets called from the trigger callback * because I can be reasonably certain the codec is driving the clocks. */ static int au1xpsc_i2s_configure(struct au1xpsc_audio_data *pscdata) { unsigned long tmo; /* bring PSC out of sleep, and configure I2S unit */ au_writel(PSC_CTRL_ENABLE, PSC_CTRL(pscdata)); au_sync(); tmo = 1000000; while (!(au_readl(I2S_STAT(pscdata)) & PSC_I2SSTAT_SR) && tmo) tmo--; if (!tmo) goto psc_err; au_writel(0, I2S_CFG(pscdata)); au_sync(); au_writel(pscdata->cfg | PSC_I2SCFG_DE_ENABLE, I2S_CFG(pscdata)); au_sync(); /* wait for I2S controller to become ready */ tmo = 1000000; while (!(au_readl(I2S_STAT(pscdata)) & PSC_I2SSTAT_DR) && tmo) tmo--; if (tmo) return 0; psc_err: au_writel(0, I2S_CFG(pscdata)); au_writel(PSC_CTRL_SUSPEND, PSC_CTRL(pscdata)); au_sync(); return -ETIMEDOUT; } static int au1xpsc_i2s_start(struct au1xpsc_audio_data *pscdata, int stype) { unsigned long tmo, stat; int ret; ret = 0; /* if both TX and RX are idle, configure the PSC */ stat = au_readl(I2S_STAT(pscdata)); if (!(stat & (PSC_I2SSTAT_TB | PSC_I2SSTAT_RB))) { ret = au1xpsc_i2s_configure(pscdata); if (ret) goto out; } au_writel(I2SPCR_CLRFIFO(stype), I2S_PCR(pscdata)); au_sync(); au_writel(I2SPCR_START(stype), I2S_PCR(pscdata)); au_sync(); /* wait for start confirmation */ tmo = 1000000; while (!(au_readl(I2S_STAT(pscdata)) & I2SSTAT_BUSY(stype)) && tmo) tmo--; if (!tmo) { au_writel(I2SPCR_STOP(stype), I2S_PCR(pscdata)); au_sync(); ret = -ETIMEDOUT; } out: return ret; } static int au1xpsc_i2s_stop(struct au1xpsc_audio_data *pscdata, int stype) { unsigned long tmo, stat; au_writel(I2SPCR_STOP(stype), I2S_PCR(pscdata)); au_sync(); /* wait for stop confirmation */ tmo = 
1000000; while ((au_readl(I2S_STAT(pscdata)) & I2SSTAT_BUSY(stype)) && tmo) tmo--; /* if both TX and RX are idle, disable PSC */ stat = au_readl(I2S_STAT(pscdata)); if (!(stat & (PSC_I2SSTAT_TB | PSC_I2SSTAT_RB))) { au_writel(0, I2S_CFG(pscdata)); au_sync(); au_writel(PSC_CTRL_SUSPEND, PSC_CTRL(pscdata)); au_sync(); } return 0; } static int au1xpsc_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(dai); int ret, stype = SUBSTREAM_TYPE(substream); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: ret = au1xpsc_i2s_start(pscdata, stype); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: ret = au1xpsc_i2s_stop(pscdata, stype); break; default: ret = -EINVAL; } return ret; } static struct snd_soc_dai_ops au1xpsc_i2s_dai_ops = { .trigger = au1xpsc_i2s_trigger, .hw_params = au1xpsc_i2s_hw_params, .set_fmt = au1xpsc_i2s_set_fmt, }; static const struct snd_soc_dai_driver au1xpsc_i2s_dai_template = { .playback = { .rates = AU1XPSC_I2S_RATES, .formats = AU1XPSC_I2S_FMTS, .channels_min = 2, .channels_max = 8, /* 2 without external help */ }, .capture = { .rates = AU1XPSC_I2S_RATES, .formats = AU1XPSC_I2S_FMTS, .channels_min = 2, .channels_max = 8, /* 2 without external help */ }, .ops = &au1xpsc_i2s_dai_ops, }; static int __devinit au1xpsc_i2s_drvprobe(struct platform_device *pdev) { struct resource *r; unsigned long sel; int ret; struct au1xpsc_audio_data *wd; wd = kzalloc(sizeof(struct au1xpsc_audio_data), GFP_KERNEL); if (!wd) return -ENOMEM; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { ret = -ENODEV; goto out0; } ret = -EBUSY; if (!request_mem_region(r->start, resource_size(r), pdev->name)) goto out0; wd->mmio = ioremap(r->start, resource_size(r)); if (!wd->mmio) goto out1; /* preserve PSC clock source set up by platform (dev.platform_data * is already occupied by soc layer) */ sel = au_readl(PSC_SEL(wd)) & 
PSC_SEL_CLK_MASK; au_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd)); au_sync(); au_writel(PSC_SEL_PS_I2SMODE | sel, PSC_SEL(wd)); au_writel(0, I2S_CFG(wd)); au_sync(); /* preconfigure: set max rx/tx fifo depths */ wd->cfg |= PSC_I2SCFG_RT_FIFO8 | PSC_I2SCFG_TT_FIFO8; /* don't wait for I2S core to become ready now; clocks may not * be running yet; depending on clock input for PSC a wait might * time out. */ /* name the DAI like this device instance ("au1xpsc-i2s.PSCINDEX") */ memcpy(&wd->dai_drv, &au1xpsc_i2s_dai_template, sizeof(struct snd_soc_dai_driver)); wd->dai_drv.name = dev_name(&pdev->dev); platform_set_drvdata(pdev, wd); ret = snd_soc_register_dai(&pdev->dev, &wd->dai_drv); if (ret) goto out1; /* finally add the DMA device for this PSC */ wd->dmapd = au1xpsc_pcm_add(pdev); if (wd->dmapd) return 0; snd_soc_unregister_dai(&pdev->dev); out1: release_mem_region(r->start, resource_size(r)); out0: kfree(wd); return ret; } static int __devexit au1xpsc_i2s_drvremove(struct platform_device *pdev) { struct au1xpsc_audio_data *wd = platform_get_drvdata(pdev); struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (wd->dmapd) au1xpsc_pcm_destroy(wd->dmapd); snd_soc_unregister_dai(&pdev->dev); au_writel(0, I2S_CFG(wd)); au_sync(); au_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd)); au_sync(); iounmap(wd->mmio); release_mem_region(r->start, resource_size(r)); kfree(wd); return 0; } #ifdef CONFIG_PM static int au1xpsc_i2s_drvsuspend(struct device *dev) { struct au1xpsc_audio_data *wd = dev_get_drvdata(dev); /* save interesting register and disable PSC */ wd->pm[0] = au_readl(PSC_SEL(wd)); au_writel(0, I2S_CFG(wd)); au_sync(); au_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd)); au_sync(); return 0; } static int au1xpsc_i2s_drvresume(struct device *dev) { struct au1xpsc_audio_data *wd = dev_get_drvdata(dev); /* select I2S mode and PSC clock */ au_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd)); au_sync(); au_writel(0, PSC_SEL(wd)); au_sync(); au_writel(wd->pm[0], PSC_SEL(wd)); au_sync(); 
return 0; } static struct dev_pm_ops au1xpsci2s_pmops = { .suspend = au1xpsc_i2s_drvsuspend, .resume = au1xpsc_i2s_drvresume, }; #define AU1XPSCI2S_PMOPS &au1xpsci2s_pmops #else #define AU1XPSCI2S_PMOPS NULL #endif static struct platform_driver au1xpsc_i2s_driver = { .driver = { .name = "au1xpsc_i2s", .owner = THIS_MODULE, .pm = AU1XPSCI2S_PMOPS, }, .probe = au1xpsc_i2s_drvprobe, .remove = __devexit_p(au1xpsc_i2s_drvremove), }; static int __init au1xpsc_i2s_load(void) { return platform_driver_register(&au1xpsc_i2s_driver); } static void __exit au1xpsc_i2s_unload(void) { platform_driver_unregister(&au1xpsc_i2s_driver); } module_init(au1xpsc_i2s_load); module_exit(au1xpsc_i2s_unload); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Au12x0/Au1550 PSC I2S ALSA ASoC audio driver"); MODULE_AUTHOR("Manuel Lauss");
gpl-2.0
Frank77GLD/android_kernel_YG_m805_892x
arch/powerpc/kernel/ptrace32.c
3090
9353
/* * ptrace for 32-bit processes running on a 64-bit kernel. * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Derived from "arch/m68k/kernel/ptrace.c" * Copyright (C) 1994 by Hamish Macdonald * Taken from linux/kernel/ptrace.c and modified for M680x0. * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds * * Modified by Cort Dougan (cort@hq.fsmlabs.com) * and Paul Mackerras (paulus@samba.org). * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of * this archive for more details. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/user.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/compat.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/system.h> /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ /* * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, * we mark them as obsolete now, they will be removed in a future version */ static long compat_ptrace_old(struct task_struct *child, long request, long addr, long data) { switch (request) { case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ return copy_regset_to_user(child, task_user_regset_view(current), 0, 0, 32 * sizeof(compat_long_t), compat_ptr(data)); case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. 
*/ return copy_regset_from_user(child, task_user_regset_view(current), 0, 0, 32 * sizeof(compat_long_t), compat_ptr(data)); } return -EPERM; } /* Macros to workout the correct index for the FPR in the thread struct */ #define FPRNUMBER(i) (((i) - PT_FPR0) >> 1) #define FPRHALF(i) (((i) - PT_FPR0) & 1) #define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i) #define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0)) long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { unsigned long addr = caddr; unsigned long data = cdata; int ret; switch (request) { /* * Read 4 bytes of the other process' storage * data is a pointer specifying where the user wants the * 4 bytes copied into * addr is a pointer in the user's storage that contains an 8 byte * address in the other process of the 4 bytes that is to be read * (this is run in a 32-bit process looking at a 64-bit process) * when I and D space are separate, these will need to be fixed. */ case PPC_PTRACE_PEEKTEXT_3264: case PPC_PTRACE_PEEKDATA_3264: { u32 tmp; int copied; u32 __user * addrOthers; ret = -EIO; /* Get the addr in the other process that we want to read */ if (get_user(addrOthers, (u32 __user * __user *)addr) != 0) break; copied = access_process_vm(child, (u64)addrOthers, &tmp, sizeof(tmp), 0); if (copied != sizeof(tmp)) break; ret = put_user(tmp, (u32 __user *)data); break; } /* Read a register (specified by ADDR) out of the "user area" */ case PTRACE_PEEKUSR: { int index; unsigned long tmp; ret = -EIO; /* convert to index and check */ index = (unsigned long) addr >> 2; if ((addr & 3) || (index > PT_FPSCR32)) break; CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { tmp = ptrace_get_reg(child, index); } else { flush_fp_to_thread(child); /* * the user space code considers the floating point * to be an array of unsigned int (32 bits) - the * index passed in is based on this assumption. 
*/ tmp = ((unsigned int *)child->thread.fpr) [FPRINDEX(index)]; } ret = put_user((unsigned int)tmp, (u32 __user *)data); break; } /* * Read 4 bytes out of the other process' pt_regs area * data is a pointer specifying where the user wants the * 4 bytes copied into * addr is the offset into the other process' pt_regs structure * that is to be read * (this is run in a 32-bit process looking at a 64-bit process) */ case PPC_PTRACE_PEEKUSR_3264: { u32 index; u32 reg32bits; u64 tmp; u32 numReg; u32 part; ret = -EIO; /* Determine which register the user wants */ index = (u64)addr >> 2; numReg = index / 2; /* Determine which part of the register the user wants */ if (index % 2) part = 1; /* want the 2nd half of the register (right-most). */ else part = 0; /* want the 1st half of the register (left-most). */ /* Validate the input - check to see if address is on the wrong boundary * or beyond the end of the user area */ if ((addr & 3) || numReg > PT_FPSCR) break; CHECK_FULL_REGS(child->thread.regs); if (numReg >= PT_FPR0) { flush_fp_to_thread(child); /* get 64 bit FPR */ tmp = ((u64 *)child->thread.fpr) [FPRINDEX_3264(numReg)]; } else { /* register within PT_REGS struct */ tmp = ptrace_get_reg(child, numReg); } reg32bits = ((u32*)&tmp)[part]; ret = put_user(reg32bits, (u32 __user *)data); break; } /* * Write 4 bytes into the other process' storage * data is the 4 bytes that the user wants written * addr is a pointer in the user's storage that contains an * 8 byte address in the other process where the 4 bytes * that is to be written * (this is run in a 32-bit process looking at a 64-bit process) * when I and D space are separate, these will need to be fixed. 
*/ case PPC_PTRACE_POKETEXT_3264: case PPC_PTRACE_POKEDATA_3264: { u32 tmp = data; u32 __user * addrOthers; /* Get the addr in the other process that we want to write into */ ret = -EIO; if (get_user(addrOthers, (u32 __user * __user *)addr) != 0) break; ret = 0; if (access_process_vm(child, (u64)addrOthers, &tmp, sizeof(tmp), 1) == sizeof(tmp)) break; ret = -EIO; break; } /* write the word at location addr in the USER area */ case PTRACE_POKEUSR: { unsigned long index; ret = -EIO; /* convert to index and check */ index = (unsigned long) addr >> 2; if ((addr & 3) || (index > PT_FPSCR32)) break; CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { ret = ptrace_put_reg(child, index, data); } else { flush_fp_to_thread(child); /* * the user space code considers the floating point * to be an array of unsigned int (32 bits) - the * index passed in is based on this assumption. */ ((unsigned int *)child->thread.fpr) [FPRINDEX(index)] = data; ret = 0; } break; } /* * Write 4 bytes into the other process' pt_regs area * data is the 4 bytes that the user wants written * addr is the offset into the other process' pt_regs structure * that is to be written into * (this is run in a 32-bit process looking at a 64-bit process) */ case PPC_PTRACE_POKEUSR_3264: { u32 index; u32 numReg; ret = -EIO; /* Determine which register the user wants */ index = (u64)addr >> 2; numReg = index / 2; /* * Validate the input - check to see if address is on the * wrong boundary or beyond the end of the user area */ if ((addr & 3) || (numReg > PT_FPSCR)) break; CHECK_FULL_REGS(child->thread.regs); if (numReg < PT_FPR0) { unsigned long freg = ptrace_get_reg(child, numReg); if (index % 2) freg = (freg & ~0xfffffffful) | (data & 0xfffffffful); else freg = (freg & 0xfffffffful) | (data << 32); ret = ptrace_put_reg(child, numReg, freg); } else { u64 *tmp; flush_fp_to_thread(child); /* get 64 bit FPR ... */ tmp = &(((u64 *)child->thread.fpr) [FPRINDEX_3264(numReg)]); /* ... 
write the 32 bit part we want */ ((u32 *)tmp)[index % 2] = data; ret = 0; } break; } case PTRACE_GET_DEBUGREG: { ret = -EINVAL; /* We only support one DABR and no IABRS at the moment */ if (addr > 0) break; #ifdef CONFIG_PPC_ADV_DEBUG_REGS ret = put_user(child->thread.dac1, (u32 __user *)data); #else ret = put_user(child->thread.dabr, (u32 __user *)data); #endif break; } case PTRACE_GETREGS: /* Get all pt_regs from the child. */ return copy_regset_to_user( child, task_user_regset_view(current), 0, 0, PT_REGS_COUNT * sizeof(compat_long_t), compat_ptr(data)); case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user( child, task_user_regset_view(current), 0, 0, PT_REGS_COUNT * sizeof(compat_long_t), compat_ptr(data)); case PTRACE_GETFPREGS: case PTRACE_SETFPREGS: case PTRACE_GETVRREGS: case PTRACE_SETVRREGS: case PTRACE_GETVSRREGS: case PTRACE_SETVSRREGS: case PTRACE_GETREGS64: case PTRACE_SETREGS64: case PPC_PTRACE_GETFPREGS: case PPC_PTRACE_SETFPREGS: case PTRACE_KILL: case PTRACE_SINGLESTEP: case PTRACE_DETACH: case PTRACE_SET_DEBUGREG: case PTRACE_SYSCALL: case PTRACE_CONT: case PPC_PTRACE_GETHWDBGINFO: case PPC_PTRACE_SETHWDEBUG: case PPC_PTRACE_DELHWDEBUG: ret = arch_ptrace(child, request, addr, data); break; /* Old reverse args ptrace callss */ case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ ret = compat_ptrace_old(child, request, addr, data); break; default: ret = compat_ptrace_request(child, request, addr, data); break; } return ret; }
gpl-2.0
gundal/zerofltetmo
drivers/video/auo_k1901fb.c
3090
6915
/*
 * auo_k1901fb.c -- framebuffer driver for AUO-K1901 EPD controllers
 *
 * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
 *
 * based on broadsheetfb.c
 *
 * Copyright (C) 2008, Jaya Kumar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
 *
 * This driver is written to be used with the AUO-K1901 display controller.
 *
 * It is intended to be architecture independent. A board specific driver
 * must be used to perform all the physical IO interactions.
 *
 * The controller supports different update modes:
 * mode0+1 16 step gray (4bit)
 * mode2+3 4 step gray (2bit)
 * mode4+5 2 step gray (1bit)
 *   - mode4 is described as "without LUT"
 * mode7 automatic selection of update mode
 *
 * The most interesting difference to the K1900 is the ability to do screen
 * updates in an asynchronous fashion. Where the K1900 needs to wait for the
 * current update to complete, the K1901 can process later updates already.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/firmware.h>
#include <linux/gpio.h>
#include <linux/pm_runtime.h>

#include <video/auo_k190xfb.h>

#include "auo_k190x.h"

/*
 * AUO-K1901 specific commands
 */
#define AUOK1901_CMD_LUT_INTERFACE	0x0005
#define AUOK1901_CMD_DMA_START		0x1001
#define AUOK1901_CMD_CURSOR_START	0x1007
#define AUOK1901_CMD_CURSOR_STOP	AUOK190X_CMD_DATA_STOP
#define AUOK1901_CMD_DDMA_START		0x1009

#define AUOK1901_INIT_GATE_PULSE_LOW	(0 << 14)
#define AUOK1901_INIT_GATE_PULSE_HIGH	(1 << 14)
#define AUOK1901_INIT_SINGLE_GATE	(0 << 13)
#define AUOK1901_INIT_DOUBLE_GATE	(1 << 13)

/* Bits to pixels
 *   Mode	15-12	11-8	7-4	3-0
 *   format2	2	T	1	T
 *   format3	1	T	2	T
 *   format4	T	2	T	1
 *   format5	T	1	T	2
 *
 *   halftone modes:
 *   format6	2	2	1	1
 *   format7	1	1	2	2
 */
#define AUOK1901_INIT_FORMAT2		(1 << 7)
#define AUOK1901_INIT_FORMAT3		((1 << 7) | (1 << 6))
#define AUOK1901_INIT_FORMAT4		(1 << 8)
#define AUOK1901_INIT_FORMAT5		((1 << 8) | (1 << 6))
#define AUOK1901_INIT_FORMAT6		((1 << 8) | (1 << 7))
#define AUOK1901_INIT_FORMAT7		((1 << 8) | (1 << 7) | (1 << 6))

/* res[4] to bit 10
 * res[3-0] to bits 5-2
 */
#define AUOK1901_INIT_RESOLUTION(_res)	(((_res & (1 << 4)) << 6) \
					 | ((_res & 0xf) << 2))

/*
 * portrait / landscape orientation in AUOK1901_CMD_DMA_START
 */
#define AUOK1901_DMA_ROTATE90(_rot)	((_rot & 1) << 13)

/*
 * equivalent to 1 << 11, needs the ~ to have same rotation like K1900
 */
#define AUOK1901_DDMA_ROTATE180(_rot)	((~_rot & 2) << 10)

/*
 * Program the controller's INIT command: inverse-white, pixel format 0,
 * panel resolution and left shift direction, then wait for the
 * controller to become ready again.  Bracketed by runtime-PM get/put.
 */
static void auok1901_init(struct auok190xfb_par *par)
{
	struct device *dev = par->info->device;
	struct auok190x_board *board = par->board;
	u16 init_param = 0;

	/* keep the device powered for the duration of the init sequence */
	pm_runtime_get_sync(dev);

	init_param |= AUOK190X_INIT_INVERSE_WHITE;
	init_param |= AUOK190X_INIT_FORMAT0;
	init_param |= AUOK1901_INIT_RESOLUTION(par->resolution);
	init_param |= AUOK190X_INIT_SHIFT_LEFT;
	auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);

	/* let the controller finish */
	board->wait_for_rdy(par);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

/*
 * Push the framebuffer lines [y1, y2) to the controller and trigger a
 * display update of that region with the given update mode.  The K1901
 * update is asynchronous: both the pixel DMA and the display-DMA command
 * use the *_nowait variants, so this returns without waiting for the
 * panel refresh to complete.
 */
static void auok1901_update_region(struct auok190xfb_par *par, int mode,
						u16 y1, u16 y2)
{
	struct device *dev = par->info->device;
	unsigned char *buf = (unsigned char *)par->info->screen_base;
	int xres = par->info->var.xres;
	int line_length = par->info->fix.line_length;
	u16 args[5];

	pm_runtime_get_sync(dev);

	mutex_lock(&(par->io_lock));

	/* y1 and y2 must be a multiple of 2 so drop the lowest bit */
	y1 &= 0xfffe;
	y2 &= 0xfffe;

	dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
		1, y1+1, xres, y2-y1, mode);

	/* K1901: first transfer the region data */
	args[0] = AUOK1901_DMA_ROTATE90(par->rotation) | 1;
	args[1] = y1 + 1;
	args[2] = xres;
	args[3] = y2 - y1;
	buf += y1 * line_length;
	/* pixel count is in 16-bit words, hence the /2 */
	auok190x_send_cmdargs_pixels_nowait(par, AUOK1901_CMD_DMA_START, 4,
					    args, ((y2 - y1) * line_length)/2,
					    (u16 *) buf);
	auok190x_send_command_nowait(par, AUOK190X_CMD_DATA_STOP);

	/* K1901: second tell the controller to update the region with mode */
	args[0] = mode | AUOK1901_DDMA_ROTATE180(par->rotation);
	args[1] = 1;
	args[2] = y1 + 1;
	args[3] = xres;
	args[4] = y2 - y1;
	auok190x_send_cmdargs_nowait(par, AUOK1901_CMD_DDMA_START, 5, args);

	/* counted so auok1901fb_need_refresh() can force a full refresh */
	par->update_cnt++;

	mutex_unlock(&(par->io_lock));

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

/*
 * Partial-update callback for the common auo_k190x core: update only
 * the page range [y1, y2).  Uses update mode 1 unless the user selected
 * a specific mode via par->update_mode.
 */
static void auok1901fb_dpy_update_pages(struct auok190xfb_par *par,
						u16 y1, u16 y2)
{
	int mode;

	if (par->update_mode < 0) {
		mode = AUOK190X_UPDATE_MODE(1);
		par->last_mode = -1;
	} else {
		mode = AUOK190X_UPDATE_MODE(par->update_mode);
		par->last_mode = par->update_mode;
	}

	if (par->flash)
		mode |= AUOK190X_UPDATE_NONFLASH;

	auok1901_update_region(par, mode, y1, y2);
}

/*
 * Full-screen update callback: refresh the whole display and reset the
 * partial-update counter.  Uses update mode 0 unless overridden.
 */
static void auok1901fb_dpy_update(struct auok190xfb_par *par)
{
	int mode;

	/* When doing full updates, wait for the controller to be ready
	 * This will hopefully catch some hangs of the K1901
	 */
	par->board->wait_for_rdy(par);

	if (par->update_mode < 0) {
		mode = AUOK190X_UPDATE_MODE(0);
		par->last_mode = -1;
	} else {
		mode = AUOK190X_UPDATE_MODE(par->update_mode);
		par->last_mode = par->update_mode;
	}

	if (par->flash)
		mode |= AUOK190X_UPDATE_NONFLASH;

	auok1901_update_region(par, mode, 0, par->info->var.yres);
	par->update_cnt = 0;
}

/*
 * Tell the core to force a full refresh after more than 10 partial
 * updates have accumulated.
 */
static bool auok1901fb_need_refresh(struct auok190xfb_par *par)
{
	return (par->update_cnt > 10);
}

/*
 * Platform-device probe: wire the K1901-specific callbacks into the
 * shared auo_k190x probe path.  Returns -EINVAL if the board glue
 * did not supply platform data.
 */
static int auok1901fb_probe(struct platform_device *pdev)
{
	struct auok190x_init_data init;
	struct auok190x_board *board;

	/* pick up board specific routines */
	board = pdev->dev.platform_data;
	if (!board)
		return -EINVAL;

	/* fill temporary init struct for common init */
	init.id = "auo_k1901fb";
	init.board = board;
	init.update_partial = auok1901fb_dpy_update_pages;
	init.update_all = auok1901fb_dpy_update;
	init.need_refresh = auok1901fb_need_refresh;
	init.init = auok1901_init;

	return auok190x_common_probe(pdev, &init);
}

/* Tear-down is handled entirely by the common auo_k190x core. */
static int auok1901fb_remove(struct platform_device *pdev)
{
	return auok190x_common_remove(pdev);
}

static struct platform_driver auok1901fb_driver = {
	.probe	= auok1901fb_probe,
	.remove	= auok1901fb_remove,
	.driver	= {
		.owner	= THIS_MODULE,
		.name	= "auo_k1901fb",
		.pm	= &auok190x_pm,
	},
};
module_platform_driver(auok1901fb_driver);

MODULE_DESCRIPTION("framebuffer driver for the AUO-K1901 EPD controller");
MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
MODULE_LICENSE("GPL");
gpl-2.0
SlimRoms/kernel_lge_msm7x27a-common
fs/btrfs/struct-funcs.c
4882
5093
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/highmem.h>

/* this is some deeply nasty code.  ctree.h has a different
 * definition for this BTRFS_SETGET_FUNCS macro, behind a #ifndef
 *
 * The end result is that anyone who #includes ctree.h gets a
 * declaration for the btrfs_set_foo functions and btrfs_foo functions
 *
 * This file declares the macros and then #includes ctree.h, which results
 * in cpp creating the function here based on the template below.
 *
 * These setget functions do all the extent_buffer related mapping
 * required to efficiently read and write specific fields in the extent
 * buffers.  Every pointer to metadata items in btrfs is really just
 * an unsigned long offset into the extent buffer which has been
 * cast to a specific type.  This gives us all the gcc type checking.
 *
 * The extent buffer api is used to do all the kmapping and page
 * spanning work required to get extent buffers in highmem and have
 * a metadata blocksize different from the page size.
 *
 * The macro starts with a simple function prototype declaration so that
 * sparse won't complain about it being static.
 *
 * The generated reader/writer pair works as follows:
 *  1. If a cached mapping token is supplied and still covers the whole
 *     member (same eb, offset within the already-kmapped page), read or
 *     write through the cached kernel address directly.
 *  2. Otherwise map the page containing the member via
 *     map_private_extent_buffer(); if that fails (member straddles a
 *     page boundary) fall back to the slower read_eb_member() /
 *     write_eb_member() copy helpers.
 *  3. On a successful fresh mapping, refill the token cache so the next
 *     access to a nearby member can take the fast path.
 */
#define BTRFS_SETGET_FUNCS(name, type, member, bits)			\
u##bits btrfs_##name(struct extent_buffer *eb, type *s);		\
void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);	\
void btrfs_set_token_##name(struct extent_buffer *eb, type *s, u##bits val, struct btrfs_map_token *token);	\
u##bits btrfs_token_##name(struct extent_buffer *eb,			\
			   type *s, struct btrfs_map_token *token)	\
{									\
	unsigned long part_offset = (unsigned long)s;			\
	unsigned long offset = part_offset + offsetof(type, member);	\
	type *p;							\
	int err;							\
	char *kaddr;							\
	unsigned long map_start;					\
	unsigned long map_len;						\
	unsigned long mem_len = sizeof(((type *)0)->member);		\
	u##bits res;							\
	/* fast path: cached token still maps this member's page */	\
	if (token && token->kaddr && token->offset <= offset &&		\
	    token->eb == eb &&						\
	   (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) {	\
		kaddr = token->kaddr;					\
		p = (type *)(kaddr + part_offset - token->offset);	\
		res = le##bits##_to_cpu(p->member);			\
		return res;						\
	}								\
	err = map_private_extent_buffer(eb, offset,			\
			mem_len,					\
			&kaddr, &map_start, &map_len);			\
	if (err) {							\
		/* member crosses a page boundary: copy it out */	\
		__le##bits leres;					\
		read_eb_member(eb, s, type, member, &leres);		\
		return le##bits##_to_cpu(leres);			\
	}								\
	p = (type *)(kaddr + part_offset - map_start);			\
	res = le##bits##_to_cpu(p->member);				\
	if (token) {							\
		/* cache the fresh mapping for subsequent accesses */	\
		token->kaddr = kaddr;					\
		token->offset = map_start;				\
		token->eb = eb;						\
	}								\
	return res;							\
}									\
void btrfs_set_token_##name(struct extent_buffer *eb,			\
			    type *s, u##bits val, struct btrfs_map_token *token)		\
{									\
	unsigned long part_offset = (unsigned long)s;			\
	unsigned long offset = part_offset + offsetof(type, member);	\
	type *p;							\
	int err;							\
	char *kaddr;							\
	unsigned long map_start;					\
	unsigned long map_len;						\
	unsigned long mem_len = sizeof(((type *)0)->member);		\
	/* fast path: cached token still maps this member's page */	\
	if (token && token->kaddr && token->offset <= offset &&		\
	    token->eb == eb &&						\
	   (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) {	\
		kaddr = token->kaddr;					\
		p = (type *)(kaddr + part_offset - token->offset);	\
		p->member = cpu_to_le##bits(val);			\
		return;							\
	}								\
	err = map_private_extent_buffer(eb, offset,			\
			mem_len,					\
			&kaddr, &map_start, &map_len);			\
	if (err) {							\
		/* member crosses a page boundary: copy it in */	\
		__le##bits val2;					\
		val2 = cpu_to_le##bits(val);				\
		write_eb_member(eb, s, type, member, &val2);		\
		return;							\
	}								\
	p = (type *)(kaddr + part_offset - map_start);			\
	p->member = cpu_to_le##bits(val);				\
	if (token) {							\
		/* cache the fresh mapping for subsequent accesses */	\
		token->kaddr = kaddr;					\
		token->offset = map_start;				\
		token->eb = eb;						\
	}								\
}									\
void btrfs_set_##name(struct extent_buffer *eb,				\
		      type *s, u##bits val)				\
{									\
	btrfs_set_token_##name(eb, s, val, NULL);			\
}									\
u##bits btrfs_##name(struct extent_buffer *eb,				\
		      type *s)						\
{									\
	return btrfs_token_##name(eb, s, NULL);				\
}									\

#include "ctree.h"

/*
 * Copy the btrfs_disk_key of node pointer slot 'nr' of 'eb' into
 * 'disk_key', going through read_eb_member() so page-spanning keys
 * are handled correctly.
 */
void btrfs_node_key(struct extent_buffer *eb,
		    struct btrfs_disk_key *disk_key, int nr)
{
	unsigned long ptr = btrfs_node_key_ptr_offset(nr);
	read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
		       struct btrfs_key_ptr, key, disk_key);
}
gpl-2.0
touchpro/android_kernel_lge_x5_old
drivers/net/ethernet/8390/pcnet_cs.c
5138
61761
/*====================================================================== A PCMCIA ethernet driver for NS8390-based cards This driver supports the D-Link DE-650 and Linksys EthernetCard cards, the newer D-Link and Linksys combo cards, Accton EN2212 cards, the RPTI EP400, and the PreMax PE-200 in non-shared-memory mode, and the IBM Credit Card Adapter, the NE4100, the Thomas Conrad ethernet card, and the Kingston KNE-PCM/x in shared-memory mode. It will also handle the Socket EA card in either mode. Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net pcnet_cs.c 1.153 2003/11/09 18:53:09 The network driver code is based on Donald Becker's NE2000 code: Written 1992,1993 by Donald Becker. Copyright 1993 United States Government as represented by the Director, National Security Agency. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Donald Becker may be reached at becker@scyld.com Based also on Keith Moore's changes to Don Becker's code, for IBM CCAE support. Drivers merged back together, and shared-memory Socket EA support added, by Ken Raeburn, September 1995. ======================================================================*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/log2.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include "8390.h" #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include <asm/io.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #define PCNET_CMD 0x00 #define PCNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */ #define PCNET_RESET 0x1f /* Issue a read to reset, a write to clear. 
*/ #define PCNET_MISC 0x18 /* For IBM CCAE and Socket EA cards */ #define PCNET_START_PG 0x40 /* First page of TX buffer */ #define PCNET_STOP_PG 0x80 /* Last page +1 of RX ring */ /* Socket EA cards have a larger packet buffer */ #define SOCKET_START_PG 0x01 #define SOCKET_STOP_PG 0xff #define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */ static const char *if_names[] = { "auto", "10baseT", "10base2"}; /*====================================================================*/ /* Module parameters */ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("NE2000 compatible PCMCIA ethernet driver"); MODULE_LICENSE("GPL"); #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) INT_MODULE_PARM(if_port, 1); /* Transceiver type */ INT_MODULE_PARM(use_big_buf, 1); /* use 64K packet buffer? */ INT_MODULE_PARM(mem_speed, 0); /* shared mem speed, in ns */ INT_MODULE_PARM(delay_output, 0); /* pause after xmit? */ INT_MODULE_PARM(delay_time, 4); /* in usec */ INT_MODULE_PARM(use_shmem, -1); /* use shared memory? */ INT_MODULE_PARM(full_duplex, 0); /* full duplex? */ /* Ugh! Let the user hardwire the hardware address for queer cards */ static int hw_addr[6] = { 0, /* ... 
*/ }; module_param_array(hw_addr, int, NULL, 0); /*====================================================================*/ static void mii_phy_probe(struct net_device *dev); static int pcnet_config(struct pcmcia_device *link); static void pcnet_release(struct pcmcia_device *link); static int pcnet_open(struct net_device *dev); static int pcnet_close(struct net_device *dev); static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); static void ei_watchdog(u_long arg); static void pcnet_reset_8390(struct net_device *dev); static int set_config(struct net_device *dev, struct ifmap *map); static int setup_shmem_window(struct pcmcia_device *link, int start_pg, int stop_pg, int cm_offset); static int setup_dma_config(struct pcmcia_device *link, int start_pg, int stop_pg); static void pcnet_detach(struct pcmcia_device *p_dev); /*====================================================================*/ typedef struct hw_info_t { u_int offset; u_char a0, a1, a2; u_int flags; } hw_info_t; #define DELAY_OUTPUT 0x01 #define HAS_MISC_REG 0x02 #define USE_BIG_BUF 0x04 #define HAS_IBM_MISC 0x08 #define IS_DL10019 0x10 #define IS_DL10022 0x20 #define HAS_MII 0x40 #define USE_SHMEM 0x80 /* autodetected */ #define AM79C9XX_HOME_PHY 0x00006B90 /* HomePNA PHY */ #define AM79C9XX_ETH_PHY 0x00006B70 /* 10baseT PHY */ #define MII_PHYID_REV_MASK 0xfffffff0 #define MII_PHYID_REG1 0x02 #define MII_PHYID_REG2 0x03 static hw_info_t hw_info[] = { { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT }, { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 }, { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 }, { /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94, DELAY_OUTPUT | HAS_IBM_MISC }, { /* Danpex EN-6200P2 */ 0x0110, 0x00, 0x40, 0xc7, 0 }, { /* DataTrek NetCard */ 0x0ff0, 0x00, 0x20, 0xe8, 0 }, { /* Dayna CommuniCard E */ 0x0110, 0x00, 0x80, 0x19, 0 }, { /* D-Link DE-650 */ 0x0040, 0x00, 0x80, 0xc8, 0 }, { 
/* EP-210 Ethernet */ 0x0110, 0x00, 0x40, 0x33, 0 }, { /* EP4000 Ethernet */ 0x01c0, 0x00, 0x00, 0xb4, 0 }, { /* Epson EEN10B */ 0x0ff0, 0x00, 0x00, 0x48, HAS_MISC_REG | HAS_IBM_MISC }, { /* ELECOM Laneed LD-CDWA */ 0xb8, 0x08, 0x00, 0x42, 0 }, { /* Hypertec Ethernet */ 0x01c0, 0x00, 0x40, 0x4c, 0 }, { /* IBM CCAE */ 0x0ff0, 0x08, 0x00, 0x5a, HAS_MISC_REG | HAS_IBM_MISC }, { /* IBM CCAE */ 0x0ff0, 0x00, 0x04, 0xac, HAS_MISC_REG | HAS_IBM_MISC }, { /* IBM CCAE */ 0x0ff0, 0x00, 0x06, 0x29, HAS_MISC_REG | HAS_IBM_MISC }, { /* IBM FME */ 0x0374, 0x08, 0x00, 0x5a, HAS_MISC_REG | HAS_IBM_MISC }, { /* IBM FME */ 0x0374, 0x00, 0x04, 0xac, HAS_MISC_REG | HAS_IBM_MISC }, { /* Kansai KLA-PCM/T */ 0x0ff0, 0x00, 0x60, 0x87, HAS_MISC_REG | HAS_IBM_MISC }, { /* NSC DP83903 */ 0x0374, 0x08, 0x00, 0x17, HAS_MISC_REG | HAS_IBM_MISC }, { /* NSC DP83903 */ 0x0374, 0x00, 0xc0, 0xa8, HAS_MISC_REG | HAS_IBM_MISC }, { /* NSC DP83903 */ 0x0374, 0x00, 0xa0, 0xb0, HAS_MISC_REG | HAS_IBM_MISC }, { /* NSC DP83903 */ 0x0198, 0x00, 0x20, 0xe0, HAS_MISC_REG | HAS_IBM_MISC }, { /* I-O DATA PCLA/T */ 0x0ff0, 0x00, 0xa0, 0xb0, 0 }, { /* Katron PE-520 */ 0x0110, 0x00, 0x40, 0xf6, 0 }, { /* Kingston KNE-PCM/x */ 0x0ff0, 0x00, 0xc0, 0xf0, HAS_MISC_REG | HAS_IBM_MISC }, { /* Kingston KNE-PCM/x */ 0x0ff0, 0xe2, 0x0c, 0x0f, HAS_MISC_REG | HAS_IBM_MISC }, { /* Kingston KNE-PC2 */ 0x0180, 0x00, 0xc0, 0xf0, 0 }, { /* Maxtech PCN2000 */ 0x5000, 0x00, 0x00, 0xe8, 0 }, { /* NDC Instant-Link */ 0x003a, 0x00, 0x80, 0xc6, 0 }, { /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 }, { /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65, HAS_MISC_REG | HAS_IBM_MISC }, { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45, HAS_MISC_REG | HAS_IBM_MISC }, { /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 }, { /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 }, { /* SCM Ethernet */ 0x0ff0, 0x00, 0x20, 0xcb, 0 }, { /* Socket EA */ 0x4000, 0x00, 0xc0, 0x1b, DELAY_OUTPUT | HAS_MISC_REG | USE_BIG_BUF }, { /* Socket LP-E CF+ */ 
0x01c0, 0x00, 0xc0, 0x1b, 0 }, { /* SuperSocket RE450T */ 0x0110, 0x00, 0xe0, 0x98, 0 }, { /* Volktek NPL-402CT */ 0x0060, 0x00, 0x40, 0x05, 0 }, { /* NEC PC-9801N-J12 */ 0x0ff0, 0x00, 0x00, 0x4c, 0 }, { /* PCMCIA Technology OEM */ 0x01c8, 0x00, 0xa0, 0x0c, 0 } }; #define NR_INFO ARRAY_SIZE(hw_info) static hw_info_t default_info = { 0, 0, 0, 0, 0 }; static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII }; static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII }; typedef struct pcnet_dev_t { struct pcmcia_device *p_dev; u_int flags; void __iomem *base; struct timer_list watchdog; int stale, fast_poll; u_char phy_id; u_char eth_phy, pna_phy; u_short link_status; u_long mii_reset; } pcnet_dev_t; static inline pcnet_dev_t *PRIV(struct net_device *dev) { char *p = netdev_priv(dev); return (pcnet_dev_t *)(p + sizeof(struct ei_device)); } static const struct net_device_ops pcnet_netdev_ops = { .ndo_open = pcnet_open, .ndo_stop = pcnet_close, .ndo_set_config = set_config, .ndo_start_xmit = ei_start_xmit, .ndo_get_stats = ei_get_stats, .ndo_do_ioctl = ei_ioctl, .ndo_set_rx_mode = ei_set_multicast_list, .ndo_tx_timeout = ei_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ei_poll, #endif }; static int pcnet_probe(struct pcmcia_device *link) { pcnet_dev_t *info; struct net_device *dev; dev_dbg(&link->dev, "pcnet_attach()\n"); /* Create new ethernet device */ dev = __alloc_ei_netdev(sizeof(pcnet_dev_t)); if (!dev) return -ENOMEM; info = PRIV(dev); info->p_dev = link; link->priv = dev; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; dev->netdev_ops = &pcnet_netdev_ops; return pcnet_config(link); } /* pcnet_attach */ static void pcnet_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; dev_dbg(&link->dev, "pcnet_detach\n"); unregister_netdev(dev); pcnet_release(link); free_netdev(dev); } /* 
pcnet_detach */ /*====================================================================== This probes for a card's hardware address, for card types that encode this information in their CIS. ======================================================================*/ static hw_info_t *get_hwinfo(struct pcmcia_device *link) { struct net_device *dev = link->priv; u_char __iomem *base, *virt; int i, j; /* Allocate a small memory window */ link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; link->resource[2]->start = 0; link->resource[2]->end = 0; i = pcmcia_request_window(link, link->resource[2], 0); if (i != 0) return NULL; virt = ioremap(link->resource[2]->start, resource_size(link->resource[2])); for (i = 0; i < NR_INFO; i++) { pcmcia_map_mem_page(link, link->resource[2], hw_info[i].offset & ~(resource_size(link->resource[2])-1)); base = &virt[hw_info[i].offset & (resource_size(link->resource[2])-1)]; if ((readb(base+0) == hw_info[i].a0) && (readb(base+2) == hw_info[i].a1) && (readb(base+4) == hw_info[i].a2)) { for (j = 0; j < 6; j++) dev->dev_addr[j] = readb(base + (j<<1)); break; } } iounmap(virt); j = pcmcia_release_window(link, link->resource[2]); return (i < NR_INFO) ? hw_info+i : NULL; } /* get_hwinfo */ /*====================================================================== This probes for a card's hardware address by reading the PROM. It checks the address against a list of known types, then falls back to a simple NE2000 clone signature check. ======================================================================*/ static hw_info_t *get_prom(struct pcmcia_device *link) { struct net_device *dev = link->priv; unsigned int ioaddr = dev->base_addr; u_char prom[32]; int i, j; /* This is lifted straight from drivers/net/ethernet/8390/ne.c */ struct { u_char value, offset; } program_seq[] = { {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. 
*/ {0x00, EN0_RCNTLO}, /* Clear the count regs. */ {0x00, EN0_RCNTHI}, {0x00, EN0_IMR}, /* Mask completion irq. */ {0xFF, EN0_ISR}, {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ {32, EN0_RCNTLO}, {0x00, EN0_RCNTHI}, {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ {0x00, EN0_RSARHI}, {E8390_RREAD+E8390_START, E8390_CMD}, }; pcnet_reset_8390(dev); mdelay(10); for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); for (i = 0; i < 32; i++) prom[i] = inb(ioaddr + PCNET_DATAPORT); for (i = 0; i < NR_INFO; i++) { if ((prom[0] == hw_info[i].a0) && (prom[2] == hw_info[i].a1) && (prom[4] == hw_info[i].a2)) break; } if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) { for (j = 0; j < 6; j++) dev->dev_addr[j] = prom[j<<1]; return (i < NR_INFO) ? hw_info+i : &default_info; } return NULL; } /* get_prom */ /*====================================================================== For DL10019 based cards, like the Linksys EtherFast ======================================================================*/ static hw_info_t *get_dl10019(struct pcmcia_device *link) { struct net_device *dev = link->priv; int i; u_char sum; for (sum = 0, i = 0x14; i < 0x1c; i++) sum += inb_p(dev->base_addr + i); if (sum != 0xff) return NULL; for (i = 0; i < 6; i++) dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i); i = inb(dev->base_addr + 0x1f); return ((i == 0x91)||(i == 0x99)) ? 
&dl10022_info : &dl10019_info; } /*====================================================================== For Asix AX88190 based cards ======================================================================*/ static hw_info_t *get_ax88190(struct pcmcia_device *link) { struct net_device *dev = link->priv; unsigned int ioaddr = dev->base_addr; int i, j; /* Not much of a test, but the alternatives are messy */ if (link->config_base != 0x03c0) return NULL; outb_p(0x01, ioaddr + EN0_DCFG); /* Set word-wide access. */ outb_p(0x00, ioaddr + EN0_RSARLO); /* DMA starting at 0x0400. */ outb_p(0x04, ioaddr + EN0_RSARHI); outb_p(E8390_RREAD+E8390_START, ioaddr + E8390_CMD); for (i = 0; i < 6; i += 2) { j = inw(ioaddr + PCNET_DATAPORT); dev->dev_addr[i] = j & 0xff; dev->dev_addr[i+1] = j >> 8; } return NULL; } /*====================================================================== This should be totally unnecessary... but when we can't figure out the hardware address any other way, we'll let the user hard wire it when the module is initialized. 
======================================================================*/ static hw_info_t *get_hwired(struct pcmcia_device *link) { struct net_device *dev = link->priv; int i; for (i = 0; i < 6; i++) if (hw_addr[i] != 0) break; if (i == 6) return NULL; for (i = 0; i < 6; i++) dev->dev_addr[i] = hw_addr[i]; return &default_info; } /* get_hwired */ static int try_io_port(struct pcmcia_device *link) { int j, ret; link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; if (link->resource[0]->end == 32) { link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; if (link->resource[1]->end > 0) { /* for master/slave multifunction cards */ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; } } else { /* This should be two 16-port windows */ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16; } if (link->resource[0]->start == 0) { for (j = 0; j < 0x400; j += 0x20) { link->resource[0]->start = j ^ 0x300; link->resource[1]->start = (j ^ 0x300) + 0x10; link->io_lines = 16; ret = pcmcia_request_io(link); if (ret == 0) return ret; } return ret; } else { return pcmcia_request_io(link); } } static int pcnet_confcheck(struct pcmcia_device *p_dev, void *priv_data) { int *priv = priv_data; int try = (*priv & 0x1); *priv &= (p_dev->resource[2]->end >= 0x4000) ? 
0x10 : ~0x10; if (p_dev->config_index == 0) return -EINVAL; if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32) return -EINVAL; if (try) p_dev->io_lines = 16; return try_io_port(p_dev); } static hw_info_t *pcnet_try_config(struct pcmcia_device *link, int *has_shmem, int try) { struct net_device *dev = link->priv; hw_info_t *local_hw_info; pcnet_dev_t *info = PRIV(dev); int priv = try; int ret; ret = pcmcia_loop_config(link, pcnet_confcheck, &priv); if (ret) { dev_warn(&link->dev, "no useable port range found\n"); return NULL; } *has_shmem = (priv & 0x10); if (!link->irq) return NULL; if (resource_size(link->resource[1]) == 8) link->config_flags |= CONF_ENABLE_SPKR; if ((link->manf_id == MANFID_IBM) && (link->card_id == PRODID_IBM_HOME_AND_AWAY)) link->config_index |= 0x10; ret = pcmcia_enable_device(link); if (ret) return NULL; dev->irq = link->irq; dev->base_addr = link->resource[0]->start; if (info->flags & HAS_MISC_REG) { if ((if_port == 1) || (if_port == 2)) dev->if_port = if_port; else dev_notice(&link->dev, "invalid if_port requested\n"); } else dev->if_port = 0; if ((link->config_base == 0x03c0) && (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) { dev_info(&link->dev, "this is an AX88190 card - use axnet_cs instead.\n"); return NULL; } local_hw_info = get_hwinfo(link); if (!local_hw_info) local_hw_info = get_prom(link); if (!local_hw_info) local_hw_info = get_dl10019(link); if (!local_hw_info) local_hw_info = get_ax88190(link); if (!local_hw_info) local_hw_info = get_hwired(link); return local_hw_info; } static int pcnet_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; pcnet_dev_t *info = PRIV(dev); int start_pg, stop_pg, cm_offset; int has_shmem = 0; hw_info_t *local_hw_info; dev_dbg(&link->dev, "pcnet_config\n"); local_hw_info = pcnet_try_config(link, &has_shmem, 0); if (!local_hw_info) { /* check whether forcing io_lines to 16 helps... 
*/ pcmcia_disable_device(link); local_hw_info = pcnet_try_config(link, &has_shmem, 1); if (local_hw_info == NULL) { dev_notice(&link->dev, "unable to read hardware net" " address for io base %#3lx\n", dev->base_addr); goto failed; } } info->flags = local_hw_info->flags; /* Check for user overrides */ info->flags |= (delay_output) ? DELAY_OUTPUT : 0; if ((link->manf_id == MANFID_SOCKET) && ((link->card_id == PRODID_SOCKET_LPE) || (link->card_id == PRODID_SOCKET_LPE_CF) || (link->card_id == PRODID_SOCKET_EIO))) info->flags &= ~USE_BIG_BUF; if (!use_big_buf) info->flags &= ~USE_BIG_BUF; if (info->flags & USE_BIG_BUF) { start_pg = SOCKET_START_PG; stop_pg = SOCKET_STOP_PG; cm_offset = 0x10000; } else { start_pg = PCNET_START_PG; stop_pg = PCNET_STOP_PG; cm_offset = 0; } /* has_shmem is ignored if use_shmem != -1 */ if ((use_shmem == 0) || (!has_shmem && (use_shmem == -1)) || (setup_shmem_window(link, start_pg, stop_pg, cm_offset) != 0)) setup_dma_config(link, start_pg, stop_pg); ei_status.name = "NE2000"; ei_status.word16 = 1; ei_status.reset_8390 = pcnet_reset_8390; if (info->flags & (IS_DL10019|IS_DL10022)) mii_phy_probe(dev); SET_NETDEV_DEV(dev, &link->dev); if (register_netdev(dev) != 0) { pr_notice("register_netdev() failed\n"); goto failed; } if (info->flags & (IS_DL10019|IS_DL10022)) { u_char id = inb(dev->base_addr + 0x1a); netdev_info(dev, "NE2000 (DL100%d rev %02x): ", (info->flags & IS_DL10022) ? 
22 : 19, id); if (info->pna_phy) pr_cont("PNA, "); } else { netdev_info(dev, "NE2000 Compatible: "); } pr_cont("io %#3lx, irq %d,", dev->base_addr, dev->irq); if (info->flags & USE_SHMEM) pr_cont(" mem %#5lx,", dev->mem_start); if (info->flags & HAS_MISC_REG) pr_cont(" %s xcvr,", if_names[dev->if_port]); pr_cont(" hw_addr %pM\n", dev->dev_addr); return 0; failed: pcnet_release(link); return -ENODEV; } /* pcnet_config */ static void pcnet_release(struct pcmcia_device *link) { pcnet_dev_t *info = PRIV(link->priv); dev_dbg(&link->dev, "pcnet_release\n"); if (info->flags & USE_SHMEM) iounmap(info->base); pcmcia_disable_device(link); } static int pcnet_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) netif_device_detach(dev); return 0; } static int pcnet_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) { pcnet_reset_8390(dev); NS8390_init(dev, 1); netif_device_attach(dev); } return 0; } /*====================================================================== MII interface support for DL10019 and DL10022 based cards On the DL10019, the MII IO direction bit is 0x10; on the DL10022 it is 0x20. Setting both bits seems to work on both card types. 
======================================================================*/

#define DLINK_GPIO	0x1c
#define DLINK_DIAG	0x1d
#define DLINK_EEPROM	0x1e

/* Bit positions in the DLINK_GPIO register used for MII bit-banging.
   The low MDIO_MASK bits are unrelated GPIO state that every access
   must read back and preserve. */
#define MDIO_SHIFT_CLK		0x80
#define MDIO_DATA_OUT		0x40
#define MDIO_DIR_WRITE		0x30
#define MDIO_DATA_WRITE0	(MDIO_DIR_WRITE)
#define MDIO_DATA_WRITE1	(MDIO_DIR_WRITE | MDIO_DATA_OUT)
#define MDIO_DATA_READ		0x10
#define MDIO_MASK		0x0f

/* Clock 32 one-bits out on the management interface to synchronize the
 * PHY before a command frame.  @addr is the GPIO register I/O address.
 */
static void mdio_sync(unsigned int addr)
{
    int bits, mask = inb(addr) & MDIO_MASK;

    for (bits = 0; bits < 32; bits++) {
	outb(mask | MDIO_DATA_WRITE1, addr);
	outb(mask | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
    }
}

/* Bit-bang an MII read of register @loc from PHY @phy_id at GPIO
 * address @addr.  Returns the 16-bit register value.
 */
static int mdio_read(unsigned int addr, int phy_id, int loc)
{
    /* MII management read frame header (start + opcode + addresses). */
    u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
    int i, retval = 0, mask = inb(addr) & MDIO_MASK;

    mdio_sync(addr);
    /* Shift the 14 command bits out, MSB first. */
    for (i = 13; i >= 0; i--) {
	int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
	outb(mask | dat, addr);
	outb(mask | dat | MDIO_SHIFT_CLK, addr);
    }
    /* Clock in turnaround plus 16 data bits; the extra low bit is
       discarded by the >>1 below. */
    for (i = 19; i > 0; i--) {
	outb(mask, addr);
	retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
	outb(mask | MDIO_SHIFT_CLK, addr);
    }
    return (retval>>1) & 0xffff;
}

/* Bit-bang an MII write of @value to register @loc of PHY @phy_id. */
static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
{
    /* Full 32-bit MII management write frame. */
    u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
    int i, mask = inb(addr) & MDIO_MASK;

    mdio_sync(addr);
    /* Shift all 32 frame bits out, MSB first. */
    for (i = 31; i >= 0; i--) {
	int dat = (cmd&(1<<i)) ?
MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
	outb(mask | dat, addr);
	outb(mask | dat | MDIO_SHIFT_CLK, addr);
    }
    /* Two trailing idle clocks release the management bus. */
    for (i = 1; i >= 0; i--) {
	outb(mask, addr);
	outb(mask | MDIO_SHIFT_CLK, addr);
    }
}

/*======================================================================

    EEPROM access routines for DL10019 and DL10022 based cards

======================================================================*/

/* Bit positions in the DLINK_EEPROM register. */
#define EE_EEP		0x40
#define EE_ASIC		0x10
#define EE_CS		0x08
#define EE_CK		0x04
#define EE_DO		0x02
#define EE_DI		0x01
#define EE_ADOT		0x01	/* DataOut for ASIC */
#define EE_READ_CMD	0x06

#define DL19FDUPLX	0x0400	/* DL10019 Full duplex mode */

/* Bit-bang a 16-bit word out of the serial EEPROM at word address
 * @location.  @ioaddr is the card's I/O base; the EEPROM control
 * register sits at ioaddr + DLINK_EEPROM.
 */
static int read_eeprom(unsigned int ioaddr, int location)
{
    int i, retval = 0;
    unsigned int ee_addr = ioaddr + DLINK_EEPROM;
    int read_cmd = location | (EE_READ_CMD << 8);

    /* Assert chip select in EEPROM mode. */
    outb(0, ee_addr);
    outb(EE_EEP|EE_CS, ee_addr);

    /* Shift the read command bits out. */
    for (i = 10; i >= 0; i--) {
	short dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
	outb_p(EE_EEP|EE_CS|dataval, ee_addr);
	outb_p(EE_EEP|EE_CS|dataval|EE_CK, ee_addr);
    }
    outb(EE_EEP|EE_CS, ee_addr);

    /* Clock the 16 data bits back in, MSB first. */
    for (i = 16; i > 0; i--) {
	outb_p(EE_EEP|EE_CS | EE_CK, ee_addr);
	retval = (retval << 1) | ((inb(ee_addr) & EE_DI) ? 1 : 0);
	outb_p(EE_EEP|EE_CS, ee_addr);
    }

    /* Terminate the EEPROM access. */
    outb(0, ee_addr);
    return retval;
}

/*
    The internal ASIC registers can be changed by EEPROM READ access
    with EE_ASIC bit set.
    In ASIC mode, EE_ADOT is used to output the data to the ASIC.
*/

/* OR @asic_data into the internal ASIC register at @location.  This is
 * a read-modify-write: the current EEPROM word at @location is merged
 * into @asic_data first, then the result is shifted out in ASIC mode.
 */
static void write_asic(unsigned int ioaddr, int location, short asic_data)
{
    int i;
    unsigned int ee_addr = ioaddr + DLINK_EEPROM;
    short dataval;
    int read_cmd = location | (EE_READ_CMD << 8);

    asic_data |= read_eeprom(ioaddr, location);

    /* Select the chip in ASIC mode. */
    outb(0, ee_addr);
    outb(EE_ASIC|EE_CS|EE_DI, ee_addr);

    /* One fewer command bit is clocked out in ASIC mode. */
    read_cmd = read_cmd >> 1;

    /* Shift the read command bits out. */
    for (i = 9; i >= 0; i--) {
	dataval = (read_cmd & (1 << i)) ?
EE_DO : 0;
	outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
	outb_p(EE_ASIC|EE_CS|EE_DI|dataval|EE_CK, ee_addr);
	outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
    }
    // sync
    outb(EE_ASIC|EE_CS, ee_addr);
    outb(EE_ASIC|EE_CS|EE_CK, ee_addr);
    outb(EE_ASIC|EE_CS, ee_addr);

    /* Shift the 16 data bits out on EE_ADOT, MSB first. */
    for (i = 15; i >= 0; i--) {
	dataval = (asic_data & (1 << i)) ? EE_ADOT : 0;
	outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
	outb_p(EE_ASIC|EE_CS|dataval|EE_CK, ee_addr);
	outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
    }

    /* Terminate the ASIC access. */
    outb(EE_ASIC|EE_DI, ee_addr);
    outb(EE_ASIC|EE_DI| EE_CK, ee_addr);
    outb(EE_ASIC|EE_DI, ee_addr);

    outb(0, ee_addr);
}

/*====================================================================*/

/* Program the card's misc/transceiver register (if present) and, on
 * DL10019/DL10022 cards, advertise 10/100 full/half duplex and restart
 * MII autonegotiation.  Called from pcnet_open() and pcnet_reset_8390().
 */
static void set_misc_reg(struct net_device *dev)
{
    unsigned int nic_base = dev->base_addr;
    pcnet_dev_t *info = PRIV(dev);
    u_char tmp;

    if (info->flags & HAS_MISC_REG) {
	/* Preserve the upper bits; rewrite the low transceiver/buffer
	   control bits from current configuration. */
	tmp = inb_p(nic_base + PCNET_MISC) & ~3;
	if (dev->if_port == 2)
	    tmp |= 1;
	if (info->flags & USE_BIG_BUF)
	    tmp |= 2;
	if (info->flags & HAS_IBM_MISC)
	    tmp |= 8;
	outb_p(tmp, nic_base + PCNET_MISC);
    }
    if (info->flags & IS_DL10022) {
	if (info->flags & HAS_MII) {
	    /* Advertise 100F, 100H, 10F, 10H */
	    mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
	    /* Restart MII autonegotiation */
	    mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
	    mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
	    info->mii_reset = jiffies;
	} else {
	    /* No MII: force duplex via the diagnostic register. */
	    outb(full_duplex ?
4 : 0, nic_base + DLINK_DIAG);
	}
    } else if (info->flags & IS_DL10019) {
	/* Advertise 100F, 100H, 10F, 10H */
	mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
	/* Restart MII autonegotiation */
	mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
	mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
    }
}

/*====================================================================*/

/* Scan all 32 MII addresses and record the Ethernet PHY address in
 * info->eth_phy and, when an AM79C9XX HomePNA PHY is found, its address
 * in info->pna_phy.
 */
static void mii_phy_probe(struct net_device *dev)
{
    pcnet_dev_t *info = PRIV(dev);
    unsigned int mii_addr = dev->base_addr + DLINK_GPIO;
    int i;
    u_int tmp, phyid;

    for (i = 31; i >= 0; i--) {
	tmp = mdio_read(mii_addr, i, 1);
	/* All-zeros or all-ones status means no PHY at this address. */
	if ((tmp == 0) || (tmp == 0xffff))
	    continue;
	tmp = mdio_read(mii_addr, i, MII_PHYID_REG1);
	phyid = tmp << 16;
	phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
	phyid &= MII_PHYID_REV_MASK;
	netdev_dbg(dev, "MII at %d is 0x%08x\n", i, phyid);
	if (phyid == AM79C9XX_HOME_PHY) {
	    info->pna_phy = i;
	} else if (phyid != AM79C9XX_ETH_PHY) {
	    info->eth_phy = i;
	}
    }
}

/* net_device open(): program the misc register, grab the (shared) IRQ,
 * start the once-a-second link watchdog and hand off to the 8390
 * core's ei_open().  Returns 0 or a negative errno.
 */
static int pcnet_open(struct net_device *dev)
{
    int ret;
    pcnet_dev_t *info = PRIV(dev);
    struct pcmcia_device *link = info->p_dev;
    unsigned int nic_base = dev->base_addr;

    dev_dbg(&link->dev, "pcnet_open('%s')\n", dev->name);

    if (!pcmcia_dev_present(link))
	return -ENODEV;

    set_misc_reg(dev);

    outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr.
*/
    ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev->name, dev);
    if (ret)
	return ret;

    link->open++;

    /* Start the watchdog on the Ethernet PHY with link state unknown. */
    info->phy_id = info->eth_phy;
    info->link_status = 0x00;
    init_timer(&info->watchdog);
    info->watchdog.function = ei_watchdog;
    info->watchdog.data = (u_long)dev;
    info->watchdog.expires = jiffies + HZ;
    add_timer(&info->watchdog);

    return ei_open(dev);
} /* pcnet_open */

/*====================================================================*/

/* net_device stop(): shut down the 8390 core, release the IRQ and stop
 * the watchdog timer started by pcnet_open().
 */
static int pcnet_close(struct net_device *dev)
{
    pcnet_dev_t *info = PRIV(dev);
    struct pcmcia_device *link = info->p_dev;

    dev_dbg(&link->dev, "pcnet_close('%s')\n", dev->name);

    ei_close(dev);
    free_irq(dev->irq, dev);

    link->open--;
    netif_stop_queue(dev);
    /* del_timer_sync() guarantees ei_watchdog is not still running. */
    del_timer_sync(&info->watchdog);

    return 0;
} /* pcnet_close */

/*======================================================================

    Hard reset the card.  This used to pause for the same period that
    a 8390 reset command required, but that shouldn't be necessary.

======================================================================*/

/* Hard-reset the 8390: stop the NIC, strobe the NE2000-style reset
 * port, poll for the reset-complete ISR bit, then restore the misc
 * register settings via set_misc_reg().
 */
static void pcnet_reset_8390(struct net_device *dev)
{
    unsigned int nic_base = dev->base_addr;
    int i;

    ei_status.txing = ei_status.dmaing = 0;

    outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);

    /* Read the reset port and write the value back to strobe a reset. */
    outb(inb(nic_base + PCNET_RESET), nic_base + PCNET_RESET);

    /* Wait up to 10ms (100 * 100us) for the reset-complete flag. */
    for (i = 0; i < 100; i++) {
	if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
	    break;
	udelay(100);
    }
    outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr.
*/

    if (i == 100)
	netdev_err(dev, "pcnet_reset_8390() did not complete.\n");

    set_misc_reg(dev);
} /* pcnet_reset_8390 */

/*====================================================================*/

/* ndo_set_config hook: switch between the two transceiver ports on
 * cards that have a misc register.  Only ports 1 and 2 are valid;
 * returns -EOPNOTSUPP without a misc register, -EINVAL for a bad port.
 */
static int set_config(struct net_device *dev, struct ifmap *map)
{
    pcnet_dev_t *info = PRIV(dev);

    if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
	if (!(info->flags & HAS_MISC_REG))
	    return -EOPNOTSUPP;
	else if ((map->port < 1) || (map->port > 2))
	    return -EINVAL;
	dev->if_port = map->port;
	netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
	NS8390_init(dev, 1);
    }
    return 0;
}

/*====================================================================*/

/* IRQ handler wrapper: forward to the 8390 core's ei_interrupt() and
 * clear the watchdog's "stale" counter whenever an interrupt was
 * actually handled.
 */
static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    pcnet_dev_t *info;
    irqreturn_t ret = ei_interrupt(irq, dev_id);

    if (ret == IRQ_HANDLED) {
	info = PRIV(dev);
	info->stale = 0;
    }
    return ret;
}

/* Once-a-second timer callback: polls for dropped interrupts and, on
 * MII cards, tracks link state and drives Ethernet/HomePNA PHY
 * switching.  @arg is the struct net_device pointer cast to u_long
 * (old-style timer API).
 */
static void ei_watchdog(u_long arg)
{
    struct net_device *dev = (struct net_device *)arg;
    pcnet_dev_t *info = PRIV(dev);
    unsigned int nic_base = dev->base_addr;
    unsigned int mii_addr = nic_base + DLINK_GPIO;
    u_short link;

    if (!netif_device_present(dev))
	goto reschedule;

    /* Check for pending interrupt with expired latency timer: with
       this, we can limp along even if the interrupt is blocked */
    if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
	if (!info->fast_poll)
	    netdev_info(dev, "interrupt(s) dropped!\n");
	ei_irq_wrapper(dev->irq, dev);
	info->fast_poll = HZ;
    }
    /* While fast-polling, re-arm for the very next tick and return. */
    if (info->fast_poll) {
	info->fast_poll--;
	info->watchdog.expires = jiffies + 1;
	add_timer(&info->watchdog);
	return;
    }

    if (!(info->flags & HAS_MII))
	goto reschedule;

    /* Status register 1 is read twice — NOTE(review): presumably
       because its link bit is latched; confirm against the PHY spec. */
    mdio_read(mii_addr, info->phy_id, 1);
    link = mdio_read(mii_addr, info->phy_id, 1);
    if (!link || (link == 0xffff)) {
	if (info->eth_phy) {
	    /* Retry at PHY address 0 before declaring the MII dead. */
	    info->phy_id = info->eth_phy = 0;
	} else {
	    netdev_info(dev, "MII is missing!\n");
	    info->flags &= ~HAS_MII;
	}
	goto reschedule;
    }

    link &= 0x0004;	/* isolate the link-status bit */
    if (link != info->link_status) {
	u_short p = mdio_read(mii_addr,
info->phy_id, 5);	/* register 5: link partner ability */
	netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
	if (link && (info->flags & IS_DL10022)) {
	    /* Disable collision detection on full duplex links */
	    outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG);
	} else if (link && (info->flags & IS_DL10019)) {
	    /* Disable collision detection on full duplex links */
	    write_asic(dev->base_addr, 4, (p & 0x140) ? DL19FDUPLX : 0);
	}
	if (link) {
	    if (info->phy_id == info->eth_phy) {
		if (p)
		    netdev_info(dev, "autonegotiation complete: "
				"%sbaseT-%cD selected\n",
				((p & 0x0180) ? "100" : "10"),
				((p & 0x0140) ? 'F' : 'H'));
		else
		    netdev_info(dev, "link partner did not autonegotiate\n");
	    }
	    NS8390_init(dev, 1);
	}
	info->link_status = link;
    }
    /* Every 6 seconds, consider flipping between the HomePNA and
       Ethernet PHYs: switch away from a PHY without link, or back to
       the Ethernet PHY when it regains link. */
    if (info->pna_phy && time_after(jiffies, info->mii_reset + 6*HZ)) {
	link = mdio_read(mii_addr, info->eth_phy, 1) & 0x0004;
	if (((info->phy_id == info->pna_phy) && link) ||
	    ((info->phy_id != info->pna_phy) && !link)) {
	    /* isolate this MII and try flipping to the other one */
	    mdio_write(mii_addr, info->phy_id, 0, 0x0400);
	    info->phy_id ^= info->pna_phy ^ info->eth_phy;
	    netdev_info(dev, "switched to %s transceiver\n",
			(info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
	    mdio_write(mii_addr, info->phy_id, 0,
		       (info->phy_id == info->eth_phy) ? 0x1000 : 0);
	    info->link_status = 0;
	    info->mii_reset = jiffies;
	}
    }

reschedule:
    info->watchdog.expires = jiffies + HZ;
    add_timer(&info->watchdog);
}

/*====================================================================*/

/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG) for
 * DL10019/DL10022 cards; other cards get -EINVAL, unknown commands
 * get -EOPNOTSUPP.
 */
static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    pcnet_dev_t *info = PRIV(dev);
    struct mii_ioctl_data *data = if_mii(rq);
    unsigned int mii_addr = dev->base_addr + DLINK_GPIO;

    if (!(info->flags & (IS_DL10019|IS_DL10022)))
	return -EINVAL;

    switch (cmd) {
    case SIOCGMIIPHY:
	data->phy_id = info->phy_id;
	/* fall through — SIOCGMIIPHY also returns the register value */
    case SIOCGMIIREG:		/* Read MII PHY register. */
	data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
	return 0;
    case SIOCSMIIREG:		/* Write MII PHY register.
*/
	mdio_write(mii_addr, data->phy_id, data->reg_num & 0x1f, data->val_in);
	return 0;
    }
    return -EOPNOTSUPP;
}

/*====================================================================*/

/* Fetch the 8390 packet header for @ring_page into *hdr using the
 * chip's remote-DMA (programmed I/O) path.  Used when no shared-memory
 * window is configured.
 */
static void dma_get_8390_hdr(struct net_device *dev,
			     struct e8390_pkt_hdr *hdr,
			     int ring_page)
{
    unsigned int nic_base = dev->base_addr;

    /* The remote-DMA engine handles one transfer at a time; bail out
       (leaving *hdr untouched) if one is already in flight. */
    if (ei_status.dmaing) {
	netdev_notice(dev, "DMAing conflict in dma_block_input."
		      "[DMAstat:%1x][irqlock:%1x]\n",
		      ei_status.dmaing, ei_status.irqlock);
	return;
    }

    ei_status.dmaing |= 0x01;
    /* Program byte count and start address, then start a remote read. */
    outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
    outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
    outb_p(0, nic_base + EN0_RCNTHI);
    outb_p(0, nic_base + EN0_RSARLO);		/* On page boundary */
    outb_p(ring_page, nic_base + EN0_RSARHI);
    outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);

    insw(nic_base + PCNET_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
    /* Fix for big endian systems */
    hdr->count = le16_to_cpu(hdr->count);

    outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */

    ei_status.dmaing &= ~0x01;
}

/*====================================================================*/

/* Copy @count bytes of a received packet at ring-buffer offset
 * @ring_offset into skb->data via the remote-DMA programmed I/O path.
 */
static void dma_block_input(struct net_device *dev, int count,
			    struct sk_buff *skb, int ring_offset)
{
    unsigned int nic_base = dev->base_addr;
    int xfer_count = count;		/* bytes actually transferred */
    char *buf = skb->data;

    if ((ei_debug > 4) && (count != 4))
	netdev_dbg(dev, "[bi=%d]\n", count+4);
    if (ei_status.dmaing) {
	netdev_notice(dev, "DMAing conflict in dma_block_input."
"[DMAstat:%1x][irqlock:%1x]\n",
		      ei_status.dmaing, ei_status.irqlock);
	return;
    }
    ei_status.dmaing |= 0x01;
    /* Program byte count and ring address, then start a remote read. */
    outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
    outb_p(count & 0xff, nic_base + EN0_RCNTLO);
    outb_p(count >> 8, nic_base + EN0_RCNTHI);
    outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
    outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
    outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);

    insw(nic_base + PCNET_DATAPORT,buf,count>>1);
    /* An odd trailing byte is fetched separately; xfer_count then
       reflects the true byte count for the debug check below. */
    if (count & 0x01)
	buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++;

    /* This was for the ALPHA version only, but enough people have been
       encountering problems that it is still here. */
#ifdef PCMCIA_DEBUG
    if (ei_debug > 4) {		/* DMA termination address check... */
	int addr, tries = 20;
	do {
	    /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
	       -- it's broken for Rx on some cards! */
	    int high = inb_p(nic_base + EN0_RSARHI);
	    int low = inb_p(nic_base + EN0_RSARLO);
	    addr = (high << 8) + low;
	    if (((ring_offset + xfer_count) & 0xff) == (addr & 0xff))
		break;
	} while (--tries > 0);
	if (tries <= 0)
	    netdev_notice(dev, "RX transfer address mismatch,"
			  "%#4.4x (expected) vs. %#4.4x (actual).\n",
			  ring_offset + xfer_count, addr);
    }
#endif
    outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
    ei_status.dmaing &= ~0x01;
} /* dma_block_input */

/*====================================================================*/

/* Push @count bytes from @buf into the card's transmit buffer at page
 * @start_page via the remote-DMA programmed I/O path, then wait for
 * the remote-DMA-complete flag.
 */
static void dma_block_output(struct net_device *dev, int count,
			     const u_char *buf, const int start_page)
{
    unsigned int nic_base = dev->base_addr;
    pcnet_dev_t *info = PRIV(dev);
#ifdef PCMCIA_DEBUG
    int retries = 0;
#endif
    u_long dma_start;

#ifdef PCMCIA_DEBUG
    if (ei_debug > 4)
	netdev_dbg(dev, "[bo=%d]\n", count);
#endif

    /* Round the count up for word writes.  Do we need to do this?
       What effect will an odd byte count have on the 8390?
       I should check someday. */
    if (count & 0x01)
	count++;
    if (ei_status.dmaing) {
	netdev_notice(dev, "DMAing conflict in dma_block_output."
"[DMAstat:%1x][irqlock:%1x]\n",
		      ei_status.dmaing, ei_status.irqlock);
	return;
    }
    ei_status.dmaing |= 0x01;
    /* We should already be in page 0, but to be safe... */
    outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base+PCNET_CMD);

#ifdef PCMCIA_DEBUG
  retry:
#endif

    outb_p(ENISR_RDC, nic_base + EN0_ISR);

    /* Now the normal output. */
    outb_p(count & 0xff, nic_base + EN0_RCNTLO);
    outb_p(count >> 8, nic_base + EN0_RCNTHI);
    outb_p(0x00, nic_base + EN0_RSARLO);
    outb_p(start_page, nic_base + EN0_RSARHI);

    outb_p(E8390_RWRITE+E8390_START, nic_base + PCNET_CMD);
    outsw(nic_base + PCNET_DATAPORT, buf, count>>1);

    dma_start = jiffies;

#ifdef PCMCIA_DEBUG
    /* This was for the ALPHA version only, but enough people have been
       encountering problems that it is still here. */
    if (ei_debug > 4) {		/* DMA termination address check... */
	int addr, tries = 20;
	do {
	    int high = inb_p(nic_base + EN0_RSARHI);
	    int low = inb_p(nic_base + EN0_RSARLO);
	    addr = (high << 8) + low;
	    if ((start_page << 8) + count == addr)
		break;
	} while (--tries > 0);
	if (tries <= 0) {
	    netdev_notice(dev, "Tx packet transfer address mismatch,"
			  "%#4.4x (expected) vs. %#4.4x (actual).\n",
			  (start_page << 8) + count, addr);
	    /* One retry of the whole output sequence. */
	    if (retries++ == 0)
		goto retry;
	}
    }
#endif

    /* Wait for the remote-DMA-complete flag; hard-reset and re-init
       the 8390 if it never arrives within PCNET_RDC_TIMEOUT. */
    while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
	if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
	    netdev_notice(dev, "timeout waiting for Tx RDC.\n");
	    pcnet_reset_8390(dev);
	    NS8390_init(dev, 1);
	    break;
	}

    outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr.
*/

    /* Optional user-configured output delay (module parameter). */
    if (info->flags & DELAY_OUTPUT)
	udelay((long)delay_time);

    ei_status.dmaing &= ~0x01;
}

/*====================================================================*/

/* Configure the driver for programmed-I/O transfers: set the 8390 ring
 * page layout and plug the dma_* block I/O routines into ei_status.
 * Always returns 0.
 */
static int setup_dma_config(struct pcmcia_device *link, int start_pg,
			    int stop_pg)
{
    struct net_device *dev = link->priv;

    ei_status.tx_start_page = start_pg;
    ei_status.rx_start_page = start_pg + TX_PAGES;
    ei_status.stop_page = stop_pg;

    /* set up block i/o functions */
    ei_status.get_8390_hdr = dma_get_8390_hdr;
    ei_status.block_input = dma_block_input;
    ei_status.block_output = dma_block_output;

    return 0;
}

/*====================================================================*/

/* Copy @c bytes from the card's mapped shared memory @src to host
 * memory @dest using 16-bit reads; @c may be odd.
 */
static void copyin(void *dest, void __iomem *src, int c)
{
    u_short *d = dest;
    u_short __iomem *s = src;
    int odd;

    if (c <= 0)
	return;
    odd = (c & 1);
    c >>= 1;	/* word count */

    if (c) {
	do { *d++ = __raw_readw(s++); } while (--c);
    }
    /* get last byte by fetching a word and masking */
    if (odd)
	*((u_char *)d) = readw(s) & 0xff;
}

/* Copy @c bytes from host memory @src into the card's mapped shared
 * memory @dest using 16-bit writes; @c may be odd.
 */
static void copyout(void __iomem *dest, const void *src, int c)
{
    u_short __iomem *d = dest;
    const u_short *s = src;
    int odd;

    if (c <= 0)
	return;
    odd = (c & 1);
    c >>= 1;	/* word count */

    if (c) {
	do { __raw_writew(*s++, d++); } while (--c);
    }
    /* copy last byte doing a read-modify-write */
    if (odd)
	writew((readw(d) & 0xff00) | *(u_char *)s, d);
}

/*====================================================================*/

/* Shared-memory variant of dma_get_8390_hdr(): read the packet header
 * for @ring_page straight out of the mapped memory window.
 */
static void shmem_get_8390_hdr(struct net_device *dev,
			       struct e8390_pkt_hdr *hdr,
			       int ring_page)
{
    void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8)
				+ (ring_page << 8)
				- (ei_status.rx_start_page << 8);

    copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr));
    /* Fix for big endian systems */
    hdr->count = le16_to_cpu(hdr->count);
}

/*====================================================================*/

/* Shared-memory variant of dma_block_input(): copy a received packet
 * out of the mapped window, handling receive-ring wraparound.
 */
static void shmem_block_input(struct net_device *dev, int count,
			      struct sk_buff *skb, int ring_offset)
{
    void __iomem *base = ei_status.mem;
    unsigned long offset = (TX_PAGES<<8) + ring_offset
			 - (ei_status.rx_start_page << 8);
    char
*buf = skb->data;

    /* ei_status.priv holds the window size in bytes (set by
       setup_shmem_window()); wrap the copy at the window end. */
    if (offset + count > ei_status.priv) {
	/* We must wrap the input move. */
	int semi_count = ei_status.priv - offset;
	copyin(buf, base + offset, semi_count);
	buf += semi_count;
	offset = TX_PAGES<<8;
	count -= semi_count;
    }
    copyin(buf, base + offset, count);
}

/*====================================================================*/

/* Shared-memory variant of dma_block_output(): copy a packet into the
 * transmit area of the mapped window.
 */
static void shmem_block_output(struct net_device *dev, int count,
			       const u_char *buf, const int start_page)
{
    void __iomem *shmem = ei_status.mem + (start_page << 8);

    shmem -= ei_status.tx_start_page << 8;
    copyout(shmem, buf, count);
}

/*====================================================================*/

/* Map the card's packet buffer as a PCMCIA common-memory window,
 * verify it with a write/read-back test, and hook up the shmem_*
 * block I/O routines.  Returns 0 on success, 1 on failure (the caller
 * then falls back to programmed I/O via setup_dma_config()).
 */
static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
			      int stop_pg, int cm_offset)
{
    struct net_device *dev = link->priv;
    pcnet_dev_t *info = PRIV(dev);
    int i, window_size, offset, ret;

    window_size = (stop_pg - start_pg) << 8;
    if (window_size > 32 * 1024)
	window_size = 32 * 1024;

    /* Make sure it's a power of two.  */
    window_size = roundup_pow_of_two(window_size);

    /* Allocate a memory window */
    link->resource[3]->flags |= WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
    link->resource[3]->flags |= WIN_USE_WAIT;
    link->resource[3]->start = 0;
    link->resource[3]->end = window_size;
    ret = pcmcia_request_window(link, link->resource[3], mem_speed);
    if (ret)
	goto failed;

    /* Align the card-side offset down to a window-size boundary. */
    offset = (start_pg << 8) + cm_offset;
    offset -= offset % window_size;
    ret = pcmcia_map_mem_page(link, link->resource[3], offset);
    if (ret)
	goto failed;

    /* Try scribbling on the buffer */
    /* NOTE(review): the ioremap() result is used without a NULL
       check — a failed mapping would fault below.  Worth fixing. */
    info->base = ioremap(link->resource[3]->start,
			 resource_size(link->resource[3]));
    for (i = 0; i < (TX_PAGES<<8); i += 2)
	__raw_writew((i>>1), info->base+offset+i);
    udelay(100);
    for (i = 0; i < (TX_PAGES<<8); i += 2)
	if (__raw_readw(info->base+offset+i) != (i>>1))
	    break;
    pcnet_reset_8390(dev);
    if (i != (TX_PAGES<<8)) {
	/* Read-back mismatch: the window doesn't work; tear it down. */
	iounmap(info->base);
	pcmcia_release_window(link, link->resource[3]);
	info->base = NULL;
	goto failed;
    }

    ei_status.mem = info->base + offset;
ei_status.priv = resource_size(link->resource[3]);	/* window size, used by shmem_block_input() */
    dev->mem_start = (u_long)ei_status.mem;
    dev->mem_end = dev->mem_start + resource_size(link->resource[3]);

    ei_status.tx_start_page = start_pg;
    ei_status.rx_start_page = start_pg + TX_PAGES;
    ei_status.stop_page = start_pg + (
	    (resource_size(link->resource[3]) - offset) >> 8);

    /* set up block i/o functions */
    ei_status.get_8390_hdr = shmem_get_8390_hdr;
    ei_status.block_input = shmem_block_input;
    ei_status.block_output = shmem_block_output;

    info->flags |= USE_SHMEM;
    return 0;

failed:
    return 1;
}

/*====================================================================*/

/* PCMCIA device ID table: the cards this driver binds to, matched by
 * manufacturer/card ID or by hashed product ID strings.  PFC/MFC
 * entries are multifunction cards (ethernet + modem combos). */
static const struct pcmcia_device_id pcnet_ids[] = {
	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0057, 0x0021),
	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0104, 0x000a),
	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0xea15),
	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0143, 0x3341),
	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0143, 0xc0ab),
	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101),
	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
	PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away 28.8 PC Card ", 0xb569a6e5, 0x5bd4ff2c),
	PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away Credit Card Adapter", 0xb569a6e5, 0x4bdf15c3),
PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "w95 Home and Away Credit Card ", 0xb569a6e5, 0xae911c15), PCMCIA_MFC_DEVICE_PROD_ID123(0, "APEX DATA", "MULTICARD", "ETHERNET-MODEM", 0x11c2da09, 0x7289dc5d, 0xaad95e1f), PCMCIA_MFC_DEVICE_PROD_ID2(0, "FAX/Modem/Ethernet Combo Card ", 0x1ed59302), PCMCIA_DEVICE_MANF_CARD(0x0057, 0x1004), PCMCIA_DEVICE_MANF_CARD(0x0104, 0x000d), PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0075), PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0145), PCMCIA_DEVICE_MANF_CARD(0x0149, 0x0230), PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530), PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab), PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110), PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041), PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452), PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300), PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0307), PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103), PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121), PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941), PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e), PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b), PCMCIA_DEVICE_PROD_ID123("CNet ", "CN30BC", "ETHERNET", 0x9fe55d3d, 0x85601198, 0x3ff7175b), PCMCIA_DEVICE_PROD_ID123("Digital", "Ethernet", "Adapter", 0x9999ab35, 0x00b2e941, 0x4b0d829e), PCMCIA_DEVICE_PROD_ID123("Edimax Technology Inc.", "PCMCIA", "Ethernet Card", 0x738a0019, 0x281f1c5d, 0x5e9d92c0), PCMCIA_DEVICE_PROD_ID123("EFA ", "EFA207", "ETHERNET", 0x3d294be4, 0xeb9aab6c, 0x3ff7175b), PCMCIA_DEVICE_PROD_ID123("I-O DATA", "PCLA", "ETHERNET", 0x1d55d7ec, 0xe4c64d34, 0x3ff7175b), PCMCIA_DEVICE_PROD_ID123("IO DATA", "PCLATE", "ETHERNET", 0x547e66dc, 0x6b260753, 0x3ff7175b), PCMCIA_DEVICE_PROD_ID123("KingMax Technology Inc.", "EN10-T2", "PCMCIA Ethernet Card", 0x932b7189, 0x699e4436, 0x6f6652e0), PCMCIA_DEVICE_PROD_ID123("PCMCIA", "PCMCIA-ETHERNET-CARD", "UE2216", 0x281f1c5d, 
0xd4cd2f20, 0xb87add82), PCMCIA_DEVICE_PROD_ID123("PCMCIA", "PCMCIA-ETHERNET-CARD", "UE2620", 0x281f1c5d, 0xd4cd2f20, 0x7d3d83a8), PCMCIA_DEVICE_PROD_ID1("2412LAN", 0x67f236ab), PCMCIA_DEVICE_PROD_ID12("ACCTON", "EN2212", 0xdfc6b5b2, 0xcb112a11), PCMCIA_DEVICE_PROD_ID12("ACCTON", "EN2216-PCMCIA-ETHERNET", 0xdfc6b5b2, 0x5542bfff), PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA100-PCM-T V2 100/10M LAN PC Card", 0xbb7fbdd7, 0xcd91cc68), PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA100-PCM V2", 0x36634a66, 0xc6d05997), PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA-PCM_V2", 0xbb7fBdd7, 0x28e299f8), PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA-PCM V3", 0x36634a66, 0x62241d96), PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8010", 0x5070a7f9, 0x82f96e96), PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8610", 0x5070a7f9, 0x86741224), PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8002", 0x93b15570, 0x75ec3efb), PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8002T", 0x93b15570, 0x461c5247), PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8010", 0x93b15570, 0x82f96e96), PCMCIA_DEVICE_PROD_ID12("AnyCom", "ECO Ethernet", 0x578ba6e7, 0x0a9888c1), PCMCIA_DEVICE_PROD_ID12("AnyCom", "ECO Ethernet 10/100", 0x578ba6e7, 0x939fedbd), PCMCIA_DEVICE_PROD_ID12("AROWANA", "PCMCIA Ethernet LAN Card", 0x313adbc8, 0x08d9f190), PCMCIA_DEVICE_PROD_ID12("ASANTE", "FriendlyNet PC Card", 0x3a7ade0f, 0x41c64504), PCMCIA_DEVICE_PROD_ID12("Billionton", "LNT-10TB", 0x552ab682, 0xeeb1ba6a), PCMCIA_DEVICE_PROD_ID12("CF", "10Base-Ethernet", 0x44ebf863, 0x93ae4d79), PCMCIA_DEVICE_PROD_ID12("CNet", "CN40BC Ethernet", 0xbc477dde, 0xfba775a7), PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "BASEline PCMCIA 10 MBit Ethernetadapter", 0xfa2e424d, 0xe9190d8a), PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9), PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 
0xa51564a2), PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-T", 0x5261440f, 0x6705fcaa), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-TD", 0x5261440f, 0x47d5ca83), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "(CG-LAPCCTXD)", 0x5261440f, 0x73ec0d88), PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04), PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d), PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814), PCMCIA_DEVICE_PROD_ID12("DataTrek.", "NetCard ", 0x5cd66d9d, 0x84697ce0), PCMCIA_DEVICE_PROD_ID12("Dayna Communications, Inc.", "CommuniCard E", 0x0c629325, 0xb4e7dbaf), PCMCIA_DEVICE_PROD_ID12("Digicom", "Palladio LAN 10/100", 0x697403d8, 0xe160b995), PCMCIA_DEVICE_PROD_ID12("Digicom", "Palladio LAN 10/100 Dongless", 0x697403d8, 0xa6d3b233), PCMCIA_DEVICE_PROD_ID12("DIGITAL", "DEPCM-XX", 0x69616cb3, 0xe600e76e), PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-650", 0x1a424a1c, 0xf28c8398), PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-660", 0x1a424a1c, 0xd9a1d05b), PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-660+", 0x1a424a1c, 0x50dcd0ec), PCMCIA_DEVICE_PROD_ID12("D-Link", "DFE-650", 0x1a424a1c, 0x0f0073f9), PCMCIA_DEVICE_PROD_ID12("Dual Speed", "10/100 PC Card", 0x725b842d, 0xf1efee84), 
PCMCIA_DEVICE_PROD_ID12("Dual Speed", "10/100 Port Attached PC Card", 0x725b842d, 0x2db1f8e9), PCMCIA_DEVICE_PROD_ID12("Dynalink", "L10BC", 0x55632fd5, 0xdc65f2b1), PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L10BC", 0x6a26d1cf, 0xdc65f2b1), PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L10C", 0x6a26d1cf, 0xc4f84efb), PCMCIA_DEVICE_PROD_ID12("E-CARD", "E-CARD", 0x6701da11, 0x6701da11), PCMCIA_DEVICE_PROD_ID12("EIGER Labs Inc.", "Ethernet 10BaseT card", 0x53c864c6, 0xedd059f6), PCMCIA_DEVICE_PROD_ID12("EIGER Labs Inc.", "Ethernet Combo card", 0x53c864c6, 0x929c486c), PCMCIA_DEVICE_PROD_ID12("Ethernet", "Adapter", 0x00b2e941, 0x4b0d829e), PCMCIA_DEVICE_PROD_ID12("Ethernet Adapter", "E2000 PCMCIA Ethernet", 0x96767301, 0x71fbbc61), PCMCIA_DEVICE_PROD_ID12("Ethernet PCMCIA adapter", "EP-210", 0x8dd86181, 0xf2b52517), PCMCIA_DEVICE_PROD_ID12("Fast Ethernet", "Adapter", 0xb4be14e3, 0x4b0d829e), PCMCIA_DEVICE_PROD_ID12("Grey Cell", "GCS2000", 0x2a151fac, 0xf00555cb), PCMCIA_DEVICE_PROD_ID12("Grey Cell", "GCS2220", 0x2a151fac, 0xc1b7e327), PCMCIA_DEVICE_PROD_ID12("GVC", "NIC-2000p", 0x76e171bd, 0x6eb1c947), PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "Ethernet", 0xe3736c88, 0x00b2e941), PCMCIA_DEVICE_PROD_ID12("IC-CARD", "IC-CARD", 0x60cb09a6, 0x60cb09a6), PCMCIA_DEVICE_PROD_ID12("IC-CARD+", "IC-CARD+", 0x93693494, 0x93693494), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b), PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0), PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956), PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616), PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64), PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5), PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3), PCMCIA_DEVICE_PROD_ID12("Kingston Technology Corp.", "EtheRx PC Card Ethernet Adapter", 0x313c7be3, 0x0afb54a2), 
PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-10/100CD", 0x1b7827b2, 0xcda71d1c), PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDF", 0x1b7827b2, 0xfec71e40), PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDL/T", 0x1b7827b2, 0x79fba4f7), PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDS", 0x1b7827b2, 0x931afaab), PCMCIA_DEVICE_PROD_ID12("LEMEL", "LM-N89TX PRO", 0xbbefb52f, 0xd2897a97), PCMCIA_DEVICE_PROD_ID12("Linksys", "Combo PCMCIA EthernetCard (EC2T)", 0x0733cc81, 0x32ee8c78), PCMCIA_DEVICE_PROD_ID12("LINKSYS", "E-CARD", 0xf7cb0b07, 0x6701da11), PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 Integrated PC Card (PCM100)", 0x0733cc81, 0x453c3f9d), PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100)", 0x0733cc81, 0x66c5a389), PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V2)", 0x0733cc81, 0x3a3b28e9), PCMCIA_DEVICE_PROD_ID12("Linksys", "HomeLink Phoneline + 10/100 Network PC Card (PCM100H1)", 0x733cc81, 0x7a3e5c3a), PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN10TE", 0x88fcdeda, 0xc1e2521c), PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307), PCMCIA_DEVICE_PROD_ID12("Matsushita Electric Industrial Co.,LTD.", "CF-VEL211", 0x44445376, 0x8ded41d4), PCMCIA_DEVICE_PROD_ID12("MAXTECH", "PCN2000", 0x78d64bc0, 0xca0ca4b8), PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC2-T", 0x481e0094, 0xa2eb0cf3), PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC2-TX", 0x481e0094, 0x41a6916c), PCMCIA_DEVICE_PROD_ID12("Microcom C.E.", "Travel Card LAN 10/100", 0x4b91cec7, 0xe70220d6), PCMCIA_DEVICE_PROD_ID12("Microdyne", "NE4200", 0x2e6da59b, 0x0478e472), PCMCIA_DEVICE_PROD_ID12("MIDORI ELEC.", 
"LT-PCMT", 0x648d55c1, 0xbde526c7), PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover 4100", 0x36e1191f, 0x60c229b9), PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8), PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76), PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e), PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f), PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet", 0x281f1c5d, 0x00b2e941), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "ETHERNET", 0x281f1c5d, 0x3ff7175b), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet 10BaseT Card", 0x281f1c5d, 0x4de2f6c8), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet Card", 0x281f1c5d, 0x5e9d92c0), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet Combo card", 0x281f1c5d, 0x929c486c), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "ETHERNET V1.0", 0x281f1c5d, 0x4d8817c8), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEthernet", 0x281f1c5d, 0xfe871eeb), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Fast-Ethernet", 0x281f1c5d, 0x45f1f3b4), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FAST ETHERNET CARD", 0x281f1c5d, 0xec5dbca7), PCMCIA_DEVICE_PROD_ID12("PCMCIA LAN", "Ethernet", 0x7500e246, 0x00b2e941), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "LNT-10TN", 0x281f1c5d, 0xe707f641), PCMCIA_DEVICE_PROD_ID12("PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "UE2212", 0x281f1c5d, 0xbf17199b), PCMCIA_DEVICE_PROD_ID12("PCMCIA", " Ethernet NE2000 Compatible", 0x281f1c5d, 0x42d5d7e1), PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10baseT 3.3V", 0xebf91155, 0x30074c80), PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10BaseT 3.3V", 0xebf91155, 0x7f5a4f50), PCMCIA_DEVICE_PROD_ID12("Psion Dacom", "Gold Card Ethernet", 0xf5f025c2, 0x3a30e110), 
PCMCIA_DEVICE_PROD_ID12("=RELIA==", "Ethernet", 0xcdd0644a, 0x00b2e941), PCMCIA_DEVICE_PROD_ID12("RIOS Systems Co.", "PC CARD3 ETHERNET", 0x7dd33481, 0x10b41826), PCMCIA_DEVICE_PROD_ID12("RP", "1625B Ethernet NE2000 Compatible", 0xe3e66e22, 0xb96150df), PCMCIA_DEVICE_PROD_ID12("RPTI", "EP400 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4a7e2ae0), PCMCIA_DEVICE_PROD_ID12("RPTI", "EP401 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4bcbd7fd), PCMCIA_DEVICE_PROD_ID12("RPTI LTD.", "EP400", 0xc53ac515, 0x81e39388), PCMCIA_DEVICE_PROD_ID12("SCM", "Ethernet Combo card", 0xbdc3b102, 0x929c486c), PCMCIA_DEVICE_PROD_ID12("Seiko Epson Corp.", "Ethernet", 0x09928730, 0x00b2e941), PCMCIA_DEVICE_PROD_ID12("SMC", "EZCard-10-PCMCIA", 0xc4f8b18b, 0xfb21d265), PCMCIA_DEVICE_PROD_ID12("Socket Communications Inc", "Socket EA PCMCIA LAN Adapter Revision D", 0xc70a4760, 0x2ade483e), PCMCIA_DEVICE_PROD_ID12("Socket Communications Inc", "Socket EA PCMCIA LAN Adapter Revision E", 0xc70a4760, 0x5dd978a8), PCMCIA_DEVICE_PROD_ID12("TDK", "LAK-CD031 for PCMCIA", 0x1eae9475, 0x0ed386fa), PCMCIA_DEVICE_PROD_ID12("Telecom Device K.K.", "SuperSocket RE450T", 0x466b05f0, 0x8b74bc4f), PCMCIA_DEVICE_PROD_ID12("Telecom Device K.K.", "SuperSocket RE550T", 0x466b05f0, 0x33c8db2a), PCMCIA_DEVICE_PROD_ID13("Hypertec", "EP401", 0x8787bec7, 0xf6e4a31e), PCMCIA_DEVICE_PROD_ID13("KingMax Technology Inc.", "Ethernet Card", 0x932b7189, 0x5e9d92c0), PCMCIA_DEVICE_PROD_ID13("LONGSHINE", "EP401", 0xf866b0b0, 0xf6e4a31e), PCMCIA_DEVICE_PROD_ID13("Xircom", "CFE-10", 0x2e3ee845, 0x22a49f89), PCMCIA_DEVICE_PROD_ID1("CyQ've 10 Base-T LAN CARD", 0x94faf360), PCMCIA_DEVICE_PROD_ID1("EP-210 PCMCIA LAN CARD.", 0x8850b4de), PCMCIA_DEVICE_PROD_ID1("ETHER-C16", 0x06a8514f), PCMCIA_DEVICE_PROD_ID1("NE2000 Compatible", 0x75b8ad5a), PCMCIA_DEVICE_PROD_ID2("EN-6200P2", 0xa996d078), /* too generic! 
*/ /* PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100 Ethernet Card", 0x281f1c5d, 0x11b0ffc0), */ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "TOSHIBA", "Modem/LAN Card", 0xb4585a1a, 0x53f922f8, "cis/PCMLM28.cis"), PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", 0xb4be14e3, 0x43ac239b, 0x0877b627), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, pcnet_ids); MODULE_FIRMWARE("cis/PCMLM28.cis"); MODULE_FIRMWARE("cis/DP83903.cis"); MODULE_FIRMWARE("cis/LA-PCM.cis"); MODULE_FIRMWARE("cis/PE520.cis"); MODULE_FIRMWARE("cis/NE2K.cis"); MODULE_FIRMWARE("cis/PE-200.cis"); 
MODULE_FIRMWARE("cis/tamarack.cis"); static struct pcmcia_driver pcnet_driver = { .name = "pcnet_cs", .probe = pcnet_probe, .remove = pcnet_detach, .owner = THIS_MODULE, .id_table = pcnet_ids, .suspend = pcnet_suspend, .resume = pcnet_resume, }; static int __init init_pcnet_cs(void) { return pcmcia_register_driver(&pcnet_driver); } static void __exit exit_pcnet_cs(void) { pcmcia_unregister_driver(&pcnet_driver); } module_init(init_pcnet_cs); module_exit(exit_pcnet_cs);
gpl-2.0
SlimRoms/kernel_oneplus_msm8974
drivers/mtd/maps/solutionengine.c
5138
3079
/* * Flash and EPROM on Hitachi Solution Engine and similar boards. * * (C) 2001 Red Hat, Inc. * * GPL'd */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/errno.h> static struct mtd_info *flash_mtd; static struct mtd_info *eprom_mtd; struct map_info soleng_eprom_map = { .name = "Solution Engine EPROM", .size = 0x400000, .bankwidth = 4, }; struct map_info soleng_flash_map = { .name = "Solution Engine FLASH", .size = 0x400000, .bankwidth = 4, }; static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; #ifdef CONFIG_MTD_SUPERH_RESERVE static struct mtd_partition superh_se_partitions[] = { /* Reserved for boot code, read-only */ { .name = "flash_boot", .offset = 0x00000000, .size = CONFIG_MTD_SUPERH_RESERVE, .mask_flags = MTD_WRITEABLE, }, /* All else is writable (e.g. JFFS) */ { .name = "Flash FS", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, } }; #define NUM_PARTITIONS ARRAY_SIZE(superh_se_partitions) #else #define superh_se_partitions NULL #define NUM_PARTITIONS 0 #endif /* CONFIG_MTD_SUPERH_RESERVE */ static int __init init_soleng_maps(void) { /* First probe at offset 0 */ soleng_flash_map.phys = 0; soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0); soleng_eprom_map.phys = 0x01000000; soleng_eprom_map.virt = (void __iomem *)P1SEGADDR(0x01000000); simple_map_init(&soleng_eprom_map); simple_map_init(&soleng_flash_map); printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n"); flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map); if (!flash_mtd) { /* Not there. 
Try swapping */ printk(KERN_NOTICE "Probing for flash chips at 0x01000000:\n"); soleng_flash_map.phys = 0x01000000; soleng_flash_map.virt = P2SEGADDR(0x01000000); soleng_eprom_map.phys = 0; soleng_eprom_map.virt = P1SEGADDR(0); flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map); if (!flash_mtd) { /* Eep. */ printk(KERN_NOTICE "Flash chips not detected at either possible location.\n"); return -ENXIO; } } printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n", soleng_flash_map.phys & 0x1fffffff, soleng_eprom_map.phys & 0x1fffffff); flash_mtd->owner = THIS_MODULE; eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map); if (eprom_mtd) { eprom_mtd->owner = THIS_MODULE; mtd_device_register(eprom_mtd, NULL, 0); } mtd_device_parse_register(flash_mtd, probes, NULL, superh_se_partitions, NUM_PARTITIONS); return 0; } static void __exit cleanup_soleng_maps(void) { if (eprom_mtd) { mtd_device_unregister(eprom_mtd); map_destroy(eprom_mtd); } mtd_device_unregister(flash_mtd); map_destroy(flash_mtd); } module_init(init_soleng_maps); module_exit(cleanup_soleng_maps); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("MTD map driver for Hitachi SolutionEngine (and similar) boards");
gpl-2.0
Adrioid83/jflte_xxx
net/sunrpc/auth.c
5394
16286
/* * linux/net/sunrpc/auth.c * * Generic RPC client authentication API. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/hash.h> #include <linux/sunrpc/clnt.h> #include <linux/spinlock.h> #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH #endif #define RPC_CREDCACHE_DEFAULT_HASHBITS (4) struct rpc_cred_cache { struct hlist_head *hashtable; unsigned int hashbits; spinlock_t lock; }; static unsigned int auth_hashbits = RPC_CREDCACHE_DEFAULT_HASHBITS; static DEFINE_SPINLOCK(rpc_authflavor_lock); static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = { &authnull_ops, /* AUTH_NULL */ &authunix_ops, /* AUTH_UNIX */ NULL, /* others can be loadable modules */ }; static LIST_HEAD(cred_unused); static unsigned long number_cred_unused; #define MAX_HASHTABLE_BITS (14) static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp) { unsigned long num; unsigned int nbits; int ret; if (!val) goto out_inval; ret = strict_strtoul(val, 0, &num); if (ret == -EINVAL) goto out_inval; nbits = fls(num); if (num > (1U << nbits)) nbits++; if (nbits > MAX_HASHTABLE_BITS || nbits < 2) goto out_inval; *(unsigned int *)kp->arg = nbits; return 0; out_inval: return -EINVAL; } static int param_get_hashtbl_sz(char *buffer, const struct kernel_param *kp) { unsigned int nbits; nbits = *(unsigned int *)kp->arg; return sprintf(buffer, "%u", 1U << nbits); } #define param_check_hashtbl_sz(name, p) __param_check(name, p, unsigned int); static struct kernel_param_ops param_ops_hashtbl_sz = { .set = param_set_hashtbl_sz, .get = param_get_hashtbl_sz, }; module_param_named(auth_hashtable_size, auth_hashbits, hashtbl_sz, 0644); MODULE_PARM_DESC(auth_hashtable_size, "RPC credential cache hashtable size"); static u32 pseudoflavor_to_flavor(u32 flavor) { if (flavor >= RPC_AUTH_MAXFLAVOR) return RPC_AUTH_GSS; return flavor; } int 
/*
 * Register an authentication flavor.  Returns -EINVAL for an out-of-range
 * flavor number, -EPERM if the slot is already taken.
 */
rpcauth_register(const struct rpc_authops *ops)
{
	rpc_authflavor_t flavor;
	int ret = -EPERM;

	if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR)
		return -EINVAL;
	spin_lock(&rpc_authflavor_lock);
	if (auth_flavors[flavor] == NULL) {
		auth_flavors[flavor] = ops;
		ret = 0;
	}
	spin_unlock(&rpc_authflavor_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(rpcauth_register);

/* Unregister a flavor; only succeeds if @ops is the registered entry. */
int
rpcauth_unregister(const struct rpc_authops *ops)
{
	rpc_authflavor_t flavor;
	int ret = -EPERM;

	if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR)
		return -EINVAL;
	spin_lock(&rpc_authflavor_lock);
	if (auth_flavors[flavor] == ops) {
		auth_flavors[flavor] = NULL;
		ret = 0;
	}
	spin_unlock(&rpc_authflavor_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(rpcauth_unregister);

/*
 * Create an rpc_auth for @clnt using the given pseudoflavor, loading the
 * flavor module on demand.  Replaces (and releases) any auth already
 * attached to the client.  Returns the new auth or an ERR_PTR.
 */
struct rpc_auth *
rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt)
{
	struct rpc_auth *auth;
	const struct rpc_authops *ops;
	u32 flavor = pseudoflavor_to_flavor(pseudoflavor);

	auth = ERR_PTR(-EINVAL);
	if (flavor >= RPC_AUTH_MAXFLAVOR)
		goto out;

	if ((ops = auth_flavors[flavor]) == NULL)
		request_module("rpc-auth-%u", flavor);
	spin_lock(&rpc_authflavor_lock);
	ops = auth_flavors[flavor];	/* re-read under the lock */
	if (ops == NULL || !try_module_get(ops->owner)) {
		spin_unlock(&rpc_authflavor_lock);
		goto out;
	}
	spin_unlock(&rpc_authflavor_lock);
	auth = ops->create(clnt, pseudoflavor);
	module_put(ops->owner);
	if (IS_ERR(auth))
		return auth;
	if (clnt->cl_auth)
		rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = auth;

out:
	return auth;
}
EXPORT_SYMBOL_GPL(rpcauth_create);

/* Drop a reference to an rpc_auth; the last put destroys it. */
void
rpcauth_release(struct rpc_auth *auth)
{
	if (!atomic_dec_and_test(&auth->au_count))
		return;
	auth->au_ops->destroy(auth);
}

static DEFINE_SPINLOCK(rpc_credcache_lock);

/* Remove @cred from its cache's hash chain; caller holds the cache lock. */
static void
rpcauth_unhash_cred_locked(struct rpc_cred *cred)
{
	hlist_del_rcu(&cred->cr_hash);
	smp_mb__before_clear_bit();
	clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
}

/*
 * Unhash @cred if (and only if) it is unreferenced.  Returns non-zero
 * when the credential was actually unhashed.
 */
static int
rpcauth_unhash_cred(struct rpc_cred *cred)
{
	spinlock_t *cache_lock;
	int ret;

	cache_lock = &cred->cr_auth->au_credcache->lock;
	spin_lock(cache_lock);
	ret = atomic_read(&cred->cr_count) == 0;
	if (ret)
		rpcauth_unhash_cred_locked(cred);
	spin_unlock(cache_lock);
	return ret;
}

/*
 * Initialize RPC credential cache
 */
int
rpcauth_init_credcache(struct rpc_auth *auth)
{
	struct rpc_cred_cache *new;
	unsigned int hashsize;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_nocache;
	new->hashbits = auth_hashbits;
	hashsize = 1U << new->hashbits;
	new->hashtable = kcalloc(hashsize, sizeof(new->hashtable[0]), GFP_KERNEL);
	if (!new->hashtable)
		goto out_nohashtbl;
	spin_lock_init(&new->lock);
	auth->au_credcache = new;
	return 0;
out_nohashtbl:
	kfree(new);
out_nocache:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(rpcauth_init_credcache);

/*
 * Destroy a list of credentials
 */
static inline void rpcauth_destroy_credlist(struct list_head *head)
{
	struct rpc_cred *cred;

	while (!list_empty(head)) {
		cred = list_entry(head->next, struct rpc_cred, cr_lru);
		list_del_init(&cred->cr_lru);
		put_rpccred(cred);
	}
}

/*
 * Clear the RPC credential cache, and delete those credentials
 * that are not referenced.
 */
void
rpcauth_clear_credcache(struct rpc_cred_cache *cache)
{
	LIST_HEAD(free);
	struct hlist_head *head;
	struct rpc_cred *cred;
	unsigned int hashsize = 1U << cache->hashbits;
	int i;

	spin_lock(&rpc_credcache_lock);
	spin_lock(&cache->lock);
	for (i = 0; i < hashsize; i++) {
		head = &cache->hashtable[i];
		while (!hlist_empty(head)) {
			cred = hlist_entry(head->first, struct rpc_cred, cr_hash);
			get_rpccred(cred);
			/* Pull it off the unused LRU if it was parked there. */
			if (!list_empty(&cred->cr_lru)) {
				list_del(&cred->cr_lru);
				number_cred_unused--;
			}
			list_add_tail(&cred->cr_lru, &free);
			rpcauth_unhash_cred_locked(cred);
		}
	}
	spin_unlock(&cache->lock);
	spin_unlock(&rpc_credcache_lock);
	/* Drop the references we took above, outside the locks. */
	rpcauth_destroy_credlist(&free);
}

/*
 * Destroy the RPC credential cache
 */
void
rpcauth_destroy_credcache(struct rpc_auth *auth)
{
	struct rpc_cred_cache *cache = auth->au_credcache;

	if (cache) {
		auth->au_credcache = NULL;
		rpcauth_clear_credcache(cache);
		kfree(cache->hashtable);
		kfree(cache);
	}
}
EXPORT_SYMBOL_GPL(rpcauth_destroy_credcache);

#define RPC_AUTH_EXPIRY_MORATORIUM (60 * HZ)

/*
 * Remove stale credentials. Avoid sleeping inside the loop.
 */
static int
rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
{
	spinlock_t *cache_lock;
	struct rpc_cred *cred, *next;
	unsigned long expired = jiffies - RPC_AUTH_EXPIRY_MORATORIUM;

	list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) {
		if (nr_to_scan-- == 0)
			break;
		/*
		 * Enforce a 60 second garbage collection moratorium
		 * Note that the cred_unused list must be time-ordered.
		 */
		if (time_in_range(cred->cr_expire, expired, jiffies) &&
		    test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
			return 0;

		list_del_init(&cred->cr_lru);
		number_cred_unused--;
		if (atomic_read(&cred->cr_count) != 0)
			continue;

		cache_lock = &cred->cr_auth->au_credcache->lock;
		spin_lock(cache_lock);
		/* Re-check the refcount under the cache lock before freeing. */
		if (atomic_read(&cred->cr_count) == 0) {
			get_rpccred(cred);
			list_add_tail(&cred->cr_lru, free);
			rpcauth_unhash_cred_locked(cred);
		}
		spin_unlock(cache_lock);
	}
	return (number_cred_unused / 100) * sysctl_vfs_cache_pressure;
}

/*
 * Run memory cache shrinker.
 */
static int
rpcauth_cache_shrinker(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(free);
	int res;
	int nr_to_scan = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;

	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
		return (nr_to_scan == 0) ? 0 : -1;
	if (list_empty(&cred_unused))
		return 0;
	spin_lock(&rpc_credcache_lock);
	res = rpcauth_prune_expired(&free, nr_to_scan);
	spin_unlock(&rpc_credcache_lock);
	rpcauth_destroy_credlist(&free);
	return res;
}

/*
 * Look up a process' credentials in the authentication cache
 */
struct rpc_cred *
rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
		int flags)
{
	LIST_HEAD(free);
	struct rpc_cred_cache *cache = auth->au_credcache;
	struct hlist_node *pos;
	struct rpc_cred *cred = NULL, *entry, *new;
	unsigned int nr;

	nr = hash_long(acred->uid, cache->hashbits);

	/* Fast path: RCU walk of the hash chain, no cache lock taken. */
	rcu_read_lock();
	hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) {
		if (!entry->cr_ops->crmatch(acred, entry, flags))
			continue;
		spin_lock(&cache->lock);
		if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) == 0) {
			spin_unlock(&cache->lock);
			continue;
		}
		cred = get_rpccred(entry);
		spin_unlock(&cache->lock);
		break;
	}
	rcu_read_unlock();

	if (cred != NULL)
		goto found;

	/* Miss: create a candidate, then re-check under the cache lock. */
	new = auth->au_ops->crcreate(auth, acred, flags);
	if (IS_ERR(new)) {
		cred = new;
		goto out;
	}

	spin_lock(&cache->lock);
	hlist_for_each_entry(entry, pos, &cache->hashtable[nr], cr_hash) {
		if (!entry->cr_ops->crmatch(acred, entry, flags))
continue;
		cred = get_rpccred(entry);
		break;
	}
	/* Lost the race?  Use the winner's cred and free ours via the list. */
	if (cred == NULL) {
		cred = new;
		set_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
		hlist_add_head_rcu(&cred->cr_hash, &cache->hashtable[nr]);
	} else
		list_add_tail(&new->cr_lru, &free);
	spin_unlock(&cache->lock);
found:
	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
	    cred->cr_ops->cr_init != NULL &&
	    !(flags & RPCAUTH_LOOKUP_NEW)) {
		int res = cred->cr_ops->cr_init(auth, cred);
		if (res < 0) {
			put_rpccred(cred);
			cred = ERR_PTR(res);
		}
	}
	rpcauth_destroy_credlist(&free);
out:
	return cred;
}
EXPORT_SYMBOL_GPL(rpcauth_lookup_credcache);

/* Look up (or create) a credential for the current process's fsuid/fsgid. */
struct rpc_cred *
rpcauth_lookupcred(struct rpc_auth *auth, int flags)
{
	struct auth_cred acred;
	struct rpc_cred *ret;
	const struct cred *cred = current_cred();

	dprintk("RPC: looking up %s cred\n",
		auth->au_ops->au_name);

	memset(&acred, 0, sizeof(acred));
	acred.uid = cred->fsuid;
	acred.gid = cred->fsgid;
	acred.group_info = get_group_info(((struct cred *)cred)->group_info);

	ret = auth->au_ops->lookup_cred(auth, &acred, flags);
	put_group_info(acred.group_info);
	return ret;
}

/* Initialize a freshly allocated credential from @acred. */
void
rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
		  struct rpc_auth *auth, const struct rpc_credops *ops)
{
	INIT_HLIST_NODE(&cred->cr_hash);
	INIT_LIST_HEAD(&cred->cr_lru);
	atomic_set(&cred->cr_count, 1);
	cred->cr_auth = auth;
	cred->cr_ops = ops;
	cred->cr_expire = jiffies;
#ifdef RPC_DEBUG
	cred->cr_magic = RPCAUTH_CRED_MAGIC;
#endif
	cred->cr_uid = acred->uid;
}
EXPORT_SYMBOL_GPL(rpcauth_init_cred);

/* Default crbind implementation: just take another reference on @cred. */
struct rpc_cred *
rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred,
			  int lookupflags)
{
	dprintk("RPC: %5u holding %s cred %p\n",
		task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
	return get_rpccred(cred);
}
EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred);

/* Bind a root (uid 0 / gid 0) credential to the task. */
static struct rpc_cred *
rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
{
	struct rpc_auth *auth = task->tk_client->cl_auth;
	struct auth_cred acred = {
		.uid = 0,
		.gid = 0,
	};

	dprintk("RPC: %5u looking up %s cred\n",
		task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
	return auth->au_ops->lookup_cred(auth, &acred, lookupflags);
}

/* Bind a credential for the calling user's identity to the task. */
static struct rpc_cred *
rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags)
{
	struct rpc_auth *auth = task->tk_client->cl_auth;

	dprintk("RPC: %5u looking up %s cred\n",
		task->tk_pid, auth->au_ops->au_name);
	return rpcauth_lookupcred(auth, lookupflags);
}

/*
 * Attach a credential to the task's request: @cred if given, a root
 * credential for RPC_TASK_ROOTCREDS, the current user's otherwise.
 */
static int
rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_cred *new;
	int lookupflags = 0;

	if (flags & RPC_TASK_ASYNC)
		lookupflags |= RPCAUTH_LOOKUP_NEW;
	if (cred != NULL)
		new = cred->cr_ops->crbind(task, cred, lookupflags);
	else if (flags & RPC_TASK_ROOTCREDS)
		new = rpcauth_bind_root_cred(task, lookupflags);
	else
		new = rpcauth_bind_new_cred(task, lookupflags);
	if (IS_ERR(new))
		return PTR_ERR(new);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	req->rq_cred = new;
	return 0;
}

/*
 * Release a reference to @cred.  Hashed, up-to-date credentials are
 * parked on the cred_unused LRU instead of being destroyed immediately.
 */
void
put_rpccred(struct rpc_cred *cred)
{
	/* Fast path for unhashed credentials */
	if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) {
		if (atomic_dec_and_test(&cred->cr_count))
			cred->cr_ops->crdestroy(cred);
		return;
	}

	if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock))
		return;
	if (!list_empty(&cred->cr_lru)) {
		number_cred_unused--;
		list_del_init(&cred->cr_lru);
	}
	if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) {
		if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) {
			cred->cr_expire = jiffies;
			list_add_tail(&cred->cr_lru, &cred_unused);
			number_cred_unused++;
			goto out_nodestroy;
		}
		if (!rpcauth_unhash_cred(cred)) {
			/* We were hashed and someone looked us up...
*/ goto out_nodestroy; } } spin_unlock(&rpc_credcache_lock); cred->cr_ops->crdestroy(cred); return; out_nodestroy: spin_unlock(&rpc_credcache_lock); } EXPORT_SYMBOL_GPL(put_rpccred); __be32 * rpcauth_marshcred(struct rpc_task *task, __be32 *p) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; dprintk("RPC: %5u marshaling %s cred %p\n", task->tk_pid, cred->cr_auth->au_ops->au_name, cred); return cred->cr_ops->crmarshal(task, p); } __be32 * rpcauth_checkverf(struct rpc_task *task, __be32 *p) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; dprintk("RPC: %5u validating %s cred %p\n", task->tk_pid, cred->cr_auth->au_ops->au_name, cred); return cred->cr_ops->crvalidate(task, p); } static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp, __be32 *data, void *obj) { struct xdr_stream xdr; xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data); encode(rqstp, &xdr, obj); } int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", task->tk_pid, cred->cr_ops->cr_name, cred); if (cred->cr_ops->crwrap_req) return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); /* By default, we encode the arguments normally. */ rpcauth_wrap_req_encode(encode, rqstp, data, obj); return 0; } static int rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp, __be32 *data, void *obj) { struct xdr_stream xdr; xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data); return decode(rqstp, &xdr, obj); } int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", task->tk_pid, cred->cr_ops->cr_name, cred); if (cred->cr_ops->crunwrap_resp) return cred->cr_ops->crunwrap_resp(task, decode, rqstp, data, obj); /* By default, we decode the arguments normally. 
*/ return rpcauth_unwrap_req_decode(decode, rqstp, data, obj); } int rpcauth_refreshcred(struct rpc_task *task) { struct rpc_cred *cred; int err; cred = task->tk_rqstp->rq_cred; if (cred == NULL) { err = rpcauth_bindcred(task, task->tk_msg.rpc_cred, task->tk_flags); if (err < 0) goto out; cred = task->tk_rqstp->rq_cred; } dprintk("RPC: %5u refreshing %s cred %p\n", task->tk_pid, cred->cr_auth->au_ops->au_name, cred); err = cred->cr_ops->crrefresh(task); out: if (err < 0) task->tk_status = err; return err; } void rpcauth_invalcred(struct rpc_task *task) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; dprintk("RPC: %5u invalidating %s cred %p\n", task->tk_pid, cred->cr_auth->au_ops->au_name, cred); if (cred) clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); } int rpcauth_uptodatecred(struct rpc_task *task) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; return cred == NULL || test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0; } static struct shrinker rpc_cred_shrinker = { .shrink = rpcauth_cache_shrinker, .seeks = DEFAULT_SEEKS, }; int __init rpcauth_init_module(void) { int err; err = rpc_init_authunix(); if (err < 0) goto out1; err = rpc_init_generic_auth(); if (err < 0) goto out2; register_shrinker(&rpc_cred_shrinker); return 0; out2: rpc_destroy_authunix(); out1: return err; } void rpcauth_remove_module(void) { rpc_destroy_authunix(); rpc_destroy_generic_auth(); unregister_shrinker(&rpc_cred_shrinker); }
gpl-2.0
maqiangddb/Android_kernel
arch/arm/mach-vt8500/timer.c
7954
4609
/*
 * arch/arm/mach-vt8500/timer.c
 *
 * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <asm/mach/time.h>

#include "devices.h"

#define VT8500_TIMER_OFFSET	0x0100

/* Register offsets within the timer block. */
#define TIMER_MATCH_VAL		0x0000
#define TIMER_COUNT_VAL		0x0010
#define TIMER_STATUS_VAL	0x0014
#define TIMER_IER_VAL		0x001c		/* interrupt enable */
#define TIMER_CTRL_VAL		0x0020
#define TIMER_AS_VAL		0x0024		/* access status */
#define TIMER_COUNT_R_ACTIVE	(1 << 5)	/* not ready for read */
#define TIMER_COUNT_W_ACTIVE	(1 << 4)	/* not ready for write */
#define TIMER_MATCH_W_ACTIVE	(1 << 0)	/* not ready for write */

#define VT8500_TIMER_HZ		3000000

#define msecs_to_loops(t)	(loops_per_jiffy / 1000 * HZ * t)

/* Mapped timer registers; set up in vt8500_timer_init(). */
static void __iomem *regbase;

/*
 * Read the free-running counter.  The hardware requires a latch request
 * (control register write) followed by a readiness poll; give up on the
 * poll after roughly 10ms so a wedged timer cannot hang the caller.
 */
static cycle_t vt8500_timer_read(struct clocksource *cs)
{
	int loops = msecs_to_loops(10);

	writel(3, regbase + TIMER_CTRL_VAL);
	while ((readl((regbase + TIMER_AS_VAL)) & TIMER_COUNT_R_ACTIVE)
						&& --loops)
		cpu_relax();
	return readl(regbase + TIMER_COUNT_VAL);
}

struct clocksource clocksource = {
	.name		= "vt8500_timer",
	.rating		= 200,
	.read		= vt8500_timer_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Program the match register for the next event.  Returns -ETIME when the
 * requested expiry is already (nearly) in the past by the time the match
 * register could be written, so the core retries with a larger delta.
 */
static int vt8500_timer_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	int loops = msecs_to_loops(10);
	cycle_t alarm = clocksource.read(&clocksource) + cycles;

	while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
						&& --loops)
		cpu_relax();
	writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
	if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
		return -ETIME;
	writel(1, regbase + TIMER_IER_VAL);
	return 0;
}

static void vt8500_timer_set_mode(enum clock_event_mode mode,
				  struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_RESUME:
	case CLOCK_EVT_MODE_PERIODIC:
		break;
	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* Stop the timer and mask its interrupt. */
		writel(readl(regbase + TIMER_CTRL_VAL) | 1,
		       regbase + TIMER_CTRL_VAL);
		writel(0, regbase + TIMER_IER_VAL);
		break;
	}
}

struct clock_event_device clockevent = {
	.name		= "vt8500_timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	.set_next_event	= vt8500_timer_set_next_event,
	.set_mode	= vt8500_timer_set_mode,
};

/* Match interrupt: acknowledge the status bits, dispatch to the handler. */
static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	writel(0xf, regbase + TIMER_STATUS_VAL);
	evt->event_handler(evt);

	return IRQ_HANDLED;
}

struct irqaction irq = {
	.name    = "vt8500_timer",
	.flags   = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler = vt8500_timer_interrupt,
	.dev_id  = &clockevent,
};

static void __init vt8500_timer_init(void)
{
	regbase = ioremap(wmt_pmc_base + VT8500_TIMER_OFFSET, 0x28);
	if (!regbase) {
		printk(KERN_ERR
		       "vt8500_timer_init: failed to map MMIO registers\n");
		/*
		 * Fix: bail out here.  The old code only printed the error
		 * and then dereferenced the NULL mapping below.
		 */
		return;
	}

	writel(1, regbase + TIMER_CTRL_VAL);
	writel(0xf, regbase + TIMER_STATUS_VAL);
	writel(~0, regbase + TIMER_MATCH_VAL);

	if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
		printk(KERN_ERR "vt8500_timer_init: clocksource_register failed for %s\n",
		       clocksource.name);

	clockevents_calc_mult_shift(&clockevent, VT8500_TIMER_HZ, 4);

	/* copy-pasted from mach-msm; no idea */
	clockevent.max_delta_ns =
		clockevent_delta2ns(0xf0000000, &clockevent);
	clockevent.min_delta_ns = clockevent_delta2ns(4, &clockevent);
	clockevent.cpumask = cpumask_of(0);

	if (setup_irq(wmt_timer_irq, &irq))
		printk(KERN_ERR "vt8500_timer_init: setup_irq failed for %s\n",
		       clockevent.name);
	clockevents_register_device(&clockevent);
}

struct sys_timer vt8500_timer = {
	.init = vt8500_timer_init,
};
gpl-2.0
Snuzzo/B14CKB1RD_kernel_m8
drivers/staging/usbip/userspace/src/usbip.c
7954
4057
/* * command structure borrowed from udev * (git://git.kernel.org/pub/scm/linux/hotplug/udev.git) * * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> * 2005-2007 Takahiro Hirofuchi * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <stdio.h> #include <stdlib.h> #include <getopt.h> #include <syslog.h> #include "usbip_common.h" #include "usbip.h" static int usbip_help(int argc, char *argv[]); static int usbip_version(int argc, char *argv[]); static const char usbip_version_string[] = PACKAGE_STRING; static const char usbip_usage_string[] = "usbip [--debug] [--log] [version]\n" " [help] <command> <args>\n"; static void usbip_usage(void) { printf("usage: %s", usbip_usage_string); } struct command { const char *name; int (*fn)(int argc, char *argv[]); const char *help; void (*usage)(void); }; static const struct command cmds[] = { { .name = "help", .fn = usbip_help, .help = NULL, .usage = NULL }, { .name = "version", .fn = usbip_version, .help = NULL, .usage = NULL }, { .name = "attach", .fn = usbip_attach, .help = "Attach a remote USB device", .usage = usbip_attach_usage }, { .name = "detach", .fn = usbip_detach, .help = "Detach a remote USB device", .usage = usbip_detach_usage }, { .name = "list", .fn = usbip_list, .help = "List exportable or local USB devices", .usage = usbip_list_usage }, { .name = "bind", .fn = usbip_bind, .help = "Bind device to " USBIP_HOST_DRV_NAME ".ko", 
.usage = usbip_bind_usage }, { .name = "unbind", .fn = usbip_unbind, .help = "Unbind device from " USBIP_HOST_DRV_NAME ".ko", .usage = usbip_unbind_usage }, { NULL, NULL, NULL, NULL } }; static int usbip_help(int argc, char *argv[]) { const struct command *cmd; int i; int ret = 0; if (argc > 1 && argv++) { for (i = 0; cmds[i].name != NULL; i++) if (!strcmp(cmds[i].name, argv[0]) && cmds[i].usage) { cmds[i].usage(); goto done; } ret = -1; } usbip_usage(); printf("\n"); for (cmd = cmds; cmd->name != NULL; cmd++) if (cmd->help != NULL) printf(" %-10s %s\n", cmd->name, cmd->help); printf("\n"); done: return ret; } static int usbip_version(int argc, char *argv[]) { (void) argc; (void) argv; printf(PROGNAME " (%s)\n", usbip_version_string); return 0; } static int run_command(const struct command *cmd, int argc, char *argv[]) { dbg("running command: `%s'", cmd->name); return cmd->fn(argc, argv); } int main(int argc, char *argv[]) { static const struct option opts[] = { { "debug", no_argument, NULL, 'd' }, { "log", no_argument, NULL, 'l' }, { NULL, 0, NULL, 0 } }; char *cmd; int opt; int i, rc = -1; usbip_use_stderr = 1; opterr = 0; for (;;) { opt = getopt_long(argc, argv, "+d", opts, NULL); if (opt == -1) break; switch (opt) { case 'd': usbip_use_debug = 1; break; case 'l': usbip_use_syslog = 1; openlog("", LOG_PID, LOG_USER); break; case '?': printf("usbip: invalid option\n"); default: usbip_usage(); goto out; } } cmd = argv[optind]; if (cmd) { for (i = 0; cmds[i].name != NULL; i++) if (!strcmp(cmds[i].name, cmd)) { argc -= optind; argv += optind; optind = 0; rc = run_command(&cmds[i], argc, argv); goto out; } } /* invalid command */ usbip_help(0, NULL); out: return (rc > -1 ? EXIT_SUCCESS : EXIT_FAILURE); }
gpl-2.0
Phoenix-Silver/Zte-Blade-New-35-kernel
drivers/mtd/maps/map_funcs.c
14866
1078
/* * Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS * is enabled. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mtd/map.h> #include <linux/mtd/xip.h> static map_word __xipram simple_map_read(struct map_info *map, unsigned long ofs) { return inline_map_read(map, ofs); } static void __xipram simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs) { inline_map_write(map, datum, ofs); } static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { inline_map_copy_from(map, to, from, len); } static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) { inline_map_copy_to(map, to, from, len); } void simple_map_init(struct map_info *map) { BUG_ON(!map_bankwidth_supported(map->bankwidth)); map->read = simple_map_read; map->write = simple_map_write; map->copy_from = simple_map_copy_from; map->copy_to = simple_map_copy_to; } EXPORT_SYMBOL(simple_map_init); MODULE_LICENSE("GPL");
gpl-2.0
wdv4758h/glibc
sysdeps/unix/sysv/linux/sigpending.c
19
1122
/* Copyright (C) 1997-2015 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <errno.h> #include <signal.h> #include <unistd.h> #include <sysdep.h> #include <sys/syscall.h> /* Change the set of blocked signals to SET, wait until a signal arrives, and restore the set of blocked signals. */ int sigpending (set) sigset_t *set; { return INLINE_SYSCALL (rt_sigpending, 2, set, _NSIG / 8); }
gpl-2.0
omnirom/android_kernel_samsung_jf
kernel/futex.c
19
77118
/* * Fast Userspace Mutexes (which I call "Futexes!"). * (C) Rusty Russell, IBM 2002 * * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar * (C) Copyright 2003 Red Hat Inc, All Rights Reserved * * Removed page pinning, fix privately mapped COW pages and other cleanups * (C) Copyright 2003, 2004 Jamie Lokier * * Robust futex support started by Ingo Molnar * (C) Copyright 2006 Red Hat Inc, All Rights Reserved * Thanks to Thomas Gleixner for suggestions, analysis and fixes. * * PI-futex support started by Ingo Molnar and Thomas Gleixner * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> * * PRIVATE futexes by Eric Dumazet * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com> * * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com> * Copyright (C) IBM Corporation, 2009 * Thanks to Thomas Gleixner for conceptual design and careful reviews. * * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly * enough at me, Linus for the original (flawed) idea, Matthew * Kirkwood for proof-of-concept implementation. * * "The futexes are also cursed." * "But they come in a choice of three flavours!" * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/slab.h> #include <linux/poll.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/jhash.h> #include <linux/init.h> #include <linux/futex.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/signal.h> #include <linux/export.h> #include <linux/magic.h> #include <linux/pid.h> #include <linux/nsproxy.h> #include <linux/ptrace.h> #include <linux/freezer.h> #include <asm/futex.h> #include "rtmutex_common.h" int __read_mostly futex_cmpxchg_enabled; #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) /* * Futex flags used to encode options to functions and preserve them across * restarts. */ #define FLAGS_SHARED 0x01 #define FLAGS_CLOCKRT 0x02 #define FLAGS_HAS_TIMEOUT 0x04 /* * Priority Inheritance state: */ struct futex_pi_state { /* * list of 'owned' pi_state instances - these have to be * cleaned up in do_exit() if the task exits prematurely: */ struct list_head list; /* * The PI object: */ struct rt_mutex pi_mutex; struct task_struct *owner; atomic_t refcount; union futex_key key; }; /** * struct futex_q - The hashed futex queue entry, one per waiting task * @list: priority-sorted list of tasks waiting on this futex * @task: the task waiting on the futex * @lock_ptr: the hash bucket lock * @key: the key the futex is hashed on * @pi_state: optional priority inheritance state * @rt_waiter: rt_waiter storage for use with requeue_pi * @requeue_pi_key: the requeue_pi target futex key * @bitset: bitset for the optional bitmasked wakeup * * We use this hashed waitqueue, instead of a normal wait_queue_t, so * we can wake only the relevant ones (hashed queues may be shared). * * A futex_q has a woken state, just like tasks have TASK_RUNNING. 
* It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. * The order of wakeup is always to make the first condition true, then * the second. * * PI futexes are typically woken before they are removed from the hash list via * the rt_mutex code. See unqueue_me_pi(). */ struct futex_q { struct plist_node list; struct task_struct *task; spinlock_t *lock_ptr; union futex_key key; struct futex_pi_state *pi_state; struct rt_mutex_waiter *rt_waiter; union futex_key *requeue_pi_key; u32 bitset; }; static const struct futex_q futex_q_init = { /* list gets initialized in queue_me()*/ .key = FUTEX_KEY_INIT, .bitset = FUTEX_BITSET_MATCH_ANY }; /* * Hash buckets are shared by all the futex_keys that hash to the same * location. Each key may have multiple futex_q structures, one for each task * waiting on a futex. */ struct futex_hash_bucket { spinlock_t lock; struct plist_head chain; }; static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS]; /* * We hash on the keys returned from get_futex_key (see below). */ static struct futex_hash_bucket *hash_futex(union futex_key *key) { u32 hash = jhash2((u32*)&key->both.word, (sizeof(key->both.word)+sizeof(key->both.ptr))/4, key->both.offset); return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)]; } /* * Return 1 if two futex_keys are equal, 0 otherwise. */ static inline int match_futex(union futex_key *key1, union futex_key *key2) { return (key1 && key2 && key1->both.word == key2->both.word && key1->both.ptr == key2->both.ptr && key1->both.offset == key2->both.offset); } /* * Take a reference to the resource addressed by a key. * Can be called while holding spinlocks. * */ static void get_futex_key_refs(union futex_key *key) { if (!key->both.ptr) return; switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: ihold(key->shared.inode); break; case FUT_OFF_MMSHARED: atomic_inc(&key->private.mm->mm_count); break; } } /* * Drop a reference to the resource addressed by a key. 
* The hash bucket spinlock must not be held. */ static void drop_futex_key_refs(union futex_key *key) { if (!key->both.ptr) { /* If we're here then we tried to put a key we failed to get */ WARN_ON_ONCE(1); return; } switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: iput(key->shared.inode); break; case FUT_OFF_MMSHARED: mmdrop(key->private.mm); break; } } /** * get_futex_key() - Get parameters which are the keys for a futex * @uaddr: virtual address of the futex * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED * @key: address where result is stored. * @rw: mapping needs to be read/write (values: VERIFY_READ, * VERIFY_WRITE) * * Returns a negative error code or 0 * The key words are stored in *key on success. * * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode, * offset_within_page). For private mappings, it's (uaddr, current->mm). * We can usually work out the index without swapping in the page. * * lock_page() might sleep, the caller should not hold a spinlock. */ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) { unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; struct page *page, *page_head; int err, ro = 0; /* * The futex address must be "naturally" aligned. */ key->both.offset = address % PAGE_SIZE; if (unlikely((address % sizeof(u32)) != 0)) return -EINVAL; address -= key->both.offset; /* * PROCESS_PRIVATE futexes are fast. * As the mm cannot disappear under us and the 'key' only needs * virtual address, we dont even have to find the underlying vma. 
* Note : We do have to check 'uaddr' is a valid user address, * but access_ok() should be faster than find_vma() */ if (!fshared) { if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) return -EFAULT; key->private.mm = mm; key->private.address = address; get_futex_key_refs(key); return 0; } again: err = get_user_pages_fast(address, 1, 1, &page); /* * If write access is not required (eg. FUTEX_WAIT), try * and get read-only access. */ if (err == -EFAULT && rw == VERIFY_READ) { err = get_user_pages_fast(address, 1, 0, &page); ro = 1; } if (err < 0) return err; else err = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE page_head = page; if (unlikely(PageTail(page))) { put_page(page); /* serialize against __split_huge_page_splitting() */ local_irq_disable(); if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) { page_head = compound_head(page); /* * page_head is valid pointer but we must pin * it before taking the PG_lock and/or * PG_compound_lock. The moment we re-enable * irqs __split_huge_page_splitting() can * return and the head page can be freed from * under us. We can't take the PG_lock and/or * PG_compound_lock on a page that could be * freed from under us. */ if (page != page_head) { get_page(page_head); put_page(page); } local_irq_enable(); } else { local_irq_enable(); goto again; } } #else page_head = compound_head(page); if (page != page_head) { get_page(page_head); put_page(page); } #endif lock_page(page_head); /* * If page_head->mapping is NULL, then it cannot be a PageAnon * page; but it might be the ZERO_PAGE or in the gate area or * in a special mapping (all cases which we are happy to fail); * or it may have been a good file page when get_user_pages_fast * found it, but truncated or holepunched or subjected to * invalidate_complete_page2 before we got the page lock (also * cases which we are happy to fail). 
And we hold a reference, * so refcount care in invalidate_complete_page's remove_mapping * prevents drop_caches from setting mapping to NULL beneath us. * * The case we do have to guard against is when memory pressure made * shmem_writepage move it from filecache to swapcache beneath us: * an unlikely race, but we do need to retry for page_head->mapping. */ if (!page_head->mapping) { int shmem_swizzled = PageSwapCache(page_head); unlock_page(page_head); put_page(page_head); if (shmem_swizzled) goto again; return -EFAULT; } /* * Private mappings are handled in a simple way. * * NOTE: When userspace waits on a MAP_SHARED mapping, even if * it's a read-only handle, it's expected that futexes attach to * the object not the particular process. */ if (PageAnon(page_head)) { /* * A RO anonymous page will never change and thus doesn't make * sense for futex operations. */ if (ro) { err = -EFAULT; goto out; } key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ key->private.mm = mm; key->private.address = address; } else { key->both.offset |= FUT_OFF_INODE; /* inode-based key */ key->shared.inode = page_head->mapping->host; key->shared.pgoff = page_head->index; } get_futex_key_refs(key); out: unlock_page(page_head); put_page(page_head); return err; } static inline void put_futex_key(union futex_key *key) { drop_futex_key_refs(key); } /** * fault_in_user_writeable() - Fault in user address and verify RW access * @uaddr: pointer to faulting user space address * * Slow path to fixup the fault we just took in the atomic write * access to @uaddr. * * We have no generic implementation of a non-destructive write to the * user address. We know that we faulted in the atomic pagefault * disabled section so we can as well avoid the #PF overhead by * calling get_user_pages() right away. 
*/ static int fault_in_user_writeable(u32 __user *uaddr) { struct mm_struct *mm = current->mm; int ret; down_read(&mm->mmap_sem); ret = fixup_user_fault(current, mm, (unsigned long)uaddr, FAULT_FLAG_WRITE); up_read(&mm->mmap_sem); return ret < 0 ? ret : 0; } /** * futex_top_waiter() - Return the highest priority waiter on a futex * @hb: the hash bucket the futex_q's reside in * @key: the futex key (to distinguish it from other futex futex_q's) * * Must be called with the hb lock held. */ static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key) { struct futex_q *this; plist_for_each_entry(this, &hb->chain, list) { if (match_futex(&this->key, key)) return this; } return NULL; } static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval) { int ret; pagefault_disable(); ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval); pagefault_enable(); return ret; } static int get_futex_value_locked(u32 *dest, u32 __user *from) { int ret; pagefault_disable(); ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); pagefault_enable(); return ret ? 
-EFAULT : 0; } /* * PI code: */ static int refill_pi_state_cache(void) { struct futex_pi_state *pi_state; if (likely(current->pi_state_cache)) return 0; pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL); if (!pi_state) return -ENOMEM; INIT_LIST_HEAD(&pi_state->list); /* pi_mutex gets initialized later */ pi_state->owner = NULL; atomic_set(&pi_state->refcount, 1); pi_state->key = FUTEX_KEY_INIT; current->pi_state_cache = pi_state; return 0; } static struct futex_pi_state * alloc_pi_state(void) { struct futex_pi_state *pi_state = current->pi_state_cache; WARN_ON(!pi_state); current->pi_state_cache = NULL; return pi_state; } static void free_pi_state(struct futex_pi_state *pi_state) { if (!atomic_dec_and_test(&pi_state->refcount)) return; /* * If pi_state->owner is NULL, the owner is most probably dying * and has cleaned up the pi_state already */ if (pi_state->owner) { raw_spin_lock_irq(&pi_state->owner->pi_lock); list_del_init(&pi_state->list); raw_spin_unlock_irq(&pi_state->owner->pi_lock); rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); } if (current->pi_state_cache) kfree(pi_state); else { /* * pi_state->list is already empty. * clear pi_state->owner. * refcount is at 0 - put it back to 1. */ pi_state->owner = NULL; atomic_set(&pi_state->refcount, 1); current->pi_state_cache = pi_state; } } /* * Look up the task based on what TID userspace gave us. * We dont trust it. */ static struct task_struct * futex_find_get_task(pid_t pid) { struct task_struct *p; rcu_read_lock(); p = find_task_by_vpid(pid); if (p) get_task_struct(p); rcu_read_unlock(); return p; } /* * This task is holding PI mutexes at exit time => bad. * Kernel cleans up PI-state, but userspace is likely hosed. * (Robust-futex cleanup is separate and might save the day for userspace.) 
*/ void exit_pi_state_list(struct task_struct *curr) { struct list_head *next, *head = &curr->pi_state_list; struct futex_pi_state *pi_state; struct futex_hash_bucket *hb; union futex_key key = FUTEX_KEY_INIT; if (!futex_cmpxchg_enabled) return; /* * We are a ZOMBIE and nobody can enqueue itself on * pi_state_list anymore, but we have to be careful * versus waiters unqueueing themselves: */ raw_spin_lock_irq(&curr->pi_lock); while (!list_empty(head)) { next = head->next; pi_state = list_entry(next, struct futex_pi_state, list); key = pi_state->key; hb = hash_futex(&key); raw_spin_unlock_irq(&curr->pi_lock); spin_lock(&hb->lock); raw_spin_lock_irq(&curr->pi_lock); /* * We dropped the pi-lock, so re-check whether this * task still owns the PI-state: */ if (head->next != next) { spin_unlock(&hb->lock); continue; } WARN_ON(pi_state->owner != curr); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); pi_state->owner = NULL; raw_spin_unlock_irq(&curr->pi_lock); rt_mutex_unlock(&pi_state->pi_mutex); spin_unlock(&hb->lock); raw_spin_lock_irq(&curr->pi_lock); } raw_spin_unlock_irq(&curr->pi_lock); } /* * We need to check the following states: * * Waiter | pi_state | pi->owner | uTID | uODIED | ? * * [1] NULL | --- | --- | 0 | 0/1 | Valid * [2] NULL | --- | --- | >0 | 0/1 | Valid * * [3] Found | NULL | -- | Any | 0/1 | Invalid * * [4] Found | Found | NULL | 0 | 1 | Valid * [5] Found | Found | NULL | >0 | 1 | Invalid * * [6] Found | Found | task | 0 | 1 | Valid * * [7] Found | Found | NULL | Any | 0 | Invalid * * [8] Found | Found | task | ==taskTID | 0/1 | Valid * [9] Found | Found | task | 0 | 0 | Invalid * [10] Found | Found | task | !=taskTID | 0/1 | Invalid * * [1] Indicates that the kernel can acquire the futex atomically. We * came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit. * * [2] Valid, if TID does not belong to a kernel thread. If no matching * thread is found then it indicates that the owner TID has died. * * [3] Invalid. 
The waiter is queued on a non PI futex * * [4] Valid state after exit_robust_list(), which sets the user space * value to FUTEX_WAITERS | FUTEX_OWNER_DIED. * * [5] The user space value got manipulated between exit_robust_list() * and exit_pi_state_list() * * [6] Valid state after exit_pi_state_list() which sets the new owner in * the pi_state but cannot access the user space value. * * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set. * * [8] Owner and user space value match * * [9] There is no transient state which sets the user space TID to 0 * except exit_robust_list(), but this is indicated by the * FUTEX_OWNER_DIED bit. See [4] * * [10] There is no transient state which leaves owner and user space * TID out of sync. */ static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps, struct task_struct *task) { struct futex_pi_state *pi_state = NULL; struct futex_q *this, *next; struct plist_head *head; struct task_struct *p; pid_t pid = uval & FUTEX_TID_MASK; head = &hb->chain; plist_for_each_entry_safe(this, next, head, list) { if (match_futex(&this->key, key)) { /* * Sanity check the waiter before increasing * the refcount and attaching to it. */ pi_state = this->pi_state; /* * Userspace might have messed up non-PI and * PI futexes [3] */ if (unlikely(!pi_state)) return -EINVAL; WARN_ON(!atomic_read(&pi_state->refcount)); /* * Handle the owner died case: */ if (uval & FUTEX_OWNER_DIED) { /* * exit_pi_state_list sets owner to NULL and * wakes the topmost waiter. The task which * acquires the pi_state->rt_mutex will fixup * owner. */ if (!pi_state->owner) { /* * No pi state owner, but the user * space TID is not 0. Inconsistent * state. [5] */ if (pid) return -EINVAL; /* * Take a ref on the state and * return. 
[4] */ goto out_state; } /* * If TID is 0, then either the dying owner * has not yet executed exit_pi_state_list() * or some waiter acquired the rtmutex in the * pi state, but did not yet fixup the TID in * user space. * * Take a ref on the state and return. [6] */ if (!pid) goto out_state; } else { /* * If the owner died bit is not set, * then the pi_state must have an * owner. [7] */ if (!pi_state->owner) return -EINVAL; } /* * Bail out if user space manipulated the * futex value. If pi state exists then the * owner TID must be the same as the user * space TID. [9/10] */ if (pid != task_pid_vnr(pi_state->owner)) return -EINVAL; /* * Protect against a corrupted uval. If uval * is 0x80000000 then pid is 0 and the waiter * bit is set. So the deadlock check in the * calling code has failed and we did not fall * into the check above due to !pid. */ if (task && pi_state->owner == task) return -EDEADLK; out_state: atomic_inc(&pi_state->refcount); *ps = pi_state; return 0; } } /* * We are the first waiter - try to look up the real owner and attach * the new pi_state to it, but bail out when TID = 0 [1] */ if (!pid) return -ESRCH; p = futex_find_get_task(pid); if (!p) return -ESRCH; if (!p->mm) { put_task_struct(p); return -EPERM; } /* * We need to look at the task state flags to figure out, * whether the task is exiting. To protect against the do_exit * change of the task flags, we do this protected by * p->pi_lock: */ raw_spin_lock_irq(&p->pi_lock); if (unlikely(p->flags & PF_EXITING)) { /* * The task is on the way out. When PF_EXITPIDONE is * set, we know that the task has finished the * cleanup: */ int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN; raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); return ret; } /* * No existing pi state. First waiter. 
[2] */ pi_state = alloc_pi_state(); /* * Initialize the pi_mutex in locked state and make 'p' * the owner of it: */ rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); /* Store the key for possible exit cleanups: */ pi_state->key = *key; WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &p->pi_state_list); pi_state->owner = p; raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); *ps = pi_state; return 0; } /** * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex * @uaddr: the pi futex user address * @hb: the pi futex hash bucket * @key: the futex key associated with uaddr and hb * @ps: the pi_state pointer where we store the result of the * lookup * @task: the task to perform the atomic lock work for. This will * be "current" except in the case of requeue pi. * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Returns: * 0 - ready to wait * 1 - acquired the lock * <0 - error * * The hb->lock and futex_key refs shall be held by the caller. */ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps, struct task_struct *task, int set_waiters) { int lock_taken, ret, ownerdied = 0; u32 uval, newval, curval, vpid = task_pid_vnr(task); retry: ret = lock_taken = 0; /* * To avoid races, we attempt to take the lock here again * (by doing a 0 -> TID atomic cmpxchg), while holding all * the locks. It will most likely not succeed. */ newval = vpid; if (set_waiters) newval |= FUTEX_WAITERS; if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval))) return -EFAULT; /* * Detect deadlocks. */ if ((unlikely((curval & FUTEX_TID_MASK) == vpid))) return -EDEADLK; /* * Surprise - we got the lock, but we do not trust user space at all. */ if (unlikely(!curval)) { /* * We verify whether there is kernel state for this * futex. If not, we can safely assume, that the 0 -> * TID transition is correct. 
If state exists, we do * not bother to fixup the user space state as it was * corrupted already. */ return futex_top_waiter(hb, key) ? -EINVAL : 1; } uval = curval; /* * Set the FUTEX_WAITERS flag, so the owner will know it has someone * to wake at the next unlock. */ newval = curval | FUTEX_WAITERS; /* * There are two cases, where a futex might have no owner (the * owner TID is 0): OWNER_DIED. We take over the futex in this * case. We also do an unconditional take over, when the owner * of the futex died. * * This is safe as we are protected by the hash bucket lock ! */ if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { /* Keep the OWNER_DIED bit */ newval = (curval & ~FUTEX_TID_MASK) | vpid; ownerdied = 0; lock_taken = 1; } if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))) return -EFAULT; if (unlikely(curval != uval)) goto retry; /* * We took the lock due to owner died take over. */ if (unlikely(lock_taken)) return 1; /* * We dont have the lock. Look up the PI state (or create it if * we are the first waiter): */ ret = lookup_pi_state(uval, hb, key, ps, task); if (unlikely(ret)) { switch (ret) { case -ESRCH: /* * No owner found for this futex. Check if the * OWNER_DIED bit is set to figure out whether * this is a robust futex or not. */ if (get_futex_value_locked(&curval, uaddr)) return -EFAULT; /* * We simply start over in case of a robust * futex. The code above will take the futex * and return happy. */ if (curval & FUTEX_OWNER_DIED) { ownerdied = 1; goto retry; } default: break; } } return ret; } /** * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket * @q: The futex_q to unqueue * * The q->lock_ptr must not be NULL and must be held by the caller. 
*/ static void __unqueue_futex(struct futex_q *q) { struct futex_hash_bucket *hb; if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr)) || WARN_ON(plist_node_empty(&q->list))) return; hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); plist_del(&q->list, &hb->chain); } /* * The hash bucket lock must be held when this is called. * Afterwards, the futex_q must not be accessed. */ static void wake_futex(struct futex_q *q) { struct task_struct *p = q->task; /* * We set q->lock_ptr = NULL _before_ we wake up the task. If * a non-futex wake up happens on another CPU then the task * might exit and p would dereference a non-existing task * struct. Prevent this by holding a reference on p across the * wake up. */ get_task_struct(p); __unqueue_futex(q); /* * The waiting task can free the futex_q as soon as * q->lock_ptr = NULL is written, without taking any locks. A * memory barrier is required here to prevent the following * store to lock_ptr from getting ahead of the plist_del. */ smp_wmb(); q->lock_ptr = NULL; wake_up_state(p, TASK_NORMAL); put_task_struct(p); } static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) { struct task_struct *new_owner; struct futex_pi_state *pi_state = this->pi_state; u32 uninitialized_var(curval), newval; int ret = 0; if (!pi_state) return -EINVAL; /* * If current does not own the pi_state then the futex is * inconsistent and user space fiddled with the futex value. */ if (pi_state->owner != current) return -EINVAL; raw_spin_lock(&pi_state->pi_mutex.wait_lock); new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); /* * It is possible that the next waiter (the one that brought * this owner to the kernel) timed out and is no longer * waiting on the lock. */ if (!new_owner) new_owner = this->task; /* * We pass it to the next owner. The WAITERS bit is always * kept enabled while there is PI state around. We cleanup the * owner died bit, because we are the owner. 
*/ newval = FUTEX_WAITERS | task_pid_vnr(new_owner); if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) ret = -EFAULT; else if (curval != uval) ret = -EINVAL; if (ret) { raw_spin_unlock(&pi_state->pi_mutex.wait_lock); return ret; } raw_spin_lock_irq(&pi_state->owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); raw_spin_unlock_irq(&pi_state->owner->pi_lock); raw_spin_lock_irq(&new_owner->pi_lock); WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &new_owner->pi_state_list); pi_state->owner = new_owner; raw_spin_unlock_irq(&new_owner->pi_lock); raw_spin_unlock(&pi_state->pi_mutex.wait_lock); rt_mutex_unlock(&pi_state->pi_mutex); return 0; } static int unlock_futex_pi(u32 __user *uaddr, u32 uval) { u32 uninitialized_var(oldval); /* * There is no waiter, so we unlock the futex. The owner died * bit has not to be preserved here. We are the owner: */ if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0)) return -EFAULT; if (oldval != uval) return -EAGAIN; return 0; } /* * Express the locking dependencies for lockdep: */ static inline void double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) { if (hb1 <= hb2) { spin_lock(&hb1->lock); if (hb1 < hb2) spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); } else { /* hb1 > hb2 */ spin_lock(&hb2->lock); spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); } } static inline void double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) { spin_unlock(&hb1->lock); if (hb1 != hb2) spin_unlock(&hb2->lock); } /* * Wake up waiters matching bitset queued on this futex (uaddr). 
*/ static int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) { struct futex_hash_bucket *hb; struct futex_q *this, *next; struct plist_head *head; union futex_key key = FUTEX_KEY_INIT; int ret; if (!bitset) return -EINVAL; ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ); if (unlikely(ret != 0)) goto out; hb = hash_futex(&key); spin_lock(&hb->lock); head = &hb->chain; plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key)) { if (this->pi_state || this->rt_waiter) { ret = -EINVAL; break; } /* Check if one of the bits is set in both bitsets */ if (!(this->bitset & bitset)) continue; wake_futex(this); if (++ret >= nr_wake) break; } } spin_unlock(&hb->lock); put_futex_key(&key); out: return ret; } /* * Wake up all waiters hashed on the physical page that is mapped * to this virtual address: */ static int futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, int nr_wake, int nr_wake2, int op) { union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; struct futex_hash_bucket *hb1, *hb2; struct plist_head *head; struct futex_q *this, *next; int ret, op_ret; retry: ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); if (unlikely(ret != 0)) goto out; ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) goto out_put_key1; hb1 = hash_futex(&key1); hb2 = hash_futex(&key2); retry_private: double_lock_hb(hb1, hb2); op_ret = futex_atomic_op_inuser(op, uaddr2); if (unlikely(op_ret < 0)) { double_unlock_hb(hb1, hb2); #ifndef CONFIG_MMU /* * we don't get EFAULT from MMU faults if we don't have an MMU, * but we might get them from range checking */ ret = op_ret; goto out_put_keys; #endif if (unlikely(op_ret != -EFAULT)) { ret = op_ret; goto out_put_keys; } ret = fault_in_user_writeable(uaddr2); if (ret) goto out_put_keys; if (!(flags & FLAGS_SHARED)) goto retry_private; put_futex_key(&key2); put_futex_key(&key1); goto 
retry; } head = &hb1->chain; plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key1)) { wake_futex(this); if (++ret >= nr_wake) break; } } if (op_ret > 0) { head = &hb2->chain; op_ret = 0; plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key2)) { wake_futex(this); if (++op_ret >= nr_wake2) break; } } ret += op_ret; } double_unlock_hb(hb1, hb2); out_put_keys: put_futex_key(&key2); out_put_key1: put_futex_key(&key1); out: return ret; } /** * requeue_futex() - Requeue a futex_q from one hb to another * @q: the futex_q to requeue * @hb1: the source hash_bucket * @hb2: the target hash_bucket * @key2: the new key for the requeued futex_q */ static inline void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key2) { /* * If key1 and key2 hash to the same bucket, no need to * requeue. */ if (likely(&hb1->chain != &hb2->chain)) { plist_del(&q->list, &hb1->chain); plist_add(&q->list, &hb2->chain); q->lock_ptr = &hb2->lock; } get_futex_key_refs(key2); q->key = *key2; } /** * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue * @q: the futex_q * @key: the key of the requeue target futex * @hb: the hash_bucket of the requeue target futex * * During futex_requeue, with requeue_pi=1, it is possible to acquire the * target futex if it is uncontended or via a lock steal. Set the futex_q key * to the requeue target futex so the waiter can detect the wakeup on the right * futex, but remove it from the hb and NULL the rt_waiter so it can detect * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock * to protect access to the pi_state to fixup the owner later. Must be called * with both q->lock_ptr and hb->lock held. 
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	/* NULL rt_waiter signals the waiter that the lock was taken for it. */
	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Return:
 *  0 - failed to acquire the lock atomically;
 * >0 - acquired the lock, return value is vpid of the top_waiter
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
				 struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unecessarily as it will force the subsequent unlock to enter
	 * the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1) {
		/* Lock acquired on the waiter's behalf: wake it on key2. */
		requeue_pi_wake_futex(top_waiter, key2, hb2);
		return vpid;
	}
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Returns:
 * >=0 - on success, the number of tasks requeued or woken
 *  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head1;
	struct futex_q *this, *next;

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner.  However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state.  Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	if (pi_state != NULL) {
		/*
		 * We will have to lookup the pi_state again, so free this one
		 * to keep the accounting correct.
		 */
		free_pi_state(pi_state);
		pi_state = NULL;
	}

	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && match_futex(&key1, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!(flags & FLAGS_SHARED))
				goto retry_private;

			put_futex_key(&key2);
			put_futex_key(&key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit.  We force this here where we are able to easily handle
		 * faults rather in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it.  If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it. If the lock was taken, ret contains the
		 * vpid of the top waiter task.
		 */
		if (ret > 0) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			/*
			 * If we acquired the lock, then the user
			 * space value of uaddr2 should be vpid. It
			 * cannot be changed by the top waiter as it
			 * is blocked on hb2 lock if it tries to do
			 * so. If something fiddled with it behind our
			 * back the pi state lookup might unearth
			 * it. So we rather use the known value than
			 * rereading and handing potential crap to
			 * lookup_pi_state.
			 */
			ret = lookup_pi_state(ret, hb2, &key2,
					      &pi_state, NULL);
		}

		switch (ret) {
		case 0:
			break;
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/* The owner was exiting, try again. */
			double_unlock_hb(hb1, hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	head1 = &hb1->chain;
	plist_for_each_entry_safe(this, next, head1, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter.  If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			wake_futex(this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/* Prepare the waiter to take the rt_mutex. */
			atomic_inc(&pi_state->refcount);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task, 1);
			if (ret == 1) {
				/* We got the lock. */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/* -EDEADLK */
				this->pi_state = NULL;
				free_pi_state(pi_state);
				goto out_unlock;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer. We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	if (pi_state != NULL)
		free_pi_state(pi_state);
	return ret ? ret : task_count;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().
The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
 * an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Returns:
 *   1 - if the futex_q was still queued (and we removed unqueued it)
 *   0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	/* compiler barrier: use one snapshot of q->lock_ptr for the check below */
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__unqueue_futex(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes can not be requeued and must remove themself from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
	__releases(q->lock_ptr)
{
	__unqueue_futex(q);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, uninitialized_var(curval), newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * previous highest priority waiter or we are the highest priority
	 * waiter but failed to get the rtmutex the first time.
	 * We have to replace the newowner TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	/* cmpxchg loop: update the TID while preserving the OWNER_DIED bit. */
	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the highest priority
	 * waiter itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Returns:
 *  1 - success, lock taken
 *  0 - success, lock not taken
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current);
		goto out;
	}

	/*
	 * Catch the rare case, where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourself from the
		 * rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
		 * rt_mutex. Too late.
		 */
		raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
		if (!owner)
			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
		raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
		ret = fixup_pi_state_owner(uaddr, q, owner);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using set_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		/* An already-expired timer leaves timeout->task == NULL. */
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Returns:
 *  0 - uaddr contains val and hb has been locked
 * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			    struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired values
	 * while the syscall executes.
	 */
retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		/* Fault: drop the hb lock, fault the page in, and retry. */
		queue_unlock(q, *hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(q, *hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(&q->key);
	return ret;
}

static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	/* A restart block is only set up for absolute timeouts (see below). */
	if (!abs_time)
		goto out;

	restart = &current_thread_info()->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

/* Restart handler for futex_wait() after -ERESTART_RESTARTBLOCK. */
static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}

/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
			 ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int res, ret;

	/* NOTE(review): 'detect' is not used below; deadlock detection is
	 * passed to rt_mutex_timed_lock() hardcoded as 1. */

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
	if (unlikely(ret)) {
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Task is exiting and we just wait for the
			 * exit to complete.
			 */
			queue_unlock(&q, hb);
			put_futex_key(&q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	queue_me(&q, hb);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock)
		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
	else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	spin_lock(q.lock_ptr);
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, &q, !ret);
	/*
	 * If fixup_owner() returned an error, proprogate that.  If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
		rt_mutex_unlock(&q.pi_state->pi_mutex);

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(&q, hb);

out_put_key:
	put_futex_key(&q.key);
out:
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(&q, hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!(flags & FLAGS_SHARED))
		goto retry_private;

	put_futex_key(&q.key);
	goto retry;
}

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	u32 uval, vpid = task_pid_vnr(current);
	int ret;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)
		return -EPERM;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up. We only try this if neither the waiters nor
	 * the owner died bit are set.
	 */
	if (!(uval & ~FUTEX_TID_MASK) &&
	    cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
	if (unlikely(uval == vpid))
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (!match_futex (&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
	ret = unlock_futex_pi(uaddr, uval);
	if (ret == -EFAULT)
		goto pi_faulted;

out_unlock:
	spin_unlock(&hb->lock);
	put_futex_key(&key);

out:
	return ret;

pi_faulted:
	spin_unlock(&hb->lock);
	put_futex_key(&key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}

/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was original enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
 *
 * Returns
 *  0 - no early wakeup detected
 * <0 - -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
* We only need to hold hb (and not hb2) to ensure atomicity as the * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb. * It can't be requeued from uaddr2 to something else since we don't * support a PI aware source futex for requeue. */ if (!match_futex(&q->key, key2)) { WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr)); /* * We were woken prior to requeue by a timeout or a signal. * Unqueue the futex_q and determine which it was. */ plist_del(&q->list, &hb->chain); /* Handle spurious wakeups gracefully */ ret = -EWOULDBLOCK; if (timeout && !timeout->task) ret = -ETIMEDOUT; else if (signal_pending(current)) ret = -ERESTARTNOINTR; } return ret; } /** * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 * @uaddr: the futex we initially wait on (non-pi) * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be * the same type, no requeueing from private to shared, etc. * @val: the expected value of uaddr * @abs_time: absolute timeout * @bitset: 32 bit wakeup bitset set by userspace, defaults to all * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0) * @uaddr2: the pi futex we will take prior to returning to user-space * * The caller will wait on uaddr and will be requeued by futex_requeue() to * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and * complete the acquisition of the rt_mutex prior to returning to userspace. * This ensures the rt_mutex maintains an owner when it has waiters; without * one, the pi logic wouldn't know which task to boost/deboost, if there was a * need to. * * We call schedule in futex_wait_queue_me() when we enqueue and return there * via the following: * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() * 2) wakeup on uaddr2 after a requeue * 3) signal * 4) timeout * * If 3, cleanup and return -ERESTARTNOINTR. 
* * If 2, we may then block on trying to take the rt_mutex and return via: * 5) successful lock * 6) signal * 7) timeout * 8) other lock acquisition failure * * If 6, return -EWOULDBLOCK (restarting the syscall would do the same). * * If 4 or 7, we cleanup and return with -ETIMEDOUT. * * Returns: * 0 - On success * <0 - On error */ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time, u32 bitset, u32 __user *uaddr2) { struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; struct rt_mutex *pi_mutex = NULL; struct futex_hash_bucket *hb; union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; int res, ret; if (!bitset) return -EINVAL; if (abs_time) { to = &timeout; hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? CLOCK_REALTIME : CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } /* * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ debug_rt_mutex_init_waiter(&rt_waiter); rt_waiter.task = NULL; ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) goto out; q.bitset = bitset; q.rt_waiter = &rt_waiter; q.requeue_pi_key = &key2; /* * Prepare to wait on uaddr. On success, increments q.key (key1) ref * count. */ ret = futex_wait_setup(uaddr, val, flags, &q, &hb); if (ret) goto out_key2; /* * The check above which compares uaddrs is not sufficient for * shared futexes. We need to compare the keys: */ if (match_futex(&q.key, &key2)) { ret = -EINVAL; goto out_put_keys; } /* Queue the futex_q, drop the hb lock, wait for wakeup. 
*/ futex_wait_queue_me(hb, &q, to); spin_lock(&hb->lock); ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); spin_unlock(&hb->lock); if (ret) goto out_put_keys; /* * In order for us to be here, we know our q.key == key2, and since * we took the hb->lock above, we also know that futex_requeue() has * completed and we no longer have to concern ourselves with a wakeup * race with the atomic proxy lock acquisition by the requeue code. The * futex_requeue dropped our key1 reference and incremented our key2 * reference count. */ /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case. */ if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current); spin_unlock(q.lock_ptr); } } else { /* * We have been woken up by futex_unlock_pi(), a timeout, or a * signal. futex_unlock_pi() will not destroy the lock_ptr nor * the pi_state. */ WARN_ON(!&q.pi_state); pi_mutex = &q.pi_state->pi_mutex; ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); spin_lock(q.lock_ptr); /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ res = fixup_owner(uaddr2, &q, !ret); /* * If fixup_owner() returned an error, proprogate that. If it * acquired the lock, clear -ETIMEDOUT or -EINTR. */ if (res) ret = (res < 0) ? res : 0; /* Unqueue and drop the lock. */ unqueue_me_pi(&q); } /* * If fixup_pi_state_owner() faulted and was unable to handle the * fault, unlock the rt_mutex and return the fault to userspace. */ if (ret == -EFAULT) { if (rt_mutex_owner(pi_mutex) == current) rt_mutex_unlock(pi_mutex); } else if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling * futex_lock_pi() directly. 
We could restart this syscall, but * it would detect that the user space "val" changed and return * -EWOULDBLOCK. Save the overhead of the restart and return * -EWOULDBLOCK directly. */ ret = -EWOULDBLOCK; } out_put_keys: put_futex_key(&q.key); out_key2: put_futex_key(&key2); out: if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } return ret; } /* * Support for robust futexes: the kernel cleans up held futexes at * thread exit time. * * Implementation: user-space maintains a per-thread list of locks it * is holding. Upon do_exit(), the kernel carefully walks this list, * and marks all locks that are owned by this thread with the * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is * always manipulated with the lock held, so the list is private and * per-thread. Userspace also maintains a per-thread 'list_op_pending' * field, to allow the kernel to clean up if the thread dies after * acquiring the lock, but just before it could have added itself to * the list. There can only be one such pending lock. 
 */

/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	/* Reading another task's robust list requires ptrace-read rights. */
	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 *
 * Returns 0 on success, -1 if the futex word could not be read or
 * faulted in for writing (the walk in exit_robust_list() then stops).
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, uninitialized_var(nval), mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		/*
		 * We are not holding a lock here, but we want to have
		 * the pagefault_disable/enable() protection because
		 * we want to handle the fault gracefully. If the
		 * access fails we try to fault in the futex with R/W
		 * verification via get_user_pages. get_user() above
		 * does not guarantee R/W access. If that fails we
		 * give up and leave the futex locked.
		 */
		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;
		}
		/* Lost a race with a concurrent update - reread and retry. */
		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 * (the pointer proper is returned with bit 0 cleared).
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

/*
 * Central futex dispatcher: decode cmd/flags from @op and hand off to
 * the matching implementation. Returns the operation's result or
 * -ENOSYS for unknown/unsupported commands.
 */
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		/* CLOCK_REALTIME is only meaningful for the waiting ops. */
		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	/* All PI ops need a working atomic futex cmpxchg. */
	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through: plain WAIT is BITSET with an all-ones mask */
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through: plain WAKE is WAKE_BITSET with all-ones mask */
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, val, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, 0, timeout, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}


SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		/* FUTEX_WAIT takes a relative timeout; convert to absolute. */
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}

static int __init futex_init(void)
{
	u32 curval;
	int i;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);
gpl-2.0
daanhettema/ccv-hand
CCV-HAND_Windows/addons/ofxVectorMath/src/ofxVec2f.cpp
19
6856
#include "ofxVec2f.h"

// Construct from explicit components.
ofxVec2f::ofxVec2f( float _x, float _y ) {
	x = _x;
	y = _y;
}

// Construct from an ofPoint; only x and y are taken.
ofxVec2f::ofxVec2f( const ofPoint& pnt ) {
	x = pnt.x;
	y = pnt.y;
}

// Getters and Setters.
//
//
void ofxVec2f::set( float _x, float _y ) {
	x = _x;
	y = _y;
}

void ofxVec2f::set( const ofPoint& vec ) {
	x = vec.x;
	y = vec.y;
}

// Component access: 0 -> x, 1 -> y.
// NOTE(review): any other index silently aliases x instead of failing.
float& ofxVec2f::operator[]( const int& i ) {
	switch(i) {
		case 0: return x;
		case 1: return y;
		default: return x;
	}
}


// Check similarity/equality.
// Exact float comparison; prefer match() when a tolerance is needed.
//
bool ofxVec2f::operator==( const ofPoint& vec ) {
	return (x == vec.x) && (y == vec.y);
}

bool ofxVec2f::operator!=( const ofPoint& vec ) {
	return (x != vec.x) || (y != vec.y);
}

// True when both components differ by strictly less than 'tollerance'.
bool ofxVec2f::match( const ofPoint& vec, float tollerance ) {
	return (fabs(x - vec.x) < tollerance)
	&& (fabs(y - vec.y) < tollerance);
}

/**
 * Checks if vectors look in the same direction.
 * Tollerance is specified in degree.
 */
bool ofxVec2f::align( const ofxVec2f& vec, float tollerance ) const {
	return fabs( this->angle( vec ) ) < tollerance;
}

// Same direction check, tolerance given in radians.
bool ofxVec2f::alignRad( const ofxVec2f& vec, float tollerance ) const {
	return fabs( this->angleRad( vec ) ) < tollerance;
}


// Overloading for any type to any type
//
//
void ofxVec2f::operator=( const ofPoint& vec ){
	x = vec.x;
	y = vec.y;
}

ofxVec2f ofxVec2f::operator+( const ofPoint& vec ) const {
	return ofxVec2f( x+vec.x, y+vec.y);
}

ofxVec2f& ofxVec2f::operator+=( const ofPoint& vec ) {
	x += vec.x;
	y += vec.y;
	return *this;
}

ofxVec2f ofxVec2f::operator-( const ofPoint& vec ) const {
	return ofxVec2f(x-vec.x, y-vec.y);
}

ofxVec2f& ofxVec2f::operator-=( const ofPoint& vec ) {
	x -= vec.x;
	y -= vec.y;
	return *this;
}

// Component-wise (Hadamard) product, not the dot product.
ofxVec2f ofxVec2f::operator*( const ofPoint& vec ) const {
	return ofxVec2f(x*vec.x, y*vec.y);
}

ofxVec2f& ofxVec2f::operator*=( const ofPoint& vec ) {
	x*=vec.x;
	y*=vec.y;
	return *this;
}

// Component-wise division; a zero divisor leaves that component unchanged.
ofxVec2f ofxVec2f::operator/( const ofPoint& vec ) const {
	return ofxVec2f( vec.x!=0 ? x/vec.x : x , vec.y!=0 ? y/vec.y : y);
}

ofxVec2f& ofxVec2f::operator/=( const ofPoint& vec ) {
	// Ternaries used for their side effect: divide only when safe.
	vec.x!=0 ? x/=vec.x : x;
	vec.y!=0 ? y/=vec.y : y;
	return *this;
}


//operator overloading for float
//
//
// Assign the scalar to both components.
void ofxVec2f::operator=( const float f){
	x = f;
	y = f;
}

ofxVec2f ofxVec2f::operator+( const float f ) const {
	return ofxVec2f( x+f, y+f);
}

ofxVec2f& ofxVec2f::operator+=( const float f ) {
	x += f;
	y += f;
	return *this;
}

ofxVec2f ofxVec2f::operator-( const float f ) const {
	return ofxVec2f( x-f, y-f);
}

ofxVec2f& ofxVec2f::operator-=( const float f ) {
	x -= f;
	y -= f;
	return *this;
}

// Unary minus: component-wise negation.
ofxVec2f ofxVec2f::operator-() const {
	return ofxVec2f(-x, -y);
}

ofxVec2f ofxVec2f::operator*( const float f ) const {
	return ofxVec2f(x*f, y*f);
}

ofxVec2f& ofxVec2f::operator*=( const float f ) {
	x*=f;
	y*=f;
	return *this;
}

// Division by zero returns an unmodified copy instead of inf/NaN.
ofxVec2f ofxVec2f::operator/( const float f ) const {
	if(f == 0) return ofxVec2f(x, y);

	return ofxVec2f(x/f, y/f);
}

ofxVec2f& ofxVec2f::operator/=( const float f ) {
	if(f == 0) return *this;

	x/=f;
	y/=f;
	return *this;
}

// Deprecated alias for getScaled().
ofxVec2f ofxVec2f::rescaled( const float length ) const {
	return getScaled(length);
}

// Return a copy rescaled to the given length (zero vector stays zero).
ofxVec2f ofxVec2f::getScaled( const float length ) const {
	float l = (float)sqrt(x*x + y*y);
	if( l > 0 )
		return ofxVec2f( (x/l)*length, (y/l)*length );
	else
		return ofxVec2f();
}

// Deprecated alias for scale().
ofxVec2f& ofxVec2f::rescale( const float length ){
	return scale(length);
}

// Rescale in place to the given length (zero vector is left untouched).
ofxVec2f& ofxVec2f::scale( const float length ) {
	float l = (float)sqrt(x*x + y*y);
	if (l > 0) {
		x = (x/l)*length;
		y = (y/l)*length;
	}
	return *this;
}


// Rotation
//
//
// Deprecated alias for getRotated().
ofxVec2f ofxVec2f::rotated( float angle ) const {
	return getRotated(angle);
}

// Return a copy rotated counter-clockwise by 'angle' degrees.
ofxVec2f ofxVec2f::getRotated( float angle ) const {
	float a = (float)(angle*DEG_TO_RAD);
	return ofxVec2f( x*cos(a) - y*sin(a),
					x*sin(a) + y*cos(a) );
}

// Return a copy rotated counter-clockwise by 'angle' radians.
ofxVec2f ofxVec2f::getRotatedRad( float angle ) const {
	float a = angle;
	return ofxVec2f( x*cos(a) - y*sin(a),
					x*sin(a) + y*cos(a) );
}

// Rotate in place by 'angle' degrees; xrot holds x before it is clobbered.
ofxVec2f& ofxVec2f::rotate( float angle ) {
	float a = (float)(angle * DEG_TO_RAD);
	float xrot = x*cos(a) - y*sin(a);
	y = x*sin(a) + y*cos(a);
	x = xrot;
	return *this;
}

// Rotate in place by 'angle' radians.
ofxVec2f&
ofxVec2f::rotateRad( float angle ) {
	float a = angle;
	float xrot = x*cos(a) - y*sin(a);
	y = x*sin(a) + y*cos(a);
	x = xrot;
	return *this;
}


// Normalization
//
//
// Deprecated alias for getNormalized().
ofxVec2f ofxVec2f::normalized() const {
	return getNormalized();
}

// Return a unit-length copy (zero vector stays zero).
ofxVec2f ofxVec2f::getNormalized() const {
	float length = (float)sqrt(x*x + y*y);
	if( length > 0 ) {
		return ofxVec2f( x/length, y/length );
	} else {
		return ofxVec2f();
	}
}

// Normalize in place (zero vector is left untouched).
ofxVec2f& ofxVec2f::normalize() {
	float length = (float)sqrt(x*x + y*y);
	if( length > 0 ) {
		x /= length;
		y /= length;
	}
	return *this;
}


// Limit length.
//
//
// Deprecated alias for getLimited().
ofxVec2f ofxVec2f::limited(float max) const{
	return getLimited(max);
}

// Return a copy clamped to at most 'max' length.
ofxVec2f ofxVec2f::getLimited(float max) const {
	float length = (float)sqrt(x*x + y*y);
	if( length > max && length > 0 ) {
		return ofxVec2f( (x/length)*max, (y/length)*max );
	} else {
		return ofxVec2f( x, y );
	}
}

// Clamp in place to at most 'max' length.
ofxVec2f& ofxVec2f::limit(float max) {
	float length = (float)sqrt(x*x + y*y);
	if( length > max && length > 0 ) {
		x = (x/length)*max;
		y = (y/length)*max;
	}
	return *this;
}


// Perpendicular normalized vector.
//
//
// Deprecated alias for getPerpendicular().
ofxVec2f ofxVec2f::perpendiculared() const {
	return getPerpendicular();
}

// Unit vector rotated 90 degrees counter-clockwise (zero stays zero).
ofxVec2f ofxVec2f::getPerpendicular() const {
	float length = (float)sqrt( x*x + y*y );
	if( length > 0 )
		return ofxVec2f( -(y/length), x/length );
	else
		return ofxVec2f();
}

// In-place 90-degree counter-clockwise rotation with normalization.
ofxVec2f& ofxVec2f::perpendicular() {
	float length = (float)sqrt( x*x + y*y );
	if( length > 0 ) {
		float _x = x;
		x = -(y/length);
		y = _x/length;
	}
	return *this;
}


// Length
//
//
float ofxVec2f::length() const {
	return (float)sqrt( x*x + y*y );
}

// Alias for squareLength().
float ofxVec2f::lengthSquared() const {
	return squareLength();
}

// Squared length; avoids the sqrt when only comparing magnitudes.
float ofxVec2f::squareLength() const {
	return (float)(x*x + y*y);
}


/**
 * Angle (deg) between two vectors.
 * This is a signed relative angle between -180 and 180.
 */
float ofxVec2f::angle( const ofxVec2f& vec ) const {
	return (float)(atan2( x*vec.y-y*vec.x, x*vec.x + y*vec.y )*RAD_TO_DEG);
}

/**
 * Angle (rad) between two vectors.
 * This is a signed relative angle between -PI and PI.
*/ float ofxVec2f::angleRad( const ofxVec2f& vec ) const { return atan2( x*vec.y-y*vec.x, x*vec.x + y*vec.y ); } /** * Dot Product. */ float ofxVec2f::dot( const ofxVec2f& vec ) const { return x*vec.x + y*vec.y; } // Non-Member operators // // ofxVec2f operator+( float f, const ofxVec2f& vec ) { return ofxVec2f( f+vec.x, f+vec.y); } ofxVec2f operator-( float f, const ofxVec2f& vec ) { return ofxVec2f( f-vec.x, f-vec.y); } ofxVec2f operator*( float f, const ofxVec2f& vec ) { return ofxVec2f( f*vec.x, f*vec.y); } ofxVec2f operator/( float f, const ofxVec2f& vec ) { return ofxVec2f( f/vec.x, f/vec.y); }
gpl-2.0
aktau/pcsx2
plugins/CDVDnull/CDVD.cpp
19
2284
/* CDVDnull * Copyright (C) 2002-2010 PCSX2 Dev Team * * PCSX2 is free software: you can redistribute it and/or modify it under the terms * of the GNU Lesser General Public License as published by the Free Software Found- * ation, either version 3 of the License, or (at your option) any later version. * * PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along with PCSX2. * If not, see <http://www.gnu.org/licenses/>. */ #include <stdio.h> #include "CDVD.h" #include "svnrev.h" #ifdef _MSC_VER #define snprintf sprintf_s #endif static char libraryName[256]; const unsigned char version = PS2E_CDVD_VERSION; const unsigned char revision = 0; const unsigned char build = 6; EXPORT_C_(char*) PS2EgetLibName() { snprintf( libraryName, 255, "CDVDnull Driver %lld%s",SVN_REV, SVN_MODS ? 
"m" : ""); return libraryName; } EXPORT_C_(u32) PS2EgetLibType() { return PS2E_LT_CDVD; } EXPORT_C_(u32) CALLBACK PS2EgetLibVersion2(u32 type) { return (version << 16) | (revision << 8) | build; } EXPORT_C_(s32) CDVDinit() { return 0; } EXPORT_C_(s32) CDVDopen(const char* pTitle) { return 0; } EXPORT_C_(void) CDVDclose() { } EXPORT_C_(void) CDVDshutdown() { } EXPORT_C_(s32) CDVDreadTrack(u32 lsn, int mode) { return -1; } // return can be NULL (for async modes) EXPORT_C_(u8*) CDVDgetBuffer() { return NULL; } EXPORT_C_(s32) CDVDreadSubQ(u32 lsn, cdvdSubQ* subq) { return -1; } EXPORT_C_(s32) CDVDgetTN(cdvdTN *Buffer) { return -1; } EXPORT_C_(s32) CDVDgetTD(u8 Track, cdvdTD *Buffer) { return -1; } EXPORT_C_(s32) CDVDgetTOC(void* toc) { return -1; } EXPORT_C_(s32) CDVDgetDiskType() { return CDVD_TYPE_NODISC; } EXPORT_C_(s32) CDVDgetTrayStatus() { return CDVD_TRAY_CLOSE; } EXPORT_C_(s32) CDVDctrlTrayOpen() { return 0; } EXPORT_C_(s32) CDVDctrlTrayClose() { return 0; } EXPORT_C_(void) CDVDconfigure() { SysMessage("Nothing to Configure"); } EXPORT_C_(void) CDVDabout() { SysMessage("%s %d.%d", "CDVDnull Driver", revision, build); } EXPORT_C_(s32) CDVDtest() { return 0; }
gpl-2.0
VincentS/glibc
sysdeps/unix/sysv/linux/utimensat.c
19
1505
/* Change access and modification times of open file. Linux version. Copyright (C) 2007-2015 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <errno.h> #include <sys/stat.h> #include <sysdep.h> /* Change the access time of FILE to TSP[0] and the modification time of FILE to TSP[1]. Starting with 2.6.22 the Linux kernel has the utimensat syscall. */ int utimensat (int fd, const char *file, const struct timespec tsp[2], int flags) { if (file == NULL) { __set_errno (EINVAL); return -1; } #ifdef __NR_utimensat /* Avoid implicit array coercion in syscall macros. */ return INLINE_SYSCALL (utimensat, 4, fd, file, &tsp[0], flags); #else __set_errno (ENOSYS); return -1; #endif } #ifndef __NR_utimensat stub_warning (utimensat) #endif
gpl-2.0
techno/gcc-mist32
gcc/targhooks.c
19
48436
/* Default target hook functions. Copyright (C) 2003-2015 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* The migration of target macros to target hooks works as follows: 1. Create a target hook that uses the existing target macros to implement the same functionality. 2. Convert all the MI files to use the hook instead of the macro. 3. Repeat for a majority of the remaining target macros. This will take some time. 4. Tell target maintainers to start migrating. 5. Eventually convert the backends to override the hook instead of defining the macros. This will take some time too. 6. TBD when, poison the macros. Unmigrated targets will break at this point. Note that we expect steps 1-3 to be done by the people that understand what the MI does with each macro, and step 5 to be done by the target maintainers for their respective targets. Note that steps 1 and 2 don't have to be done together, but no target can override the new hook until step 2 is complete for it. Once the macros are poisoned, we will revert to the old migration rules - migrate the macro, callers, and targets all at once. This comment can thus be removed at that point. 
*/ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "machmode.h" #include "rtl.h" #include "hash-set.h" #include "vec.h" #include "double-int.h" #include "input.h" #include "alias.h" #include "symtab.h" #include "wide-int.h" #include "inchash.h" #include "tree.h" #include "fold-const.h" #include "stor-layout.h" #include "varasm.h" #include "hashtab.h" #include "hard-reg-set.h" #include "function.h" #include "flags.h" #include "statistics.h" #include "real.h" #include "fixed-value.h" #include "insn-config.h" #include "expmed.h" #include "dojump.h" #include "explow.h" #include "calls.h" #include "emit-rtl.h" #include "stmt.h" #include "expr.h" #include "output.h" #include "diagnostic-core.h" #include "target.h" #include "tm_p.h" #include "target-def.h" #include "regs.h" #include "reload.h" #include "insn-codes.h" #include "optabs.h" #include "recog.h" #include "intl.h" #include "opts.h" #include "tree-ssa-alias.h" #include "gimple-expr.h" #include "gimplify.h" #include "stringpool.h" #include "tree-ssanames.h" bool default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED, rtx addr ATTRIBUTE_UNUSED, bool strict ATTRIBUTE_UNUSED) { #ifdef GO_IF_LEGITIMATE_ADDRESS /* Defer to the old implementation using a goto. */ if (strict) return strict_memory_address_p (mode, addr); else return memory_address_p (mode, addr); #else gcc_unreachable (); #endif } void default_external_libcall (rtx fun ATTRIBUTE_UNUSED) { #ifdef ASM_OUTPUT_EXTERNAL_LIBCALL ASM_OUTPUT_EXTERNAL_LIBCALL (asm_out_file, fun); #endif } int default_unspec_may_trap_p (const_rtx x, unsigned flags) { int i; /* Any floating arithmetic may trap. 
*/ if ((SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)) return 1; for (i = 0; i < XVECLEN (x, 0); ++i) { if (may_trap_p_1 (XVECEXP (x, 0, i), flags)) return 1; } return 0; } machine_mode default_promote_function_mode (const_tree type ATTRIBUTE_UNUSED, machine_mode mode, int *punsignedp ATTRIBUTE_UNUSED, const_tree funtype ATTRIBUTE_UNUSED, int for_return ATTRIBUTE_UNUSED) { if (type != NULL_TREE && for_return == 2) return promote_mode (type, mode, punsignedp); return mode; } machine_mode default_promote_function_mode_always_promote (const_tree type, machine_mode mode, int *punsignedp, const_tree funtype ATTRIBUTE_UNUSED, int for_return ATTRIBUTE_UNUSED) { return promote_mode (type, mode, punsignedp); } machine_mode default_cc_modes_compatible (machine_mode m1, machine_mode m2) { if (m1 == m2) return m1; return VOIDmode; } bool default_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED) { return (TYPE_MODE (type) == BLKmode); } rtx default_legitimize_address (rtx x, rtx orig_x ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED) { return x; } bool default_legitimize_address_displacement (rtx *disp ATTRIBUTE_UNUSED, rtx *offset ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED) { return false; } rtx default_expand_builtin_saveregs (void) { error ("__builtin_saveregs not supported by this target"); return const0_rtx; } void default_setup_incoming_varargs (cumulative_args_t ca ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED, tree type ATTRIBUTE_UNUSED, int *pretend_arg_size ATTRIBUTE_UNUSED, int second_time ATTRIBUTE_UNUSED) { } /* The default implementation of TARGET_BUILTIN_SETJMP_FRAME_VALUE. */ rtx default_builtin_setjmp_frame_value (void) { return virtual_stack_vars_rtx; } /* Generic hook that takes a CUMULATIVE_ARGS pointer and returns false. 
*/ bool hook_bool_CUMULATIVE_ARGS_false (cumulative_args_t ca ATTRIBUTE_UNUSED) { return false; } bool default_pretend_outgoing_varargs_named (cumulative_args_t ca ATTRIBUTE_UNUSED) { return (targetm.calls.setup_incoming_varargs != default_setup_incoming_varargs); } machine_mode default_eh_return_filter_mode (void) { return targetm.unwind_word_mode (); } machine_mode default_libgcc_cmp_return_mode (void) { return word_mode; } machine_mode default_libgcc_shift_count_mode (void) { return word_mode; } machine_mode default_unwind_word_mode (void) { return word_mode; } /* The default implementation of TARGET_SHIFT_TRUNCATION_MASK. */ unsigned HOST_WIDE_INT default_shift_truncation_mask (machine_mode mode) { return SHIFT_COUNT_TRUNCATED ? GET_MODE_BITSIZE (mode) - 1 : 0; } /* The default implementation of TARGET_MIN_DIVISIONS_FOR_RECIP_MUL. */ unsigned int default_min_divisions_for_recip_mul (machine_mode mode ATTRIBUTE_UNUSED) { return have_insn_for (DIV, mode) ? 3 : 2; } /* The default implementation of TARGET_MODE_REP_EXTENDED. */ int default_mode_rep_extended (machine_mode mode ATTRIBUTE_UNUSED, machine_mode mode_rep ATTRIBUTE_UNUSED) { return UNKNOWN; } /* Generic hook that takes a CUMULATIVE_ARGS pointer and returns true. */ bool hook_bool_CUMULATIVE_ARGS_true (cumulative_args_t a ATTRIBUTE_UNUSED) { return true; } /* Return machine mode for non-standard suffix or VOIDmode if non-standard suffixes are unsupported. */ machine_mode default_mode_for_suffix (char suffix ATTRIBUTE_UNUSED) { return VOIDmode; } /* The generic C++ ABI specifies this is a 64-bit value. */ tree default_cxx_guard_type (void) { return long_long_integer_type_node; } /* Returns the size of the cookie to use when allocating an array whose elements have the indicated TYPE. Assumes that it is already known that a cookie is needed. */ tree default_cxx_get_cookie_size (tree type) { tree cookie_size; /* We need to allocate an additional max (sizeof (size_t), alignof (true_type)) bytes. 
*/ tree sizetype_size; tree type_align; sizetype_size = size_in_bytes (sizetype); type_align = size_int (TYPE_ALIGN_UNIT (type)); if (tree_int_cst_lt (type_align, sizetype_size)) cookie_size = sizetype_size; else cookie_size = type_align; return cookie_size; } /* Return true if a parameter must be passed by reference. This version of the TARGET_PASS_BY_REFERENCE hook uses just MUST_PASS_IN_STACK. */ bool hook_pass_by_reference_must_pass_in_stack (cumulative_args_t c ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED, const_tree type ATTRIBUTE_UNUSED, bool named_arg ATTRIBUTE_UNUSED) { return targetm.calls.must_pass_in_stack (mode, type); } /* Return true if a parameter follows callee copies conventions. This version of the hook is true for all named arguments. */ bool hook_callee_copies_named (cumulative_args_t ca ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED, const_tree type ATTRIBUTE_UNUSED, bool named) { return named; } /* Emit to STREAM the assembler syntax for insn operand X. */ void default_print_operand (FILE *stream ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED, int code ATTRIBUTE_UNUSED) { #ifdef PRINT_OPERAND PRINT_OPERAND (stream, x, code); #else gcc_unreachable (); #endif } /* Emit to STREAM the assembler syntax for an insn operand whose memory address is X. */ void default_print_operand_address (FILE *stream ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED) { #ifdef PRINT_OPERAND_ADDRESS PRINT_OPERAND_ADDRESS (stream, x); #else gcc_unreachable (); #endif } /* Return true if CODE is a valid punctuation character for the `print_operand' hook. */ bool default_print_operand_punct_valid_p (unsigned char code ATTRIBUTE_UNUSED) { #ifdef PRINT_OPERAND_PUNCT_VALID_P return PRINT_OPERAND_PUNCT_VALID_P (code); #else return false; #endif } /* The default implementation of TARGET_MANGLE_ASSEMBLER_NAME. */ tree default_mangle_assembler_name (const char *name ATTRIBUTE_UNUSED) { const char *skipped = name + (*name == '*' ? 
1 : 0); const char *stripped = targetm.strip_name_encoding (skipped); if (*name != '*' && user_label_prefix[0]) stripped = ACONCAT ((user_label_prefix, stripped, NULL)); return get_identifier (stripped); } /* True if MODE is valid for the target. By "valid", we mean able to be manipulated in non-trivial ways. In particular, this means all the arithmetic is supported. By default we guess this means that any C type is supported. If we can't map the mode back to a type that would be available in C, then reject it. Special case, here, is the double-word arithmetic supported by optabs.c. */ bool default_scalar_mode_supported_p (machine_mode mode) { int precision = GET_MODE_PRECISION (mode); switch (GET_MODE_CLASS (mode)) { case MODE_PARTIAL_INT: case MODE_INT: if (precision == CHAR_TYPE_SIZE) return true; if (precision == SHORT_TYPE_SIZE) return true; if (precision == INT_TYPE_SIZE) return true; if (precision == LONG_TYPE_SIZE) return true; if (precision == LONG_LONG_TYPE_SIZE) return true; if (precision == 2 * BITS_PER_WORD) return true; return false; case MODE_FLOAT: if (precision == FLOAT_TYPE_SIZE) return true; if (precision == DOUBLE_TYPE_SIZE) return true; if (precision == LONG_DOUBLE_TYPE_SIZE) return true; return false; case MODE_DECIMAL_FLOAT: case MODE_FRACT: case MODE_UFRACT: case MODE_ACCUM: case MODE_UACCUM: return false; default: gcc_unreachable (); } } /* Return true if libgcc supports floating-point mode MODE (known to be supported as a scalar mode). */ bool default_libgcc_floating_mode_supported_p (machine_mode mode) { switch (mode) { #ifdef HAVE_SFmode case SFmode: #endif #ifdef HAVE_DFmode case DFmode: #endif #ifdef HAVE_XFmode case XFmode: #endif #ifdef HAVE_TFmode case TFmode: #endif return true; default: return false; } } /* Make some target macros useable by target-independent code. 
 */

/* Expose the WORDS_BIG_ENDIAN target macro as a hook.  */
bool
targhook_words_big_endian (void)
{
  return !!WORDS_BIG_ENDIAN;
}

/* Expose the FLOAT_WORDS_BIG_ENDIAN target macro as a hook.  */
bool
targhook_float_words_big_endian (void)
{
  return !!FLOAT_WORDS_BIG_ENDIAN;
}

/* True if the target supports floating-point exceptions and rounding
   modes.  */

bool
default_float_exceptions_rounding_supported_p (void)
{
#ifdef HAVE_adddf3
  return HAVE_adddf3;
#else
  return false;
#endif
}

/* True if the target supports decimal floating point.  */

bool
default_decimal_float_supported_p (void)
{
  return ENABLE_DECIMAL_FLOAT;
}

/* True if the target supports fixed-point arithmetic.  */

bool
default_fixed_point_supported_p (void)
{
  return ENABLE_FIXED_POINT;
}

/* True if the target supports GNU indirect functions.  */

bool
default_has_ifunc_p (void)
{
  return HAVE_GNU_INDIRECT_FUNCTION;
}

/* NULL if INSN insn is valid within a low-overhead loop, otherwise returns
   an error message.

   This function checks whether a given INSN is valid within a low-overhead
   loop.  If INSN is invalid it returns the reason for that, otherwise it
   returns NULL. A called function may clobber any special registers required
   for low-overhead looping. Additionally, some targets (eg, PPC) use the count
   register for branch on table instructions. We reject the doloop pattern in
   these cases.  */

const char *
default_invalid_within_doloop (const rtx_insn *insn)
{
  if (CALL_P (insn))
    return "Function call in loop.";

  if (tablejump_p (insn, NULL, NULL) || computed_jump_p (insn))
    return "Computed branch in the loop.";

  return NULL;
}

/* Mapping of builtin functions to vectorized variants.
   Default: no builtin has a vectorized form.  */

tree
default_builtin_vectorized_function (tree fndecl ATTRIBUTE_UNUSED,
				     tree type_out ATTRIBUTE_UNUSED,
				     tree type_in ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}

/* Vectorized conversion.  Default: no vectorized conversion available.  */

tree
default_builtin_vectorized_conversion (unsigned int code ATTRIBUTE_UNUSED,
				       tree dest_type ATTRIBUTE_UNUSED,
				       tree src_type ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}

/* Default vectorizer cost model values.
*/ int default_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, tree vectype, int misalign ATTRIBUTE_UNUSED) { unsigned elements; switch (type_of_cost) { case scalar_stmt: case scalar_load: case scalar_store: case vector_stmt: case vector_load: case vector_store: case vec_to_scalar: case scalar_to_vec: case cond_branch_not_taken: case vec_perm: case vec_promote_demote: return 1; case unaligned_load: case unaligned_store: return 2; case cond_branch_taken: return 3; case vec_construct: elements = TYPE_VECTOR_SUBPARTS (vectype); return elements / 2 + 1; default: gcc_unreachable (); } } /* Reciprocal. */ tree default_builtin_reciprocal (unsigned int fn ATTRIBUTE_UNUSED, bool md_fn ATTRIBUTE_UNUSED, bool sqrt ATTRIBUTE_UNUSED) { return NULL_TREE; } bool hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false ( cumulative_args_t ca ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED, const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED) { return false; } bool hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true ( cumulative_args_t ca ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED, const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED) { return true; } int hook_int_CUMULATIVE_ARGS_mode_tree_bool_0 ( cumulative_args_t ca ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED, tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED) { return 0; } void default_function_arg_advance (cumulative_args_t ca ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED, const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED) { gcc_unreachable (); } rtx default_function_arg (cumulative_args_t ca ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED, const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED) { gcc_unreachable (); } rtx default_function_incoming_arg (cumulative_args_t ca ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED, const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED) { gcc_unreachable (); } unsigned int 
default_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED, const_tree type ATTRIBUTE_UNUSED) { return PARM_BOUNDARY; } unsigned int default_function_arg_round_boundary (machine_mode mode ATTRIBUTE_UNUSED, const_tree type ATTRIBUTE_UNUSED) { return PARM_BOUNDARY; } void hook_void_bitmap (bitmap regs ATTRIBUTE_UNUSED) { } const char * hook_invalid_arg_for_unprototyped_fn ( const_tree typelist ATTRIBUTE_UNUSED, const_tree funcdecl ATTRIBUTE_UNUSED, const_tree val ATTRIBUTE_UNUSED) { return NULL; } /* Initialize the stack protection decls. */ /* Stack protection related decls living in libgcc. */ static GTY(()) tree stack_chk_guard_decl; tree default_stack_protect_guard (void) { tree t = stack_chk_guard_decl; if (t == NULL) { rtx x; t = build_decl (UNKNOWN_LOCATION, VAR_DECL, get_identifier ("__stack_chk_guard"), ptr_type_node); TREE_STATIC (t) = 1; TREE_PUBLIC (t) = 1; DECL_EXTERNAL (t) = 1; TREE_USED (t) = 1; TREE_THIS_VOLATILE (t) = 1; DECL_ARTIFICIAL (t) = 1; DECL_IGNORED_P (t) = 1; /* Do not share RTL as the declaration is visible outside of current function. 
*/ x = DECL_RTL (t); RTX_FLAG (x, used) = 1; stack_chk_guard_decl = t; } return t; } static GTY(()) tree stack_chk_fail_decl; tree default_external_stack_protect_fail (void) { tree t = stack_chk_fail_decl; if (t == NULL_TREE) { t = build_function_type_list (void_type_node, NULL_TREE); t = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL, get_identifier ("__stack_chk_fail"), t); TREE_STATIC (t) = 1; TREE_PUBLIC (t) = 1; DECL_EXTERNAL (t) = 1; TREE_USED (t) = 1; TREE_THIS_VOLATILE (t) = 1; TREE_NOTHROW (t) = 1; DECL_ARTIFICIAL (t) = 1; DECL_IGNORED_P (t) = 1; DECL_VISIBILITY (t) = VISIBILITY_DEFAULT; DECL_VISIBILITY_SPECIFIED (t) = 1; stack_chk_fail_decl = t; } return build_call_expr (t, 0); } tree default_hidden_stack_protect_fail (void) { #ifndef HAVE_GAS_HIDDEN return default_external_stack_protect_fail (); #else tree t = stack_chk_fail_decl; if (!flag_pic) return default_external_stack_protect_fail (); if (t == NULL_TREE) { t = build_function_type_list (void_type_node, NULL_TREE); t = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL, get_identifier ("__stack_chk_fail_local"), t); TREE_STATIC (t) = 1; TREE_PUBLIC (t) = 1; DECL_EXTERNAL (t) = 1; TREE_USED (t) = 1; TREE_THIS_VOLATILE (t) = 1; TREE_NOTHROW (t) = 1; DECL_ARTIFICIAL (t) = 1; DECL_IGNORED_P (t) = 1; DECL_VISIBILITY_SPECIFIED (t) = 1; DECL_VISIBILITY (t) = VISIBILITY_HIDDEN; stack_chk_fail_decl = t; } return build_call_expr (t, 0); #endif } bool hook_bool_const_rtx_commutative_p (const_rtx x, int outer_code ATTRIBUTE_UNUSED) { return COMMUTATIVE_P (x); } rtx default_function_value (const_tree ret_type ATTRIBUTE_UNUSED, const_tree fn_decl_or_type, bool outgoing ATTRIBUTE_UNUSED) { /* The old interface doesn't handle receiving the function type. 
*/ if (fn_decl_or_type && !DECL_P (fn_decl_or_type)) fn_decl_or_type = NULL; #ifdef FUNCTION_VALUE return FUNCTION_VALUE (ret_type, fn_decl_or_type); #else gcc_unreachable (); #endif } rtx default_libcall_value (machine_mode mode ATTRIBUTE_UNUSED, const_rtx fun ATTRIBUTE_UNUSED) { #ifdef LIBCALL_VALUE return LIBCALL_VALUE (mode); #else gcc_unreachable (); #endif } /* The default hook for TARGET_FUNCTION_VALUE_REGNO_P. */ bool default_function_value_regno_p (const unsigned int regno ATTRIBUTE_UNUSED) { #ifdef FUNCTION_VALUE_REGNO_P return FUNCTION_VALUE_REGNO_P (regno); #else gcc_unreachable (); #endif } rtx default_internal_arg_pointer (void) { /* If the reg that the virtual arg pointer will be translated into is not a fixed reg or is the stack pointer, make a copy of the virtual arg pointer, and address parms via the copy. The frame pointer is considered fixed even though it is not marked as such. */ if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM || ! (fixed_regs[ARG_POINTER_REGNUM] || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM))) return copy_to_reg (virtual_incoming_args_rtx); else return virtual_incoming_args_rtx; } rtx default_static_chain (const_tree ARG_UNUSED (fndecl_or_type), bool incoming_p) { if (incoming_p) { #ifdef STATIC_CHAIN_INCOMING_REGNUM return gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM); #endif } #ifdef STATIC_CHAIN_REGNUM return gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM); #endif { static bool issued_error; if (!issued_error) { issued_error = true; sorry ("nested functions not supported on this target"); } /* It really doesn't matter what we return here, so long at it doesn't cause the rest of the compiler to crash. 
*/ return gen_rtx_MEM (Pmode, stack_pointer_rtx); } } void default_trampoline_init (rtx ARG_UNUSED (m_tramp), tree ARG_UNUSED (t_func), rtx ARG_UNUSED (r_chain)) { sorry ("nested function trampolines not supported on this target"); } int default_return_pops_args (tree fundecl ATTRIBUTE_UNUSED, tree funtype ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED) { return 0; } reg_class_t default_branch_target_register_class (void) { return NO_REGS; } extern bool default_lra_p (void) { return false; } int default_register_priority (int hard_regno ATTRIBUTE_UNUSED) { return 0; } extern bool default_register_usage_leveling_p (void) { return false; } extern bool default_different_addr_displacement_p (void) { return false; } reg_class_t default_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED, reg_class_t reload_class_i ATTRIBUTE_UNUSED, machine_mode reload_mode ATTRIBUTE_UNUSED, secondary_reload_info *sri) { enum reg_class rclass = NO_REGS; enum reg_class reload_class = (enum reg_class) reload_class_i; if (sri->prev_sri && sri->prev_sri->t_icode != CODE_FOR_nothing) { sri->icode = sri->prev_sri->t_icode; return NO_REGS; } #ifdef SECONDARY_INPUT_RELOAD_CLASS if (in_p) rclass = SECONDARY_INPUT_RELOAD_CLASS (reload_class, reload_mode, x); #endif #ifdef SECONDARY_OUTPUT_RELOAD_CLASS if (! in_p) rclass = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class, reload_mode, x); #endif if (rclass != NO_REGS) { enum insn_code icode = direct_optab_handler (in_p ? 
reload_in_optab : reload_out_optab, reload_mode); if (icode != CODE_FOR_nothing && !insn_operand_matches (icode, in_p, x)) icode = CODE_FOR_nothing; else if (icode != CODE_FOR_nothing) { const char *insn_constraint, *scratch_constraint; enum reg_class insn_class, scratch_class; gcc_assert (insn_data[(int) icode].n_operands == 3); insn_constraint = insn_data[(int) icode].operand[!in_p].constraint; if (!*insn_constraint) insn_class = ALL_REGS; else { if (in_p) { gcc_assert (*insn_constraint == '='); insn_constraint++; } insn_class = (reg_class_for_constraint (lookup_constraint (insn_constraint))); gcc_assert (insn_class != NO_REGS); } scratch_constraint = insn_data[(int) icode].operand[2].constraint; /* The scratch register's constraint must start with "=&", except for an input reload, where only "=" is necessary, and where it might be beneficial to re-use registers from the input. */ gcc_assert (scratch_constraint[0] == '=' && (in_p || scratch_constraint[1] == '&')); scratch_constraint++; if (*scratch_constraint == '&') scratch_constraint++; scratch_class = (reg_class_for_constraint (lookup_constraint (scratch_constraint))); if (reg_class_subset_p (reload_class, insn_class)) { gcc_assert (scratch_class == rclass); rclass = NO_REGS; } else rclass = insn_class; } if (rclass == NO_REGS) sri->icode = icode; else sri->t_icode = icode; } return rclass; } /* By default, if flag_pic is true, then neither local nor global relocs should be placed in readonly memory. */ int default_reloc_rw_mask (void) { return flag_pic ? 3 : 0; } /* By default, do no modification. */ tree default_mangle_decl_assembler_name (tree decl ATTRIBUTE_UNUSED, tree id) { return id; } /* Default to natural alignment for vector types. 
*/ HOST_WIDE_INT default_vector_alignment (const_tree type) { return tree_to_shwi (TYPE_SIZE (type)); } bool default_builtin_vector_alignment_reachable (const_tree type, bool is_packed) { if (is_packed) return false; /* Assuming that types whose size is > pointer-size are not guaranteed to be naturally aligned. */ if (tree_int_cst_compare (TYPE_SIZE (type), bitsize_int (POINTER_SIZE)) > 0) return false; /* Assuming that types whose size is <= pointer-size are naturally aligned. */ return true; } /* By default, assume that a target supports any factor of misalignment memory access if it supports movmisalign patten. is_packed is true if the memory access is defined in a packed struct. */ bool default_builtin_support_vector_misalignment (machine_mode mode, const_tree type ATTRIBUTE_UNUSED, int misalignment ATTRIBUTE_UNUSED, bool is_packed ATTRIBUTE_UNUSED) { if (optab_handler (movmisalign_optab, mode) != CODE_FOR_nothing) return true; return false; } /* By default, only attempt to parallelize bitwise operations, and possibly adds/subtracts using bit-twiddling. */ machine_mode default_preferred_simd_mode (machine_mode mode ATTRIBUTE_UNUSED) { return word_mode; } /* By default only the size derived from the preferred vector mode is tried. */ unsigned int default_autovectorize_vector_sizes (void) { return 0; } /* By default, the cost model accumulates three separate costs (prologue, loop body, and epilogue) for a vectorized loop or block. So allocate an array of three unsigned ints, set it to zero, and return its address. */ void * default_init_cost (struct loop *loop_info ATTRIBUTE_UNUSED) { unsigned *cost = XNEWVEC (unsigned, 3); cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0; return cost; } /* By default, the cost model looks up the cost of the given statement kind and mode, multiplies it by the occurrence count, accumulates it into the cost specified by WHERE, and returns the cost added. 
*/ unsigned default_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, struct _stmt_vec_info *stmt_info, int misalign, enum vect_cost_model_location where) { unsigned *cost = (unsigned *) data; unsigned retval = 0; tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE; int stmt_cost = targetm.vectorize.builtin_vectorization_cost (kind, vectype, misalign); /* Statements in an inner loop relative to the loop being vectorized are weighted more heavily. The value here is arbitrary and could potentially be improved with analysis. */ if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info)) count *= 50; /* FIXME. */ retval = (unsigned) (count * stmt_cost); cost[where] += retval; return retval; } /* By default, the cost model just returns the accumulated costs. */ void default_finish_cost (void *data, unsigned *prologue_cost, unsigned *body_cost, unsigned *epilogue_cost) { unsigned *cost = (unsigned *) data; *prologue_cost = cost[vect_prologue]; *body_cost = cost[vect_body]; *epilogue_cost = cost[vect_epilogue]; } /* Free the cost data. */ void default_destroy_cost_data (void *data) { free (data); } /* Determine whether or not a pointer mode is valid. Assume defaults of ptr_mode or Pmode - can be overridden. */ bool default_valid_pointer_mode (machine_mode mode) { return (mode == ptr_mode || mode == Pmode); } /* Determine whether the memory reference specified by REF may alias the C libraries errno location. */ bool default_ref_may_alias_errno (ao_ref *ref) { tree base = ao_ref_base (ref); /* The default implementation assumes the errno location is a declaration of type int or is always accessed via a pointer to int. We assume that accesses to errno are not deliberately obfuscated (even in conforming ways). 
*/ if (TYPE_UNSIGNED (TREE_TYPE (base)) || TYPE_MODE (TREE_TYPE (base)) != TYPE_MODE (integer_type_node)) return false; /* The default implementation assumes an errno location declaration is never defined in the current compilation unit. */ if (DECL_P (base) && !TREE_STATIC (base)) return true; else if (TREE_CODE (base) == MEM_REF && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME) { struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0)); return !pi || pi->pt.anything || pi->pt.nonlocal; } return false; } /* Return the mode for a pointer to a given ADDRSPACE, defaulting to ptr_mode for the generic address space only. */ machine_mode default_addr_space_pointer_mode (addr_space_t addrspace ATTRIBUTE_UNUSED) { gcc_assert (ADDR_SPACE_GENERIC_P (addrspace)); return ptr_mode; } /* Return the mode for an address in a given ADDRSPACE, defaulting to Pmode for the generic address space only. */ machine_mode default_addr_space_address_mode (addr_space_t addrspace ATTRIBUTE_UNUSED) { gcc_assert (ADDR_SPACE_GENERIC_P (addrspace)); return Pmode; } /* Named address space version of valid_pointer_mode. */ bool default_addr_space_valid_pointer_mode (machine_mode mode, addr_space_t as) { if (!ADDR_SPACE_GENERIC_P (as)) return (mode == targetm.addr_space.pointer_mode (as) || mode == targetm.addr_space.address_mode (as)); return targetm.valid_pointer_mode (mode); } /* Some places still assume that all pointer or address modes are the standard Pmode and ptr_mode. These optimizations become invalid if the target actually supports multiple different modes. For now, we disable such optimizations on such targets, using this function. */ bool target_default_pointer_address_modes_p (void) { if (targetm.addr_space.address_mode != default_addr_space_address_mode) return false; if (targetm.addr_space.pointer_mode != default_addr_space_pointer_mode) return false; return true; } /* Named address space version of legitimate_address_p. 
*/
bool
default_addr_space_legitimate_address_p (machine_mode mode, rtx mem,
					 bool strict, addr_space_t as)
{
  /* The default target supports only the generic address space.  */
  if (!ADDR_SPACE_GENERIC_P (as))
    gcc_unreachable ();

  return targetm.legitimate_address_p (mode, mem, strict);
}

/* Named address space version of LEGITIMIZE_ADDRESS.
   Non-generic address spaces get the address X back unchanged;
   the generic space defers to the target's legitimize_address hook.  */

rtx
default_addr_space_legitimize_address (rtx x, rtx oldx,
				       machine_mode mode, addr_space_t as)
{
  if (!ADDR_SPACE_GENERIC_P (as))
    return x;

  return targetm.legitimize_address (x, oldx, mode);
}

/* The default hook for determining if one named address space is a subset
   of another and to return which address space to use as the common
   address space.  By default an address space is a subset only of itself.  */

bool
default_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
{
  return (subset == superset);
}

/* The default hook for TARGET_ADDR_SPACE_CONVERT.  This hook should never
   be called for targets with only a generic address space.  */

rtx
default_addr_space_convert (rtx op ATTRIBUTE_UNUSED,
			    tree from_type ATTRIBUTE_UNUSED,
			    tree to_type ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* Default hook: any hard register is acceptable as a scratch register.  */

bool
default_hard_regno_scratch_ok (unsigned int regno ATTRIBUTE_UNUSED)
{
  return true;
}

/* The default implementation of TARGET_MODE_DEPENDENT_ADDRESS_P. 
*/
bool
default_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED,
				  addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  /* By default no address is mode-dependent.  */
  return false;
}

/* Default hook for TARGET_OPTION_VALID_ATTRIBUTE_P: the target does not
   support the "target" function attribute, so warn and reject it.  */

bool
default_target_option_valid_attribute_p (tree ARG_UNUSED (fndecl),
					 tree ARG_UNUSED (name),
					 tree ARG_UNUSED (args),
					 int ARG_UNUSED (flags))
{
  warning (OPT_Wattributes,
	   "target attribute is not supported on this machine");

  return false;
}

/* Default hook for TARGET_OPTION_PRAGMA_PARSE: the target does not support
   "#pragma GCC target", so warn and reject it.  */

bool
default_target_option_pragma_parse (tree ARG_UNUSED (args),
				    tree ARG_UNUSED (pop_target))
{
  warning (OPT_Wpragmas,
	   "#pragma GCC target is not supported for this machine");

  return false;
}

/* Default hook for TARGET_CAN_INLINE_P: decide whether CALLEE may be
   inlined into CALLER, judged by their per-function target options.  */

bool
default_target_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree callee_opts = DECL_FUNCTION_SPECIFIC_TARGET (callee);
  tree caller_opts = DECL_FUNCTION_SPECIFIC_TARGET (caller);

  /* If callee has no option attributes, then it is ok to inline */
  if (!callee_opts)
    ret = true;

  /* If caller has no option attributes, but callee does then it is not ok to
     inline */
  else if (!caller_opts)
    ret = false;

  /* If both caller and callee have attributes, assume that if the
     pointer is different, the two functions have different target
     options since build_target_option_node uses a hash table for the
     options.  */
  else
    ret = (callee_opts == caller_opts);

  return ret;
}

#ifndef HAVE_casesi
# define HAVE_casesi 0
#endif

/* If the machine does not have a case insn that compares the bounds, this
   means extra overhead for dispatch tables, which raises the threshold for
   using them.  */

unsigned int
default_case_values_threshold (void)
{
  return (HAVE_casesi ? 4 : 5);
}

bool
default_have_conditional_execution (void)
{
  /* Conditional execution is reported only when the target defines
     the corresponding insn pattern.  */
#ifdef HAVE_conditional_execution
  return HAVE_conditional_execution;
#else
  return false;
#endif
}

/* By default we assume that c99 functions are present at the runtime,
   but sincos is not. 
*/ bool default_libc_has_function (enum function_class fn_class) { if (fn_class == function_c94 || fn_class == function_c99_misc || fn_class == function_c99_math_complex) return true; return false; } bool gnu_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED) { return true; } bool no_c99_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED) { return false; } tree default_builtin_tm_load_store (tree ARG_UNUSED (type)) { return NULL_TREE; } /* Compute cost of moving registers to/from memory. */ int default_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED, reg_class_t rclass ATTRIBUTE_UNUSED, bool in ATTRIBUTE_UNUSED) { #ifndef MEMORY_MOVE_COST return (4 + memory_move_secondary_cost (mode, (enum reg_class) rclass, in)); #else return MEMORY_MOVE_COST (mode, (enum reg_class) rclass, in); #endif } /* Compute cost of moving data from a register of class FROM to one of TO, using MODE. */ int default_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED, reg_class_t from ATTRIBUTE_UNUSED, reg_class_t to ATTRIBUTE_UNUSED) { #ifndef REGISTER_MOVE_COST return 2; #else return REGISTER_MOVE_COST (mode, (enum reg_class) from, (enum reg_class) to); #endif } /* For hooks which use the MOVE_RATIO macro, this gives the legacy default behaviour. SPEED_P is true if we are compiling for speed. */ unsigned int get_move_ratio (bool speed_p ATTRIBUTE_UNUSED) { unsigned int move_ratio; #ifdef MOVE_RATIO move_ratio = (unsigned int) MOVE_RATIO (speed_p); #else #if defined (HAVE_movmemqi) || defined (HAVE_movmemhi) || defined (HAVE_movmemsi) || defined (HAVE_movmemdi) || defined (HAVE_movmemti) move_ratio = 2; #else /* No movmem patterns, pick a default. */ move_ratio = ((speed_p) ? 15 : 3); #endif #endif return move_ratio; } /* Return TRUE if the move_by_pieces/set_by_pieces infrastructure should be used; return FALSE if the movmem/setmem optab should be expanded, or a call to memcpy emitted. 
*/ bool default_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size, unsigned int alignment, enum by_pieces_operation op, bool speed_p) { unsigned int max_size = 0; unsigned int ratio = 0; switch (op) { case CLEAR_BY_PIECES: max_size = STORE_MAX_PIECES; ratio = CLEAR_RATIO (speed_p); break; case MOVE_BY_PIECES: max_size = MOVE_MAX_PIECES; ratio = get_move_ratio (speed_p); break; case SET_BY_PIECES: max_size = STORE_MAX_PIECES; ratio = SET_RATIO (speed_p); break; case STORE_BY_PIECES: max_size = STORE_MAX_PIECES; ratio = get_move_ratio (speed_p); break; } return move_by_pieces_ninsns (size, alignment, max_size + 1) < ratio; } bool default_profile_before_prologue (void) { #ifdef PROFILE_BEFORE_PROLOGUE return true; #else return false; #endif } /* The default implementation of TARGET_PREFERRED_RELOAD_CLASS. */ reg_class_t default_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass) { #ifdef PREFERRED_RELOAD_CLASS return (reg_class_t) PREFERRED_RELOAD_CLASS (x, (enum reg_class) rclass); #else return rclass; #endif } /* The default implementation of TARGET_OUTPUT_PREFERRED_RELOAD_CLASS. */ reg_class_t default_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass) { return rclass; } /* The default implementation of TARGET_PREFERRED_RENAME_CLASS. */ reg_class_t default_preferred_rename_class (reg_class_t rclass ATTRIBUTE_UNUSED) { return NO_REGS; } /* The default implementation of TARGET_CLASS_LIKELY_SPILLED_P. */ bool default_class_likely_spilled_p (reg_class_t rclass) { return (reg_class_size[(int) rclass] == 1); } /* The default implementation of TARGET_CLASS_MAX_NREGS. */ unsigned char default_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED) { #ifdef CLASS_MAX_NREGS return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass, mode); #else return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD); #endif } /* Determine the debugging unwind mechanism for the target. 
*/ enum unwind_info_type default_debug_unwind_info (void) { /* If the target wants to force the use of dwarf2 unwind info, let it. */ /* ??? Change all users to the hook, then poison this. */ #ifdef DWARF2_FRAME_INFO if (DWARF2_FRAME_INFO) return UI_DWARF2; #endif /* Otherwise, only turn it on if dwarf2 debugging is enabled. */ #ifdef DWARF2_DEBUGGING_INFO if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG) return UI_DWARF2; #endif return UI_NONE; } /* Determine the correct mode for a Dwarf frame register that represents register REGNO. */ machine_mode default_dwarf_frame_reg_mode (int regno) { machine_mode save_mode = reg_raw_mode[regno]; if (HARD_REGNO_CALL_PART_CLOBBERED (regno, save_mode)) save_mode = choose_hard_reg_mode (regno, 1, true); return save_mode; } /* To be used by targets where reg_raw_mode doesn't return the right mode for registers used in apply_builtin_return and apply_builtin_arg. */ machine_mode default_get_reg_raw_mode (int regno) { return reg_raw_mode[regno]; } /* Return true if a leaf function should stay leaf even with profiling enabled. */ bool default_keep_leaf_when_profiled () { return false; } /* Return true if the state of option OPTION should be stored in PCH files and checked by default_pch_valid_p. Store the option's current state in STATE if so. */ static inline bool option_affects_pch_p (int option, struct cl_option_state *state) { if ((cl_options[option].flags & CL_TARGET) == 0) return false; if ((cl_options[option].flags & CL_PCH_IGNORE) != 0) return false; if (option_flag_var (option, &global_options) == &target_flags) if (targetm.check_pch_target_flags) return false; return get_option_state (&global_options, option, state); } /* Default version of get_pch_validity. By default, every flag difference is fatal; that will be mostly right for most targets, but completely right for very few. 
*/ void * default_get_pch_validity (size_t *sz) { struct cl_option_state state; size_t i; char *result, *r; *sz = 2; if (targetm.check_pch_target_flags) *sz += sizeof (target_flags); for (i = 0; i < cl_options_count; i++) if (option_affects_pch_p (i, &state)) *sz += state.size; result = r = XNEWVEC (char, *sz); r[0] = flag_pic; r[1] = flag_pie; r += 2; if (targetm.check_pch_target_flags) { memcpy (r, &target_flags, sizeof (target_flags)); r += sizeof (target_flags); } for (i = 0; i < cl_options_count; i++) if (option_affects_pch_p (i, &state)) { memcpy (r, state.data, state.size); r += state.size; } return result; } /* Return a message which says that a PCH file was created with a different setting of OPTION. */ static const char * pch_option_mismatch (const char *option) { return xasprintf (_("created and used with differing settings of '%s'"), option); } /* Default version of pch_valid_p. */ const char * default_pch_valid_p (const void *data_p, size_t len) { struct cl_option_state state; const char *data = (const char *)data_p; size_t i; /* -fpic and -fpie also usually make a PCH invalid. */ if (data[0] != flag_pic) return _("created and used with different settings of -fpic"); if (data[1] != flag_pie) return _("created and used with different settings of -fpie"); data += 2; /* Check target_flags. */ if (targetm.check_pch_target_flags) { int tf; const char *r; memcpy (&tf, data, sizeof (target_flags)); data += sizeof (target_flags); len -= sizeof (target_flags); r = targetm.check_pch_target_flags (tf); if (r != NULL) return r; } for (i = 0; i < cl_options_count; i++) if (option_affects_pch_p (i, &state)) { if (memcmp (data, state.data, state.size) != 0) return pch_option_mismatch (cl_options[i].opt_text); data += state.size; len -= state.size; } return NULL; } /* Default version of cstore_mode. */ machine_mode default_cstore_mode (enum insn_code icode) { return insn_data[(int) icode].operand[0].mode; } /* Default version of member_type_forces_blk. 
*/ bool default_member_type_forces_blk (const_tree, machine_mode) { return false; } rtx default_load_bounds_for_arg (rtx addr ATTRIBUTE_UNUSED, rtx ptr ATTRIBUTE_UNUSED, rtx bnd ATTRIBUTE_UNUSED) { gcc_unreachable (); } void default_store_bounds_for_arg (rtx val ATTRIBUTE_UNUSED, rtx addr ATTRIBUTE_UNUSED, rtx bounds ATTRIBUTE_UNUSED, rtx to ATTRIBUTE_UNUSED) { gcc_unreachable (); } rtx default_load_returned_bounds (rtx slot ATTRIBUTE_UNUSED) { gcc_unreachable (); } void default_store_returned_bounds (rtx slot ATTRIBUTE_UNUSED, rtx bounds ATTRIBUTE_UNUSED) { gcc_unreachable (); } /* Default version of canonicalize_comparison. */ void default_canonicalize_comparison (int *, rtx *, rtx *, bool) { } /* Default implementation of TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */ void default_atomic_assign_expand_fenv (tree *, tree *, tree *) { } #ifndef PAD_VARARGS_DOWN #define PAD_VARARGS_DOWN BYTES_BIG_ENDIAN #endif /* Build an indirect-ref expression over the given TREE, which represents a piece of a va_arg() expansion. */ tree build_va_arg_indirect_ref (tree addr) { addr = build_simple_mem_ref_loc (EXPR_LOCATION (addr), addr); return addr; } /* The "standard" implementation of va_arg: read the value from the current (padded) address and increment by the (padded) size. */ tree std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p, gimple_seq *post_p) { tree addr, t, type_size, rounded_size, valist_tmp; unsigned HOST_WIDE_INT align, boundary; bool indirect; #ifdef ARGS_GROW_DOWNWARD /* All of the alignment and movement below is for args-grow-up machines. As of 2004, there are only 3 ARGS_GROW_DOWNWARD targets, and they all implement their own specialized gimplify_va_arg_expr routines. 
*/ gcc_unreachable (); #endif indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false); if (indirect) type = build_pointer_type (type); align = PARM_BOUNDARY / BITS_PER_UNIT; boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type); /* When we align parameter on stack for caller, if the parameter alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We will match callee here with caller. */ if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT) boundary = MAX_SUPPORTED_STACK_ALIGNMENT; boundary /= BITS_PER_UNIT; /* Hoist the valist value into a temporary for the moment. */ valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL); /* va_list pointer is aligned to PARM_BOUNDARY. If argument actually requires greater alignment, we must perform dynamic alignment. */ if (boundary > align && !integer_zerop (TYPE_SIZE (type))) { t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp, fold_build_pointer_plus_hwi (valist_tmp, boundary - 1)); gimplify_and_add (t, pre_p); t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp, fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist), valist_tmp, build_int_cst (TREE_TYPE (valist), -boundary))); gimplify_and_add (t, pre_p); } else boundary = align; /* If the actual alignment is less than the alignment of the type, adjust the type accordingly so that we don't assume strict alignment when dereferencing the pointer. */ boundary *= BITS_PER_UNIT; if (boundary < TYPE_ALIGN (type)) { type = build_variant_type_copy (type); TYPE_ALIGN (type) = boundary; } /* Compute the rounded size of the type. */ type_size = size_in_bytes (type); rounded_size = round_up (type_size, align); /* Reduce rounded_size so it's sharable with the postqueue. */ gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue); /* Get AP. */ addr = valist_tmp; if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size)) { /* Small args are padded downward. 
*/ t = fold_build2_loc (input_location, GT_EXPR, sizetype, rounded_size, size_int (align)); t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node, size_binop (MINUS_EXPR, rounded_size, type_size)); addr = fold_build_pointer_plus (addr, t); } /* Compute new value for AP. */ t = fold_build_pointer_plus (valist_tmp, rounded_size); t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t); gimplify_and_add (t, pre_p); addr = fold_convert (build_pointer_type (type), addr); if (indirect) addr = build_va_arg_indirect_ref (addr); return build_va_arg_indirect_ref (addr); } tree default_chkp_bound_type (void) { tree res = make_node (POINTER_BOUNDS_TYPE); TYPE_PRECISION (res) = TYPE_PRECISION (size_type_node) * 2; TYPE_NAME (res) = get_identifier ("__bounds_type"); SET_TYPE_MODE (res, targetm.chkp_bound_mode ()); layout_type (res); return res; } enum machine_mode default_chkp_bound_mode (void) { return VOIDmode; } tree default_builtin_chkp_function (unsigned int fcode ATTRIBUTE_UNUSED) { return NULL_TREE; } rtx default_chkp_function_value_bounds (const_tree ret_type ATTRIBUTE_UNUSED, const_tree fn_decl_or_type ATTRIBUTE_UNUSED, bool outgoing ATTRIBUTE_UNUSED) { gcc_unreachable (); } tree default_chkp_make_bounds_constant (HOST_WIDE_INT lb ATTRIBUTE_UNUSED, HOST_WIDE_INT ub ATTRIBUTE_UNUSED) { return NULL_TREE; } int default_chkp_initialize_bounds (tree var ATTRIBUTE_UNUSED, tree lb ATTRIBUTE_UNUSED, tree ub ATTRIBUTE_UNUSED, tree *stmts ATTRIBUTE_UNUSED) { return 0; } void default_setup_incoming_vararg_bounds (cumulative_args_t ca ATTRIBUTE_UNUSED, enum machine_mode mode ATTRIBUTE_UNUSED, tree type ATTRIBUTE_UNUSED, int *pretend_arg_size ATTRIBUTE_UNUSED, int second_time ATTRIBUTE_UNUSED) { } /* An implementation of TARGET_CAN_USE_DOLOOP_P for targets that do not support nested low-overhead loops. */ bool can_use_doloop_if_innermost (const widest_int &, const widest_int &, unsigned int loop_depth, bool) { return loop_depth == 1; } #include "gt-targhooks.h"
gpl-2.0
jwakely/gcc
gcc/testsuite/gfortran.dg/unlimited_polymorphic_20.f90
19
2396
! { dg-do run }
!
! Testing fix for PR fortran/60255
!
! Author: Andre Vehreschild <vehre@gmx.de>
!
! Exercises SELECT TYPE on unlimited-polymorphic (class(*)) entities that
! hold deferred-length characters; each failing check STOPs with a unique
! code so the harness can pinpoint the regression.
MODULE m
contains
  ! Write the character length of ARG into RES (only when ARG is character).
  subroutine bar (arg, res)
    class(*) :: arg
    character(100) :: res
    select type (w => arg)
      type is (character(*))
        write (res, '(I2)') len(w)
    end select
  end subroutine
END MODULE

program test
  use m; implicit none
  character(LEN=:), allocatable, target :: S
  character(LEN=100) :: res
  class(*), pointer :: ucp, ucp2
  call sub1 ("long test string", 16)
  call sub2 ()
  S = "test"
  ucp => S
  call sub3 (ucp)
  ! Sourced allocation must carry the character length along.
  allocate (ucp2, source=ucp)
  call sub3 (ucp2)
  call sub4 (S, 4)
  call sub4 ("This is a longer string.", 24)
  call bar (S, res)
  if (trim (res) .NE. " 4") STOP 1
  call bar(ucp, res)
  if (trim (res) .NE. " 4") STOP 2

contains

  ! Pointer association of class(*) with a fixed-length character dummy.
  subroutine sub1(dcl, ilen)
    character(len=*), target :: dcl
    integer(4) :: ilen
    character(len=:), allocatable :: hlp
    class(*), pointer :: ucp
    ucp => dcl
    select type (ucp)
      type is (character(len=*))
        if (len(dcl) .NE. ilen) STOP 3
        if (len(ucp) .NE. ilen) STOP 4
        hlp = ucp
        if (len(hlp) .NE. ilen) STOP 5
      class default
        STOP 6
    end select
  end subroutine

  ! Same, but the target is a local deferred-length allocatable.
  subroutine sub2
    character(len=:), allocatable, target :: dcl
    class(*), pointer :: ucp
    dcl = "ttt"
    ucp => dcl
    select type (ucp)
      type is (character(len=*))
        if (len(ucp) .ne. 3) STOP 7
      class default
        STOP 8
    end select
  end subroutine

  ! Receives the unlimited-polymorphic pointer; expects length 4 ("test").
  subroutine sub3(ucp)
    character(len=:), allocatable :: hlp
    class(*), pointer :: ucp
    select type (ucp)
      type is (character(len=*))
        if (len(ucp) .ne. 4) STOP 9
        hlp = ucp
        if (len(hlp) .ne. 4) STOP 10
      class default
        STOP 11
    end select
  end subroutine

  ! Non-pointer class(*) dummy; ILEN is the expected character length.
  subroutine sub4(ucp, ilen)
    character(len=:), allocatable :: hlp
    integer(4) :: ilen
    class(*) :: ucp
    select type (ucp)
      type is (character(len=*))
        if (len(ucp) .ne. ilen) STOP 12
        hlp = ucp
        if (len(hlp) .ne. ilen) STOP 13
      class default
        STOP 14
    end select
  end subroutine
end program
gpl-2.0
imoseyon/leanKernel-d2usc-deprecated
drivers/net/macvlan.c
531
21841
/* * Copyright (c) 2007 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * The code this is based on carried the following copyright notice: * --- * (C) Copyright 2001-2006 * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com * Re-worked by Ben Greear <greearb@candelatech.com> * --- */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/rculist.h> #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/if_arp.h> #include <linux/if_link.h> #include <linux/if_macvlan.h> #include <net/rtnetlink.h> #include <net/xfrm.h> #define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) struct macvlan_port { struct net_device *dev; struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; struct list_head vlans; struct rcu_head rcu; bool passthru; int count; }; static void macvlan_port_destroy(struct net_device *dev); #define macvlan_port_get_rcu(dev) \ ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data)) #define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data) #define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT) static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, const unsigned char *addr) { struct macvlan_dev *vlan; struct hlist_node *n; hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) { if (!compare_ether_addr_64bits(vlan->dev->dev_addr, addr)) return vlan; } return NULL; } static void macvlan_hash_add(struct macvlan_dev *vlan) { struct macvlan_port *port = vlan->port; const unsigned char *addr = vlan->dev->dev_addr; 
hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[addr[5]]); } static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync) { hlist_del_rcu(&vlan->hlist); if (sync) synchronize_rcu(); } static void macvlan_hash_change_addr(struct macvlan_dev *vlan, const unsigned char *addr) { macvlan_hash_del(vlan, true); /* Now that we are unhashed it is safe to change the device * address without confusing packet delivery. */ memcpy(vlan->dev->dev_addr, addr, ETH_ALEN); macvlan_hash_add(vlan); } static int macvlan_addr_busy(const struct macvlan_port *port, const unsigned char *addr) { /* Test to see if the specified multicast address is * currently in use by the underlying device or * another macvlan. */ if (!compare_ether_addr_64bits(port->dev->dev_addr, addr)) return 1; if (macvlan_hash_lookup(port, addr)) return 1; return 0; } static int macvlan_broadcast_one(struct sk_buff *skb, const struct macvlan_dev *vlan, const struct ethhdr *eth, bool local) { struct net_device *dev = vlan->dev; if (!skb) return NET_RX_DROP; if (local) return vlan->forward(dev, skb); skb->dev = dev; if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast)) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; return vlan->receive(skb); } static void macvlan_broadcast(struct sk_buff *skb, const struct macvlan_port *port, struct net_device *src, enum macvlan_mode mode) { const struct ethhdr *eth = eth_hdr(skb); const struct macvlan_dev *vlan; struct hlist_node *n; struct sk_buff *nskb; unsigned int i; int err; if (skb->protocol == htons(ETH_P_PAUSE)) return; for (i = 0; i < MACVLAN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) { if (vlan->dev == src || !(vlan->mode & mode)) continue; nskb = skb_clone(skb, GFP_ATOMIC); err = macvlan_broadcast_one(nskb, vlan, eth, mode == MACVLAN_MODE_BRIDGE); macvlan_count_rx(vlan, skb->len + ETH_HLEN, err == NET_RX_SUCCESS, 1); } } } /* called under rcu_read_lock() from netif_receive_skb */ static 
rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) { struct macvlan_port *port; struct sk_buff *skb = *pskb; const struct ethhdr *eth = eth_hdr(skb); const struct macvlan_dev *vlan; const struct macvlan_dev *src; struct net_device *dev; unsigned int len = 0; int ret = NET_RX_DROP; port = macvlan_port_get_rcu(skb->dev); if (is_multicast_ether_addr(eth->h_dest)) { src = macvlan_hash_lookup(port, eth->h_source); if (!src) /* frame comes from an external address */ macvlan_broadcast(skb, port, NULL, MACVLAN_MODE_PRIVATE | MACVLAN_MODE_VEPA | MACVLAN_MODE_PASSTHRU| MACVLAN_MODE_BRIDGE); else if (src->mode == MACVLAN_MODE_VEPA) /* flood to everyone except source */ macvlan_broadcast(skb, port, src->dev, MACVLAN_MODE_VEPA | MACVLAN_MODE_BRIDGE); else if (src->mode == MACVLAN_MODE_BRIDGE) /* * flood only to VEPA ports, bridge ports * already saw the frame on the way out. */ macvlan_broadcast(skb, port, src->dev, MACVLAN_MODE_VEPA); return RX_HANDLER_PASS; } if (port->passthru) vlan = list_first_entry(&port->vlans, struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); if (vlan == NULL) return RX_HANDLER_PASS; dev = vlan->dev; if (unlikely(!(dev->flags & IFF_UP))) { kfree_skb(skb); return RX_HANDLER_CONSUMED; } len = skb->len + ETH_HLEN; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto out; skb->dev = dev; skb->pkt_type = PACKET_HOST; ret = vlan->receive(skb); out: macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0); return RX_HANDLER_CONSUMED; } static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) { const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; __u8 ip_summed = skb->ip_summed; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = (void *)skb->data; skb->ip_summed = CHECKSUM_UNNECESSARY; /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { macvlan_broadcast(skb, port, dev, 
MACVLAN_MODE_BRIDGE); goto xmit_world; } dest = macvlan_hash_lookup(port, eth->h_dest); if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { /* send to lowerdev first for its network taps */ dev_forward_skb(vlan->lowerdev, skb); return NET_XMIT_SUCCESS; } } xmit_world: skb->ip_summed = ip_summed; skb->dev = vlan->lowerdev; return dev_queue_xmit(skb); } netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned int len = skb->len; int ret; const struct macvlan_dev *vlan = netdev_priv(dev); ret = macvlan_queue_xmit(skb, dev); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct macvlan_pcpu_stats *pcpu_stats; pcpu_stats = this_cpu_ptr(vlan->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); pcpu_stats->tx_packets++; pcpu_stats->tx_bytes += len; u64_stats_update_end(&pcpu_stats->syncp); } else { this_cpu_inc(vlan->pcpu_stats->tx_dropped); } return ret; } EXPORT_SYMBOL_GPL(macvlan_start_xmit); static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { const struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; return dev_hard_header(skb, lowerdev, type, daddr, saddr ? 
: dev->dev_addr, len); } static const struct header_ops macvlan_hard_header_ops = { .create = macvlan_hard_header, .rebuild = eth_rebuild_header, .parse = eth_header_parse, .cache = eth_header_cache, .cache_update = eth_header_cache_update, }; static int macvlan_open(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; int err; if (vlan->port->passthru) { dev_set_promiscuity(lowerdev, 1); goto hash_add; } err = -EBUSY; if (macvlan_addr_busy(vlan->port, dev->dev_addr)) goto out; err = dev_uc_add(lowerdev, dev->dev_addr); if (err < 0) goto out; if (dev->flags & IFF_ALLMULTI) { err = dev_set_allmulti(lowerdev, 1); if (err < 0) goto del_unicast; } hash_add: macvlan_hash_add(vlan); return 0; del_unicast: dev_uc_del(lowerdev, dev->dev_addr); out: return err; } static int macvlan_stop(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; if (vlan->port->passthru) { dev_set_promiscuity(lowerdev, -1); goto hash_del; } dev_mc_unsync(lowerdev, dev); if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(lowerdev, -1); dev_uc_del(lowerdev, dev->dev_addr); hash_del: macvlan_hash_del(vlan, !dev->dismantle); return 0; } static int macvlan_set_mac_address(struct net_device *dev, void *p) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; struct sockaddr *addr = p; int err; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (!(dev->flags & IFF_UP)) { /* Just copy in the new address */ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); } else { /* Rehash and update the device filters */ if (macvlan_addr_busy(vlan->port, addr->sa_data)) return -EBUSY; err = dev_uc_add(lowerdev, addr->sa_data); if (err) return err; dev_uc_del(lowerdev, dev->dev_addr); macvlan_hash_change_addr(vlan, addr->sa_data); } return 0; } static void macvlan_change_rx_flags(struct net_device *dev, int change) { struct macvlan_dev *vlan = 
netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; if (change & IFF_ALLMULTI) dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); } static void macvlan_set_multicast_list(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); dev_mc_sync(vlan->lowerdev, dev); } static int macvlan_change_mtu(struct net_device *dev, int new_mtu) { struct macvlan_dev *vlan = netdev_priv(dev); if (new_mtu < 68 || vlan->lowerdev->mtu < new_mtu) return -EINVAL; dev->mtu = new_mtu; return 0; } /* * macvlan network devices have devices nesting below it and are a special * "super class" of normal network devices; split their locks off into a * separate class since they always nest. */ static struct lock_class_key macvlan_netdev_xmit_lock_key; static struct lock_class_key macvlan_netdev_addr_lock_key; #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM) #define MACVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) static void macvlan_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &macvlan_netdev_xmit_lock_key); } static void macvlan_set_lockdep_class(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &macvlan_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); } static int macvlan_init(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); const struct net_device *lowerdev = vlan->lowerdev; dev->state = (dev->state & ~MACVLAN_STATE_MASK) | (lowerdev->state & MACVLAN_STATE_MASK); dev->features = lowerdev->features & MACVLAN_FEATURES; dev->features |= NETIF_F_LLTX; dev->gso_max_size = lowerdev->gso_max_size; dev->iflink = lowerdev->ifindex; dev->hard_header_len = lowerdev->hard_header_len; 
macvlan_set_lockdep_class(dev); vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats); if (!vlan->pcpu_stats) return -ENOMEM; return 0; } static void macvlan_uninit(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_port *port = vlan->port; free_percpu(vlan->pcpu_stats); port->count -= 1; if (!port->count) macvlan_port_destroy(port->dev); } static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct macvlan_dev *vlan = netdev_priv(dev); if (vlan->pcpu_stats) { struct macvlan_pcpu_stats *p; u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes; u32 rx_errors = 0, tx_dropped = 0; unsigned int start; int i; for_each_possible_cpu(i) { p = per_cpu_ptr(vlan->pcpu_stats, i); do { start = u64_stats_fetch_begin_bh(&p->syncp); rx_packets = p->rx_packets; rx_bytes = p->rx_bytes; rx_multicast = p->rx_multicast; tx_packets = p->tx_packets; tx_bytes = p->tx_bytes; } while (u64_stats_fetch_retry_bh(&p->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; stats->multicast += rx_multicast; stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; /* rx_errors & tx_dropped are u32, updated * without syncp protection. 
*/ rx_errors += p->rx_errors; tx_dropped += p->tx_dropped; } stats->rx_errors = rx_errors; stats->rx_dropped = rx_errors; stats->tx_dropped = tx_dropped; } return stats; } static void macvlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { snprintf(drvinfo->driver, 32, "macvlan"); snprintf(drvinfo->version, 32, "0.1"); } static int macvlan_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { const struct macvlan_dev *vlan = netdev_priv(dev); return dev_ethtool_get_settings(vlan->lowerdev, cmd); } static const struct ethtool_ops macvlan_ethtool_ops = { .get_link = ethtool_op_get_link, .get_settings = macvlan_ethtool_get_settings, .get_drvinfo = macvlan_ethtool_get_drvinfo, }; static const struct net_device_ops macvlan_netdev_ops = { .ndo_init = macvlan_init, .ndo_uninit = macvlan_uninit, .ndo_open = macvlan_open, .ndo_stop = macvlan_stop, .ndo_start_xmit = macvlan_start_xmit, .ndo_change_mtu = macvlan_change_mtu, .ndo_change_rx_flags = macvlan_change_rx_flags, .ndo_set_mac_address = macvlan_set_mac_address, .ndo_set_multicast_list = macvlan_set_multicast_list, .ndo_get_stats64 = macvlan_dev_get_stats64, .ndo_validate_addr = eth_validate_addr, }; void macvlan_common_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->netdev_ops = &macvlan_netdev_ops; dev->destructor = free_netdev; dev->header_ops = &macvlan_hard_header_ops, dev->ethtool_ops = &macvlan_ethtool_ops; } EXPORT_SYMBOL_GPL(macvlan_common_setup); static void macvlan_setup(struct net_device *dev) { macvlan_common_setup(dev); dev->tx_queue_len = 0; } static int macvlan_port_create(struct net_device *dev) { struct macvlan_port *port; unsigned int i; int err; if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) return -EINVAL; port = kzalloc(sizeof(*port), GFP_KERNEL); if (port == NULL) return -ENOMEM; port->passthru = false; port->dev = dev; INIT_LIST_HEAD(&port->vlans); for (i = 0; i < 
MACVLAN_HASH_SIZE; i++) INIT_HLIST_HEAD(&port->vlan_hash[i]); err = netdev_rx_handler_register(dev, macvlan_handle_frame, port); if (err) kfree(port); else dev->priv_flags |= IFF_MACVLAN_PORT; return err; } static void macvlan_port_destroy(struct net_device *dev) { struct macvlan_port *port = macvlan_port_get(dev); dev->priv_flags &= ~IFF_MACVLAN_PORT; netdev_rx_handler_unregister(dev); kfree_rcu(port, rcu); } static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (data && data[IFLA_MACVLAN_MODE]) { switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { case MACVLAN_MODE_PRIVATE: case MACVLAN_MODE_VEPA: case MACVLAN_MODE_BRIDGE: case MACVLAN_MODE_PASSTHRU: break; default: return -EINVAL; } } return 0; } int macvlan_common_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], int (*receive)(struct sk_buff *skb), int (*forward)(struct net_device *dev, struct sk_buff *skb)) { struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_port *port; struct net_device *lowerdev; int err; if (!tb[IFLA_LINK]) return -EINVAL; lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (lowerdev == NULL) return -ENODEV; /* When creating macvlans on top of other macvlans - use * the real device as the lowerdev. 
*/ if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) { struct macvlan_dev *lowervlan = netdev_priv(lowerdev); lowerdev = lowervlan->lowerdev; } if (!tb[IFLA_MTU]) dev->mtu = lowerdev->mtu; else if (dev->mtu > lowerdev->mtu) return -EINVAL; if (!tb[IFLA_ADDRESS]) random_ether_addr(dev->dev_addr); if (!macvlan_port_exists(lowerdev)) { err = macvlan_port_create(lowerdev); if (err < 0) return err; } port = macvlan_port_get(lowerdev); /* Only 1 macvlan device can be created in passthru mode */ if (port->passthru) return -EINVAL; vlan->lowerdev = lowerdev; vlan->dev = dev; vlan->port = port; vlan->receive = receive; vlan->forward = forward; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); if (vlan->mode == MACVLAN_MODE_PASSTHRU) { if (port->count) return -EINVAL; port->passthru = true; memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN); } port->count += 1; err = register_netdevice(dev); if (err < 0) goto destroy_port; list_add_tail(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); return 0; destroy_port: port->count -= 1; if (!port->count) macvlan_port_destroy(lowerdev); return err; } EXPORT_SYMBOL_GPL(macvlan_common_newlink); static int macvlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { return macvlan_common_newlink(src_net, dev, tb, data, netif_rx, dev_forward_skb); } void macvlan_dellink(struct net_device *dev, struct list_head *head) { struct macvlan_dev *vlan = netdev_priv(dev); list_del(&vlan->list); unregister_netdevice_queue(dev, head); } EXPORT_SYMBOL_GPL(macvlan_dellink); static int macvlan_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct macvlan_dev *vlan = netdev_priv(dev); if (data && data[IFLA_MACVLAN_MODE]) vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); return 0; } static size_t macvlan_get_size(const struct net_device *dev) { return nla_total_size(4); } static 
int macvlan_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode); return 0; nla_put_failure: return -EMSGSIZE; } static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, }; int macvlan_link_register(struct rtnl_link_ops *ops) { /* common fields */ ops->priv_size = sizeof(struct macvlan_dev); ops->validate = macvlan_validate; ops->maxtype = IFLA_MACVLAN_MAX; ops->policy = macvlan_policy; ops->changelink = macvlan_changelink; ops->get_size = macvlan_get_size; ops->fill_info = macvlan_fill_info; return rtnl_link_register(ops); }; EXPORT_SYMBOL_GPL(macvlan_link_register); static struct rtnl_link_ops macvlan_link_ops = { .kind = "macvlan", .setup = macvlan_setup, .newlink = macvlan_newlink, .dellink = macvlan_dellink, }; static int macvlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = ptr; struct macvlan_dev *vlan, *next; struct macvlan_port *port; LIST_HEAD(list_kill); if (!macvlan_port_exists(dev)) return NOTIFY_DONE; port = macvlan_port_get(dev); switch (event) { case NETDEV_CHANGE: list_for_each_entry(vlan, &port->vlans, list) netif_stacked_transfer_operstate(vlan->lowerdev, vlan->dev); break; case NETDEV_FEAT_CHANGE: list_for_each_entry(vlan, &port->vlans, list) { vlan->dev->features = dev->features & MACVLAN_FEATURES; vlan->dev->gso_max_size = dev->gso_max_size; netdev_features_change(vlan->dev); } break; case NETDEV_UNREGISTER: /* twiddle thumbs on netns device moves */ if (dev->reg_state != NETREG_UNREGISTERING) break; list_for_each_entry_safe(vlan, next, &port->vlans, list) vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill); unregister_netdevice_many(&list_kill); list_del(&list_kill); break; case NETDEV_PRE_TYPE_CHANGE: /* Forbid underlaying device to change its type. 
*/ return NOTIFY_BAD; } return NOTIFY_DONE; } static struct notifier_block macvlan_notifier_block __read_mostly = { .notifier_call = macvlan_device_event, }; static int __init macvlan_init_module(void) { int err; register_netdevice_notifier(&macvlan_notifier_block); err = macvlan_link_register(&macvlan_link_ops); if (err < 0) goto err1; return 0; err1: unregister_netdevice_notifier(&macvlan_notifier_block); return err; } static void __exit macvlan_cleanup_module(void) { rtnl_link_unregister(&macvlan_link_ops); unregister_netdevice_notifier(&macvlan_notifier_block); } module_init(macvlan_init_module); module_exit(macvlan_cleanup_module); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("Driver for MAC address based VLANs"); MODULE_ALIAS_RTNL_LINK("macvlan");
gpl-2.0
xdje42/gdb
libiberty/clock.c
531
2622
/* ANSI-compatible clock function.
   Copyright (C) 1994, 1995, 1999 Free Software Foundation, Inc.

This file is part of the libiberty library.  This library is free
software; you can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.

This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
02110-1301, USA.

As a special exception, if you link this library with files compiled
with a GNU compiler to produce an executable, this does not cause the
resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why the
executable file might be covered by the GNU General Public License. */

/*

@deftypefn Supplemental long clock (void)

Returns an approximation of the CPU time used by the process as a
@code{clock_t}; divide this number by @samp{CLOCKS_PER_SEC} to get the
number of seconds used.

@end deftypefn

*/

#include "config.h"

#ifdef HAVE_GETRUSAGE
#include <sys/time.h>
#include <sys/resource.h>
#endif

#ifdef HAVE_TIMES
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#include <sys/times.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

/* Pick a clock-ticks-per-second value, preferring the runtime sysconf
   answer, then the compile-time HZ, then CLOCKS_PER_SEC as a last
   resort.  Only the HAVE_TIMES branch below actually consumes it.  */
#ifdef _SC_CLK_TCK
#define GNU_HZ  sysconf(_SC_CLK_TCK)
#else
#ifdef HZ
#define GNU_HZ  HZ
#else
#ifdef CLOCKS_PER_SEC
#define GNU_HZ  CLOCKS_PER_SEC
#endif
#endif
#endif

/* FIXME: should be able to declare as clock_t. */

/* Return the CPU time (user + system) consumed by this process, in
   units such that dividing by CLOCKS_PER_SEC yields seconds.  Three
   fallback implementations are selected at configure time.  */
long
clock (void)
{
#ifdef HAVE_GETRUSAGE
  struct rusage rusage;

  getrusage (0, &rusage);
  /* Sum user and system time, converting seconds to microseconds so
     the result is on a 1,000,000-per-second scale.  */
  return (rusage.ru_utime.tv_sec * 1000000 + rusage.ru_utime.tv_usec
	  + rusage.ru_stime.tv_sec * 1000000 + rusage.ru_stime.tv_usec);
#else
#ifdef	HAVE_TIMES
  struct tms tms;

  times (&tms);
  /* times() reports in clock ticks; rescale ticks to microseconds.  */
  return (tms.tms_utime + tms.tms_stime) * (1000000 / GNU_HZ);
#else
#ifdef VMS
  /* VMS-specific layout of the times() result; fields are in
     10-millisecond units, hence the * 10000 to reach microseconds.
     NOTE(review): assumes the VMS times() fills this ad-hoc struct —
     confirm against the VMS C RTL before relying on it.  */
  struct
    {
      int proc_user_time;
      int proc_system_time;
      int child_user_time;
      int child_system_time;
    } vms_times;

  times (&vms_times);
  return (vms_times.proc_user_time + vms_times.proc_system_time) * 10000;
#else
  /* A fallback, if nothing else available. */
  return 0;
#endif /* VMS */
#endif /* HAVE_TIMES */
#endif /* HAVE_GETRUSAGE */
}
gpl-2.0
aosm/gcc
libiberty/vsprintf.c
531
1878
/* Simple implementation of vsprintf for systems without it.
   Highly system-dependent, but should work on most "traditional"
   implementations of stdio; newer ones should already have vsprintf.
   Written by Per Bothner of Cygnus Support.
   Based on libg++'s "form" (written by Doug Lea; dl@rocky.oswego.edu).
   Copyright (C) 1991, 1995, 2002 Free Software Foundation, Inc.

This file is part of the libiberty library.  This library is free
software; you can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.

This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
02110-1301, USA.

As a special exception, if you link this library with files compiled
with a GNU compiler to produce an executable, this does not cause the
resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why the
executable file might be covered by the GNU General Public License. */

#include <ansidecl.h>
#include <stdarg.h>
#include <stdio.h>
#undef vsprintf

/* Only usable on "traditional" stdio implementations that expose the
   _IOSTRG (string pseudo-stream) and _IOWRT flags; otherwise this
   whole fallback is compiled out.  */
#if defined _IOSTRG && defined _IOWRT

/* Format FORMAT with arguments AP into BUF by building a fake FILE
   object that points at the caller's buffer and handing it to the
   traditional _doprnt() engine.  Returns the number of characters
   written, as reported by _doprnt.

   NOTE(review): the buffer is assumed to hold at least 12000 bytes
   (the hard-coded _cnt) — there is no bounds information available to
   a vsprintf-style interface.  Also, the VMS branch uses "b->" on a
   non-pointer "FILE b", which looks like it cannot compile as-is;
   confirm against a VMS stdio where FILE may be defined differently.  */
int
vsprintf (char *buf, const char *format, va_list ap)
{
  FILE b;
  int ret;
#ifdef VMS
  b->_flag = _IOWRT|_IOSTRG;
  b->_ptr = buf;
  b->_cnt = 12000;
#else
  /* Mark the stream as a writable in-memory string and aim its
     put-pointer at the caller's buffer.  */
  b._flag = _IOWRT|_IOSTRG;
  b._ptr = buf;
  b._cnt = 12000;
#endif
  ret = _doprnt(format, ap, &b);
  /* NUL-terminate the result through the same pseudo-stream.  */
  putc('\0', &b);
  return ret;

}
#endif
gpl-2.0
060411121/zcl_linux
drivers/acpi/sbshc.c
787
7733
/* * SMBus driver for ACPI Embedded Controller (v0.1) * * Copyright (c) 2007 Alexey Starikovskiy * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2. */ #include <linux/acpi.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/interrupt.h> #include "sbshc.h" #define PREFIX "ACPI: " #define ACPI_SMB_HC_CLASS "smbus_host_ctl" #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC" struct acpi_smb_hc { struct acpi_ec *ec; struct mutex lock; wait_queue_head_t wait; u8 offset; u8 query_bit; smbus_alarm_callback callback; void *context; }; static int acpi_smbus_hc_add(struct acpi_device *device); static int acpi_smbus_hc_remove(struct acpi_device *device); static const struct acpi_device_id sbs_device_ids[] = { {"ACPI0001", 0}, {"ACPI0005", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, sbs_device_ids); static struct acpi_driver acpi_smb_hc_driver = { .name = "smbus_hc", .class = ACPI_SMB_HC_CLASS, .ids = sbs_device_ids, .ops = { .add = acpi_smbus_hc_add, .remove = acpi_smbus_hc_remove, }, }; union acpi_smb_status { u8 raw; struct { u8 status:5; u8 reserved:1; u8 alarm:1; u8 done:1; } fields; }; enum acpi_smb_status_codes { SMBUS_OK = 0, SMBUS_UNKNOWN_FAILURE = 0x07, SMBUS_DEVICE_ADDRESS_NACK = 0x10, SMBUS_DEVICE_ERROR = 0x11, SMBUS_DEVICE_COMMAND_ACCESS_DENIED = 0x12, SMBUS_UNKNOWN_ERROR = 0x13, SMBUS_DEVICE_ACCESS_DENIED = 0x17, SMBUS_TIMEOUT = 0x18, SMBUS_HOST_UNSUPPORTED_PROTOCOL = 0x19, SMBUS_BUSY = 0x1a, SMBUS_PEC_ERROR = 0x1f, }; enum acpi_smb_offset { ACPI_SMB_PROTOCOL = 0, /* protocol, PEC */ ACPI_SMB_STATUS = 1, /* status */ ACPI_SMB_ADDRESS = 2, /* address */ ACPI_SMB_COMMAND = 3, /* command */ ACPI_SMB_DATA = 4, /* 32 data registers */ ACPI_SMB_BLOCK_COUNT = 0x24, /* number of data bytes */ ACPI_SMB_ALARM_ADDRESS = 0x25, /* alarm address */ ACPI_SMB_ALARM_DATA = 0x26, /* 2 
bytes alarm data */ }; static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) { return ec_read(hc->offset + address, data); } static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data) { return ec_write(hc->offset + address, data); } static inline int smb_check_done(struct acpi_smb_hc *hc) { union acpi_smb_status status = {.raw = 0}; smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw); return status.fields.done && (status.fields.status == SMBUS_OK); } static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout) { if (wait_event_timeout(hc->wait, smb_check_done(hc), msecs_to_jiffies(timeout))) return 0; /* * After the timeout happens, OS will try to check the status of SMbus. * If the status is what OS expected, it will be regarded as the bogus * timeout. */ if (smb_check_done(hc)) return 0; else return -ETIME; } static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, u8 address, u8 command, u8 *data, u8 length) { int ret = -EFAULT, i; u8 temp, sz = 0; if (!hc) { printk(KERN_ERR PREFIX "host controller is not configured\n"); return ret; } mutex_lock(&hc->lock); if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) goto end; if (temp) { ret = -EBUSY; goto end; } smb_hc_write(hc, ACPI_SMB_COMMAND, command); if (!(protocol & 0x01)) { smb_hc_write(hc, ACPI_SMB_BLOCK_COUNT, length); for (i = 0; i < length; ++i) smb_hc_write(hc, ACPI_SMB_DATA + i, data[i]); } smb_hc_write(hc, ACPI_SMB_ADDRESS, address << 1); smb_hc_write(hc, ACPI_SMB_PROTOCOL, protocol); /* * Wait for completion. Save the status code, data size, * and data into the return package (if required by the protocol). 
*/ ret = wait_transaction_complete(hc, 1000); if (ret || !(protocol & 0x01)) goto end; switch (protocol) { case SMBUS_RECEIVE_BYTE: case SMBUS_READ_BYTE: sz = 1; break; case SMBUS_READ_WORD: sz = 2; break; case SMBUS_READ_BLOCK: if (smb_hc_read(hc, ACPI_SMB_BLOCK_COUNT, &sz)) { ret = -EFAULT; goto end; } sz &= 0x1f; break; } for (i = 0; i < sz; ++i) smb_hc_read(hc, ACPI_SMB_DATA + i, &data[i]); end: mutex_unlock(&hc->lock); return ret; } int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address, u8 command, u8 *data) { return acpi_smbus_transaction(hc, protocol, address, command, data, 0); } EXPORT_SYMBOL_GPL(acpi_smbus_read); int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 address, u8 command, u8 *data, u8 length) { return acpi_smbus_transaction(hc, protocol, address, command, data, length); } EXPORT_SYMBOL_GPL(acpi_smbus_write); int acpi_smbus_register_callback(struct acpi_smb_hc *hc, smbus_alarm_callback callback, void *context) { mutex_lock(&hc->lock); hc->callback = callback; hc->context = context; mutex_unlock(&hc->lock); return 0; } EXPORT_SYMBOL_GPL(acpi_smbus_register_callback); int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc) { mutex_lock(&hc->lock); hc->callback = NULL; hc->context = NULL; mutex_unlock(&hc->lock); return 0; } EXPORT_SYMBOL_GPL(acpi_smbus_unregister_callback); static inline void acpi_smbus_callback(void *context) { struct acpi_smb_hc *hc = context; if (hc->callback) hc->callback(hc->context); } static int smbus_alarm(void *context) { struct acpi_smb_hc *hc = context; union acpi_smb_status status; u8 address; if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw)) return 0; /* Check if it is only a completion notify */ if (status.fields.done) wake_up(&hc->wait); if (!status.fields.alarm) return 0; mutex_lock(&hc->lock); smb_hc_read(hc, ACPI_SMB_ALARM_ADDRESS, &address); status.fields.alarm = 0; smb_hc_write(hc, ACPI_SMB_STATUS, status.raw); /* We are only interested in events coming from known devices */ switch 
(address >> 1) { case ACPI_SBS_CHARGER: case ACPI_SBS_MANAGER: case ACPI_SBS_BATTERY: acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_smbus_callback, hc); default:; } mutex_unlock(&hc->lock); return 0; } typedef int (*acpi_ec_query_func) (void *data); extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, acpi_handle handle, acpi_ec_query_func func, void *data); static int acpi_smbus_hc_add(struct acpi_device *device) { int status; unsigned long long val; struct acpi_smb_hc *hc; if (!device) return -EINVAL; status = acpi_evaluate_integer(device->handle, "_EC", NULL, &val); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "error obtaining _EC.\n"); return -EIO; } strcpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_SMB_HC_CLASS); hc = kzalloc(sizeof(struct acpi_smb_hc), GFP_KERNEL); if (!hc) return -ENOMEM; mutex_init(&hc->lock); init_waitqueue_head(&hc->wait); hc->ec = acpi_driver_data(device->parent); hc->offset = (val >> 8) & 0xff; hc->query_bit = val & 0xff; device->driver_data = hc; acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc); printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n", hc->ec, hc->offset, hc->query_bit); return 0; } extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); static int acpi_smbus_hc_remove(struct acpi_device *device) { struct acpi_smb_hc *hc; if (!device) return -EINVAL; hc = acpi_driver_data(device); acpi_ec_remove_query_handler(hc->ec, hc->query_bit); kfree(hc); device->driver_data = NULL; return 0; } module_acpi_driver(acpi_smb_hc_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexey Starikovskiy"); MODULE_DESCRIPTION("ACPI SMBus HC driver");
gpl-2.0
joetang1989/linux-kernel4.2
tools/thermal/tmon/tmon.c
1299
9053
/* * tmon.c Thermal Monitor (TMON) main function and entry point * * Copyright (C) 2012 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 or later as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: Jacob Pan <jacob.jun.pan@linux.intel.com> * */ #include <getopt.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/types.h> #include <sys/stat.h> #include <ncurses.h> #include <ctype.h> #include <time.h> #include <signal.h> #include <limits.h> #include <sys/time.h> #include <pthread.h> #include <math.h> #include <stdarg.h> #include <syslog.h> #include "tmon.h" unsigned long ticktime = 1; /* seconds */ unsigned long no_control = 1; /* monitoring only or use cooling device for * temperature control. 
*/ double time_elapsed = 0.0; unsigned long target_temp_user = 65; /* can be select by tui later */ int dialogue_on; int tmon_exit; static short daemon_mode; static int logging; /* for recording thermal data to a file */ static int debug_on; FILE *tmon_log; /*cooling device used for the PID controller */ char ctrl_cdev[CDEV_NAME_SIZE] = "None"; int target_thermal_zone; /* user selected target zone instance */ static void start_daemon_mode(void); pthread_t event_tid; pthread_mutex_t input_lock; void usage() { printf("Usage: tmon [OPTION...]\n"); printf(" -c, --control cooling device in control\n"); printf(" -d, --daemon run as daemon, no TUI\n"); printf(" -g, --debug debug message in syslog\n"); printf(" -h, --help show this help message\n"); printf(" -l, --log log data to /var/tmp/tmon.log\n"); printf(" -t, --time-interval sampling time interval, > 1 sec.\n"); printf(" -T, --target-temp initial target temperature\n"); printf(" -v, --version show version\n"); printf(" -z, --zone target thermal zone id\n"); exit(0); } void version() { printf("TMON version %s\n", VERSION); exit(EXIT_SUCCESS); } static void tmon_cleanup(void) { syslog(LOG_INFO, "TMON exit cleanup\n"); fflush(stdout); refresh(); if (tmon_log) fclose(tmon_log); if (event_tid) { pthread_mutex_lock(&input_lock); pthread_cancel(event_tid); pthread_mutex_unlock(&input_lock); pthread_mutex_destroy(&input_lock); } closelog(); /* relax control knobs, undo throttling */ set_ctrl_state(0); keypad(stdscr, FALSE); echo(); nocbreak(); close_windows(); endwin(); free_thermal_data(); exit(1); } static void tmon_sig_handler(int sig) { syslog(LOG_INFO, "TMON caught signal %d\n", sig); refresh(); switch (sig) { case SIGTERM: printf("sigterm, exit and clean up\n"); fflush(stdout); break; case SIGKILL: printf("sigkill, exit and clean up\n"); fflush(stdout); break; case SIGINT: printf("ctrl-c, exit and clean up\n"); fflush(stdout); break; default: break; } tmon_exit = true; } static void start_syslog(void) { if (debug_on) 
setlogmask(LOG_UPTO(LOG_DEBUG)); else setlogmask(LOG_UPTO(LOG_ERR)); openlog("tmon.log", LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL0); syslog(LOG_NOTICE, "TMON started by User %d", getuid()); } static void prepare_logging(void) { int i; struct stat logstat; if (!logging) return; /* open local data log file */ tmon_log = fopen(TMON_LOG_FILE, "w+"); if (!tmon_log) { syslog(LOG_ERR, "failed to open log file %s\n", TMON_LOG_FILE); return; } if (lstat(TMON_LOG_FILE, &logstat) < 0) { syslog(LOG_ERR, "Unable to stat log file %s\n", TMON_LOG_FILE); fclose(tmon_log); tmon_log = NULL; return; } /* The log file must be a regular file owned by us */ if (S_ISLNK(logstat.st_mode)) { syslog(LOG_ERR, "Log file is a symlink. Will not log\n"); fclose(tmon_log); tmon_log = NULL; return; } if (logstat.st_uid != getuid()) { syslog(LOG_ERR, "We don't own the log file. Not logging\n"); fclose(tmon_log); tmon_log = NULL; return; } fprintf(tmon_log, "#----------- THERMAL SYSTEM CONFIG -------------\n"); for (i = 0; i < ptdata.nr_tz_sensor; i++) { char binding_str[33]; /* size of long + 1 */ int j; memset(binding_str, 0, sizeof(binding_str)); for (j = 0; j < 32; j++) binding_str[j] = (ptdata.tzi[i].cdev_binding & 1<<j) ? 
'1' : '0'; fprintf(tmon_log, "#thermal zone %s%02d cdevs binding: %32s\n", ptdata.tzi[i].type, ptdata.tzi[i].instance, binding_str); for (j = 0; j < ptdata.tzi[i].nr_trip_pts; j++) { fprintf(tmon_log, "#\tTP%02d type:%s, temp:%lu\n", j, trip_type_name[ptdata.tzi[i].tp[j].type], ptdata.tzi[i].tp[j].temp); } } for (i = 0; i < ptdata.nr_cooling_dev; i++) fprintf(tmon_log, "#cooling devices%02d: %s\n", i, ptdata.cdi[i].type); fprintf(tmon_log, "#---------- THERMAL DATA LOG STARTED -----------\n"); fprintf(tmon_log, "Samples TargetTemp "); for (i = 0; i < ptdata.nr_tz_sensor; i++) { fprintf(tmon_log, "%s%d ", ptdata.tzi[i].type, ptdata.tzi[i].instance); } for (i = 0; i < ptdata.nr_cooling_dev; i++) fprintf(tmon_log, "%s%d ", ptdata.cdi[i].type, ptdata.cdi[i].instance); fprintf(tmon_log, "\n"); } static struct option opts[] = { { "control", 1, NULL, 'c' }, { "daemon", 0, NULL, 'd' }, { "time-interval", 1, NULL, 't' }, { "target-temp", 1, NULL, 'T' }, { "log", 0, NULL, 'l' }, { "help", 0, NULL, 'h' }, { "version", 0, NULL, 'v' }, { "debug", 0, NULL, 'g' }, { 0, 0, NULL, 0 } }; int main(int argc, char **argv) { int err = 0; int id2 = 0, c; double yk = 0.0, temp; /* controller output */ int target_tz_index; if (geteuid() != 0) { printf("TMON needs to be run as root\n"); exit(EXIT_FAILURE); } while ((c = getopt_long(argc, argv, "c:dlht:T:vgz:", opts, &id2)) != -1) { switch (c) { case 'c': no_control = 0; strncpy(ctrl_cdev, optarg, CDEV_NAME_SIZE); break; case 'd': start_daemon_mode(); printf("Run TMON in daemon mode\n"); break; case 't': ticktime = strtod(optarg, NULL); if (ticktime < 1) ticktime = 1; break; case 'T': temp = strtod(optarg, NULL); if (temp < 0) { fprintf(stderr, "error: temperature must be positive\n"); return 1; } target_temp_user = temp; break; case 'l': printf("Logging data to /var/tmp/tmon.log\n"); logging = 1; break; case 'h': usage(); break; case 'v': version(); break; case 'g': debug_on = 1; break; case 'z': target_thermal_zone = strtod(optarg, NULL); 
break; default: break; } } if (pthread_mutex_init(&input_lock, NULL) != 0) { fprintf(stderr, "\n mutex init failed, exit\n"); return 1; } start_syslog(); if (signal(SIGINT, tmon_sig_handler) == SIG_ERR) syslog(LOG_DEBUG, "Cannot handle SIGINT\n"); if (signal(SIGTERM, tmon_sig_handler) == SIG_ERR) syslog(LOG_DEBUG, "Cannot handle SIGINT\n"); if (probe_thermal_sysfs()) { pthread_mutex_destroy(&input_lock); closelog(); return -1; } initialize_curses(); setup_windows(); signal(SIGWINCH, resize_handler); show_title_bar(); show_sensors_w(); show_cooling_device(); update_thermal_data(); show_data_w(); prepare_logging(); init_thermal_controller(); nodelay(stdscr, TRUE); err = pthread_create(&event_tid, NULL, &handle_tui_events, NULL); if (err != 0) { printf("\ncan't create thread :[%s]", strerror(err)); tmon_cleanup(); exit(EXIT_FAILURE); } /* validate range of user selected target zone, default to the first * instance if out of range */ target_tz_index = zone_instance_to_index(target_thermal_zone); if (target_tz_index < 0) { target_thermal_zone = ptdata.tzi[0].instance; syslog(LOG_ERR, "target zone is not found, default to %d\n", target_thermal_zone); } while (1) { sleep(ticktime); show_title_bar(); show_sensors_w(); update_thermal_data(); if (!dialogue_on) { show_data_w(); show_cooling_device(); } cur_thermal_record++; time_elapsed += ticktime; controller_handler(trec[0].temp[target_tz_index] / 1000, &yk); trec[0].pid_out_pct = yk; if (!dialogue_on) show_control_w(); if (tmon_exit) break; } tmon_cleanup(); return 0; } static void start_daemon_mode() { daemon_mode = 1; /* fork */ pid_t sid, pid = fork(); if (pid < 0) { exit(EXIT_FAILURE); } else if (pid > 0) /* kill parent */ exit(EXIT_SUCCESS); /* disable TUI, it may not be necessary, but saves some resource */ disable_tui(); /* change the file mode mask */ umask(S_IWGRP | S_IWOTH); /* new SID for the daemon process */ sid = setsid(); if (sid < 0) exit(EXIT_FAILURE); /* change working directory */ if ((chdir("/")) < 0) 
exit(EXIT_FAILURE); sleep(10); close(STDIN_FILENO); close(STDOUT_FILENO); close(STDERR_FILENO); }
gpl-2.0
lukego/linux
tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c
1299
1517
/* * Copyright 2014, Michael Ellerman, IBM Corp. * Licensed under GPLv2. */ #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <sys/types.h> #include <sys/wait.h> #include <unistd.h> #include <setjmp.h> #include <signal.h> #include "ebb.h" /* * Test that a fork clears the PMU state of the child. eg. BESCR/EBBHR/EBBRR * are cleared, and MMCR0_PMCC is reset, preventing the child from accessing * the PMU. */ static struct event event; static int child(void) { /* Even though we have EBE=0 we can still see the EBB regs */ FAIL_IF(mfspr(SPRN_BESCR) != 0); FAIL_IF(mfspr(SPRN_EBBHR) != 0); FAIL_IF(mfspr(SPRN_EBBRR) != 0); FAIL_IF(catch_sigill(write_pmc1)); /* We can still read from the event, though it is on our parent */ FAIL_IF(event_read(&event)); return 0; } /* Tests that fork clears EBB state */ int fork_cleanup(void) { pid_t pid; event_init_named(&event, 0x1001e, "cycles"); event_leader_ebb_init(&event); FAIL_IF(event_open(&event)); ebb_enable_pmc_counting(1); setup_ebb_handler(standard_ebb_callee); ebb_global_enable(); FAIL_IF(ebb_event_enable(&event)); mtspr(SPRN_MMCR0, MMCR0_FC); mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); /* Don't need to actually take any EBBs */ pid = fork(); if (pid == 0) exit(child()); /* Child does the actual testing */ FAIL_IF(wait_for_child(pid)); /* After fork */ event_close(&event); return 0; } int main(void) { return test_harness(fork_cleanup, "fork_cleanup"); }
gpl-2.0
regalstreak/S7262-Kernel
fs/binfmt_misc.c
1811
15557
/* * binfmt_misc.c * * Copyright (C) 1997 Richard Günther * * binfmt_misc detects binaries via a magic or filename extension and invokes * a specified wrapper. This should obsolete binfmt_java, binfmt_em86 and * binfmt_mz. * * 1997-04-25 first version * [...] * 1997-05-19 cleanup * 1997-06-26 hpa: pass the real filename rather than argv[0] * 1997-06-30 minor cleanup * 1997-08-09 removed extension stripping, locking cleanup * 2001-02-28 AV: rewritten into something that resembles C. Original didn't. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/binfmts.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/file.h> #include <linux/pagemap.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/syscalls.h> #include <linux/fs.h> #include <asm/uaccess.h> enum { VERBOSE_STATUS = 1 /* make it zero to save 400 bytes kernel memory */ }; static LIST_HEAD(entries); static int enabled = 1; enum {Enabled, Magic}; #define MISC_FMT_PRESERVE_ARGV0 (1<<31) #define MISC_FMT_OPEN_BINARY (1<<30) #define MISC_FMT_CREDENTIALS (1<<29) typedef struct { struct list_head list; unsigned long flags; /* type, status, etc. 
*/ int offset; /* offset of magic */ int size; /* size of magic/mask */ char *magic; /* magic or filename extension */ char *mask; /* mask, NULL for exact match */ char *interpreter; /* filename of interpreter */ char *name; struct dentry *dentry; } Node; static DEFINE_RWLOCK(entries_lock); static struct file_system_type bm_fs_type; static struct vfsmount *bm_mnt; static int entry_count; /* * Check if we support the binfmt * if we do, return the node, else NULL * locking is done in load_misc_binary */ static Node *check_file(struct linux_binprm *bprm) { char *p = strrchr(bprm->interp, '.'); struct list_head *l; list_for_each(l, &entries) { Node *e = list_entry(l, Node, list); char *s; int j; if (!test_bit(Enabled, &e->flags)) continue; if (!test_bit(Magic, &e->flags)) { if (p && !strcmp(e->magic, p + 1)) return e; continue; } s = bprm->buf + e->offset; if (e->mask) { for (j = 0; j < e->size; j++) if ((*s++ ^ e->magic[j]) & e->mask[j]) break; } else { for (j = 0; j < e->size; j++) if ((*s++ ^ e->magic[j])) break; } if (j == e->size) return e; } return NULL; } /* * the loader itself */ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs) { Node *fmt; struct file * interp_file = NULL; char iname[BINPRM_BUF_SIZE]; const char *iname_addr = iname; int retval; int fd_binary = -1; retval = -ENOEXEC; if (!enabled) goto _ret; retval = -ENOEXEC; if (bprm->recursion_depth > BINPRM_MAX_RECURSION) goto _ret; /* to keep locking time low, we copy the interpreter string */ read_lock(&entries_lock); fmt = check_file(bprm); if (fmt) strlcpy(iname, fmt->interpreter, BINPRM_BUF_SIZE); read_unlock(&entries_lock); if (!fmt) goto _ret; if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) { retval = remove_arg_zero(bprm); if (retval) goto _ret; } if (fmt->flags & MISC_FMT_OPEN_BINARY) { /* if the binary should be opened on behalf of the * interpreter than keep it open and assign descriptor * to it */ fd_binary = get_unused_fd(); if (fd_binary < 0) { retval = fd_binary; goto 
_ret; } fd_install(fd_binary, bprm->file); /* if the binary is not readable than enforce mm->dumpable=0 regardless of the interpreter's permissions */ if (file_permission(bprm->file, MAY_READ)) bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; allow_write_access(bprm->file); bprm->file = NULL; /* mark the bprm that fd should be passed to interp */ bprm->interp_flags |= BINPRM_FLAGS_EXECFD; bprm->interp_data = fd_binary; } else { allow_write_access(bprm->file); fput(bprm->file); bprm->file = NULL; } /* make argv[1] be the path to the binary */ retval = copy_strings_kernel (1, &bprm->interp, bprm); if (retval < 0) goto _error; bprm->argc++; /* add the interp as argv[0] */ retval = copy_strings_kernel (1, &iname_addr, bprm); if (retval < 0) goto _error; bprm->argc ++; bprm->interp = iname; /* for binfmt_script */ interp_file = open_exec (iname); retval = PTR_ERR (interp_file); if (IS_ERR (interp_file)) goto _error; bprm->file = interp_file; if (fmt->flags & MISC_FMT_CREDENTIALS) { /* * No need to call prepare_binprm(), it's already been * done. bprm->buf is stale, update from interp_file. */ memset(bprm->buf, 0, BINPRM_BUF_SIZE); retval = kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE); } else retval = prepare_binprm (bprm); if (retval < 0) goto _error; bprm->recursion_depth++; retval = search_binary_handler (bprm, regs); if (retval < 0) goto _error; _ret: return retval; _error: if (fd_binary > 0) sys_close(fd_binary); bprm->interp_flags = 0; bprm->interp_data = 0; goto _ret; } /* Command parsers */ /* * parses and copies one argument enclosed in del from *sp to *dp, * recognising the \x special. * returns pointer to the copied argument or NULL in case of an * error (and sets err) or null argument length. 
*/ static char *scanarg(char *s, char del) { char c; while ((c = *s++) != del) { if (c == '\\' && *s == 'x') { s++; if (!isxdigit(*s++)) return NULL; if (!isxdigit(*s++)) return NULL; } } return s; } static int unquote(char *from) { char c = 0, *s = from, *p = from; while ((c = *s++) != '\0') { if (c == '\\' && *s == 'x') { s++; c = toupper(*s++); *p = (c - (isdigit(c) ? '0' : 'A' - 10)) << 4; c = toupper(*s++); *p++ |= c - (isdigit(c) ? '0' : 'A' - 10); continue; } *p++ = c; } return p - from; } static char * check_special_flags (char * sfs, Node * e) { char * p = sfs; int cont = 1; /* special flags */ while (cont) { switch (*p) { case 'P': p++; e->flags |= MISC_FMT_PRESERVE_ARGV0; break; case 'O': p++; e->flags |= MISC_FMT_OPEN_BINARY; break; case 'C': p++; /* this flags also implies the open-binary flag */ e->flags |= (MISC_FMT_CREDENTIALS | MISC_FMT_OPEN_BINARY); break; default: cont = 0; } } return p; } /* * This registers a new binary format, it recognises the syntax * ':name:type:offset:magic:mask:interpreter:flags' * where the ':' is the IFS, that can be chosen with the first char */ static Node *create_entry(const char __user *buffer, size_t count) { Node *e; int memsize, err; char *buf, *p; char del; /* some sanity checks */ err = -EINVAL; if ((count < 11) || (count > 256)) goto out; err = -ENOMEM; memsize = sizeof(Node) + count + 8; e = kmalloc(memsize, GFP_USER); if (!e) goto out; p = buf = (char *)e + sizeof(Node); memset(e, 0, sizeof(Node)); if (copy_from_user(buf, buffer, count)) goto Efault; del = *p++; /* delimeter */ memset(buf+count, del, 8); e->name = p; p = strchr(p, del); if (!p) goto Einval; *p++ = '\0'; if (!e->name[0] || !strcmp(e->name, ".") || !strcmp(e->name, "..") || strchr(e->name, '/')) goto Einval; switch (*p++) { case 'E': e->flags = 1<<Enabled; break; case 'M': e->flags = (1<<Enabled) | (1<<Magic); break; default: goto Einval; } if (*p++ != del) goto Einval; if (test_bit(Magic, &e->flags)) { char *s = strchr(p, del); if (!s) goto 
Einval; *s++ = '\0'; e->offset = simple_strtoul(p, &p, 10); if (*p++) goto Einval; e->magic = p; p = scanarg(p, del); if (!p) goto Einval; p[-1] = '\0'; if (!e->magic[0]) goto Einval; e->mask = p; p = scanarg(p, del); if (!p) goto Einval; p[-1] = '\0'; if (!e->mask[0]) e->mask = NULL; e->size = unquote(e->magic); if (e->mask && unquote(e->mask) != e->size) goto Einval; if (e->size + e->offset > BINPRM_BUF_SIZE) goto Einval; } else { p = strchr(p, del); if (!p) goto Einval; *p++ = '\0'; e->magic = p; p = strchr(p, del); if (!p) goto Einval; *p++ = '\0'; if (!e->magic[0] || strchr(e->magic, '/')) goto Einval; p = strchr(p, del); if (!p) goto Einval; *p++ = '\0'; } e->interpreter = p; p = strchr(p, del); if (!p) goto Einval; *p++ = '\0'; if (!e->interpreter[0]) goto Einval; p = check_special_flags (p, e); if (*p == '\n') p++; if (p != buf + count) goto Einval; return e; out: return ERR_PTR(err); Efault: kfree(e); return ERR_PTR(-EFAULT); Einval: kfree(e); return ERR_PTR(-EINVAL); } /* * Set status of entry/binfmt_misc: * '1' enables, '0' disables and '-1' clears entry/binfmt_misc */ static int parse_command(const char __user *buffer, size_t count) { char s[4]; if (!count) return 0; if (count > 3) return -EINVAL; if (copy_from_user(s, buffer, count)) return -EFAULT; if (s[count-1] == '\n') count--; if (count == 1 && s[0] == '0') return 1; if (count == 1 && s[0] == '1') return 2; if (count == 2 && s[0] == '-' && s[1] == '1') return 3; return -EINVAL; } /* generic stuff */ static void entry_status(Node *e, char *page) { char *dp; char *status = "disabled"; const char * flags = "flags: "; if (test_bit(Enabled, &e->flags)) status = "enabled"; if (!VERBOSE_STATUS) { sprintf(page, "%s\n", status); return; } sprintf(page, "%s\ninterpreter %s\n", status, e->interpreter); dp = page + strlen(page); /* print the special flags */ sprintf (dp, "%s", flags); dp += strlen (flags); if (e->flags & MISC_FMT_PRESERVE_ARGV0) { *dp ++ = 'P'; } if (e->flags & MISC_FMT_OPEN_BINARY) { *dp ++ 
= 'O'; } if (e->flags & MISC_FMT_CREDENTIALS) { *dp ++ = 'C'; } *dp ++ = '\n'; if (!test_bit(Magic, &e->flags)) { sprintf(dp, "extension .%s\n", e->magic); } else { int i; sprintf(dp, "offset %i\nmagic ", e->offset); dp = page + strlen(page); for (i = 0; i < e->size; i++) { sprintf(dp, "%02x", 0xff & (int) (e->magic[i])); dp += 2; } if (e->mask) { sprintf(dp, "\nmask "); dp += 6; for (i = 0; i < e->size; i++) { sprintf(dp, "%02x", 0xff & (int) (e->mask[i])); dp += 2; } } *dp++ = '\n'; *dp = '\0'; } } static struct inode *bm_get_inode(struct super_block *sb, int mode) { struct inode * inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb); } return inode; } static void bm_evict_inode(struct inode *inode) { end_writeback(inode); kfree(inode->i_private); } static void kill_node(Node *e) { struct dentry *dentry; write_lock(&entries_lock); dentry = e->dentry; if (dentry) { list_del_init(&e->list); e->dentry = NULL; } write_unlock(&entries_lock); if (dentry) { dentry->d_inode->i_nlink--; d_drop(dentry); dput(dentry); simple_release_fs(&bm_mnt, &entry_count); } } /* /<entry> */ static ssize_t bm_entry_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos) { Node *e = file->f_path.dentry->d_inode->i_private; ssize_t res; char *page; if (!(page = (char*) __get_free_page(GFP_KERNEL))) return -ENOMEM; entry_status(e, page); res = simple_read_from_buffer(buf, nbytes, ppos, page, strlen(page)); free_page((unsigned long) page); return res; } static ssize_t bm_entry_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct dentry *root; Node *e = file->f_path.dentry->d_inode->i_private; int res = parse_command(buffer, count); switch (res) { case 1: clear_bit(Enabled, &e->flags); break; case 2: set_bit(Enabled, &e->flags); break; case 3: root = dget(file->f_path.mnt->mnt_sb->s_root); mutex_lock(&root->d_inode->i_mutex); 
kill_node(e); mutex_unlock(&root->d_inode->i_mutex); dput(root); break; default: return res; } return count; } static const struct file_operations bm_entry_operations = { .read = bm_entry_read, .write = bm_entry_write, .llseek = default_llseek, }; /* /register */ static ssize_t bm_register_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { Node *e; struct inode *inode; struct dentry *root, *dentry; struct super_block *sb = file->f_path.mnt->mnt_sb; int err = 0; e = create_entry(buffer, count); if (IS_ERR(e)) return PTR_ERR(e); root = dget(sb->s_root); mutex_lock(&root->d_inode->i_mutex); dentry = lookup_one_len(e->name, root, strlen(e->name)); err = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out; err = -EEXIST; if (dentry->d_inode) goto out2; inode = bm_get_inode(sb, S_IFREG | 0644); err = -ENOMEM; if (!inode) goto out2; err = simple_pin_fs(&bm_fs_type, &bm_mnt, &entry_count); if (err) { iput(inode); inode = NULL; goto out2; } e->dentry = dget(dentry); inode->i_private = e; inode->i_fop = &bm_entry_operations; d_instantiate(dentry, inode); write_lock(&entries_lock); list_add(&e->list, &entries); write_unlock(&entries_lock); err = 0; out2: dput(dentry); out: mutex_unlock(&root->d_inode->i_mutex); dput(root); if (err) { kfree(e); return -EINVAL; } return count; } static const struct file_operations bm_register_operations = { .write = bm_register_write, .llseek = noop_llseek, }; /* /status */ static ssize_t bm_status_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { char *s = enabled ? 
"enabled\n" : "disabled\n"; return simple_read_from_buffer(buf, nbytes, ppos, s, strlen(s)); } static ssize_t bm_status_write(struct file * file, const char __user * buffer, size_t count, loff_t *ppos) { int res = parse_command(buffer, count); struct dentry *root; switch (res) { case 1: enabled = 0; break; case 2: enabled = 1; break; case 3: root = dget(file->f_path.mnt->mnt_sb->s_root); mutex_lock(&root->d_inode->i_mutex); while (!list_empty(&entries)) kill_node(list_entry(entries.next, Node, list)); mutex_unlock(&root->d_inode->i_mutex); dput(root); default: return res; } return count; } static const struct file_operations bm_status_operations = { .read = bm_status_read, .write = bm_status_write, .llseek = default_llseek, }; /* Superblock handling */ static const struct super_operations s_ops = { .statfs = simple_statfs, .evict_inode = bm_evict_inode, }; static int bm_fill_super(struct super_block * sb, void * data, int silent) { static struct tree_descr bm_files[] = { [2] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO}, [3] = {"register", &bm_register_operations, S_IWUSR}, /* last one */ {""} }; int err = simple_fill_super(sb, 0x42494e4d, bm_files); if (!err) sb->s_op = &s_ops; return err; } static struct dentry *bm_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_single(fs_type, flags, data, bm_fill_super); } static struct linux_binfmt misc_format = { .module = THIS_MODULE, .load_binary = load_misc_binary, }; static struct file_system_type bm_fs_type = { .owner = THIS_MODULE, .name = "binfmt_misc", .mount = bm_mount, .kill_sb = kill_litter_super, }; static int __init init_misc_binfmt(void) { int err = register_filesystem(&bm_fs_type); if (!err) { err = insert_binfmt(&misc_format); if (err) unregister_filesystem(&bm_fs_type); } return err; } static void __exit exit_misc_binfmt(void) { unregister_binfmt(&misc_format); unregister_filesystem(&bm_fs_type); } core_initcall(init_misc_binfmt); 
module_exit(exit_misc_binfmt); MODULE_LICENSE("GPL");
gpl-2.0
ion-storm/nexusplayer
fs/gfs2/rgrp.c
2067
64103
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/fs.h> #include <linux/gfs2_ondisk.h> #include <linux/prefetch.h> #include <linux/blkdev.h> #include <linux/rbtree.h> #include <linux/random.h> #include "gfs2.h" #include "incore.h" #include "glock.h" #include "glops.h" #include "lops.h" #include "meta_io.h" #include "quota.h" #include "rgrp.h" #include "super.h" #include "trans.h" #include "util.h" #include "log.h" #include "inode.h" #include "trace_gfs2.h" #define BFITNOENT ((u32)~0) #define NO_BLOCK ((u64)~0) #if BITS_PER_LONG == 32 #define LBITMASK (0x55555555UL) #define LBITSKIP55 (0x55555555UL) #define LBITSKIP00 (0x00000000UL) #else #define LBITMASK (0x5555555555555555UL) #define LBITSKIP55 (0x5555555555555555UL) #define LBITSKIP00 (0x0000000000000000UL) #endif /* * These routines are used by the resource group routines (rgrp.c) * to keep track of block allocation. Each block is represented by two * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks. 
* * 0 = Free * 1 = Used (not metadata) * 2 = Unlinked (still in use) inode * 3 = Used (metadata) */ static const char valid_change[16] = { /* current */ /* n */ 0, 1, 1, 1, /* e */ 1, 0, 0, 0, /* w */ 0, 0, 0, 1, 1, 0, 0, 0 }; static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext, const struct gfs2_inode *ip, bool nowrap); /** * gfs2_setbit - Set a bit in the bitmaps * @rbm: The position of the bit to set * @do_clone: Also set the clone bitmap, if it exists * @new_state: the new state of the block * */ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone, unsigned char new_state) { unsigned char *byte1, *byte2, *end, cur_state; unsigned int buflen = rbm->bi->bi_len; const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE; byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY); end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen; BUG_ON(byte1 >= end); cur_state = (*byte1 >> bit) & GFS2_BIT_MASK; if (unlikely(!valid_change[new_state * 4 + cur_state])) { printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, " "new_state=%d\n", rbm->offset, cur_state, new_state); printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n", (unsigned long long)rbm->rgd->rd_addr, rbm->bi->bi_start); printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n", rbm->bi->bi_offset, rbm->bi->bi_len); dump_stack(); gfs2_consist_rgrpd(rbm->rgd); return; } *byte1 ^= (cur_state ^ new_state) << bit; if (do_clone && rbm->bi->bi_clone) { byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY); cur_state = (*byte2 >> bit) & GFS2_BIT_MASK; *byte2 ^= (cur_state ^ new_state) << bit; } } /** * gfs2_testbit - test a bit in the bitmaps * @rbm: The bit to test * * Returns: The two bit block state of the requested bit */ static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm) { const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset; const u8 *byte; unsigned int bit; byte = buffer + (rbm->offset / GFS2_NBBY); 
bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE; return (*byte >> bit) & GFS2_BIT_MASK; } /** * gfs2_bit_search * @ptr: Pointer to bitmap data * @mask: Mask to use (normally 0x55555.... but adjusted for search start) * @state: The state we are searching for * * We xor the bitmap data with a patter which is the bitwise opposite * of what we are looking for, this gives rise to a pattern of ones * wherever there is a match. Since we have two bits per entry, we * take this pattern, shift it down by one place and then and it with * the original. All the even bit positions (0,2,4, etc) then represent * successful matches, so we mask with 0x55555..... to remove the unwanted * odd bit positions. * * This allows searching of a whole u64 at once (32 blocks) with a * single test (on 64 bit arches). */ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state) { u64 tmp; static const u64 search[] = { [0] = 0xffffffffffffffffULL, [1] = 0xaaaaaaaaaaaaaaaaULL, [2] = 0x5555555555555555ULL, [3] = 0x0000000000000000ULL, }; tmp = le64_to_cpu(*ptr) ^ search[state]; tmp &= (tmp >> 1); tmp &= mask; return tmp; } /** * rs_cmp - multi-block reservation range compare * @blk: absolute file system block number of the new reservation * @len: number of blocks in the new reservation * @rs: existing reservation to compare against * * returns: 1 if the block range is beyond the reach of the reservation * -1 if the block range is before the start of the reservation * 0 if the block range overlaps with the reservation */ static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs) { u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm); if (blk >= startblk + rs->rs_free) return 1; if (blk + len - 1 < startblk) return -1; return 0; } /** * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing * a block in a given allocation state. 
* @buf: the buffer that holds the bitmaps * @len: the length (in bytes) of the buffer * @goal: start search at this block's bit-pair (within @buffer) * @state: GFS2_BLKST_XXX the state of the block we're looking for. * * Scope of @goal and returned block number is only within this bitmap buffer, * not entire rgrp or filesystem. @buffer will be offset from the actual * beginning of a bitmap block buffer, skipping any header structures, but * headers are always a multiple of 64 bits long so that the buffer is * always aligned to a 64 bit boundary. * * The size of the buffer is in bytes, but is it assumed that it is * always ok to read a complete multiple of 64 bits at the end * of the block in case the end is no aligned to a natural boundary. * * Return: the block number (bitmap buffer scope) that was found */ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len, u32 goal, u8 state) { u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1); const __le64 *ptr = ((__le64 *)buf) + (goal >> 5); const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64))); u64 tmp; u64 mask = 0x5555555555555555ULL; u32 bit; /* Mask off bits we don't care about at the start of the search */ mask <<= spoint; tmp = gfs2_bit_search(ptr, mask, state); ptr++; while(tmp == 0 && ptr < end) { tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state); ptr++; } /* Mask off any bits which are more than len bytes from the start */ if (ptr == end && (len & (sizeof(u64) - 1))) tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1)))); /* Didn't find anything, so return */ if (tmp == 0) return BFITNOENT; ptr--; bit = __ffs64(tmp); bit /= 2; /* two bits per entry in the bitmap */ return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit; } /** * gfs2_rbm_from_block - Set the rbm based upon rgd and block number * @rbm: The rbm with rgd already set correctly * @block: The block number (filesystem relative) * * This sets the bi and offset members of an rbm based on a * resource group and a filesystem 
relative block number. The * resource group must be set in the rbm on entry, the bi and * offset members will be set by this function. * * Returns: 0 on success, or an error code */ static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block) { u64 rblock = block - rbm->rgd->rd_data0; u32 x; if (WARN_ON_ONCE(rblock > UINT_MAX)) return -EINVAL; if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data) return -E2BIG; rbm->bi = rbm->rgd->rd_bits; rbm->offset = (u32)(rblock); /* Check if the block is within the first block */ if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) return 0; /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */ rbm->offset += (sizeof(struct gfs2_rgrp) - sizeof(struct gfs2_meta_header)) * GFS2_NBBY; x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap; rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap; rbm->bi += x; return 0; } /** * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned * @rbm: Position to search (value/result) * @n_unaligned: Number of unaligned blocks to check * @len: Decremented for each block found (terminate on zero) * * Returns: true if a non-free block is encountered */ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len) { u64 block; u32 n; u8 res; for (n = 0; n < n_unaligned; n++) { res = gfs2_testbit(rbm); if (res != GFS2_BLKST_FREE) return true; (*len)--; if (*len == 0) return true; block = gfs2_rbm_to_block(rbm); if (gfs2_rbm_from_block(rbm, block + 1)) return true; } return false; } /** * gfs2_free_extlen - Return extent length of free blocks * @rbm: Starting position * @len: Max length to check * * Starting at the block specified by the rbm, see how many free blocks * there are, not reading more than len blocks ahead. This can be done * using memchr_inv when the blocks are byte aligned, but has to be done * on a block by block basis in case of unaligned blocks. 
Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */

static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
	struct gfs2_rbm rbm = *rrbm;
	u32 n_unaligned = rbm.offset & 3;
	u32 size = len;
	u32 bytes;
	u32 chunk_size;
	u8 *ptr, *start, *end;
	u64 block;

	/* Step block-by-block until the position is byte aligned */
	if (n_unaligned &&
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
		goto out;

	n_unaligned = len & 3;
	/* Start is now byte aligned */
	while (len > 3) {
		start = rbm.bi->bi_bh->b_data;
		if (rbm.bi->bi_clone)
			start = rbm.bi->bi_clone;
		end = start + rbm.bi->bi_bh->b_size;
		start += rbm.bi->bi_offset;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		/* A zero byte means four consecutive free blocks */
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		len -= chunk_size;
		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
			n_unaligned = 0;
			break;
		}
		if (ptr) {
			n_unaligned = 3;
			break;
		}
		n_unaligned = len & 3;
	}

	/* Deal with any bits left over at the end */
	if (n_unaligned)
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
	return size - len;
}

/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
			 unsigned int buflen, u8 state)
{
	const u8 *byte = buffer;
	const u8 *end = buffer + buflen;
	const u8 state1 = state << 2;
	const u8 state2 = state << 4;
	const u8 state3 = state << 6;
	u32 count = 0;

	/* Check all four two-bit entries in each byte */
	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}

/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_free);
		return;
	}

	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
			       count[1], tmp);
		return;
	}

	if (count[2] + count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[2] + count[3], rgd->rd_dinodes);
		return;
	}
}

/* Does filesystem block @block fall within resource group @rgd? */
static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
{
	u64 first = rgd->rd_data0;
	u64 last = first + rgd->rd_data;
	return first <= block && block < last;
}

/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;

	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		next = NULL;
		if (blk < cur->rd_addr)
			next = n->rb_left;
		else if (blk >= cur->rd_data0 + cur->rd_data)
			next = n->rb_right;
		if (next == NULL) {
			spin_unlock(&sdp->sd_rindex_spin);
			/* With !exact, blk may sit in the gap between
			   rd_addr and rd_data0 and still match */
			if (exact) {
				if (blk < cur->rd_addr)
					return NULL;
				if (blk >= cur->rd_data0 + cur->rd_data)
					return NULL;
			}
			return cur;
		}
		n
= next;
	}

	spin_unlock(&sdp->sd_rindex_spin);
	return NULL;
}

/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}

/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
	if (n == NULL)
		n = rb_first(&sdp->sd_rindex_tree);

	/* Wrapped back to the starting rgrp: only one in the tree */
	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
		return NULL;
	}
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}

/* Free the clone bitmaps attached to each of the rgrp's bitmap blocks */
void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
	int x;

	for (x = 0; x < rgd->rd_length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
	}
}

/**
 * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
 * @ip: the inode for this reservation
 */
int gfs2_rs_alloc(struct gfs2_inode *ip)
{
	int error = 0;

	down_write(&ip->i_rw_mutex);
	if (ip->i_res)
		goto out;

	ip->i_res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
	if (!ip->i_res) {
		error = -ENOMEM;
		goto out;
	}

	RB_CLEAR_NODE(&ip->i_res->rs_node);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}

/* Debug helper: print a single multi-block reservation */
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
	gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)rs->rs_inum,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
}

/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
static void
__rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	/* Caller must hold rgd->rd_rsspin */
	if (!gfs2_rs_active(rs))
		return;

	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);

	if (rs->rs_free) {
		/* return reserved blocks to the rgrp */
		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
		rs->rs_free = 0;
		clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
		smp_mb__after_clear_bit();
	}
}

/**
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	rgd = rs->rs_rbm.rgd;
	if (rgd) {
		spin_lock(&rgd->rd_rsspin);
		__rs_deltree(rs);
		spin_unlock(&rgd->rd_rsspin);
	}
}

/**
 * gfs2_rs_delete - delete a multi-block reservation
 * @ip: The inode for this reservation
 *
 */
void gfs2_rs_delete(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;

	down_write(&ip->i_rw_mutex);
	/* Only free when no other writer still has the file open */
	if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
		gfs2_rs_deltree(ip->i_res);
		BUG_ON(ip->i_res->rs_free);
		kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
		ip->i_res = NULL;
	}
	up_write(&ip->i_rw_mutex);
}

/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
{
	struct rb_node *n;
	struct gfs2_blkreserv *rs;

	spin_lock(&rgd->rd_rsspin);
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		__rs_deltree(rs);
	}
	spin_unlock(&rgd->rd_rsspin);
}

/* Tear down all rgrp descriptors and their glocks (unmount path) */
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		gl = rgd->rd_gl;

		rb_erase(n, &sdp->sd_rindex_tree);

		if (gl) {
			spin_lock(&gl->gl_spin);
			gl->gl_object = NULL;
			spin_unlock(&gl->gl_spin);
			gfs2_glock_add_to_lru(gl);
			gfs2_glock_put(gl);
		}

		gfs2_free_clones(rgd);
		kfree(rgd->rd_bits);
		return_all_reservations(rgd);
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	}
}

/* Dump an rindex entry for consistency-error reporting */
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	printk(KERN_INFO " ri_length = %u\n", rgd->rd_length);
	printk(KERN_INFO " ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	printk(KERN_INFO " ri_data = %u\n", rgd->rd_data);
	printk(KERN_INFO " ri_bitbytes = %u\n", rgd->rd_bitbytes);
}

/**
 * gfs2_compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		bi->bi_flags = 0;
		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* header
block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		}

		bytes_left -= bytes;
	}

	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	bi = rgd->rd_bits + (length - 1);
	/* The last bitmap must account for exactly rd_data blocks */
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 *
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	int error, rgrps;

	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
			break;
		error = gfs2_internal_read(ip, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	return total_data;
}

/* Insert @rgd into the rindex rb-tree, keyed by rd_addr */
static int rgd_insert(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*newn) {
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
						  rd_node);

		parent = *newn;
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
	sdp->sd_rgrps++;
	return 0;
}

/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	rgd->rd_gl->gl_object = rgd;
	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error)
		return 0;

	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp =
GFS2_SB(&ip->i_inode);
	int error;

	/* Read entries until EOF (>0) or a real error (<0) */
	do {
		error = read_rindex_entry(ip);
	} while (error == 0);

	if (error < 0)
		return error;

	sdp->sd_rindex_uptodate = 1;
	return 0;
}

/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int error = 0;
	int unlock_required = 0;

	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			if (error)
				return error;
			unlock_required = 1;
		}
		/* Re-check after (possibly) taking the glock */
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
	}

	return error;
}

/* Decode an on-disk rgrp header into the in-core descriptor */
static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rgrp *str = buf;
	u32 rg_flags;

	rg_flags = be32_to_cpu(str->rg_flags);
	rg_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= rg_flags;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
}

/* Encode the in-core descriptor back into an on-disk rgrp header */
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
	struct gfs2_rgrp *str = buf;

	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	str->__pad = cpu_to_be32(0);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}

/* Does the glock LVB still match the on-disk rgrp header? */
static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

	/* Both sides are big-endian, so compare raw */
	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
	    rgl->rl_dinodes != str->rg_dinodes ||
	    rgl->rl_igeneration != str->rg_igeneration)
		return 0;
	return 1;
}

/* Copy the on-disk rgrp header fields into the glock LVB */
static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
	const struct gfs2_rgrp *str = buf;

	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
	rgl->__pad = 0UL;
}

/* Adjust the LVB's cached count of unlinked-but-allocated dinodes */
static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
	rgl->rl_unlinked = cpu_to_be32(unlinked);
}

/* Count GFS2_BLKST_UNLINKED entries across all of the rgrp's bitmaps */
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
	struct gfs2_bitmap *bi;
	const u32 length = rgd->rd_length;
	const u8 *buffer = NULL;
	u32 i, goal, count = 0;

	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
		goal = 0;
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		while (goal < bi->bi_len * GFS2_NBBY) {
			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
					   GFS2_BLKST_UNLINKED);
			if (goal == BFITNOENT)
				break;
			count++;
			goal++;
		}
	}

	return count;
}

/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
*
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	/* Already read in */
	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

	/* Submit all reads, then wait for them in reverse order */
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
	}
	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		/* LVB not initialised yet; populate it from the header */
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	} else if (sdp->sd_args.ar_rgrplvb) {
		if (!gfs2_rgrp_lvb_valid(rgd)){
			gfs2_consist_rgrpd(rgd);
			error = -EIO;
			goto fail;
		}
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
	}
	return 0;

fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}

/* Refresh the in-core rgrp from the glock LVB, avoiding a disk read */
int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
{
	u32 rl_flags;

	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
		return 0;

	/* LVB not initialised; fall back to reading the blocks */
	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);

	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes =
be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
	return 0;
}

/* Glock go_lock callback: read bitmaps unless the LVB shortcut applies */
int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
		return 0;
	return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
}

/**
 * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @gh: The glock holder for the resource group
 *
 */

void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	int x, length = rgd->rd_length;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		if (bi->bi_bh) {
			brelse(bi->bi_bh);
			bi->bi_bh = NULL;
		}
	}

}

/* Issue discard requests for blocks freed since the last bitmap sync.
   @offset is the rgrp's first data block; @bh, when set, is the on-disk
   bitmap to diff against the clone bitmap. */
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			    struct buffer_head *bh,
			    const struct gfs2_bitmap *bi, unsigned minlen,
			    u64 *ptrimmed)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 blk;
	sector_t start = 0;
	sector_t nr_blks = 0;
	int rv;
	unsigned int x;
	u32 trimmed = 0;
	u8 diff;

	for (x = 0; x < bi->bi_len; x++) {
		const u8 *clone = bi->bi_clone ?
bi->bi_clone : bi->bi_bh->b_data; clone += bi->bi_offset; clone += x; if (bh) { const u8 *orig = bh->b_data + bi->bi_offset + x; diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1)); } else { diff = ~(*clone | (*clone >> 1)); } diff &= 0x55; if (diff == 0) continue; blk = offset + ((bi->bi_start + x) * GFS2_NBBY); while(diff) { if (diff & 1) { if (nr_blks == 0) goto start_new_extent; if ((start + nr_blks) != blk) { if (nr_blks >= minlen) { rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0); if (rv) goto fail; trimmed += nr_blks; } nr_blks = 0; start_new_extent: start = blk; } nr_blks++; } diff >>= 2; blk++; } } if (nr_blks >= minlen) { rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0); if (rv) goto fail; trimmed += nr_blks; } if (ptrimmed) *ptrimmed = trimmed; return 0; fail: if (sdp->sd_args.ar_discard) fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv); sdp->sd_args.ar_discard = 0; return -EIO; } /** * gfs2_fitrim - Generate discard requests for unused bits of the filesystem * @filp: Any file on the filesystem * @argp: Pointer to the arguments (also used to pass result) * * Returns: 0 on success, otherwise error code */ int gfs2_fitrim(struct file *filp, void __user *argp) { struct inode *inode = file_inode(filp); struct gfs2_sbd *sdp = GFS2_SB(inode); struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev); struct buffer_head *bh; struct gfs2_rgrpd *rgd; struct gfs2_rgrpd *rgd_end; struct gfs2_holder gh; struct fstrim_range r; int ret = 0; u64 amt; u64 trimmed = 0; u64 start, end, minlen; unsigned int x; unsigned bs_shift = sdp->sd_sb.sb_bsize_shift; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!blk_queue_discard(q)) return -EOPNOTSUPP; if (copy_from_user(&r, argp, sizeof(r))) return -EFAULT; ret = gfs2_rindex_update(sdp); if (ret) return ret; start = r.start >> bs_shift; end = start + (r.len >> bs_shift); minlen = max_t(u64, r.minlen, q->limits.discard_granularity) >> bs_shift; rgd = 
gfs2_blk2rgrpd(sdp, start, 0); rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0); if (end <= start || minlen > sdp->sd_max_rg_data || start > rgd_end->rd_data0 + rgd_end->rd_data) return -EINVAL; while (1) { ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh); if (ret) goto out; if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) { /* Trim each bitmap in the rgrp */ for (x = 0; x < rgd->rd_length; x++) { struct gfs2_bitmap *bi = rgd->rd_bits + x; ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, minlen, &amt); if (ret) { gfs2_glock_dq_uninit(&gh); goto out; } trimmed += amt; } /* Mark rgrp as having been trimmed */ ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0); if (ret == 0) { bh = rgd->rd_bits[0].bi_bh; rgd->rd_flags |= GFS2_RGF_TRIMMED; gfs2_trans_add_meta(rgd->rd_gl, bh); gfs2_rgrp_out(rgd, bh->b_data); gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data); gfs2_trans_end(sdp); } } gfs2_glock_dq_uninit(&gh); if (rgd == rgd_end) break; rgd = gfs2_rgrpd_get_next(rgd); } out: r.len = trimmed << 9; if (copy_to_user(argp, &r, sizeof(r))) return -EFAULT; return ret; } /** * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree * @ip: the inode structure * */ static void rs_insert(struct gfs2_inode *ip) { struct rb_node **newn, *parent = NULL; int rc; struct gfs2_blkreserv *rs = ip->i_res; struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd; u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm); BUG_ON(gfs2_rs_active(rs)); spin_lock(&rgd->rd_rsspin); newn = &rgd->rd_rstree.rb_node; while (*newn) { struct gfs2_blkreserv *cur = rb_entry(*newn, struct gfs2_blkreserv, rs_node); parent = *newn; rc = rs_cmp(fsblock, rs->rs_free, cur); if (rc > 0) newn = &((*newn)->rb_right); else if (rc < 0) newn = &((*newn)->rb_left); else { spin_unlock(&rgd->rd_rsspin); WARN_ON(1); return; } } rb_link_node(&rs->rs_node, parent, newn); rb_insert_color(&rs->rs_node, &rgd->rd_rstree); /* Do our rgrp accounting for the reservation */ rgd->rd_reserved += rs->rs_free; /* blocks reserved */ 
spin_unlock(&rgd->rd_rsspin);
	trace_gfs2_rs(rs, TRACE_RS_INSERT);
}

/**
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @requested: number of blocks required for this allocation
 *
 */

static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
			   unsigned requested)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	u64 goal;
	struct gfs2_blkreserv *rs = ip->i_res;
	u32 extlen;
	u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
	int ret;
	struct inode *inode = &ip->i_inode;

	if (S_ISDIR(inode->i_mode))
		extlen = 1;
	else {
		/* Size the reservation from the inode's write-size hint */
		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
		extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
	}
	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
		return;

	/* Find bitmap block that contains bits for goal block */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rgd->rd_last_alloc + rgd->rd_data0;

	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
		return;

	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, extlen, ip, true);
	if (ret == 0) {
		rs->rs_rbm = rbm;
		rs->rs_free = extlen;
		rs->rs_inum = ip->i_no_addr;
		rs_insert(ip);
	} else {
		/* Nothing found past the goal; retry from the rgrp start */
		if (goal == rgd->rd_last_alloc + rgd->rd_data0)
			rgd->rd_last_alloc = 0;
	}
}

/**
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
 */

static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
				      u32 length,
				      const struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs;
	struct rb_node *n;
	int rc;

	spin_lock(&rgd->rd_rsspin);
	n = rgd->rd_rstree.rb_node;
	while (n) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(block, length, rs);
		if (rc < 0)
			n = n->rb_left;
		else if (rc > 0)
			n = n->rb_right;
		else
			break;
	}

	if (n) {
		/* Walk rightwards past each overlapping reservation,
		   skipping our own inode's reservation */
		while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) {
			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
			n = n->rb_right;
			if (n == NULL)
				break;
			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		}
	}

	spin_unlock(&rgd->rd_rsspin);
	return block;
}

/**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 *
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 *
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
 */

static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
					     const struct gfs2_inode *ip,
					     u32 minext)
{
	u64 block = gfs2_rbm_to_block(rbm);
	u32 extlen = 1;
	u64 nblock;
	int ret;

	/*
	 * If we have a minimum extent length, then skip over any extent
	 * which is less than the min extent length in size.
	 */
	if (minext) {
		extlen = gfs2_free_extlen(rbm, minext);
		nblock = block + extlen;
		if (extlen < minext)
			goto fail;
	}

	/*
	 * Check the extent which has been found against the reservations
	 * and skip if parts of it are already reserved
	 */
	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
	if (nblock == block)
		return 0;
fail:
	ret = gfs2_rbm_from_block(rbm, nblock);
	if (ret < 0)
		return ret;
	return 1;
}

/**
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: The requested extent length (0 for a single block)
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 *
 * Side effects:
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 *
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
 */

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
			 const struct gfs2_inode *ip, bool nowrap)
{
	struct buffer_head *bh;
	struct gfs2_bitmap *initial_bi;
	u32 initial_offset;
	u32 offset;
	u8 *buffer;
	int index;
	int n = 0;
	int iters = rbm->rgd->rd_length;
	int ret;

	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	 */
	if (rbm->offset != 0)
		iters++;

	while(1) {
		if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto next_bitmap;

		bh = rbm->bi->bi_bh;
		buffer = bh->b_data + rbm->bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		/* Allocation state is searched in the clone bitmap when
		   one exists; unlinked state always uses the real bitmap */
		if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
			buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
			goto bitmap_full;
		rbm->offset = offset;
		if (ip == NULL)
			return 0;

		initial_bi = rbm->bi;
		ret = gfs2_reservation_check_and_update(rbm, ip, minext);
		if (ret == 0)
			return 0;
		if (ret > 0) {
			n += (rbm->bi - initial_bi);
			goto next_iter;
		}
		if (ret == -E2BIG) {
			/* Reservation(s) run off the end of the rgrp */
			index = 0;
			rbm->offset = 0;
			n += (rbm->bi - initial_bi);
			goto res_covered_end_of_rgrp;
		}
		return ret;

bitmap_full:	/* Mark bitmap as full and fall through */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &rbm->bi->bi_flags);

next_bitmap:	/* Find next bitmap in the rgrp */
		rbm->offset = 0;
		index = rbm->bi - rbm->rgd->rd_bits;
		index++;
		if (index == rbm->rgd->rd_length)
			index = 0;
res_covered_end_of_rgrp:
		rbm->bi = &rbm->rgd->rd_bits[index];
		if ((index == 0) && nowrap)
			break;
		n++;
next_iter:
		if (n >= iters)
			break;
	}

	return -ENOSPC;
}

/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 * Returns: 0 if no error
 *          The inode, if one has been found, in inode.
 */

static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
	u64 block;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;
	struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };

	while (1) {
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, 0, NULL, true);
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)
			break;
		if (WARN_ON_ONCE(error))
			break;

		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + 1))
			break;
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
			continue;
		if (block == skip)
			continue;
		*last_unlinked = block;

		error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			return;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}

/**
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 *
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes.
This is done purely on the basis * of timings, since this is the only data we have to work with and * our aim here is to reject a resource group which is highly contended * but (very important) not to do this too often in order to ensure that * we do not land up introducing fragmentation by changing resource * groups when not actually required. * * The calculation is fairly simple, we want to know whether the SRTTB * (i.e. smoothed round trip time for blocking operations) to acquire * the lock for this rgrp's glock is significantly greater than the * time taken for resource groups on average. We introduce a margin in * the form of the variable @var which is computed as the sum of the two * respective variences, and multiplied by a factor depending on @loops * and whether we have a lot of data to base the decision on. This is * then tested against the square difference of the means in order to * decide whether the result is statistically significant or not. * * Returns: A boolean verdict on the congestion status */ static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops) { const struct gfs2_glock *gl = rgd->rd_gl; const struct gfs2_sbd *sdp = gl->gl_sbd; struct gfs2_lkstats *st; s64 r_dcount, l_dcount; s64 r_srttb, l_srttb; s64 srttb_diff; s64 sqr_diff; s64 var; preempt_disable(); st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP]; r_srttb = st->stats[GFS2_LKS_SRTTB]; r_dcount = st->stats[GFS2_LKS_DCOUNT]; var = st->stats[GFS2_LKS_SRTTVARB] + gl->gl_stats.stats[GFS2_LKS_SRTTVARB]; preempt_enable(); l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB]; l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT]; if ((l_dcount < 1) || (r_dcount < 1) || (r_srttb == 0)) return false; srttb_diff = r_srttb - l_srttb; sqr_diff = srttb_diff * srttb_diff; var *= 2; if (l_dcount < 8 || r_dcount < 8) var *= 2; if (loops == 1) var *= 2; return ((srttb_diff < 0) && (sqr_diff > var)); } /** * gfs2_rgrp_used_recently * @rs: The block reservation with the rgrp to test * @msecs: 
The time limit in milliseconds * * Returns: True if the rgrp glock has been used within the time limit */ static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs, u64 msecs) { u64 tdiff; tdiff = ktime_to_ns(ktime_sub(ktime_get_real(), rs->rs_rbm.rgd->rd_gl->gl_dstamp)); return tdiff > (msecs * 1000 * 1000); } static u32 gfs2_orlov_skip(const struct gfs2_inode *ip) { const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); u32 skip; get_random_bytes(&skip, sizeof(skip)); return skip % sdp->sd_rgrps; } static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin) { struct gfs2_rgrpd *rgd = *pos; struct gfs2_sbd *sdp = rgd->rd_sbd; rgd = gfs2_rgrpd_get_next(rgd); if (rgd == NULL) rgd = gfs2_rgrpd_get_first(sdp); *pos = rgd; if (rgd != begin) /* If we didn't wrap */ return true; return false; } /** * gfs2_inplace_reserve - Reserve space in the filesystem * @ip: the inode to reserve space for * @requested: the number of blocks to be reserved * * Returns: errno */ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *begin = NULL; struct gfs2_blkreserv *rs = ip->i_res; int error = 0, rg_locked, flags = 0; u64 last_unlinked = NO_BLOCK; int loops = 0; u32 skip = 0; if (sdp->sd_args.ar_rgrplvb) flags |= GL_SKIP; if (gfs2_assert_warn(sdp, requested)) return -EINVAL; if (gfs2_rs_active(rs)) { begin = rs->rs_rbm.rgd; flags = 0; /* Yoda: Do or do not. 
There is no try */ } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) { rs->rs_rbm.rgd = begin = ip->i_rgd; } else { rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1); } if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV)) skip = gfs2_orlov_skip(ip); if (rs->rs_rbm.rgd == NULL) return -EBADSLT; while (loops < 3) { rg_locked = 1; if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) { rg_locked = 0; if (skip && skip--) goto next_rgrp; if (!gfs2_rs_active(rs) && (loops < 2) && gfs2_rgrp_used_recently(rs, 1000) && gfs2_rgrp_congested(rs->rs_rbm.rgd, loops)) goto next_rgrp; error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl, LM_ST_EXCLUSIVE, flags, &rs->rs_rgd_gh); if (unlikely(error)) return error; if (!gfs2_rs_active(rs) && (loops < 2) && gfs2_rgrp_congested(rs->rs_rbm.rgd, loops)) goto skip_rgrp; if (sdp->sd_args.ar_rgrplvb) { error = update_rgrp_lvb(rs->rs_rbm.rgd); if (unlikely(error)) { gfs2_glock_dq_uninit(&rs->rs_rgd_gh); return error; } } } /* Skip unuseable resource groups */ if (rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR)) goto skip_rgrp; if (sdp->sd_args.ar_rgrplvb) gfs2_rgrp_bh_get(rs->rs_rbm.rgd); /* Get a reservation if we don't already have one */ if (!gfs2_rs_active(rs)) rg_mblk_search(rs->rs_rbm.rgd, ip, requested); /* Skip rgrps when we can't get a reservation on first pass */ if (!gfs2_rs_active(rs) && (loops < 1)) goto check_rgrp; /* If rgrp has enough free space, use it */ if (rs->rs_rbm.rgd->rd_free_clone >= requested) { ip->i_rgd = rs->rs_rbm.rgd; return 0; } /* Drop reservation, if we couldn't use reserved rgrp */ if (gfs2_rs_active(rs)) gfs2_rs_deltree(rs); check_rgrp: /* Check for unlinked inodes which can be reclaimed */ if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK) try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked, ip->i_no_addr); skip_rgrp: /* Unlock rgrp if required */ if (!rg_locked) gfs2_glock_dq_uninit(&rs->rs_rgd_gh); next_rgrp: /* Find the next rgrp, and continue looking */ if 
(gfs2_select_rgrp(&rs->rs_rbm.rgd, begin)) continue; if (skip) continue; /* If we've scanned all the rgrps, but found no free blocks * then this checks for some less likely conditions before * trying again. */ loops++; /* Check that fs hasn't grown if writing to rindex */ if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) { error = gfs2_ri_update(ip); if (error) return error; } /* Flushing the log may release space */ if (loops == 2) gfs2_log_flush(sdp, NULL); } return -ENOSPC; } /** * gfs2_inplace_release - release an inplace reservation * @ip: the inode the reservation was taken out on * * Release a reservation made by gfs2_inplace_reserve(). */ void gfs2_inplace_release(struct gfs2_inode *ip) { struct gfs2_blkreserv *rs = ip->i_res; if (rs->rs_rgd_gh.gh_gl) gfs2_glock_dq_uninit(&rs->rs_rgd_gh); } /** * gfs2_get_block_type - Check a block in a RG is of given type * @rgd: the resource group holding the block * @block: the block number * * Returns: The block type (GFS2_BLKST_*) */ static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block) { struct gfs2_rbm rbm = { .rgd = rgd, }; int ret; ret = gfs2_rbm_from_block(&rbm, block); WARN_ON_ONCE(ret != 0); return gfs2_testbit(&rbm); } /** * gfs2_alloc_extent - allocate an extent from a given bitmap * @rbm: the resource group information * @dinode: TRUE if the first block we allocate is for a dinode * @n: The extent length (value/result) * * Add the bitmap buffer to the transaction. * Set the found bits to @new_state to change block's allocation state. */ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode, unsigned int *n) { struct gfs2_rbm pos = { .rgd = rbm->rgd, }; const unsigned int elen = *n; u64 block; int ret; *n = 1; block = gfs2_rbm_to_block(rbm); gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm->bi->bi_bh); gfs2_setbit(rbm, true, dinode ? 
GFS2_BLKST_DINODE : GFS2_BLKST_USED); block++; while (*n < elen) { ret = gfs2_rbm_from_block(&pos, block); if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE) break; gfs2_trans_add_meta(pos.rgd->rd_gl, pos.bi->bi_bh); gfs2_setbit(&pos, true, GFS2_BLKST_USED); (*n)++; block++; } } /** * rgblk_free - Change alloc state of given block(s) * @sdp: the filesystem * @bstart: the start of a run of blocks to free * @blen: the length of the block run (all must lie within ONE RG!) * @new_state: GFS2_BLKST_XXX the after-allocation block state * * Returns: Resource group containing the block(s) */ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart, u32 blen, unsigned char new_state) { struct gfs2_rbm rbm; rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1); if (!rbm.rgd) { if (gfs2_consist(sdp)) fs_err(sdp, "block = %llu\n", (unsigned long long)bstart); return NULL; } while (blen--) { gfs2_rbm_from_block(&rbm, bstart); bstart++; if (!rbm.bi->bi_clone) { rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size, GFP_NOFS | __GFP_NOFAIL); memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset, rbm.bi->bi_bh->b_data + rbm.bi->bi_offset, rbm.bi->bi_len); } gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.bi->bi_bh); gfs2_setbit(&rbm, false, new_state); } return rbm.rgd; } /** * gfs2_rgrp_dump - print out an rgrp * @seq: The iterator * @gl: The glock in question * */ int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl) { struct gfs2_rgrpd *rgd = gl->gl_object; struct gfs2_blkreserv *trs; const struct rb_node *n; if (rgd == NULL) return 0; gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n", (unsigned long long)rgd->rd_addr, rgd->rd_flags, rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes, rgd->rd_reserved); spin_lock(&rgd->rd_rsspin); for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) { trs = rb_entry(n, struct gfs2_blkreserv, rs_node); dump_rs(seq, trs); } spin_unlock(&rgd->rd_rsspin); return 0; } static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd) { struct 
gfs2_sbd *sdp = rgd->rd_sbd; fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n", (unsigned long long)rgd->rd_addr); fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n"); gfs2_rgrp_dump(NULL, rgd->rd_gl); rgd->rd_flags |= GFS2_RDF_ERROR; } /** * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation * @ip: The inode we have just allocated blocks for * @rbm: The start of the allocated blocks * @len: The extent length * * Adjusts a reservation after an allocation has taken place. If the * reservation does not match the allocation, or if it is now empty * then it is removed. */ static void gfs2_adjust_reservation(struct gfs2_inode *ip, const struct gfs2_rbm *rbm, unsigned len) { struct gfs2_blkreserv *rs = ip->i_res; struct gfs2_rgrpd *rgd = rbm->rgd; unsigned rlen; u64 block; int ret; spin_lock(&rgd->rd_rsspin); if (gfs2_rs_active(rs)) { if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) { block = gfs2_rbm_to_block(rbm); ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len); rlen = min(rs->rs_free, len); rs->rs_free -= rlen; rgd->rd_reserved -= rlen; trace_gfs2_rs(rs, TRACE_RS_CLAIM); if (rs->rs_free && !ret) goto out; } __rs_deltree(rs); } out: spin_unlock(&rgd->rd_rsspin); } /** * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode * @ip: the inode to allocate the block for * @bn: Used to return the starting block number * @nblocks: requested number of blocks/extent length (value/result) * @dinode: 1 if we're allocating a dinode block, else 0 * @generation: the generation number of the inode * * Returns: 0 or error */ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks, bool dinode, u64 *generation) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head *dibh; struct gfs2_rbm rbm = { .rgd = ip->i_rgd, }; unsigned int ndata; u64 goal; u64 block; /* block, within the file system scope */ int error; if (gfs2_rs_active(ip->i_res)) goal = 
gfs2_rbm_to_block(&ip->i_res->rs_rbm); else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal)) goal = ip->i_goal; else goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0; gfs2_rbm_from_block(&rbm, goal); error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false); if (error == -ENOSPC) { gfs2_rbm_from_block(&rbm, goal); error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false); } /* Since all blocks are reserved in advance, this shouldn't happen */ if (error) { fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d\n", (unsigned long long)ip->i_no_addr, error, *nblocks, test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags)); goto rgrp_error; } gfs2_alloc_extent(&rbm, dinode, nblocks); block = gfs2_rbm_to_block(&rbm); rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0; if (gfs2_rs_active(ip->i_res)) gfs2_adjust_reservation(ip, &rbm, *nblocks); ndata = *nblocks; if (dinode) ndata--; if (!dinode) { ip->i_goal = block + ndata - 1; error = gfs2_meta_inode_buffer(ip, &dibh); if (error == 0) { struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; gfs2_trans_add_meta(ip->i_gl, dibh); di->di_goal_meta = di->di_goal_data = cpu_to_be64(ip->i_goal); brelse(dibh); } } if (rbm.rgd->rd_free < *nblocks) { printk(KERN_WARNING "nblocks=%u\n", *nblocks); goto rgrp_error; } rbm.rgd->rd_free -= *nblocks; if (dinode) { rbm.rgd->rd_dinodes++; *generation = rbm.rgd->rd_igeneration++; if (*generation == 0) *generation = rbm.rgd->rd_igeneration++; } gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh); gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data); gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data); gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0); if (dinode) gfs2_trans_add_unrevoke(sdp, block, 1); gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid); rbm.rgd->rd_free_clone -= *nblocks; trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks, dinode ? 
GFS2_BLKST_DINODE : GFS2_BLKST_USED); *bn = block; return 0; rgrp_error: gfs2_rgrp_error(rbm.rgd); return -EIO; } /** * __gfs2_free_blocks - free a contiguous run of block(s) * @ip: the inode these blocks are being freed from * @bstart: first block of a run of contiguous blocks * @blen: the length of the block run * @meta: 1 if the blocks represent metadata * */ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *rgd; rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); if (!rgd) return; trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE); rgd->rd_free += blen; rgd->rd_flags &= ~GFS2_RGF_TRIMMED; gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data); /* Directories keep their data in the metadata address space */ if (meta || ip->i_depth) gfs2_meta_wipe(ip, bstart, blen); } /** * gfs2_free_meta - free a contiguous run of data block(s) * @ip: the inode these blocks are being freed from * @bstart: first block of a run of contiguous blocks * @blen: the length of the block run * */ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); __gfs2_free_blocks(ip, bstart, blen, 1); gfs2_statfs_change(sdp, 0, +blen, 0); gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); } void gfs2_unlink_di(struct inode *inode) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_rgrpd *rgd; u64 blkno = ip->i_no_addr; rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED); if (!rgd) return; trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED); gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data); update_rgrp_lvb_unlinked(rgd, 1); } 
static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) { struct gfs2_sbd *sdp = rgd->rd_sbd; struct gfs2_rgrpd *tmp_rgd; tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE); if (!tmp_rgd) return; gfs2_assert_withdraw(sdp, rgd == tmp_rgd); if (!rgd->rd_dinodes) gfs2_consist_rgrpd(rgd); rgd->rd_dinodes--; rgd->rd_free++; gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data); update_rgrp_lvb_unlinked(rgd, -1); gfs2_statfs_change(sdp, 0, +1, -1); } void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) { gfs2_free_uninit_di(rgd, ip->i_no_addr); trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE); gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid); gfs2_meta_wipe(ip, ip->i_no_addr, 1); } /** * gfs2_check_blk_type - Check the type of a block * @sdp: The superblock * @no_addr: The block number to check * @type: The block type we are looking for * * Returns: 0 if the block type matches the expected type * -ESTALE if it doesn't match * or -ve errno if something went wrong while checking */ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) { struct gfs2_rgrpd *rgd; struct gfs2_holder rgd_gh; int error = -EINVAL; rgd = gfs2_blk2rgrpd(sdp, no_addr, 1); if (!rgd) goto fail; error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh); if (error) goto fail; if (gfs2_get_block_type(rgd, no_addr) != type) error = -ESTALE; gfs2_glock_dq_uninit(&rgd_gh); fail: return error; } /** * gfs2_rlist_add - add a RG to a list of RGs * @ip: the inode * @rlist: the list of resource groups * @block: the block * * Figure out what RG a block belongs to and add that RG to the list * * FIXME: Don't use NOFAIL * */ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, u64 block) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *rgd; struct gfs2_rgrpd **tmp; unsigned 
int new_space; unsigned int x; if (gfs2_assert_warn(sdp, !rlist->rl_ghs)) return; if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block)) rgd = ip->i_rgd; else rgd = gfs2_blk2rgrpd(sdp, block, 1); if (!rgd) { fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block); return; } ip->i_rgd = rgd; for (x = 0; x < rlist->rl_rgrps; x++) if (rlist->rl_rgd[x] == rgd) return; if (rlist->rl_rgrps == rlist->rl_space) { new_space = rlist->rl_space + 10; tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *), GFP_NOFS | __GFP_NOFAIL); if (rlist->rl_rgd) { memcpy(tmp, rlist->rl_rgd, rlist->rl_space * sizeof(struct gfs2_rgrpd *)); kfree(rlist->rl_rgd); } rlist->rl_space = new_space; rlist->rl_rgd = tmp; } rlist->rl_rgd[rlist->rl_rgrps++] = rgd; } /** * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate * and initialize an array of glock holders for them * @rlist: the list of resource groups * @state: the lock state to acquire the RG lock in * * FIXME: Don't use NOFAIL * */ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state) { unsigned int x; rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder), GFP_NOFS | __GFP_NOFAIL); for (x = 0; x < rlist->rl_rgrps; x++) gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, state, 0, &rlist->rl_ghs[x]); } /** * gfs2_rlist_free - free a resource group list * @list: the list of resource groups * */ void gfs2_rlist_free(struct gfs2_rgrp_list *rlist) { unsigned int x; kfree(rlist->rl_rgd); if (rlist->rl_ghs) { for (x = 0; x < rlist->rl_rgrps; x++) gfs2_holder_uninit(&rlist->rl_ghs[x]); kfree(rlist->rl_ghs); rlist->rl_ghs = NULL; } }
gpl-2.0
nadavitay/linux-3.14.1
drivers/mtd/ubi/misc.c
2067
4594
/* * Copyright (c) International Business Machines Corp., 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: Artem Bityutskiy (Битюцкий Артём) */ /* Here we keep miscellaneous functions which are used all over the UBI code */ #include "ubi.h" /** * calc_data_len - calculate how much real data is stored in a buffer. * @ubi: UBI device description object * @buf: a buffer with the contents of the physical eraseblock * @length: the buffer length * * This function calculates how much "real data" is stored in @buf and returnes * the length. Continuous 0xFF bytes at the end of the buffer are not * considered as "real data". */ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length) { int i; ubi_assert(!(length & (ubi->min_io_size - 1))); for (i = length - 1; i >= 0; i--) if (((const uint8_t *)buf)[i] != 0xFF) break; /* The resulting length must be aligned to the minimum flash I/O size */ length = ALIGN(i + 1, ubi->min_io_size); return length; } /** * ubi_check_volume - check the contents of a static volume. * @ubi: UBI device description object * @vol_id: ID of the volume to check * * This function checks if static volume @vol_id is corrupted by fully reading * it and checking data CRC. 
This function returns %0 if the volume is not * corrupted, %1 if it is corrupted and a negative error code in case of * failure. Dynamic volumes are not checked and zero is returned immediately. */ int ubi_check_volume(struct ubi_device *ubi, int vol_id) { void *buf; int err = 0, i; struct ubi_volume *vol = ubi->volumes[vol_id]; if (vol->vol_type != UBI_STATIC_VOLUME) return 0; buf = vmalloc(vol->usable_leb_size); if (!buf) return -ENOMEM; for (i = 0; i < vol->used_ebs; i++) { int size; if (i == vol->used_ebs - 1) size = vol->last_eb_bytes; else size = vol->usable_leb_size; err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1); if (err) { if (mtd_is_eccerr(err)) err = 1; break; } } vfree(buf); return err; } /** * ubi_update_reserved - update bad eraseblock handling accounting data. * @ubi: UBI device description object * * This function calculates the gap between current number of PEBs reserved for * bad eraseblock handling and the required level of PEBs that must be * reserved, and if necessary, reserves more PEBs to fill that gap, according * to availability. Should be called with ubi->volumes_lock held. */ void ubi_update_reserved(struct ubi_device *ubi) { int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs; if (need <= 0 || ubi->avail_pebs == 0) return; need = min_t(int, need, ubi->avail_pebs); ubi->avail_pebs -= need; ubi->rsvd_pebs += need; ubi->beb_rsvd_pebs += need; ubi_msg("reserved more %d PEBs for bad PEB handling", need); } /** * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad * eraseblock handling. * @ubi: UBI device description object */ void ubi_calculate_reserved(struct ubi_device *ubi) { /* * Calculate the actual number of PEBs currently needed to be reserved * for future bad eraseblock handling. 
*/ ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count; if (ubi->beb_rsvd_level < 0) { ubi->beb_rsvd_level = 0; ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)", ubi->bad_peb_count, ubi->bad_peb_limit); } } /** * ubi_check_pattern - check if buffer contains only a certain byte pattern. * @buf: buffer to check * @patt: the pattern to check * @size: buffer size in bytes * * This function returns %1 in there are only @patt bytes in @buf, and %0 if * something else was also found. */ int ubi_check_pattern(const void *buf, uint8_t patt, int size) { int i; for (i = 0; i < size; i++) if (((const uint8_t *)buf)[i] != patt) return 0; return 1; }
gpl-2.0
GeyerA/kernel_shamu
drivers/leds/leds-da9052.c
2323
5185
/* * LED Driver for Dialog DA9052 PMICs. * * Copyright(c) 2012 Dialog Semiconductor Ltd. * * Author: David Dajun Chen <dchen@diasemi.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/mfd/da9052/reg.h> #include <linux/mfd/da9052/da9052.h> #include <linux/mfd/da9052/pdata.h> #define DA9052_OPENDRAIN_OUTPUT 2 #define DA9052_SET_HIGH_LVL_OUTPUT (1 << 3) #define DA9052_MASK_UPPER_NIBBLE 0xF0 #define DA9052_MASK_LOWER_NIBBLE 0x0F #define DA9052_NIBBLE_SHIFT 4 #define DA9052_MAX_BRIGHTNESS 0x5f struct da9052_led { struct led_classdev cdev; struct work_struct work; struct da9052 *da9052; unsigned char led_index; unsigned char id; int brightness; }; static unsigned char led_reg[] = { DA9052_LED_CONT_4_REG, DA9052_LED_CONT_5_REG, }; static int da9052_set_led_brightness(struct da9052_led *led) { u8 val; int error; val = (led->brightness & 0x7f) | DA9052_LED_CONT_DIM; error = da9052_reg_write(led->da9052, led_reg[led->led_index], val); if (error < 0) dev_err(led->da9052->dev, "Failed to set led brightness, %d\n", error); return error; } static void da9052_led_work(struct work_struct *work) { struct da9052_led *led = container_of(work, struct da9052_led, work); da9052_set_led_brightness(led); } static void da9052_led_set(struct led_classdev *led_cdev, enum led_brightness value) { struct da9052_led *led; led = container_of(led_cdev, struct da9052_led, cdev); led->brightness = value; schedule_work(&led->work); } static int da9052_configure_leds(struct da9052 *da9052) { int error; unsigned char register_value = DA9052_OPENDRAIN_OUTPUT | DA9052_SET_HIGH_LVL_OUTPUT; error = 
da9052_reg_update(da9052, DA9052_GPIO_14_15_REG, DA9052_MASK_LOWER_NIBBLE, register_value); if (error < 0) { dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n", error); return error; } error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG, DA9052_MASK_UPPER_NIBBLE, register_value << DA9052_NIBBLE_SHIFT); if (error < 0) dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n", error); return error; } static int da9052_led_probe(struct platform_device *pdev) { struct da9052_pdata *pdata; struct da9052 *da9052; struct led_platform_data *pled; struct da9052_led *led = NULL; int error = -ENODEV; int i; da9052 = dev_get_drvdata(pdev->dev.parent); pdata = da9052->dev->platform_data; if (pdata == NULL) { dev_err(&pdev->dev, "No platform data\n"); goto err; } pled = pdata->pled; if (pled == NULL) { dev_err(&pdev->dev, "No platform data for LED\n"); goto err; } led = devm_kzalloc(&pdev->dev, sizeof(struct da9052_led) * pled->num_leds, GFP_KERNEL); if (led == NULL) { dev_err(&pdev->dev, "Failed to alloc memory\n"); error = -ENOMEM; goto err; } for (i = 0; i < pled->num_leds; i++) { led[i].cdev.name = pled->leds[i].name; led[i].cdev.brightness_set = da9052_led_set; led[i].cdev.brightness = LED_OFF; led[i].cdev.max_brightness = DA9052_MAX_BRIGHTNESS; led[i].brightness = LED_OFF; led[i].led_index = pled->leds[i].flags; led[i].da9052 = dev_get_drvdata(pdev->dev.parent); INIT_WORK(&led[i].work, da9052_led_work); error = led_classdev_register(pdev->dev.parent, &led[i].cdev); if (error) { dev_err(&pdev->dev, "Failed to register led %d\n", led[i].led_index); goto err_register; } error = da9052_set_led_brightness(&led[i]); if (error) { dev_err(&pdev->dev, "Unable to init led %d\n", led[i].led_index); continue; } } error = da9052_configure_leds(led->da9052); if (error) { dev_err(&pdev->dev, "Failed to configure GPIO LED%d\n", error); goto err_register; } platform_set_drvdata(pdev, led); return 0; err_register: for (i = i - 1; i >= 0; i--) { 
led_classdev_unregister(&led[i].cdev); cancel_work_sync(&led[i].work); } err: return error; } static int da9052_led_remove(struct platform_device *pdev) { struct da9052_led *led = platform_get_drvdata(pdev); struct da9052_pdata *pdata; struct da9052 *da9052; struct led_platform_data *pled; int i; da9052 = dev_get_drvdata(pdev->dev.parent); pdata = da9052->dev->platform_data; pled = pdata->pled; for (i = 0; i < pled->num_leds; i++) { led[i].brightness = 0; da9052_set_led_brightness(&led[i]); led_classdev_unregister(&led[i].cdev); cancel_work_sync(&led[i].work); } return 0; } static struct platform_driver da9052_led_driver = { .driver = { .name = "da9052-leds", .owner = THIS_MODULE, }, .probe = da9052_led_probe, .remove = da9052_led_remove, }; module_platform_driver(da9052_led_driver); MODULE_AUTHOR("Dialog Semiconductor Ltd <dchen@diasemi.com>"); MODULE_DESCRIPTION("LED driver for Dialog DA9052 PMIC"); MODULE_LICENSE("GPL");
gpl-2.0
faux123/HTC_Amaze_Kernel
drivers/net/wireless/libertas/tx.c
2323
5441
/* * This file contains the handling of TX in wlan driver. */ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/sched.h> #include <net/cfg80211.h> #include "host.h" #include "radiotap.h" #include "decl.h" #include "defs.h" #include "dev.h" /** * convert_radiotap_rate_to_mv - converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE * units (500 Kb/s) into Marvell WLAN format (see Table 8 in Section 3.2.1) * * @rate: Input rate * returns: Output Rate (0 if invalid) */ static u32 convert_radiotap_rate_to_mv(u8 rate) { switch (rate) { case 2: /* 1 Mbps */ return 0 | (1 << 4); case 4: /* 2 Mbps */ return 1 | (1 << 4); case 11: /* 5.5 Mbps */ return 2 | (1 << 4); case 22: /* 11 Mbps */ return 3 | (1 << 4); case 12: /* 6 Mbps */ return 4 | (1 << 4); case 18: /* 9 Mbps */ return 5 | (1 << 4); case 24: /* 12 Mbps */ return 6 | (1 << 4); case 36: /* 18 Mbps */ return 7 | (1 << 4); case 48: /* 24 Mbps */ return 8 | (1 << 4); case 72: /* 36 Mbps */ return 9 | (1 << 4); case 96: /* 48 Mbps */ return 10 | (1 << 4); case 108: /* 54 Mbps */ return 11 | (1 << 4); } return 0; } /** * lbs_hard_start_xmit - checks the conditions and sends packet to IF * layer if everything is ok * * @skb: A pointer to skb which includes TX packet * @dev: A pointer to the &struct net_device * returns: 0 or -1 */ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; struct lbs_private *priv = dev->ml_priv; struct txpd *txpd; char *p802x_hdr; uint16_t pkt_len; netdev_tx_t ret = NETDEV_TX_OK; lbs_deb_enter(LBS_DEB_TX); /* We need to protect against the queues being restarted before we get round to stopping them */ spin_lock_irqsave(&priv->driver_lock, flags); if (priv->surpriseremoved) goto free; if (!skb->len || (skb->len > MRVDRV_ETH_TX_PACKET_BUFFER_SIZE)) { lbs_deb_tx("tx err: skb length %d 0 or > %zd\n", skb->len, MRVDRV_ETH_TX_PACKET_BUFFER_SIZE); /* We'll never manage to send this one; drop it and return 'OK' */ 
dev->stats.tx_dropped++; dev->stats.tx_errors++; goto free; } netif_stop_queue(priv->dev); if (priv->mesh_dev) netif_stop_queue(priv->mesh_dev); if (priv->tx_pending_len) { /* This can happen if packets come in on the mesh and eth device simultaneously -- there's no mutual exclusion on hard_start_xmit() calls between devices. */ lbs_deb_tx("Packet on %s while busy\n", dev->name); ret = NETDEV_TX_BUSY; goto unlock; } priv->tx_pending_len = -1; spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_hex(LBS_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100)); txpd = (void *)priv->tx_pending_buf; memset(txpd, 0, sizeof(struct txpd)); p802x_hdr = skb->data; pkt_len = skb->len; if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data; /* set txpd fields from the radiotap header */ txpd->tx_control = cpu_to_le32(convert_radiotap_rate_to_mv(rtap_hdr->rate)); /* skip the radiotap header */ p802x_hdr += sizeof(*rtap_hdr); pkt_len -= sizeof(*rtap_hdr); /* copy destination address from 802.11 header */ memcpy(txpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN); } else { /* copy destination address from 802.3 header */ memcpy(txpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN); } txpd->tx_packet_length = cpu_to_le16(pkt_len); txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd)); lbs_mesh_set_txpd(priv, dev, txpd); lbs_deb_hex(LBS_DEB_TX, "txpd", (u8 *) &txpd, sizeof(struct txpd)); lbs_deb_hex(LBS_DEB_TX, "Tx Data", (u8 *) p802x_hdr, le16_to_cpu(txpd->tx_packet_length)); memcpy(&txpd[1], p802x_hdr, le16_to_cpu(txpd->tx_packet_length)); spin_lock_irqsave(&priv->driver_lock, flags); priv->tx_pending_len = pkt_len + sizeof(struct txpd); lbs_deb_tx("%s lined up packet\n", __func__); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { /* Keep the skb to echo it back once Tx feedback is received from FW */ skb_orphan(skb); /* Keep the skb around for when we get 
feedback */ priv->currenttxskb = skb; } else { free: dev_kfree_skb_any(skb); } unlock: spin_unlock_irqrestore(&priv->driver_lock, flags); wake_up(&priv->waitq); lbs_deb_leave_args(LBS_DEB_TX, "ret %d", ret); return ret; } /** * lbs_send_tx_feedback - sends to the host the last transmitted packet, * filling the radiotap headers with transmission information. * * @priv: A pointer to &struct lbs_private structure * @try_count: A 32-bit value containing transmission retry status. * * returns: void */ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count) { struct tx_radiotap_hdr *radiotap_hdr; if (priv->wdev->iftype != NL80211_IFTYPE_MONITOR || priv->currenttxskb == NULL) return; radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data; radiotap_hdr->data_retries = try_count ? (1 + priv->txretrycount - try_count) : 0; priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb, priv->dev); netif_rx(priv->currenttxskb); priv->currenttxskb = NULL; if (priv->connect_status == LBS_CONNECTED) netif_wake_queue(priv->dev); if (priv->mesh_dev && lbs_mesh_connected(priv)) netif_wake_queue(priv->mesh_dev); } EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
gpl-2.0
mihadyuk/wandboard-linux
fs/jffs2/compr_lzo.c
3347
2326
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2007 Nokia Corporation. All rights reserved. * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by Richard Purdie <rpurdie@openedhand.com> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/lzo.h> #include "compr.h" static void *lzo_mem; static void *lzo_compress_buf; static DEFINE_MUTEX(deflate_mutex); /* for lzo_mem and lzo_compress_buf */ static void free_workspace(void) { vfree(lzo_mem); vfree(lzo_compress_buf); } static int __init alloc_workspace(void) { lzo_mem = vmalloc(LZO1X_MEM_COMPRESS); lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); if (!lzo_mem || !lzo_compress_buf) { printk(KERN_WARNING "Failed to allocate lzo deflate workspace\n"); free_workspace(); return -ENOMEM; } return 0; } static int jffs2_lzo_compress(unsigned char *data_in, unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) { size_t compress_size; int ret; mutex_lock(&deflate_mutex); ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem); if (ret != LZO_E_OK) goto fail; if (compress_size > *dstlen) goto fail; memcpy(cpage_out, lzo_compress_buf, compress_size); mutex_unlock(&deflate_mutex); *dstlen = compress_size; return 0; fail: mutex_unlock(&deflate_mutex); return -1; } static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out, uint32_t srclen, uint32_t destlen) { size_t dl = destlen; int ret; ret = lzo1x_decompress_safe(data_in, srclen, cpage_out, &dl); if (ret != LZO_E_OK || dl != destlen) return -1; return 0; } static struct jffs2_compressor jffs2_lzo_comp = { .priority = JFFS2_LZO_PRIORITY, .name = "lzo", .compr = JFFS2_COMPR_LZO, .compress = &jffs2_lzo_compress, .decompress = &jffs2_lzo_decompress, .disabled = 0, }; int __init jffs2_lzo_init(void) { int ret; ret = 
alloc_workspace(); if (ret < 0) return ret; ret = jffs2_register_compressor(&jffs2_lzo_comp); if (ret) free_workspace(); return ret; } void jffs2_lzo_exit(void) { jffs2_unregister_compressor(&jffs2_lzo_comp); free_workspace(); }
gpl-2.0
Kali-/android_kernel_sony_msm8960T
arch/x86/kernel/traps.c
4115
19578
/* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 */ /* * Handle hardware traps and faults. */ #include <linux/interrupt.h> #include <linux/kallsyms.h> #include <linux/spinlock.h> #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/kdebug.h> #include <linux/kgdb.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/kexec.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/bug.h> #include <linux/nmi.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/io.h> #ifdef CONFIG_EISA #include <linux/ioport.h> #include <linux/eisa.h> #endif #ifdef CONFIG_MCA #include <linux/mca.h> #endif #if defined(CONFIG_EDAC) #include <linux/edac.h> #endif #include <asm/kmemcheck.h> #include <asm/stacktrace.h> #include <asm/processor.h> #include <asm/debugreg.h> #include <linux/atomic.h> #include <asm/traps.h> #include <asm/desc.h> #include <asm/i387.h> #include <asm/fpu-internal.h> #include <asm/mce.h> #include <asm/mach_traps.h> #ifdef CONFIG_X86_64 #include <asm/x86_init.h> #include <asm/pgalloc.h> #include <asm/proto.h> #else #include <asm/processor-flags.h> #include <asm/setup.h> asmlinkage int system_call(void); /* Do we ignore FPU interrupts ? */ char ignore_fpu_irq; /* * The IDT has to be page-aligned to simplify the Pentium * F0 0F bug workaround. 
*/ gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; #endif DECLARE_BITMAP(used_vectors, NR_VECTORS); EXPORT_SYMBOL_GPL(used_vectors); static inline void conditional_sti(struct pt_regs *regs) { if (regs->flags & X86_EFLAGS_IF) local_irq_enable(); } static inline void preempt_conditional_sti(struct pt_regs *regs) { inc_preempt_count(); if (regs->flags & X86_EFLAGS_IF) local_irq_enable(); } static inline void conditional_cli(struct pt_regs *regs) { if (regs->flags & X86_EFLAGS_IF) local_irq_disable(); } static inline void preempt_conditional_cli(struct pt_regs *regs) { if (regs->flags & X86_EFLAGS_IF) local_irq_disable(); dec_preempt_count(); } static void __kprobes do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, long error_code, siginfo_t *info) { struct task_struct *tsk = current; #ifdef CONFIG_X86_32 if (regs->flags & X86_VM_MASK) { /* * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. * On nmi (interrupt 2), do_trap should not be called. */ if (trapnr < X86_TRAP_UD) goto vm86_trap; goto trap_signal; } #endif if (!user_mode(regs)) goto kernel_trap; #ifdef CONFIG_X86_32 trap_signal: #endif /* * We want error_code and trap_nr set for userspace faults and * kernelspace faults which result in die(), but not * kernelspace faults which are fixed up. die() gives the * process no chance to handle the signal and notice the * kernel fault information, so that won't result in polluting * the information about previously queued, but not yet * delivered, faults. See also do_general_protection below. 
*/ tsk->thread.error_code = error_code; tsk->thread.trap_nr = trapnr; #ifdef CONFIG_X86_64 if (show_unhandled_signals && unhandled_signal(tsk, signr) && printk_ratelimit()) { printk(KERN_INFO "%s[%d] trap %s ip:%lx sp:%lx error:%lx", tsk->comm, tsk->pid, str, regs->ip, regs->sp, error_code); print_vma_addr(" in ", regs->ip); printk("\n"); } #endif if (info) force_sig_info(signr, info, tsk); else force_sig(signr, tsk); return; kernel_trap: if (!fixup_exception(regs)) { tsk->thread.error_code = error_code; tsk->thread.trap_nr = trapnr; die(str, regs, error_code); } return; #ifdef CONFIG_X86_32 vm86_trap: if (handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr)) goto trap_signal; return; #endif } #define DO_ERROR(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ == NOTIFY_STOP) \ return; \ conditional_sti(regs); \ do_trap(trapnr, signr, str, regs, error_code, NULL); \ } #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ siginfo_t info; \ info.si_signo = signr; \ info.si_errno = 0; \ info.si_code = sicode; \ info.si_addr = (void __user *)siaddr; \ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ == NOTIFY_STOP) \ return; \ conditional_sti(regs); \ do_trap(trapnr, signr, str, regs, error_code, &info); \ } DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds) DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) #ifdef CONFIG_X86_32 
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) #endif DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) #ifdef CONFIG_X86_64 /* Runs on IST stack */ dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) { if (notify_die(DIE_TRAP, "stack segment", regs, error_code, X86_TRAP_SS, SIGBUS) == NOTIFY_STOP) return; preempt_conditional_sti(regs); do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); preempt_conditional_cli(regs); } dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) { static const char str[] = "double fault"; struct task_struct *tsk = current; /* Return not checked because double check cannot be ignored */ notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_DF; /* * This is always a kernel trap and never fixable (and thus must * never return). */ for (;;) die(str, regs, error_code); } #endif dotraplinkage void __kprobes do_general_protection(struct pt_regs *regs, long error_code) { struct task_struct *tsk; conditional_sti(regs); #ifdef CONFIG_X86_32 if (regs->flags & X86_VM_MASK) goto gp_in_vm86; #endif tsk = current; if (!user_mode(regs)) goto gp_in_kernel; tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && printk_ratelimit()) { printk(KERN_INFO "%s[%d] general protection ip:%lx sp:%lx error:%lx", tsk->comm, task_pid_nr(tsk), regs->ip, regs->sp, error_code); print_vma_addr(" in ", regs->ip); printk("\n"); } force_sig(SIGSEGV, tsk); return; #ifdef CONFIG_X86_32 gp_in_vm86: local_irq_enable(); handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); return; #endif gp_in_kernel: if (fixup_exception(regs)) return; tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; if (notify_die(DIE_GPF, "general protection fault", regs, error_code, X86_TRAP_GP, SIGSEGV) == 
NOTIFY_STOP) return; die("general protection fault", regs, error_code); } /* May run on IST stack. */ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) { #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) return; #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) return; /* * Let others (NMI) know that the debug stack is in use * as we may switch to the interrupt stack. */ debug_stack_usage_inc(); preempt_conditional_sti(regs); do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); preempt_conditional_cli(regs); debug_stack_usage_dec(); } #ifdef CONFIG_X86_64 /* * Help handler running on IST stack to switch back to user stack * for scheduling or signal handling. The actual stack switch is done in * entry.S */ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) { struct pt_regs *regs = eregs; /* Did already sync */ if (eregs == (struct pt_regs *)eregs->sp) ; /* Exception from user space */ else if (user_mode(eregs)) regs = task_pt_regs(current); /* * Exception from kernel and interrupts are enabled. Move to * kernel process stack. */ else if (eregs->flags & X86_EFLAGS_IF) regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); if (eregs != regs) *regs = *eregs; return regs; } #endif /* * Our handling of the processor debug registers is non-trivial. * We do not clear them on entry and exit from the kernel. Therefore * it is possible to get a watchpoint trap here from inside the kernel. * However, the code in ./ptrace.c has ensured that the user can * only set watchpoints on userspace addresses. Therefore the in-kernel * watchpoint trap can only occur in code which is reading/writing * from user space. 
Such code must not hold kernel locks (since it * can equally take a page fault), therefore it is safe to call * force_sig_info even though that claims and releases locks. * * Code in ./signal.c ensures that the debug control register * is restored before we deliver any signal, and therefore that * user code runs with the correct debug control register even though * we clear it here. * * Being careful here means that we don't have to be as careful in a * lot of more complicated places (task switching can be a bit lazy * about restoring all the debug state, and ptrace doesn't have to * find every occurrence of the TF bit that could be saved away even * by user code) * * May run on IST stack. */ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) { struct task_struct *tsk = current; int user_icebp = 0; unsigned long dr6; int si_code; get_debugreg(dr6, 6); /* Filter out all the reserved bits which are preset to 1 */ dr6 &= ~DR6_RESERVED; /* * If dr6 has no reason to give us about the origin of this trap, * then it's very likely the result of an icebp/int01 trap. * User wants a sigtrap for that. */ if (!dr6 && user_mode(regs)) user_icebp = 1; /* Catch kmemcheck conditions first of all! */ if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) return; /* DR6 may or may not be cleared by the CPU */ set_debugreg(0, 6); /* * The processor cleared BTF, so don't mark that we need it set. */ clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP); /* Store the virtualized DR6 value */ tsk->thread.debugreg6 = dr6; if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code, SIGTRAP) == NOTIFY_STOP) return; /* * Let others (NMI) know that the debug stack is in use * as we may switch to the interrupt stack. 
*/ debug_stack_usage_inc(); /* It's safe to allow irq's after DR6 has been saved */ preempt_conditional_sti(regs); if (regs->flags & X86_VM_MASK) { handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, X86_TRAP_DB); preempt_conditional_cli(regs); debug_stack_usage_dec(); return; } /* * Single-stepping through system calls: ignore any exceptions in * kernel space, but re-enable TF when returning to user mode. * * We already checked v86 mode above, so we can check for kernel mode * by just checking the CPL of CS. */ if ((dr6 & DR_STEP) && !user_mode(regs)) { tsk->thread.debugreg6 &= ~DR_STEP; set_tsk_thread_flag(tsk, TIF_SINGLESTEP); regs->flags &= ~X86_EFLAGS_TF; } si_code = get_si_code(tsk->thread.debugreg6); if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) send_sigtrap(tsk, regs, error_code, si_code); preempt_conditional_cli(regs); debug_stack_usage_dec(); return; } /* * Note that we play around with the 'TS' bit in an attempt to get * the correct behaviour even in the presence of the asynchronous * IRQ13 behaviour */ void math_error(struct pt_regs *regs, int error_code, int trapnr) { struct task_struct *task = current; siginfo_t info; unsigned short err; char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : "simd exception"; if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) return; conditional_sti(regs); if (!user_mode_vm(regs)) { if (!fixup_exception(regs)) { task->thread.error_code = error_code; task->thread.trap_nr = trapnr; die(str, regs, error_code); } return; } /* * Save the info for the exception handler and clear the error. */ save_init_fpu(task); task->thread.trap_nr = trapnr; task->thread.error_code = error_code; info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void __user *)regs->ip; if (trapnr == X86_TRAP_MF) { unsigned short cwd, swd; /* * (~cwd & swd) will mask out exceptions that are not set to unmasked * status. 
0x3f is the exception bits in these regs, 0x200 is the * C1 reg you need in case of a stack fault, 0x040 is the stack * fault bit. We should only be taking one exception at a time, * so if this combination doesn't produce any single exception, * then we have a bad program that isn't synchronizing its FPU usage * and it will suffer the consequences since we won't be able to * fully reproduce the context of the exception */ cwd = get_fpu_cwd(task); swd = get_fpu_swd(task); err = swd & ~cwd; } else { /* * The SIMD FPU exceptions are handled a little differently, as there * is only a single status/control register. Thus, to determine which * unmasked exception was caught we must mask the exception mask bits * at 0x1f80, and then use these to mask the exception bits at 0x3f. */ unsigned short mxcsr = get_fpu_mxcsr(task); err = ~(mxcsr >> 7) & mxcsr; } if (err & 0x001) { /* Invalid op */ /* * swd & 0x240 == 0x040: Stack Underflow * swd & 0x240 == 0x240: Stack Overflow * User must clear the SF bit (0x40) if set */ info.si_code = FPE_FLTINV; } else if (err & 0x004) { /* Divide by Zero */ info.si_code = FPE_FLTDIV; } else if (err & 0x008) { /* Overflow */ info.si_code = FPE_FLTOVF; } else if (err & 0x012) { /* Denormal, Underflow */ info.si_code = FPE_FLTUND; } else if (err & 0x020) { /* Precision */ info.si_code = FPE_FLTRES; } else { /* * If we're using IRQ 13, or supposedly even some trap * X86_TRAP_MF implementations, it's possible * we get a spurious trap, which is not an error. 
*/ return; } force_sig_info(SIGFPE, &info, task); } dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) { #ifdef CONFIG_X86_32 ignore_fpu_irq = 1; #endif math_error(regs, error_code, X86_TRAP_MF); } dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code) { math_error(regs, error_code, X86_TRAP_XF); } dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) { conditional_sti(regs); #if 0 /* No need to warn about this any longer. */ printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); #endif } asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) { } asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) { } /* * 'math_state_restore()' saves the current math information in the * old math state array, and gets the new ones from the current task * * Careful.. There are problems with IBM-designed IRQ13 behaviour. * Don't touch unless you *really* know how it works. * * Must be called with kernel preemption disabled (eg with local * local interrupts as in the case of do_device_not_available). */ void math_state_restore(void) { struct task_struct *tsk = current; if (!tsk_used_math(tsk)) { local_irq_enable(); /* * does a slab alloc which can sleep */ if (init_fpu(tsk)) { /* * ran out of memory! */ do_group_exit(SIGKILL); return; } local_irq_disable(); } __thread_fpu_begin(tsk); /* * Paranoid restore. send a SIGSEGV if we fail to restore the state. 
*/ if (unlikely(restore_fpu_checking(tsk))) { __thread_fpu_end(tsk); force_sig(SIGSEGV, tsk); return; } tsk->fpu_counter++; } EXPORT_SYMBOL_GPL(math_state_restore); dotraplinkage void __kprobes do_device_not_available(struct pt_regs *regs, long error_code) { #ifdef CONFIG_MATH_EMULATION if (read_cr0() & X86_CR0_EM) { struct math_emu_info info = { }; conditional_sti(regs); info.regs = regs; math_emulate(&info); return; } #endif math_state_restore(); /* interrupts still off */ #ifdef CONFIG_X86_32 conditional_sti(regs); #endif } #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) { siginfo_t info; local_irq_enable(); info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_BADSTK; info.si_addr = NULL; if (notify_die(DIE_TRAP, "iret exception", regs, error_code, X86_TRAP_IRET, SIGILL) == NOTIFY_STOP) return; do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, &info); } #endif /* Set of traps needed for early debugging. */ void __init early_trap_init(void) { set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK); /* int3 can be called from all */ set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); set_intr_gate(X86_TRAP_PF, &page_fault); load_idt(&idt_descr); } void __init trap_init(void) { int i; #ifdef CONFIG_EISA void __iomem *p = early_ioremap(0x0FFFD9, 4); if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) EISA_bus = 1; early_iounmap(p, 4); #endif set_intr_gate(X86_TRAP_DE, &divide_error); set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK); /* int4 can be called from all */ set_system_intr_gate(X86_TRAP_OF, &overflow); set_intr_gate(X86_TRAP_BR, &bounds); set_intr_gate(X86_TRAP_UD, &invalid_op); set_intr_gate(X86_TRAP_NM, &device_not_available); #ifdef CONFIG_X86_32 set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS); #else set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK); #endif set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun); set_intr_gate(X86_TRAP_TS, 
&invalid_TSS); set_intr_gate(X86_TRAP_NP, &segment_not_present); set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); set_intr_gate(X86_TRAP_GP, &general_protection); set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug); set_intr_gate(X86_TRAP_MF, &coprocessor_error); set_intr_gate(X86_TRAP_AC, &alignment_check); #ifdef CONFIG_X86_MCE set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK); #endif set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error); /* Reserve all the builtin and the syscall vector: */ for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) set_bit(i, used_vectors); #ifdef CONFIG_IA32_EMULATION set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); set_bit(IA32_SYSCALL_VECTOR, used_vectors); #endif #ifdef CONFIG_X86_32 set_system_trap_gate(SYSCALL_VECTOR, &system_call); set_bit(SYSCALL_VECTOR, used_vectors); #endif /* * Should be a barrier for any external CPU state: */ cpu_init(); x86_init.irqs.trap_init(); #ifdef CONFIG_X86_64 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); set_nmi_gate(X86_TRAP_DB, &debug); set_nmi_gate(X86_TRAP_BP, &int3); #endif }
gpl-2.0
Megatron007/Megabyte_kernel_victara
drivers/mtd/maps/ixp2000.c
5139
6143
/* * drivers/mtd/maps/ixp2000.c * * Mapping for the Intel XScale IXP2000 based systems * * Copyright (C) 2002 Intel Corp. * Copyright (C) 2003-2004 MontaVista Software, Inc. * * Original Author: Naeem M Afzal <naeem.m.afzal@intel.com> * Maintainer: Deepak Saxena <dsaxena@plexity.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <asm/io.h> #include <mach/hardware.h> #include <asm/mach/flash.h> #include <linux/reboot.h> struct ixp2000_flash_info { struct mtd_info *mtd; struct map_info map; struct resource *res; }; static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs) { unsigned long (*set_bank)(unsigned long) = (unsigned long(*)(unsigned long))map->map_priv_2; return (set_bank ? set_bank(ofs) : ofs); } #ifdef __ARMEB__ /* * Rev A0 and A1 of IXP2400 silicon have a broken addressing unit which * causes the lower address bits to be XORed with 0x11 on 8 bit accesses * and XORed with 0x10 on 16 bit accesses. See the spec update, erratum 44. 
*/ static int erratum44_workaround = 0; static inline unsigned long address_fix8_write(unsigned long addr) { if (erratum44_workaround) { return (addr ^ 3); } return addr; } #else #define address_fix8_write(x) (x) #endif static map_word ixp2000_flash_read8(struct map_info *map, unsigned long ofs) { map_word val; val.x[0] = *((u8 *)(map->map_priv_1 + flash_bank_setup(map, ofs))); return val; } /* * We can't use the standard memcpy due to the broken SlowPort * address translation on rev A0 and A1 silicon and the fact that * we have banked flash. */ static void ixp2000_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { from = flash_bank_setup(map, from); while(len--) *(__u8 *) to++ = *(__u8 *)(map->map_priv_1 + from++); } static void ixp2000_flash_write8(struct map_info *map, map_word d, unsigned long ofs) { *(__u8 *) (address_fix8_write(map->map_priv_1 + flash_bank_setup(map, ofs))) = d.x[0]; } static void ixp2000_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) { to = flash_bank_setup(map, to); while(len--) { unsigned long tmp = address_fix8_write(map->map_priv_1 + to++); *(__u8 *)(tmp) = *(__u8 *)(from++); } } static int ixp2000_flash_remove(struct platform_device *dev) { struct flash_platform_data *plat = dev->dev.platform_data; struct ixp2000_flash_info *info = platform_get_drvdata(dev); platform_set_drvdata(dev, NULL); if(!info) return 0; if (info->mtd) { mtd_device_unregister(info->mtd); map_destroy(info->mtd); } if (info->map.map_priv_1) iounmap((void *) info->map.map_priv_1); if (info->res) { release_resource(info->res); kfree(info->res); } if (plat->exit) plat->exit(); return 0; } static int ixp2000_flash_probe(struct platform_device *dev) { static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; struct ixp2000_flash_data *ixp_data = dev->dev.platform_data; struct flash_platform_data *plat; struct ixp2000_flash_info *info; unsigned long window_size; int err = -1; if (!ixp_data) 
return -ENODEV; plat = ixp_data->platform_data; if (!plat) return -ENODEV; window_size = resource_size(dev->resource); dev_info(&dev->dev, "Probe of IXP2000 flash(%d banks x %dMiB)\n", ixp_data->nr_banks, ((u32)window_size >> 20)); if (plat->width != 1) { dev_err(&dev->dev, "IXP2000 MTD map only supports 8-bit mode, asking for %d\n", plat->width * 8); return -EIO; } info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL); if(!info) { err = -ENOMEM; goto Error; } platform_set_drvdata(dev, info); /* * Tell the MTD layer we're not 1:1 mapped so that it does * not attempt to do a direct access on us. */ info->map.phys = NO_XIP; info->map.size = ixp_data->nr_banks * window_size; info->map.bankwidth = 1; /* * map_priv_2 is used to store a ptr to the bank_setup routine */ info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup; info->map.name = dev_name(&dev->dev); info->map.read = ixp2000_flash_read8; info->map.write = ixp2000_flash_write8; info->map.copy_from = ixp2000_flash_copy_from; info->map.copy_to = ixp2000_flash_copy_to; info->res = request_mem_region(dev->resource->start, resource_size(dev->resource), dev_name(&dev->dev)); if (!info->res) { dev_err(&dev->dev, "Could not reserve memory region\n"); err = -ENOMEM; goto Error; } info->map.map_priv_1 = (unsigned long)ioremap(dev->resource->start, resource_size(dev->resource)); if (!info->map.map_priv_1) { dev_err(&dev->dev, "Failed to ioremap flash region\n"); err = -EIO; goto Error; } #if defined(__ARMEB__) /* * Enable erratum 44 workaround for NPUs with broken slowport */ erratum44_workaround = ixp2000_has_broken_slowport(); dev_info(&dev->dev, "Erratum 44 workaround %s\n", erratum44_workaround ? 
"enabled" : "disabled"); #endif info->mtd = do_map_probe(plat->map_name, &info->map); if (!info->mtd) { dev_err(&dev->dev, "map_probe failed\n"); err = -ENXIO; goto Error; } info->mtd->owner = THIS_MODULE; err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0); if (err) goto Error; return 0; Error: ixp2000_flash_remove(dev); return err; } static struct platform_driver ixp2000_flash_driver = { .probe = ixp2000_flash_probe, .remove = ixp2000_flash_remove, .driver = { .name = "IXP2000-Flash", .owner = THIS_MODULE, }, }; module_platform_driver(ixp2000_flash_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>"); MODULE_ALIAS("platform:IXP2000-Flash");
gpl-2.0
blaskewitts/android_kernel_samsung_klte
arch/mips/pci/pci-yosemite.c
8723
1702
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <asm/titan_dep.h> extern struct pci_ops titan_pci_ops; static struct resource py_mem_resource = { .start = 0xe0000000UL, .end = 0xe3ffffffUL, .name = "Titan PCI MEM", .flags = IORESOURCE_MEM }; /* * PMON really reserves 16MB of I/O port space but that's stupid, nothing * needs that much since allocations are limited to 256 bytes per device * anyway. So we just claim 64kB here. */ #define TITAN_IO_SIZE 0x0000ffffUL #define TITAN_IO_BASE 0xe8000000UL static struct resource py_io_resource = { .start = 0x00001000UL, .end = TITAN_IO_SIZE - 1, .name = "Titan IO MEM", .flags = IORESOURCE_IO, }; static struct pci_controller py_controller = { .pci_ops = &titan_pci_ops, .mem_resource = &py_mem_resource, .mem_offset = 0x00000000UL, .io_resource = &py_io_resource, .io_offset = 0x00000000UL }; static char ioremap_failed[] __initdata = "Could not ioremap I/O port range"; static int __init pmc_yosemite_setup(void) { unsigned long io_v_base; io_v_base = (unsigned long) ioremap(TITAN_IO_BASE, TITAN_IO_SIZE); if (!io_v_base) panic(ioremap_failed); set_io_port_base(io_v_base); py_controller.io_map_base = io_v_base; TITAN_WRITE(RM9000x2_OCD_LKM7, TITAN_READ(RM9000x2_OCD_LKM7) | 1); ioport_resource.end = TITAN_IO_SIZE - 1; register_pci_controller(&py_controller); return 0; } arch_initcall(pmc_yosemite_setup);
gpl-2.0
jomeister15/ICS-SGH-I727-kernel
drivers/scsi/bvme6000_scsi.c
9235
3367
/* * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux. * * Based on work by Alan Hourihane and Kars de Jong * * Rewritten to use 53c700.c by Richard Hirst <richard@sleepie.demon.co.uk> */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <asm/bvme6000hw.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include "53c700.h" MODULE_AUTHOR("Richard Hirst <richard@sleepie.demon.co.uk>"); MODULE_DESCRIPTION("BVME6000 NCR53C710 driver"); MODULE_LICENSE("GPL"); static struct scsi_host_template bvme6000_scsi_driver_template = { .name = "BVME6000 NCR53c710 SCSI", .proc_name = "BVME6000", .this_id = 7, .module = THIS_MODULE, }; static struct platform_device *bvme6000_scsi_device; static __devinit int bvme6000_probe(struct platform_device *dev) { struct Scsi_Host *host; struct NCR_700_Host_Parameters *hostdata; if (!MACH_IS_BVME6000) goto out; hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); if (!hostdata) { printk(KERN_ERR "bvme6000-scsi: " "Failed to allocate host data\n"); goto out; } /* Fill in the required pieces of hostdata */ hostdata->base = (void __iomem *)BVME_NCR53C710_BASE; hostdata->clock = 40; /* XXX - depends on the CPU clock! 
*/ hostdata->chip710 = 1; hostdata->dmode_extra = DMODE_FC2; hostdata->dcntl_extra = EA_710; hostdata->ctest7_extra = CTEST7_TT1; /* and register the chip */ host = NCR_700_detect(&bvme6000_scsi_driver_template, hostdata, &dev->dev); if (!host) { printk(KERN_ERR "bvme6000-scsi: No host detected; " "board configuration problem?\n"); goto out_free; } host->base = BVME_NCR53C710_BASE; host->this_id = 7; host->irq = BVME_IRQ_SCSI; if (request_irq(BVME_IRQ_SCSI, NCR_700_intr, 0, "bvme6000-scsi", host)) { printk(KERN_ERR "bvme6000-scsi: request_irq failed\n"); goto out_put_host; } platform_set_drvdata(dev, host); scsi_scan_host(host); return 0; out_put_host: scsi_host_put(host); out_free: kfree(hostdata); out: return -ENODEV; } static __devexit int bvme6000_device_remove(struct platform_device *dev) { struct Scsi_Host *host = platform_get_drvdata(dev); struct NCR_700_Host_Parameters *hostdata = shost_priv(host); scsi_remove_host(host); NCR_700_release(host); kfree(hostdata); free_irq(host->irq, host); return 0; } static struct platform_driver bvme6000_scsi_driver = { .driver = { .name = "bvme6000-scsi", .owner = THIS_MODULE, }, .probe = bvme6000_probe, .remove = __devexit_p(bvme6000_device_remove), }; static int __init bvme6000_scsi_init(void) { int err; err = platform_driver_register(&bvme6000_scsi_driver); if (err) return err; bvme6000_scsi_device = platform_device_register_simple("bvme6000-scsi", -1, NULL, 0); if (IS_ERR(bvme6000_scsi_device)) { platform_driver_unregister(&bvme6000_scsi_driver); return PTR_ERR(bvme6000_scsi_device); } return 0; } static void __exit bvme6000_scsi_exit(void) { platform_device_unregister(bvme6000_scsi_device); platform_driver_unregister(&bvme6000_scsi_driver); } module_init(bvme6000_scsi_init); module_exit(bvme6000_scsi_exit);
gpl-2.0
sev3n85/samsung_s3ve3g_EUR
drivers/hid/hid-ezkey.c
9747
2453
/*
 * HID driver for some ezkey "special" devices
 *
 * Copyright (c) 1999 Andreas Gal
 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 * Copyright (c) 2006-2007 Jiri Kosina
 * Copyright (c) 2007 Paul Walmsley
 * Copyright (c) 2008 Jiri Slaby
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

/* Shorthand for mapping the current HID usage to an EV_REL / EV_KEY code. */
#define ez_map_rel(c)	hid_map_usage(hi, usage, bit, max, EV_REL, (c))
#define ez_map_key(c)	hid_map_usage(hi, usage, bit, max, EV_KEY, (c))

/*
 * Remap the non-standard consumer-page usages of the BTC 8193 keyboard.
 * Returns 1 when the usage was mapped here, 0 to fall back to the generic
 * HID mapping.
 */
static int ez_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
		return 0;

	switch (usage->hid & HID_USAGE) {
	case 0x230: ez_map_key(BTN_MOUSE);	break;
	case 0x231: ez_map_rel(REL_WHEEL);	break;
	/*
	 * this keyboard has a scrollwheel implemented in
	 * totally broken way. We map this usage temporarily
	 * to HWHEEL and handle it in the event quirk handler
	 */
	case 0x232: ez_map_rel(REL_HWHEEL);	break;
	default:
		return 0;
	}
	return 1;
}

/*
 * Event quirk handler: translate the temporary REL_HWHEEL mapping made in
 * ez_input_mapping() back into a negated REL_WHEEL event.  Returns 1 when
 * the event was consumed here, 0 to let generic processing continue.
 */
static int ez_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
			!usage->type)
		return 0;

	/* handle the temporary quirky mapping to HWHEEL */
	if (usage->type == EV_REL && usage->code == REL_HWHEEL) {
		struct input_dev *input = field->hidinput->input;
		input_event(input, usage->type, REL_WHEEL, -value);
		return 1;
	}

	return 0;
}

static const struct hid_device_id ez_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
	{ }
};
MODULE_DEVICE_TABLE(hid, ez_devices);

static struct hid_driver ez_driver = {
	.name = "ezkey",
	.id_table = ez_devices,
	.input_mapping = ez_input_mapping,
	.event = ez_event,
};

static int __init ez_init(void)
{
	return hid_register_driver(&ez_driver);
}

static void __exit ez_exit(void)
{
	hid_unregister_driver(&ez_driver);
}

module_init(ez_init);
module_exit(ez_exit);
MODULE_LICENSE("GPL");
gpl-2.0
utkanos/android_htc_mecha_kernel
drivers/pci/hotplug/cpqphp_nvram.c
12563
13820
/*
 * Compaq Hot Plug Controller Driver
 *
 * Copyright (C) 1995,2001 Compaq Computer Corporation
 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
 * Copyright (C) 2001 IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <greg@kroah.com>
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include "cpqphp.h"
#include "cpqphp_nvram.h"

/* Physical address of the int15 entry point inside the system ROM and the
 * BIOS function codes for reading/writing environment variables. */
#define ROM_INT15_PHY_ADDR 0x0FF859
#define READ_EV 0xD8A4
#define WRITE_EV 0xD8A5

struct register_foo {
	union {
		unsigned long lword;	/* eax */
		unsigned short word;	/* ax */
		struct {
			unsigned char low;	/* al */
			unsigned char high;	/* ah */
		} byte;
	} data;

	unsigned char opcode;	/* see below */
	unsigned long length;	/* if the reg. is a pointer, how much data */
} __attribute__ ((packed));

struct all_reg {
	struct register_foo eax_reg;
	struct register_foo ebx_reg;
	struct register_foo ecx_reg;
	struct register_foo edx_reg;
	struct register_foo edi_reg;
	struct register_foo esi_reg;
	struct register_foo eflags_reg;
} __attribute__ ((packed));

/* On-disk/NVRAM layout of the hotplug resource table header. */
struct ev_hrt_header {
	u8 Version;
	u8 num_of_ctrl;
	u8 next;
};

/* Per-controller entry in the NVRAM hotplug resource table; the *_avail
 * counts tell how many (base, length) dword pairs follow this entry. */
struct ev_hrt_ctrl {
	u8 bus;
	u8 device;
	u8 function;
	u8 mem_avail;
	u8 p_mem_avail;
	u8 io_avail;
	u8 bus_avail;
	u8 next;
};

static u8 evbuffer_init;
/* NOTE(review): evbuffer_length is u8 but is assigned a u32 byte count of
 * up to 1024 below — values >255 truncate; presumably real tables stay
 * small.  TODO confirm against BIOS behavior before relying on it. */
static u8 evbuffer_length;
static u8 evbuffer[1024];

static void __iomem *compaq_int15_entry_point;

/* lock for ordering int15_bios_call() */
static spinlock_t int15_lock;


/* This is a series of function that deals with
 * setting & getting the hotplug resource table in some environment variable.
 */

/*
 * We really shouldn't be doing this unless there is a _very_ good reason to!!!
 * greg k-h
 */


/* Append one byte to the serialization buffer, advancing *p_buffer by a
 * single byte (via the u8** aliasing trick).  Returns 1 on overflow. */
static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail)
{
	u8 **tByte;

	if ((*used + 1) > *avail)
		return(1);

	*((u8*)*p_buffer) = value;
	tByte = (u8**)p_buffer;
	(*tByte)++;
	*used+=1;
	return(0);
}


/* Append one 32-bit value to the serialization buffer.  Returns 1 on
 * overflow, 0 on success. */
static u32 add_dword( u32 **p_buffer, u32 value, u32 *used, u32 *avail)
{
	if ((*used + 4) > *avail)
		return(1);

	**p_buffer = value;
	(*p_buffer)++;
	*used+=4;
	return(0);
}


/*
 * check_for_compaq_ROM
 *
 * this routine verifies that the ROM OEM string is 'COMPAQ'
 *
 * returns 0 for non-Compaq ROM, 1 for Compaq ROM
 */
static int check_for_compaq_ROM (void __iomem *rom_start)
{
	u8 temp1, temp2, temp3, temp4, temp5, temp6;
	int result = 0;

	/* the OEM string lives at a fixed offset near the end of the ROM */
	temp1 = readb(rom_start + 0xffea + 0);
	temp2 = readb(rom_start + 0xffea + 1);
	temp3 = readb(rom_start + 0xffea + 2);
	temp4 = readb(rom_start + 0xffea + 3);
	temp5 = readb(rom_start + 0xffea + 4);
	temp6 = readb(rom_start + 0xffea + 5);
	if ((temp1 == 'C') && (temp2 == 'O') && (temp3 == 'M') &&
	    (temp4 == 'P') && (temp5 == 'A') && (temp6 == 'Q')) {
		result = 1;
	}
	dbg ("%s - returned %d\n", __func__, result);
	return result;
}


/*
 * Call the BIOS int15 entry point to read or write the named environment
 * variable.  "operation" is READ_EV or WRITE_EV; *buf_size is both the
 * buffer capacity in and the transferred length out.  Returns the BIOS
 * status byte (AH) — 0 on success — or -ENODEV when no entry point is set.
 * Serialized by int15_lock because the BIOS call is not reentrant.
 */
static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
{
	unsigned long flags;
	int op = operation;
	int ret_val;

	if (!compaq_int15_entry_point)
		return -ENODEV;

	spin_lock_irqsave(&int15_lock, flags);
	__asm__ (
		"xorl %%ebx,%%ebx\n" \
		"xorl %%edx,%%edx\n" \
		"pushf\n" \
		"push %%cs\n" \
		"cli\n" \
		"call *%6\n"
		: "=c" (*buf_size), "=a" (ret_val)
		: "a" (op), "c" (*buf_size), "S" (ev_name),
		"D" (buffer), "m" (compaq_int15_entry_point)
		: "%ebx", "%edx");
	spin_unlock_irqrestore(&int15_lock, flags);

	/* status is returned in AH */
	return((ret_val & 0xFF00) >> 8);
}


/*
 * load_HRT
 *
 * Read the hot plug Resource Table from NVRAM
 */
static int load_HRT (void __iomem *rom_start)
{
	u32 available;
	u32 temp_dword;
	u8 temp_byte = 0xFF;
	u32 rc;

	if (!check_for_compaq_ROM(rom_start)) {
		return -ENODEV;
	}

	available = 1024;

	/* Now load the EV */
	temp_dword = available;

	rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword);

	evbuffer_length = temp_dword;

	/* We're maintaining the resource lists so write FF to invalidate old
	 * info
	 */
	temp_dword = 1;

	rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword);

	return rc;
}


/*
 * store_HRT
 *
 * Save the hot plug Resource Table in NVRAM
 */
static u32 store_HRT (void __iomem *rom_start)
{
	u32 *buffer;
	u32 *pFill;
	u32 usedbytes;
	u32 available;
	u32 temp_dword;
	u32 rc;
	u8 loop;
	u8 numCtrl = 0;
	struct controller *ctrl;
	struct pci_resource *resNode;
	struct ev_hrt_header *p_EV_header;
	struct ev_hrt_ctrl *p_ev_ctrl;

	available = 1024;

	if (!check_for_compaq_ROM(rom_start)) {
		return(1);
	}

	buffer = (u32*) evbuffer;

	if (!buffer)
		return(1);

	pFill = buffer;
	usedbytes = 0;

	p_EV_header = (struct ev_hrt_header *) pFill;

	ctrl = cpqhp_ctrl_list;

	/* The revision of this structure */
	rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available);
	if (rc)
		return(rc);

	/* The number of controllers */
	rc = add_byte( &pFill, 1, &usedbytes, &available);
	if (rc)
		return(rc);

	/* serialize each controller followed by its four resource lists */
	while (ctrl) {
		p_ev_ctrl = (struct ev_hrt_ctrl *) pFill;

		numCtrl++;

		/* The bus number */
		rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available);
		if (rc)
			return(rc);

		/* The device Number */
		rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
		if (rc)
			return(rc);

		/* The function Number */
		rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
		if (rc)
			return(rc);

		/* Skip the number of available entries */
		rc = add_dword( &pFill, 0, &usedbytes, &available);
		if (rc)
			return(rc);

		/* Figure out memory Available */
		resNode = ctrl->mem_head;
		loop = 0;
		while (resNode) {
			loop ++;

			/* base */
			rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
			if (rc)
				return(rc);

			/* length */
			rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
			if (rc)
				return(rc);

			resNode = resNode->next;
		}

		/* Fill in the number of entries */
		p_ev_ctrl->mem_avail = loop;

		/* Figure out prefetchable memory Available */
		resNode = ctrl->p_mem_head;
		loop = 0;
		while (resNode) {
			loop ++;

			/* base */
			rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
			if (rc)
				return(rc);

			/* length */
			rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
			if (rc)
				return(rc);

			resNode = resNode->next;
		}

		/* Fill in the number of entries */
		p_ev_ctrl->p_mem_avail = loop;

		/* Figure out IO Available */
		resNode = ctrl->io_head;
		loop = 0;
		while (resNode) {
			loop ++;

			/* base */
			rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
			if (rc)
				return(rc);

			/* length */
			rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
			if (rc)
				return(rc);

			resNode = resNode->next;
		}

		/* Fill in the number of entries */
		p_ev_ctrl->io_avail = loop;

		/* Figure out bus Available */
		resNode = ctrl->bus_head;
		loop = 0;
		while (resNode) {
			loop ++;

			/* base */
			rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
			if (rc)
				return(rc);

			/* length */
			rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
			if (rc)
				return(rc);

			resNode = resNode->next;
		}

		/* Fill in the number of entries */
		p_ev_ctrl->bus_avail = loop;

		ctrl = ctrl->next;
	}

	p_EV_header->num_of_ctrl = numCtrl;

	/* Now store the EV */

	temp_dword = usedbytes;

	rc = access_EV(WRITE_EV, "CQTHPS", (u8*) buffer, &temp_dword);

	dbg("usedbytes = 0x%x, length = 0x%x\n", usedbytes, temp_dword);

	evbuffer_length = temp_dword;

	if (rc) {
		err(msg_unable_to_save);
		return(1);
	}

	return(0);
}


/* Record the int15 entry point inside the mapped ROM and initialize the
 * lock that serializes BIOS calls. */
void compaq_nvram_init (void __iomem *rom_start)
{
	if (rom_start) {
		compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
	}
	dbg("int15 entry = %p\n", compaq_int15_entry_point);

	/* initialize our int15 lock */
	spin_lock_init(&int15_lock);
}


/*
 * Parse the NVRAM hotplug resource table for the given controller and
 * populate its mem/p_mem/io/bus resource lists.  Returns 0 on success,
 * 1 when no usable saved data exists, 2 when the table is truncated
 * (every pointer advance is bounds-checked against evbuffer_length).
 */
int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
{
	u8 bus, device, function;
	u8 nummem, numpmem, numio, numbus;
	u32 rc;
	u8 *p_byte;
	struct pci_resource *mem_node;
	struct pci_resource *p_mem_node;
	struct pci_resource *io_node;
	struct pci_resource *bus_node;
	struct ev_hrt_ctrl *p_ev_ctrl;
	struct ev_hrt_header *p_EV_header;

	if (!evbuffer_init) {
		/* Read the resource list information in from NVRAM */
		if (load_HRT(rom_start))
			memset (evbuffer, 0, 1024);

		evbuffer_init = 1;
	}

	/* If we saved information in NVRAM, use it now */
	p_EV_header = (struct ev_hrt_header *) evbuffer;

	/* The following code is for systems where version 1.0 of this
	 * driver has been loaded, but doesn't support the hardware.
	 * In that case, the driver would incorrectly store something
	 * in NVRAM.
	 */
	if ((p_EV_header->Version == 2) ||
	    ((p_EV_header->Version == 1) && !ctrl->push_flag)) {
		p_byte = &(p_EV_header->next);

		p_ev_ctrl = (struct ev_hrt_ctrl *) &(p_EV_header->next);

		p_byte += 3;

		if (p_byte > ((u8*)p_EV_header + evbuffer_length))
			return 2;

		bus = p_ev_ctrl->bus;
		device = p_ev_ctrl->device;
		function = p_ev_ctrl->function;

		/* walk entries until we find the one for this controller */
		while ((bus != ctrl->bus) ||
		       (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
		       (function != PCI_FUNC(ctrl->pci_dev->devfn))) {
			nummem = p_ev_ctrl->mem_avail;
			numpmem = p_ev_ctrl->p_mem_avail;
			numio = p_ev_ctrl->io_avail;
			numbus = p_ev_ctrl->bus_avail;

			p_byte += 4;

			if (p_byte > ((u8*)p_EV_header + evbuffer_length))
				return 2;

			/* Skip forward to the next entry */
			p_byte += (nummem + numpmem + numio + numbus) * 8;

			if (p_byte > ((u8*)p_EV_header + evbuffer_length))
				return 2;

			p_ev_ctrl = (struct ev_hrt_ctrl *) p_byte;

			p_byte += 3;

			if (p_byte > ((u8*)p_EV_header + evbuffer_length))
				return 2;

			bus = p_ev_ctrl->bus;
			device = p_ev_ctrl->device;
			function = p_ev_ctrl->function;
		}

		nummem = p_ev_ctrl->mem_avail;
		numpmem = p_ev_ctrl->p_mem_avail;
		numio = p_ev_ctrl->io_avail;
		numbus = p_ev_ctrl->bus_avail;

		p_byte += 4;

		if (p_byte > ((u8*)p_EV_header + evbuffer_length))
			return 2;

		/* rebuild the four resource lists from the saved pairs */
		while (nummem--) {
			mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);

			if (!mem_node)
				break;

			mem_node->base = *(u32*)p_byte;
			dbg("mem base = %8.8x\n",mem_node->base);
			p_byte += 4;

			if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
				kfree(mem_node);
				return 2;
			}

			mem_node->length = *(u32*)p_byte;
			dbg("mem length = %8.8x\n",mem_node->length);
			p_byte += 4;

			if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
				kfree(mem_node);
				return 2;
			}

			mem_node->next = ctrl->mem_head;
			ctrl->mem_head = mem_node;
		}

		while (numpmem--) {
			p_mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);

			if (!p_mem_node)
				break;

			p_mem_node->base = *(u32*)p_byte;
			dbg("pre-mem base = %8.8x\n",p_mem_node->base);
			p_byte += 4;

			if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
				kfree(p_mem_node);
				return 2;
			}

			p_mem_node->length = *(u32*)p_byte;
			dbg("pre-mem length = %8.8x\n",p_mem_node->length);
			p_byte += 4;

			if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
				kfree(p_mem_node);
				return 2;
			}

			p_mem_node->next = ctrl->p_mem_head;
			ctrl->p_mem_head = p_mem_node;
		}

		while (numio--) {
			io_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);

			if (!io_node)
				break;

			io_node->base = *(u32*)p_byte;
			dbg("io base = %8.8x\n",io_node->base);
			p_byte += 4;

			if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
				kfree(io_node);
				return 2;
			}

			io_node->length = *(u32*)p_byte;
			dbg("io length = %8.8x\n",io_node->length);
			p_byte += 4;

			if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
				kfree(io_node);
				return 2;
			}

			io_node->next = ctrl->io_head;
			ctrl->io_head = io_node;
		}

		while (numbus--) {
			bus_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);

			if (!bus_node)
				break;

			bus_node->base = *(u32*)p_byte;
			p_byte += 4;

			if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
				kfree(bus_node);
				return 2;
			}

			bus_node->length = *(u32*)p_byte;
			p_byte += 4;

			if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
				kfree(bus_node);
				return 2;
			}

			bus_node->next = ctrl->bus_head;
			ctrl->bus_head = bus_node;
		}

		/* If all of the following fail, we don't have any resources for
		 * hot plug add
		 */
		rc = 1;
		rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
		rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
		rc &= cpqhp_resource_sort_and_combine(&(ctrl->io_head));
		rc &= cpqhp_resource_sort_and_combine(&(ctrl->bus_head));

		if (rc)
			return(rc);
	} else {
		if ((evbuffer[0] != 0) && (!ctrl->push_flag))
			return 1;
	}

	return 0;
}


/* Write the current resource lists back to NVRAM if they were ever read.
 * Returns -ENODEV without a ROM mapping, otherwise store_HRT()'s result. */
int compaq_nvram_store (void __iomem *rom_start)
{
	int rc = 1;

	if (rom_start == NULL)
		return -ENODEV;

	if (evbuffer_init) {
		rc = store_HRT(rom_start);
		if (rc) {
			err(msg_unable_to_save);
		}
	}
	return rc;
}
gpl-2.0
scotthartbti/samsung_kernel_d2att
drivers/parisc/wax.c
14611
3227
/* * WAX Device Driver * * (c) Copyright 2000 The Puffin Group Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * by Helge Deller <deller@gmx.de> */ #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/types.h> #include <asm/io.h> #include <asm/hardware.h> #include "gsc.h" #define WAX_GSC_IRQ 7 /* Hardcoded Interrupt for GSC */ static void wax_choose_irq(struct parisc_device *dev, void *ctrl) { int irq; switch (dev->id.sversion) { case 0x73: irq = 1; break; /* i8042 General */ case 0x8c: irq = 6; break; /* Serial */ case 0x90: irq = 10; break; /* EISA */ default: return; /* Unknown */ } gsc_asic_assign_irq(ctrl, irq, &dev->irq); switch (dev->id.sversion) { case 0x73: irq = 2; break; /* i8042 High-priority */ case 0x90: irq = 0; break; /* EISA NMI */ default: return; /* No secondary IRQ */ } gsc_asic_assign_irq(ctrl, irq, &dev->aux_irq); } static void __init wax_init_irq(struct gsc_asic *wax) { unsigned long base = wax->hpa; /* Wax-off */ gsc_writel(0x00000000, base+OFFSET_IMR); /* clear pending interrupts */ gsc_readl(base+OFFSET_IRR); /* We're not really convinced we want to reset the onboard * devices. Firmware does it for us... 
*/ /* Resets */ // gsc_writel(0xFFFFFFFF, base+0x1000); /* HIL */ // gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */ } static int __init wax_init_chip(struct parisc_device *dev) { struct gsc_asic *wax; struct parisc_device *parent; struct gsc_irq gsc_irq; int ret; wax = kzalloc(sizeof(*wax), GFP_KERNEL); if (!wax) return -ENOMEM; wax->name = "wax"; wax->hpa = dev->hpa.start; wax->version = 0; /* gsc_readb(wax->hpa+WAX_VER); */ printk(KERN_INFO "%s at 0x%lx found.\n", wax->name, wax->hpa); /* Stop wax hissing for a bit */ wax_init_irq(wax); /* the IRQ wax should use */ dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); kfree(wax); return -EBUSY; } wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax); if (ret < 0) { kfree(wax); return ret; } /* enable IRQ's for devices below WAX */ gsc_writel(wax->eim, wax->hpa + OFFSET_IAR); /* Done init'ing, register this driver */ ret = gsc_common_setup(dev, wax); if (ret) { kfree(wax); return ret; } gsc_fixup_irqs(dev, wax, wax_choose_irq); /* On 715-class machines, Wax EISA is a sibling of Wax, not a child. */ parent = parisc_parent(dev); if (parent->id.hw_type != HPHW_IOA) { gsc_fixup_irqs(parent, wax, wax_choose_irq); } return ret; } static struct parisc_device_id wax_tbl[] = { { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008e }, { 0, } }; MODULE_DEVICE_TABLE(parisc, wax_tbl); struct parisc_driver wax_driver = { .name = "wax", .id_table = wax_tbl, .probe = wax_init_chip, };
gpl-2.0
gdachs/linux
drivers/amlogic/wifi/rtl8811AU/hal/OUTSRC/rtl8812a/HalHWImg8812A_MAC.c
20
8504
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
*
******************************************************************************/

#include "../odm_precomp.h"

#if (RTL8812A_SUPPORT == 1)

/*
 * Evaluate the positive-match condition word pair embedded in the register
 * table: Condition1 encodes board/interface/package/platform/cut selectors
 * (0 = wildcard), Condition2 encodes the expected GLNA/GPA/ALNA/APA types.
 * Returns TRUE when the running configuration matches.
 */
static BOOLEAN
CheckPositive(
	IN  PDM_ODM_T	pDM_Odm,
	IN  const u4Byte	Condition1,
	IN  const u4Byte	Condition2
	)
{
	/* board capability bits of the current hardware */
	u1Byte	_GLNA = (pDM_Odm->BoardType & BIT4) >> 4;
	u1Byte	_GPA  = (pDM_Odm->BoardType & BIT3) >> 3;
	u1Byte	_ALNA = (pDM_Odm->BoardType & BIT7) >> 7;
	u1Byte	_APA  = (pDM_Odm->BoardType & BIT6) >> 6;

	/* selectors decoded from Condition1 */
	u1Byte	cBoard     = (u1Byte)((Condition1 & bMaskByte0) >> 0);
	u1Byte	cInterface = (u1Byte)((Condition1 & (BIT11|BIT10|BIT9|BIT8)) >> 8);
	u1Byte	cPackage   = (u1Byte)((Condition1 & (BIT15|BIT14|BIT13|BIT12)) >> 12);
	u1Byte	cPlatform  = (u1Byte)((Condition1 & (BIT19|BIT18|BIT17|BIT16)) >> 16);
	u1Byte	cCut       = (u1Byte)((Condition1 & (BIT27|BIT26|BIT25|BIT24)) >> 24);

	/* per-amplifier flags inside the board selector */
	u1Byte	cGLNA = (cBoard & BIT0) >> 0;
	u1Byte	cGPA  = (cBoard & BIT1) >> 1;
	u1Byte	cALNA = (cBoard & BIT2) >> 2;
	u1Byte	cAPA  = (cBoard & BIT3) >> 3;

	/* expected amplifier types from Condition2 */
	u1Byte	cTypeGLNA = (u1Byte)((Condition2 & bMaskByte0) >> 0);
	u1Byte	cTypeGPA  = (u1Byte)((Condition2 & bMaskByte1) >> 8);
	u1Byte	cTypeALNA = (u1Byte)((Condition2 & bMaskByte2) >> 16);
	u1Byte	cTypeAPA  = (u1Byte)((Condition2 & bMaskByte3) >> 24);

	ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> [8812A] CheckPositive(0x%X 0x%X)\n", Condition1, Condition2));
	ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, (" (Platform, Interface) = (0x%X, 0x%X)", pDM_Odm->SupportPlatform, pDM_Odm->SupportInterface));
	ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, (" (Board, Package) = (0x%X, 0x%X\n", pDM_Odm->BoardType, pDM_Odm->PackageType));

	/* platform / interface / cut mismatch (0 acts as a wildcard) */
	if ((cPlatform != pDM_Odm->SupportPlatform && cPlatform != 0) ||
		(cInterface != pDM_Odm->SupportInterface && cInterface != 0) ||
		(cCut != pDM_Odm->CutVersion && cCut != 0))
		return FALSE;

	if (cPackage != pDM_Odm->PackageType && cPackage != 0)
		return FALSE;

	/* at least one amplifier selector must match both presence and type */
	if (((_GLNA != 0) && (_GLNA == cGLNA) && (cTypeGLNA == pDM_Odm->TypeGLNA)) ||
		((_GPA != 0) && (_GPA == cGPA ) && (cTypeGPA == pDM_Odm->TypeGPA )) ||
		((_ALNA != 0) && (_ALNA == cALNA) && (cTypeALNA == pDM_Odm->TypeALNA)) ||
		((_APA != 0) && (_APA == cAPA ) && (cTypeAPA == pDM_Odm->TypeAPA )))
		return TRUE;
	else
		return FALSE;
}

/* Negative-match conditions are not used for this table: always TRUE. */
static BOOLEAN
CheckNegative(
	IN  PDM_ODM_T	pDM_Odm,
	IN  const u4Byte	Condition1,
	IN  const u4Byte	Condition2
	)
{
	return TRUE;
}

/******************************************************************************
*                           MAC_REG.TXT
******************************************************************************/

/* (offset, value) pairs; entries >= 0x40000000 are conditional-branch
 * markers interpreted by ODM_ReadAndConfig_MP_8812A_MAC_REG() below. */
u4Byte Array_MP_8812A_MAC_REG[] = {
		0x010, 0x0000000C,
		0x025, 0x0000000F,
		0x072, 0x00000000,
		0x428, 0x0000000A,
		0x429, 0x00000010,
		0x430, 0x00000000,
		0x431, 0x00000000,
		0x432, 0x00000000,
		0x433, 0x00000001,
		0x434, 0x00000004,
		0x435, 0x00000005,
		0x436, 0x00000007,
		0x437, 0x00000008,
		0x43C, 0x00000004,
		0x43D, 0x00000005,
		0x43E, 0x00000007,
		0x43F, 0x00000008,
		0x440, 0x0000005D,
		0x441, 0x00000001,
		0x442, 0x00000000,
		0x444, 0x00000010,
		0x445, 0x00000000,
		0x446, 0x00000000,
		0x447, 0x00000000,
		0x448, 0x00000000,
		0x449, 0x000000F0,
		0x44A, 0x0000000F,
		0x44B, 0x0000003E,
		0x44C, 0x00000010,
		0x44D, 0x00000000,
		0x44E, 0x00000000,
		0x44F, 0x00000000,
		0x450, 0x00000000,
		0x451, 0x000000F0,
		0x452, 0x0000000F,
		0x453, 0x00000000,
		0x45B, 0x00000080,
		0x460, 0x00000066,
		0x461, 0x00000066,
		0x4C8, 0x000000FF,
		0x4C9, 0x00000008,
		0x4CC, 0x000000FF,
		0x4CD, 0x000000FF,
		0x4CE, 0x00000001,
		0x500, 0x00000026,
		0x501, 0x000000A2,
		0x502, 0x0000002F,
		0x503, 0x00000000,
		0x504, 0x00000028,
		0x505, 0x000000A3,
		0x506, 0x0000005E,
		0x507, 0x00000000,
		0x508, 0x0000002B,
		0x509, 0x000000A4,
		0x50A, 0x0000005E,
		0x50B, 0x00000000,
		0x50C, 0x0000004F,
		0x50D, 0x000000A4,
		0x50E, 0x00000000,
		0x50F, 0x00000000,
		0x512, 0x0000001C,
		0x514, 0x0000000A,
		0x516, 0x0000000A,
		0x525, 0x0000004F,
		0x550, 0x00000010,
		0x551, 0x00000010,
		0x559, 0x00000002,
		0x55C, 0x00000050,
		0x55D, 0x000000FF,
		0x604, 0x00000001,
		0x605, 0x00000030,
		0x607, 0x00000003,
		0x608, 0x0000000E,
		0x609, 0x0000002A,
		0x620, 0x000000FF,
		0x621, 0x000000FF,
		0x622, 0x000000FF,
		0x623, 0x000000FF,
		0x624, 0x000000FF,
		0x625, 0x000000FF,
		0x626, 0x000000FF,
		0x627, 0x000000FF,
		0x638, 0x00000050,
		0x63C, 0x0000000A,
		0x63D, 0x0000000A,
		0x63E, 0x0000000E,
		0x63F, 0x0000000E,
		0x640, 0x00000080,
		0x642, 0x00000040,
		0x643, 0x00000000,
		0x652, 0x000000C8,
		0x66E, 0x00000005,
		0x700, 0x00000021,
		0x701, 0x00000043,
		0x702, 0x00000065,
		0x703, 0x00000087,
		0x708, 0x00000021,
		0x709, 0x00000043,
		0x70A, 0x00000065,
		0x70B, 0x00000087,
		0x718, 0x00000040,
};

/*
 * Walk Array_MP_8812A_MAC_REG and program each (offset, value) pair via
 * odm_ConfigMAC_8812A().  Entries >= 0x40000000 start an IF/ELSE/ENDIF
 * branch: CheckPositive()/CheckNegative() decide whether the following
 * pairs are applied or skipped until the matching ENDIF marker.
 */
void
ODM_ReadAndConfig_MP_8812A_MAC_REG(
	IN   PDM_ODM_T  pDM_Odm
	)
{
	#define READ_NEXT_PAIR(v1, v2, i) do { i += 2; v1 = Array[i]; v2 = Array[i+1]; } while(0)
	#define COND_ELSE  2
	#define COND_ENDIF 3

	u4Byte     i         = 0;
	u4Byte     ArrayLen  = sizeof(Array_MP_8812A_MAC_REG)/sizeof(u4Byte);
	pu4Byte    Array     = Array_MP_8812A_MAC_REG;

	ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("===> ODM_ReadAndConfig_MP_8812A_MAC_REG\n"));

	for (i = 0; i < ArrayLen; i += 2 )
	{
		u4Byte v1 = Array[i];
		u4Byte v2 = Array[i+1];

		// This (offset, data) pair doesn't care the condition.
		if ( v1 < 0x40000000 )
		{
			odm_ConfigMAC_8812A(pDM_Odm, v1, (u1Byte)v2);
			continue;
		}
		else
		{
			// This line is the beginning of branch.
			BOOLEAN bMatched = TRUE;
			u1Byte  cCond  = (u1Byte)((v1 & (BIT29|BIT28)) >> 28);

			if (cCond == COND_ELSE) { // ELSE, ENDIF
				bMatched = TRUE;
				READ_NEXT_PAIR(v1, v2, i);
			} else if ( ! CheckPositive(pDM_Odm, v1, v2) ) {
				bMatched = FALSE;
				READ_NEXT_PAIR(v1, v2, i);
				READ_NEXT_PAIR(v1, v2, i);
			} else {
				READ_NEXT_PAIR(v1, v2, i);
				if ( ! CheckNegative(pDM_Odm, v1, v2) )
					bMatched = FALSE;
				else
					bMatched = TRUE;
				READ_NEXT_PAIR(v1, v2, i);
			}

			if ( bMatched == FALSE )
			{
				// Condition isn't matched. Discard the following (offset, data) pairs.
				while (v1 < 0x40000000 && i < ArrayLen -2)
					READ_NEXT_PAIR(v1, v2, i);

				i -= 2; // prevent from for-loop += 2
			}
			else // Configure matched pairs and skip to end of if-else.
			{
				while (v1 < 0x40000000 && i < ArrayLen-2) {
					odm_ConfigMAC_8812A(pDM_Odm, v1, (u1Byte)v2);
					READ_NEXT_PAIR(v1, v2, i);
				}

				// Keeps reading until ENDIF.
				cCond = (u1Byte)((v1 & (BIT29|BIT28)) >> 28);
				while (cCond != COND_ENDIF && i < ArrayLen-2) {
					READ_NEXT_PAIR(v1, v2, i);
					cCond = (u1Byte)((v1 & (BIT29|BIT28)) >> 28);
				}
			}
		}
	}
}

/* Table version stamp for this generated register image. */
u4Byte
ODM_GetVersion_MP_8812A_MAC_REG()
{
	   return 40;
}

#endif // end of HWIMG_SUPPORT
gpl-2.0
holyangel/M8-GPE_M
drivers/platform/msm/avtimer.c
20
14718
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/uaccess.h> #include <linux/device.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/avtimer.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/wait.h> #include <linux/sched.h> #include <mach/qdsp6v2/apr.h> #include <sound/q6core.h> #define DEVICE_NAME "avtimer" #define TIMEOUT_MS 1000 #define CORE_CLIENT 1 #define TEMP_PORT ((CORE_CLIENT << 8) | 0x0001) #define SSR_WAKETIME 1000 #define Q6_READY_RETRY 250 #define Q6_READY_MAX_RETRIES 40 #define AVCS_CMD_REMOTE_AVTIMER_VOTE_REQUEST 0x00012914 #define AVCS_CMD_RSP_REMOTE_AVTIMER_VOTE_REQUEST 0x00012915 #define AVCS_CMD_REMOTE_AVTIMER_RELEASE_REQUEST 0x00012916 #define AVTIMER_REG_CNT 2 struct adsp_avt_timer { struct apr_hdr hdr; union { char client_name[8]; u32 avtimer_handle; }; } __packed; static int major; struct avtimer_t { struct apr_svc *core_handle_q; struct cdev myc; struct class *avtimer_class; struct mutex avtimer_lock; int avtimer_open_cnt; struct dev_avtimer_data avtimer_pdata; struct delayed_work ssr_dwork; wait_queue_head_t adsp_resp_wait; int enable_timer_resp_recieved; int timer_handle; void __iomem *p_avtimer_msw; void __iomem *p_avtimer_lsw; uint32_t clk_div; atomic_t adsp_ready; int num_retries; }; static struct avtimer_t avtimer; static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv) { uint32_t 
*payload1; pr_debug("%s: core msg: payload len = %u, apr resp opcode = 0x%X\n", __func__, data->payload_size, data->opcode); if (!data) { pr_err("%s: Invalid params\n", __func__); return -EINVAL; } switch (data->opcode) { case APR_BASIC_RSP_RESULT:{ if (!data->payload_size) { pr_err("%s: APR_BASIC_RSP_RESULT No Payload ", __func__); return 0; } payload1 = data->payload; switch (payload1[0]) { case AVCS_CMD_REMOTE_AVTIMER_RELEASE_REQUEST: pr_debug("%s: Cmd = TIMER RELEASE status[0x%x]\n", __func__, payload1[1]); break; default: pr_err("Invalid cmd rsp[0x%x][0x%x]\n", payload1[0], payload1[1]); break; } break; } case RESET_EVENTS:{ pr_debug("%s: Reset event received in AV timer\n", __func__); apr_reset(avtimer.core_handle_q); avtimer.core_handle_q = NULL; avtimer.avtimer_open_cnt = 0; atomic_set(&avtimer.adsp_ready, 0); schedule_delayed_work(&avtimer.ssr_dwork, msecs_to_jiffies(SSR_WAKETIME)); break; } case AVCS_CMD_RSP_REMOTE_AVTIMER_VOTE_REQUEST: payload1 = data->payload; pr_debug("%s: RSP_REMOTE_AVTIMER_VOTE_REQUEST handle %x\n", __func__, payload1[0]); avtimer.timer_handle = payload1[0]; avtimer.enable_timer_resp_recieved = 1; wake_up(&avtimer.adsp_resp_wait); break; default: pr_err("%s: Message adspcore svc: %d\n", __func__, data->opcode); break; } return 0; } int avcs_core_open(void) { if (!avtimer.core_handle_q) avtimer.core_handle_q = apr_register("ADSP", "CORE", aprv2_core_fn_q, TEMP_PORT, NULL); pr_debug("%s: Open_q %p\n", __func__, avtimer.core_handle_q); if (!avtimer.core_handle_q) { pr_err("%s: Unable to register CORE\n", __func__); return -EINVAL; } return 0; } EXPORT_SYMBOL(avcs_core_open); static int avcs_core_disable_avtimer(int timerhandle) { int rc = -EINVAL; struct adsp_avt_timer payload; if (!timerhandle) { pr_err("%s: Invalid timer handle\n", __func__); return -EINVAL; } memset(&payload, 0, sizeof(payload)); rc = avcs_core_open(); if (!rc && avtimer.core_handle_q) { payload.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, 
APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); payload.hdr.pkt_size = sizeof(struct adsp_avt_timer); payload.hdr.src_svc = avtimer.core_handle_q->id; payload.hdr.src_domain = APR_DOMAIN_APPS; payload.hdr.dest_domain = APR_DOMAIN_ADSP; payload.hdr.dest_svc = APR_SVC_ADSP_CORE; payload.hdr.src_port = TEMP_PORT; payload.hdr.dest_port = TEMP_PORT; payload.hdr.token = CORE_CLIENT; payload.hdr.opcode = AVCS_CMD_REMOTE_AVTIMER_RELEASE_REQUEST; payload.avtimer_handle = timerhandle; pr_debug("%s: disable avtimer opcode %x handle %x\n", __func__, payload.hdr.opcode, payload.avtimer_handle); rc = apr_send_pkt(avtimer.core_handle_q, (uint32_t *)&payload); if (rc < 0) pr_err("%s: Enable AVtimer failed op[0x%x]rc[%d]\n", __func__, payload.hdr.opcode, rc); else rc = 0; } return rc; } static int avcs_core_enable_avtimer(char *client_name) { int rc = -EINVAL, ret = -EINVAL; struct adsp_avt_timer payload; if (!client_name) { pr_err("%s: Invalid params\n", __func__); return -EINVAL; } memset(&payload, 0, sizeof(payload)); rc = avcs_core_open(); if (!rc && avtimer.core_handle_q) { avtimer.enable_timer_resp_recieved = 0; payload.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); payload.hdr.pkt_size = sizeof(struct adsp_avt_timer); payload.hdr.src_svc = avtimer.core_handle_q->id; payload.hdr.src_domain = APR_DOMAIN_APPS; payload.hdr.dest_domain = APR_DOMAIN_ADSP; payload.hdr.dest_svc = APR_SVC_ADSP_CORE; payload.hdr.src_port = TEMP_PORT; payload.hdr.dest_port = TEMP_PORT; payload.hdr.token = CORE_CLIENT; payload.hdr.opcode = AVCS_CMD_REMOTE_AVTIMER_VOTE_REQUEST; strlcpy(payload.client_name, client_name, sizeof(payload.client_name)); pr_debug("%s: enable avtimer opcode %x client name %s\n", __func__, payload.hdr.opcode, payload.client_name); rc = apr_send_pkt(avtimer.core_handle_q, (uint32_t *)&payload); if (rc < 0) { pr_err("%s: Enable AVtimer failed op[0x%x]rc[%d]\n", __func__, payload.hdr.opcode, rc); goto bail; } else rc = 0; ret = 
wait_event_timeout(avtimer.adsp_resp_wait, (avtimer.enable_timer_resp_recieved == 1), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout for Enable timer\n", __func__); rc = -ETIMEDOUT; } if (rc) avtimer.timer_handle = 0; } bail: return rc; } int avcs_core_disable_power_collapse(int enable) { int rc = 0; mutex_lock(&avtimer.avtimer_lock); if (enable) { if (avtimer.avtimer_open_cnt) { // Modified by Sumida Y. (2014/06/17) //--- from here ---// // Original Code //avtimer.avtimer_open_cnt++; // Modified Code avtimer.avtimer_open_cnt = 1; //--- up to here ---// pr_debug("%s: opened avtimer open count=%d\n", __func__, avtimer.avtimer_open_cnt); rc = 0; goto done; } rc = avcs_core_enable_avtimer("timer"); if (!rc) { avtimer.avtimer_open_cnt++; atomic_set(&avtimer.adsp_ready, 1); } } else { if (avtimer.avtimer_open_cnt > 0) { // Modified by Sumida Y. (2014/06/17) //--- from here ---// // Original Code //avtimer.avtimer_open_cnt--; // Modified Code avtimer.avtimer_open_cnt = 0; //--- up to here ---// if (!avtimer.avtimer_open_cnt) { rc = avcs_core_disable_avtimer( avtimer.timer_handle); avtimer.timer_handle = 0; } } } done: mutex_unlock(&avtimer.avtimer_lock); return rc; } EXPORT_SYMBOL(avcs_core_disable_power_collapse); static void reset_work(struct work_struct *work) { if (q6core_is_adsp_ready()) { avcs_core_disable_power_collapse(1); avtimer.num_retries = Q6_READY_MAX_RETRIES; return; } pr_debug("%s:Q6 not ready-retry after sometime\n", __func__); if (--avtimer.num_retries > 0) { schedule_delayed_work(&avtimer.ssr_dwork, msecs_to_jiffies(Q6_READY_RETRY)); } else { pr_err("%s: Q6 failed responding after multiple retries\n", __func__); avtimer.num_retries = Q6_READY_MAX_RETRIES; } } int avcs_core_query_timer(uint64_t *avtimer_tick) { uint32_t avtimer_msw = 0, avtimer_lsw = 0; uint32_t res = 0; uint64_t avtimer_tick_temp; if (!atomic_read(&avtimer.adsp_ready)) { pr_debug("%s:In SSR, return\n", __func__); return -ENETRESET; } avtimer_lsw = 
ioread32(avtimer.p_avtimer_lsw); avtimer_msw = ioread32(avtimer.p_avtimer_msw); avtimer_tick_temp = (uint64_t)((uint64_t)avtimer_msw << 32) | avtimer_lsw; res = do_div(avtimer_tick_temp, avtimer.clk_div); *avtimer_tick = avtimer_tick_temp; pr_debug("%s:Avtimer: msw: %u, lsw: %u, tick: %llu\n", __func__, avtimer_msw, avtimer_lsw, *avtimer_tick); return 0; } EXPORT_SYMBOL(avcs_core_query_timer); static int avtimer_open(struct inode *inode, struct file *file) { return avcs_core_disable_power_collapse(1); } static int avtimer_release(struct inode *inode, struct file *file) { return avcs_core_disable_power_collapse(0); } /* * ioctl call provides GET_AVTIMER */ static long avtimer_ioctl(struct file *file, unsigned int ioctl_num, unsigned long ioctl_param) { switch (ioctl_num) { case IOCTL_GET_AVTIMER_TICK: { uint32_t avtimer_msw_1st = 0, avtimer_lsw = 0; uint32_t avtimer_msw_2nd = 0; uint64_t avtimer_tick; do { avtimer_msw_1st = ioread32(avtimer.p_avtimer_msw); avtimer_lsw = ioread32(avtimer.p_avtimer_lsw); avtimer_msw_2nd = ioread32(avtimer.p_avtimer_msw); } while (avtimer_msw_1st != avtimer_msw_2nd); avtimer_lsw = avtimer_lsw/avtimer.clk_div; avtimer_tick = ((uint64_t) avtimer_msw_1st << 32) | avtimer_lsw; pr_debug("%s: AV Timer tick: msw: %x, lsw: %x time %llx\n", __func__, avtimer_msw_1st, avtimer_lsw, avtimer_tick); if (copy_to_user((void *) ioctl_param, &avtimer_tick, sizeof(avtimer_tick))) { pr_err("copy_to_user failed\n"); return -EFAULT; } } break; default: pr_err("%s: invalid cmd\n", __func__); return -EINVAL; } return 0; } static const struct file_operations avtimer_fops = { .unlocked_ioctl = avtimer_ioctl, .open = avtimer_open, .release = avtimer_release }; static int dev_avtimer_probe(struct platform_device *pdev) { int result = 0; dev_t dev = MKDEV(major, 0); struct device *device_handle; struct resource *reg_lsb = NULL, *reg_msb = NULL; uint32_t clk_div_val; if (!pdev) { pr_err("%s: Invalid params\n", __func__); return -EINVAL; } reg_lsb = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "avtimer_lsb_addr"); if (!reg_lsb) { dev_err(&pdev->dev, "%s: Looking up %s property", "avtimer_lsb_addr", __func__); return -EINVAL; } reg_msb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "avtimer_msb_addr"); if (!reg_msb) { dev_err(&pdev->dev, "%s: Looking up %s property", "avtimer_msb_addr", __func__); return -EINVAL; } INIT_DELAYED_WORK(&avtimer.ssr_dwork, reset_work); avtimer.p_avtimer_lsw = devm_ioremap_nocache(&pdev->dev, reg_lsb->start, resource_size(reg_lsb)); if (!avtimer.p_avtimer_lsw) { dev_err(&pdev->dev, "%s: ioremap failed for lsb avtimer register", __func__); return -ENOMEM; } avtimer.p_avtimer_msw = devm_ioremap_nocache(&pdev->dev, reg_msb->start, resource_size(reg_msb)); if (!avtimer.p_avtimer_msw) { dev_err(&pdev->dev, "%s: ioremap failed for msb avtimer register", __func__); goto unmap; } avtimer.num_retries = Q6_READY_MAX_RETRIES; /* get the device number */ if (major) result = register_chrdev_region(dev, 1, DEVICE_NAME); else { result = alloc_chrdev_region(&dev, 0, 1, DEVICE_NAME); major = MAJOR(dev); } if (result < 0) { dev_err(&pdev->dev, "%s: Registering avtimer device failed\n", __func__); goto unmap; } avtimer.avtimer_class = class_create(THIS_MODULE, "avtimer"); if (IS_ERR(avtimer.avtimer_class)) { result = PTR_ERR(avtimer.avtimer_class); dev_err(&pdev->dev, "%s: Error creating avtimer class: %d\n", __func__, result); goto unregister_chrdev_region; } cdev_init(&avtimer.myc, &avtimer_fops); result = cdev_add(&avtimer.myc, dev, 1); if (result < 0) { dev_err(&pdev->dev, "%s: Registering file operations failed\n", __func__); goto class_destroy; } device_handle = device_create(avtimer.avtimer_class, NULL, avtimer.myc.dev, NULL, "avtimer"); if (IS_ERR(device_handle)) { result = PTR_ERR(device_handle); pr_err("%s: device_create failed: %d\n", __func__, result); goto class_destroy; } init_waitqueue_head(&avtimer.adsp_resp_wait); mutex_init(&avtimer.avtimer_lock); avtimer.avtimer_open_cnt 
= 0; pr_debug("%s: Device create done for avtimer major=%d\n", __func__, major); if (of_property_read_u32(pdev->dev.of_node, "qcom,clk_div", &clk_div_val)) avtimer.clk_div = 1; else avtimer.clk_div = clk_div_val; pr_debug("avtimer.clk_div = %d\n", avtimer.clk_div); return 0; class_destroy: class_destroy(avtimer.avtimer_class); unregister_chrdev_region: unregister_chrdev_region(MKDEV(major, 0), 1); unmap: if (avtimer.p_avtimer_lsw) devm_iounmap(&pdev->dev, avtimer.p_avtimer_lsw); if (avtimer.p_avtimer_msw) devm_iounmap(&pdev->dev, avtimer.p_avtimer_msw); avtimer.p_avtimer_lsw = NULL; avtimer.p_avtimer_msw = NULL; return result; } static int __devexit dev_avtimer_remove(struct platform_device *pdev) { pr_debug("%s: dev_avtimer_remove\n", __func__); if (avtimer.p_avtimer_lsw) devm_iounmap(&pdev->dev, avtimer.p_avtimer_lsw); if (avtimer.p_avtimer_msw) devm_iounmap(&pdev->dev, avtimer.p_avtimer_msw); device_destroy(avtimer.avtimer_class, avtimer.myc.dev); cdev_del(&avtimer.myc); class_destroy(avtimer.avtimer_class); unregister_chrdev_region(MKDEV(major, 0), 1); return 0; } static const struct of_device_id avtimer_machine_of_match[] = { { .compatible = "qcom,avtimer", }, {}, }; static struct platform_driver dev_avtimer_driver = { .probe = dev_avtimer_probe, .remove = dev_avtimer_remove, .driver = { .name = "dev_avtimer", .of_match_table = avtimer_machine_of_match, }, }; static int __init avtimer_init(void) { s32 rc; rc = platform_driver_register(&dev_avtimer_driver); if (IS_ERR_VALUE(rc)) { pr_err("%s: platform_driver_register failed\n", __func__); goto error_platform_driver; } pr_debug("%s: dev_avtimer_init : done\n", __func__); return 0; error_platform_driver: pr_err("%s: encounterd error\n", __func__); return rc; } static void __exit avtimer_exit(void) { pr_debug("%s: avtimer_exit\n", __func__); platform_driver_unregister(&dev_avtimer_driver); } module_init(avtimer_init); module_exit(avtimer_exit); MODULE_DESCRIPTION("avtimer driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
dorapanda/qemu-2.0.0-with-fm3
roms/ipxe/src/drivers/net/via-velocity.c
20
54582
/************************************************************************** * via-velocity.c: Etherboot device driver for the VIA 6120 Gigabit * Changes for Etherboot port: * Copyright (c) 2006 by Timothy Legge <tlegge@rogers.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * This driver is based on: * via-velocity.c: VIA Velocity VT6120, VT6122 Ethernet driver * The changes are (c) Copyright 2004, Red Hat Inc. * <alan@redhat.com> * Additional fixes and clean up: Francois Romieu * * Original code: * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * Author: Chuang Liang-Shing, AJ Jiang * * Linux Driver Version 2.6.15.4 * * REVISION HISTORY: * ================ * * v1.0 03-06-2006 timlegge Initial port of Linux driver * * Indent Options: indent -kr -i8 *************************************************************************/ FILE_LICENCE ( GPL2_OR_LATER ); #include "etherboot.h" #include "nic.h" #include <ipxe/pci.h> #include <ipxe/ethernet.h> #include "via-velocity.h" typedef int pci_power_t; #define PCI_D0 ((int) 0) #define PCI_D1 ((int) 1) #define PCI_D2 ((int) 2) #define PCI_D3hot ((int) 3) #define PCI_D3cold ((int) 4) #define PCI_POWER_ERROR ((int) -1) /* Condensed operations for readability. 
*/ #define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr)) #define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr)) //FIXME: Move to pci.c int pci_set_power_state(struct pci_device *dev, int state); /* FIXME: Move BASE to the private structure */ static u32 BASE; /* NIC specific static variables go here */ #define VELOCITY_PARAM(N,D) \ static const int N[MAX_UNITS]=OPTION_DEFAULT; /* MODULE_PARM(N, "1-" __MODULE_STRING(MAX_UNITS) "i");\ MODULE_PARM_DESC(N, D); */ VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors"); VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors"); #define VLAN_ID_MIN 0 #define VLAN_ID_MAX 4095 #define VLAN_ID_DEF 0 /* VID_setting[] is used for setting the VID of NIC. 0: default VID. 1-4094: other VIDs. */ VELOCITY_PARAM(VID_setting, "802.1Q VLAN ID"); #define RX_THRESH_MIN 0 #define RX_THRESH_MAX 3 #define RX_THRESH_DEF 0 /* rx_thresh[] is used for controlling the receive fifo threshold. 0: indicate the rxfifo threshold is 128 bytes. 1: indicate the rxfifo threshold is 512 bytes. 2: indicate the rxfifo threshold is 1024 bytes. 3: indicate the rxfifo threshold is store & forward. */ VELOCITY_PARAM(rx_thresh, "Receive fifo threshold"); #define DMA_LENGTH_MIN 0 #define DMA_LENGTH_MAX 7 #define DMA_LENGTH_DEF 0 /* DMA_length[] is used for controlling the DMA length 0: 8 DWORDs 1: 16 DWORDs 2: 32 DWORDs 3: 64 DWORDs 4: 128 DWORDs 5: 256 DWORDs 6: SF(flush till emply) 7: SF(flush till emply) */ VELOCITY_PARAM(DMA_length, "DMA length"); #define TAGGING_DEF 0 /* enable_tagging[] is used for enabling 802.1Q VID tagging. 0: disable VID seeting(default). 1: enable VID setting. */ VELOCITY_PARAM(enable_tagging, "Enable 802.1Q tagging"); #define IP_ALIG_DEF 0 /* IP_byte_align[] is used for IP header DWORD byte aligned 0: indicate the IP header won't be DWORD byte aligned.(Default) . 1: indicate the IP header will be DWORD byte aligned. 
In some environment, the IP header should be DWORD byte aligned, or the packet will be droped when we receive it. (eg: IPVS) */ VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned"); #define TX_CSUM_DEF 1 /* txcsum_offload[] is used for setting the checksum offload ability of NIC. (We only support RX checksum offload now) 0: disable csum_offload[checksum offload 1: enable checksum offload. (Default) */ VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload"); #define FLOW_CNTL_DEF 1 #define FLOW_CNTL_MIN 1 #define FLOW_CNTL_MAX 5 /* flow_control[] is used for setting the flow control ability of NIC. 1: hardware deafult - AUTO (default). Use Hardware default value in ANAR. 2: enable TX flow control. 3: enable RX flow control. 4: enable RX/TX flow control. 5: disable */ VELOCITY_PARAM(flow_control, "Enable flow control ability"); #define MED_LNK_DEF 0 #define MED_LNK_MIN 0 #define MED_LNK_MAX 4 /* speed_duplex[] is used for setting the speed and duplex mode of NIC. 0: indicate autonegotiation for both speed and duplex mode 1: indicate 100Mbps half duplex mode 2: indicate 100Mbps full duplex mode 3: indicate 10Mbps half duplex mode 4: indicate 10Mbps full duplex mode Note: if EEPROM have been set to the force mode, this option is ignored by driver. */ VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); #define VAL_PKT_LEN_DEF 0 /* ValPktLen[] is used for setting the checksum offload ability of NIC. 0: Receive frame with invalid layer 2 length (Default) 1: Drop frame with invalid layer 2 length */ VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame"); #define WOL_OPT_DEF 0 #define WOL_OPT_MIN 0 #define WOL_OPT_MAX 7 /* wol_opts[] is used for controlling wake on lan behavior. 0: Wake up if recevied a magic packet. (Default) 1: Wake up if link status is on/off. 2: Wake up if recevied an arp packet. 4: Wake up if recevied any unicast packet. Those value can be sumed up to support more than one option. 
*/ VELOCITY_PARAM(wol_opts, "Wake On Lan options"); #define INT_WORKS_DEF 20 #define INT_WORKS_MIN 10 #define INT_WORKS_MAX 64 VELOCITY_PARAM(int_works, "Number of packets per interrupt services"); /* The descriptors for this card are required to be aligned on 64 byte boundaries. As the align attribute does not guarantee alignment greater than the alignment of the start address (which for Etherboot is 16 bytes of alignment) it requires some extra steps. Add 64 to the size of the array and the init_ring adjusts the alignment */ /* Define the TX Descriptor */ static u8 tx_ring[TX_DESC_DEF * sizeof(struct tx_desc) + 64]; /* Create a static buffer of size PKT_BUF_SZ for each TX Descriptor. All descriptors point to a part of this buffer */ static u8 txb[(TX_DESC_DEF * PKT_BUF_SZ) + 64]; /* Define the RX Descriptor */ static u8 rx_ring[RX_DESC_DEF * sizeof(struct rx_desc) + 64]; /* Create a static buffer of size PKT_BUF_SZ for each RX Descriptor All descriptors point to a part of this buffer */ static u8 rxb[(RX_DESC_DEF * PKT_BUF_SZ) + 64]; static void velocity_init_info(struct pci_device *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info); static int velocity_get_pci_info(struct velocity_info *, struct pci_device *pdev); static int velocity_open(struct nic *nic, struct pci_device *pci); static int velocity_soft_reset(struct velocity_info *vptr); static void velocity_init_cam_filter(struct velocity_info *vptr); static void mii_init(struct velocity_info *vptr, u32 mii_status); static u32 velocity_get_opt_media_mode(struct velocity_info *vptr); static void velocity_print_link_status(struct velocity_info *vptr); static void safe_disable_mii_autopoll(struct mac_regs *regs); static void enable_flow_control_ability(struct velocity_info *vptr); static void enable_mii_autopoll(struct mac_regs *regs); static int velocity_mii_read(struct mac_regs *, u8 byIdx, u16 * pdata); static int velocity_mii_write(struct mac_regs *, u8 byMiiAddr, u16 data); static u32 
mii_check_media_mode(struct mac_regs *regs); static u32 check_connection_type(struct mac_regs *regs); static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status); /* * Internal board variants. At the moment we have only one */ static struct velocity_info_tbl chip_info_table[] = { {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 256, 1, 0x00FFFFFFUL}, {0, NULL, 0, 0, 0} }; /** * velocity_set_int_opt - parser for integer options * @opt: pointer to option value * @val: value the user requested (or -1 for default) * @min: lowest value allowed * @max: highest value allowed * @def: default value * @name: property name * @dev: device name * * Set an integer property in the module options. This function does * all the verification and checking as well as reporting so that * we don't duplicate code for each option. */ static void velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname) { if (val == -1) { printf("%s: set value of parameter %s to %d\n", devname, name, def); *opt = def; } else if (val < min || val > max) { printf ("%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n", devname, name, min, max); *opt = def; } else { printf("%s: set value of parameter %s to %d\n", devname, name, val); *opt = val; } } /** * velocity_set_bool_opt - parser for boolean options * @opt: pointer to option value * @val: value the user requested (or -1 for default) * @def: default value (yes/no) * @flag: numeric value to set for true. * @name: property name * @dev: device name * * Set a boolean property in the module options. This function does * all the verification and checking as well as reporting so that * we don't duplicate code for each option. */ static void velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, const char *devname) { (*opt) &= (~flag); if (val == -1) { printf("%s: set parameter %s to %s\n", devname, name, def ? 
"TRUE" : "FALSE"); *opt |= (def ? flag : 0); } else if (val < 0 || val > 1) { printf ("%s: the value of parameter %s is invalid, the valid range is (0-1)\n", devname, name); *opt |= (def ? flag : 0); } else { printf("%s: set parameter %s to %s\n", devname, name, val ? "TRUE" : "FALSE"); *opt |= (val ? flag : 0); } } /** * velocity_get_options - set options on device * @opts: option structure for the device * @index: index of option to use in module options array * @devname: device name * * Turn the module and command options into a single structure * for the current device */ static void velocity_get_options(struct velocity_opt *opts, int index, const char *devname) { /* FIXME Do the options need to be configurable */ velocity_set_int_opt(&opts->rx_thresh, -1, RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname); velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname); velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname); velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname); velocity_set_int_opt(&opts->vid, VID_setting[index], VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_DEF, "VID_setting", devname); velocity_set_bool_opt(&opts->flags, enable_tagging[index], TAGGING_DEF, VELOCITY_FLAGS_TAGGING, "enable_tagging", devname); velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname); velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); velocity_set_int_opt((void *) 
&opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname); opts->numrx = (opts->numrx & ~3); } /** * velocity_init_cam_filter - initialise CAM * @vptr: velocity to program * * Initialize the content addressable memory used for filters. Load * appropriately according to the presence of VLAN */ static void velocity_init_cam_filter(struct velocity_info *vptr) { struct mac_regs *regs = vptr->mac_regs; /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */ WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG); WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG); /* Disable all CAMs */ memset(vptr->vCAMmask, 0, sizeof(u8) * 8); memset(vptr->mCAMmask, 0, sizeof(u8) * 8); mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM); mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); /* Enable first VCAM */ if (vptr->flags & VELOCITY_FLAGS_TAGGING) { /* If Tagging option is enabled and VLAN ID is not zero, then turn on MCFG_RTGOPT also */ if (vptr->options.vid != 0) WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG); mac_set_cam(regs, 0, (u8 *) & (vptr->options.vid), VELOCITY_VLAN_ID_CAM); vptr->vCAMmask[0] |= 1; mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM); } else { u16 temp = 0; mac_set_cam(regs, 0, (u8 *) & temp, VELOCITY_VLAN_ID_CAM); temp = 1; mac_set_cam_mask(regs, (u8 *) & temp, VELOCITY_VLAN_ID_CAM); } } static inline void velocity_give_many_rx_descs(struct velocity_info *vptr) { struct mac_regs *regs = vptr->mac_regs; int avail, dirty, unusable; /* * RD number must be equal to 4X per hardware spec * (programming guide rev 1.20, p.13) */ if (vptr->rd_filled < 4) return; wmb(); unusable = vptr->rd_filled & 0x0003; dirty = vptr->rd_dirty - 
unusable; for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; // printf("return dirty: %d\n", dirty); vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC; } writew(vptr->rd_filled & 0xfffc, &regs->RBRDU); vptr->rd_filled = unusable; } static int velocity_rx_refill(struct velocity_info *vptr) { int dirty = vptr->rd_dirty, done = 0, ret = 0; // printf("rx_refill - rd_curr = %d, dirty = %d\n", vptr->rd_curr, dirty); do { struct rx_desc *rd = vptr->rd_ring + dirty; /* Fine for an all zero Rx desc at init time as well */ if (rd->rdesc0.owner == OWNED_BY_NIC) break; // printf("rx_refill - after owner %d\n", dirty); rd->inten = 1; rd->pa_high = 0; rd->rdesc0.len = cpu_to_le32(vptr->rx_buf_sz);; done++; dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; } while (dirty != vptr->rd_curr); if (done) { // printf("\nGive Back Desc\n"); vptr->rd_dirty = dirty; vptr->rd_filled += done; velocity_give_many_rx_descs(vptr); } return ret; } extern void hex_dump(const char *data, const unsigned int len); /************************************************************************** POLL - Wait for a frame ***************************************************************************/ static int velocity_poll(struct nic *nic, int retrieve) { /* Work out whether or not there's an ethernet packet ready to * read. Return 0 if not. */ int rd_curr = vptr->rd_curr % RX_DESC_DEF; struct rx_desc *rd = &(vptr->rd_ring[rd_curr]); if (rd->rdesc0.owner == OWNED_BY_NIC) return 0; rmb(); if ( ! 
retrieve ) return 1; /* * Don't drop CE or RL error frame although RXOK is off */ if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) { nic->packetlen = rd->rdesc0.len; // ptr->rxb + (rd_curr * PKT_BUF_SZ) memcpy(nic->packet, bus_to_virt(rd->pa_low), nic->packetlen - 4); vptr->rd_curr++; vptr->rd_curr = vptr->rd_curr % RX_DESC_DEF; velocity_rx_refill(vptr); return 1; /* Remove this line once this method is implemented */ } return 0; } #define TX_TIMEOUT (1000); /************************************************************************** TRANSMIT - Transmit a frame ***************************************************************************/ static void velocity_transmit(struct nic *nic, const char *dest, /* Destination */ unsigned int type, /* Type */ unsigned int size, /* size */ const char *packet) { /* Packet */ u16 nstype; u32 to; u8 *ptxb; unsigned int pktlen; struct tx_desc *td_ptr; int entry = vptr->td_curr % TX_DESC_DEF; td_ptr = &(vptr->td_rings[entry]); /* point to the current txb incase multiple tx_rings are used */ ptxb = vptr->txb + (entry * PKT_BUF_SZ); memcpy(ptxb, dest, ETH_ALEN); /* Destination */ memcpy(ptxb + ETH_ALEN, nic->node_addr, ETH_ALEN); /* Source */ nstype = htons((u16) type); /* Type */ memcpy(ptxb + 2 * ETH_ALEN, (u8 *) & nstype, 2); /* Type */ memcpy(ptxb + ETH_HLEN, packet, size); td_ptr->tdesc1.TCPLS = TCPLS_NORMAL; td_ptr->tdesc1.TCR = TCR0_TIC; td_ptr->td_buf[0].queue = 0; size += ETH_HLEN; while (size < ETH_ZLEN) /* pad to min length */ ptxb[size++] = '\0'; if (size < ETH_ZLEN) { // printf("Padd that packet\n"); pktlen = ETH_ZLEN; // memcpy(ptxb, skb->data, skb->len); memset(ptxb + size, 0, ETH_ZLEN - size); vptr->td_rings[entry].tdesc0.pktsize = pktlen; vptr->td_rings[entry].td_buf[0].pa_low = virt_to_bus(ptxb); vptr->td_rings[entry].td_buf[0].pa_high &= cpu_to_le32(0xffff0000UL); vptr->td_rings[entry].td_buf[0].bufsize = vptr->td_rings[entry].tdesc0.pktsize; 
vptr->td_rings[entry].tdesc1.CMDZ = 2; } else { // printf("Correct size packet\n"); td_ptr->tdesc0.pktsize = size; td_ptr->td_buf[0].pa_low = virt_to_bus(ptxb); td_ptr->td_buf[0].pa_high = 0; td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; // tdinfo->nskb_dma = 1; td_ptr->tdesc1.CMDZ = 2; } if (vptr->flags & VELOCITY_FLAGS_TAGGING) { td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff); td_ptr->tdesc1.pqinf.priority = 0; td_ptr->tdesc1.pqinf.CFI = 0; td_ptr->tdesc1.TCR |= TCR0_VETAG; } vptr->td_curr = (entry + 1); { int prev = entry - 1; if (prev < 0) prev = TX_DESC_DEF - 1; td_ptr->tdesc0.owner |= OWNED_BY_NIC; td_ptr = &(vptr->td_rings[prev]); td_ptr->td_buf[0].queue = 1; mac_tx_queue_wake(vptr->mac_regs, 0); } to = currticks() + TX_TIMEOUT; while ((td_ptr->tdesc0.owner & OWNED_BY_NIC) && (currticks() < to)); /* wait */ if (currticks() >= to) { printf("TX Time Out"); } } /************************************************************************** DISABLE - Turn off ethernet interface ***************************************************************************/ static void velocity_disable(struct nic *nic __unused) { /* put the card in its initial state */ /* This function serves 3 purposes. * This disables DMA and interrupts so we don't receive * unexpected packets or interrupts from the card after * etherboot has finished. * This frees resources so etherboot may use * this driver on another interface * This allows etherboot to reinitialize the interface * if something is something goes wrong. 
*/ struct mac_regs *regs = vptr->mac_regs; mac_disable_int(regs); writel(CR0_STOP, &regs->CR0Set); writew(0xFFFF, &regs->TDCSRClr); writeb(0xFF, &regs->RDCSRClr); safe_disable_mii_autopoll(regs); mac_clear_isr(regs); /* Power down the chip */ // pci_set_power_state(vptr->pdev, PCI_D3hot); vptr->flags &= (~VELOCITY_FLAGS_OPENED); } /************************************************************************** IRQ - handle interrupts ***************************************************************************/ static void velocity_irq(struct nic *nic __unused, irq_action_t action) { /* This routine is somewhat optional. Etherboot itself * doesn't use interrupts, but they are required under some * circumstances when we're acting as a PXE stack. * * If you don't implement this routine, the only effect will * be that your driver cannot be used via Etherboot's UNDI * API. This won't affect programs that use only the UDP * portion of the PXE API, such as pxelinux. */ switch (action) { case DISABLE: case ENABLE: /* Set receive interrupt enabled/disabled state */ /* outb ( action == ENABLE ? 
IntrMaskEnabled : IntrMaskDisabled, nic->ioaddr + IntrMaskRegister ); */ break; case FORCE: /* Force NIC to generate a receive interrupt */ /* outb ( ForceInterrupt, nic->ioaddr + IntrForceRegister ); */ break; } } static struct nic_operations velocity_operations = { .connect = dummy_connect, .poll = velocity_poll, .transmit = velocity_transmit, .irq = velocity_irq, }; /************************************************************************** PROBE - Look for an adapter, this routine's visible to the outside ***************************************************************************/ static int velocity_probe( struct nic *nic, struct pci_device *pci) { int ret, i; struct mac_regs *regs; printf("via-velocity.c: Found %s Vendor=0x%hX Device=0x%hX\n", pci->id->name, pci->vendor, pci->device); /* point to private storage */ vptr = &vptx; info = chip_info_table; velocity_init_info(pci, vptr, info); //FIXME: pci_enable_device(pci); //FIXME: pci_set_power_state(pci, PCI_D0); ret = velocity_get_pci_info(vptr, pci); if (ret < 0) { printf("Failed to find PCI device.\n"); return 0; } regs = ioremap(vptr->memaddr, vptr->io_size); if (regs == NULL) { printf("Unable to remap io\n"); return 0; } vptr->mac_regs = regs; BASE = vptr->ioaddr; printf("Chip ID: %hX\n", vptr->chip_id); for (i = 0; i < 6; i++) nic->node_addr[i] = readb(&regs->PAR[i]); DBG ( "%s: %s at ioaddr %#hX\n", pci->id->name, eth_ntoa ( nic->node_addr ), (unsigned int) BASE ); velocity_get_options(&vptr->options, 0, pci->id->name); /* * Mask out the options cannot be set to the chip */ vptr->options.flags &= 0x00FFFFFFUL; //info->flags = 0x00FFFFFFUL; /* * Enable the chip specified capbilities */ vptr->flags = vptr->options. 
flags | (0x00FFFFFFUL /*info->flags */ & 0xFF000000UL); vptr->wol_opts = vptr->options.wol_opts; vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) { printf("features missing\n"); } /* and leave the chip powered down */ // FIXME: pci_set_power_state(pci, PCI_D3hot); check_connection_type(vptr->mac_regs); velocity_open(nic, pci); /* store NIC parameters */ nic->nic_op = &velocity_operations; return 1; } //#define IORESOURCE_IO 0x00000100 /* Resource type */ /** * velocity_init_info - init private data * @pdev: PCI device * @vptr: Velocity info * @info: Board type * * Set up the initial velocity_info struct for the device that has been * discovered. */ static void velocity_init_info(struct pci_device *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info) { memset(vptr, 0, sizeof(struct velocity_info)); vptr->pdev = pdev; vptr->chip_id = info->chip_id; vptr->io_size = info->io_size; vptr->num_txq = info->txqueue; vptr->multicast_limit = MCAM_SIZE; printf ("chip_id: 0x%hX, io_size: %d, num_txq %d, multicast_limit: %d\n", vptr->chip_id, (unsigned int) vptr->io_size, vptr->num_txq, vptr->multicast_limit); printf("Name: %s\n", info->name); // spin_lock_init(&vptr->lock); // INIT_LIST_HEAD(&vptr->list); } /** * velocity_get_pci_info - retrieve PCI info for device * @vptr: velocity device * @pdev: PCI device it matches * * Retrieve the PCI configuration space data that interests us from * the kernel PCI layer */ #define IORESOURCE_IO 0x00000100 /* Resource type */ #define IORESOURCE_PREFETCH 0x00001000 /* No side effects */ #define IORESOURCE_MEM 0x00000200 #define BAR_0 0 #define BAR_1 1 #define BAR_5 5 #define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */ #define PCI_BASE_ADDRESS_SPACE_IO 0x01 #define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00 #define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06 #define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */ #define 
PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M [obsolete] */ #define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */ #define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */ //#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL) // #define PCI_BASE_ADDRESS_IO_MASK (~0x03UL) unsigned long pci_resource_flags(struct pci_device *pdev, unsigned int bar) { uint32_t l, sz; unsigned long flags = 0; pci_read_config_dword(pdev, bar, &l); pci_write_config_dword(pdev, bar, ~0); pci_read_config_dword(pdev, bar, &sz); pci_write_config_dword(pdev, bar, l); if (!sz || sz == 0xffffffff) printf("Weird size\n"); if (l == 0xffffffff) l = 0; if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) { /* sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK); if (!sz) continue; res->start = l & PCI_BASE_ADDRESS_MEM_MASK; */ flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; printf("Memory Resource\n"); } else { // sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); /// if (!sz) /// continue; // res->start = l & PCI_BASE_ADDRESS_IO_MASK; flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; printf("I/O Resource\n"); } if (flags & PCI_BASE_ADDRESS_SPACE_IO) { printf("Why is it here\n"); flags |= IORESOURCE_IO; } else { printf("here\n"); //flags &= ~IORESOURCE_IO; } if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; return flags; } static int velocity_get_pci_info(struct velocity_info *vptr, struct pci_device *pdev) { if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0) { printf("DEBUG: pci_read_config_byte failed\n"); return -1; } adjust_pci_device(pdev); vptr->ioaddr = pci_bar_start(pdev, PCI_BASE_ADDRESS_0); vptr->memaddr = pci_bar_start(pdev, PCI_BASE_ADDRESS_1); printf("Looking for I/O Resource - Found:"); if (! 
(pci_resource_flags(pdev, PCI_BASE_ADDRESS_0) & IORESOURCE_IO)) { printf ("DEBUG: region #0 is not an I/O resource, aborting.\n"); return -1; } printf("Looking for Memory Resource - Found:"); if ((pci_resource_flags(pdev, PCI_BASE_ADDRESS_1) & IORESOURCE_IO)) { printf("DEBUG: region #1 is an I/O resource, aborting.\n"); return -1; } if (pci_bar_size(pdev, PCI_BASE_ADDRESS_1) < 256) { printf("DEBUG: region #1 is too small.\n"); return -1; } vptr->pdev = pdev; return 0; } /** * velocity_print_link_status - link status reporting * @vptr: velocity to report on * * Turn the link status of the velocity card into a kernel log * description of the new link state, detailing speed and duplex * status */ static void velocity_print_link_status(struct velocity_info *vptr) { if (vptr->mii_status & VELOCITY_LINK_FAIL) { printf("failed to detect cable link\n"); } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { printf("Link autonegation"); if (vptr->mii_status & VELOCITY_SPEED_1000) printf(" speed 1000M bps"); else if (vptr->mii_status & VELOCITY_SPEED_100) printf(" speed 100M bps"); else printf(" speed 10M bps"); if (vptr->mii_status & VELOCITY_DUPLEX_FULL) printf(" full duplex\n"); else printf(" half duplex\n"); } else { printf("Link forced"); switch (vptr->options.spd_dpx) { case SPD_DPX_100_HALF: printf(" speed 100M bps half duplex\n"); break; case SPD_DPX_100_FULL: printf(" speed 100M bps full duplex\n"); break; case SPD_DPX_10_HALF: printf(" speed 10M bps half duplex\n"); break; case SPD_DPX_10_FULL: printf(" speed 10M bps full duplex\n"); break; default: break; } } } /** * velocity_rx_reset - handle a receive reset * @vptr: velocity we are resetting * * Reset the ownership and status for the receive ring side. * Hand all the receive queue to the NIC. 
*/ static void velocity_rx_reset(struct velocity_info *vptr) { struct mac_regs *regs = vptr->mac_regs; int i; //ptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0; /* * Init state, all RD entries belong to the NIC */ for (i = 0; i < vptr->options.numrx; ++i) vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC; writew(RX_DESC_DEF, &regs->RBRDU); writel(virt_to_le32desc(vptr->rd_ring), &regs->RDBaseLo); writew(0, &regs->RDIdx); writew(RX_DESC_DEF - 1, &regs->RDCSize); } /** * velocity_init_registers - initialise MAC registers * @vptr: velocity to init * @type: type of initialisation (hot or cold) * * Initialise the MAC on a reset or on first set up on the * hardware. */ static void velocity_init_registers(struct nic *nic, struct velocity_info *vptr, enum velocity_init_type type) { struct mac_regs *regs = vptr->mac_regs; int i, mii_status; mac_wol_reset(regs); switch (type) { case VELOCITY_INIT_RESET: case VELOCITY_INIT_WOL: //netif_stop_queue(vptr->dev); /* * Reset RX to prevent RX pointer not on the 4X location */ velocity_rx_reset(vptr); mac_rx_queue_run(regs); mac_rx_queue_wake(regs); mii_status = velocity_get_opt_media_mode(vptr); if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { velocity_print_link_status(vptr); if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) printf("Link Failed\n"); // netif_wake_queue(vptr->dev); } enable_flow_control_ability(vptr); mac_clear_isr(regs); writel(CR0_STOP, &regs->CR0Clr); //writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set); break; case VELOCITY_INIT_COLD: default: /* * Do reset */ velocity_soft_reset(vptr); mdelay(5); mac_eeprom_reload(regs); for (i = 0; i < 6; i++) { writeb(nic->node_addr[i], &(regs->PAR[i])); } /* * clear Pre_ACPI bit. 
*/ BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA)); mac_set_rx_thresh(regs, vptr->options.rx_thresh); mac_set_dma_length(regs, vptr->options.DMA_length); writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet); /* * Back off algorithm use original IEEE standard */ BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB); /* * Init CAM filter */ velocity_init_cam_filter(vptr); /* * Set packet filter: Receive directed and broadcast address */ //FIXME Multicast velocity_set_multi(nic); /* * Enable MII auto-polling */ enable_mii_autopoll(regs); vptr->int_mask = INT_MASK_DEF; writel(virt_to_le32desc(vptr->rd_ring), &regs->RDBaseLo); writew(vptr->options.numrx - 1, &regs->RDCSize); mac_rx_queue_run(regs); mac_rx_queue_wake(regs); writew(vptr->options.numtx - 1, &regs->TDCSize); // for (i = 0; i < vptr->num_txq; i++) { writel(virt_to_le32desc(vptr->td_rings), &(regs->TDBaseLo[0])); mac_tx_queue_run(regs, 0); // } init_flow_control_register(vptr); writel(CR0_STOP, &regs->CR0Clr); writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set); mii_status = velocity_get_opt_media_mode(vptr); // netif_stop_queue(vptr->dev); mii_init(vptr, mii_status); if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { velocity_print_link_status(vptr); if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) printf("Link Faaailll\n"); // netif_wake_queue(vptr->dev); } enable_flow_control_ability(vptr); mac_hw_mibs_init(regs); mac_write_int_mask(vptr->int_mask, regs); mac_clear_isr(regs); } velocity_print_link_status(vptr); } /** * velocity_soft_reset - soft reset * @vptr: velocity to reset * * Kick off a soft reset of the velocity adapter and then poll * until the reset sequence has completed before returning. 
*/ static int velocity_soft_reset(struct velocity_info *vptr) { struct mac_regs *regs = vptr->mac_regs; unsigned int i = 0; writel(CR0_SFRST, &regs->CR0Set); for (i = 0; i < W_MAX_TIMEOUT; i++) { udelay(5); if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set)) break; } if (i == W_MAX_TIMEOUT) { writel(CR0_FORSRST, &regs->CR0Set); /* FIXME: PCI POSTING */ /* delay 2ms */ mdelay(2); } return 0; } /** * velocity_init_rings - set up DMA rings * @vptr: Velocity to set up * * Allocate PCI mapped DMA rings for the receive and transmit layer * to use. */ static int velocity_init_rings(struct velocity_info *vptr) { int idx; vptr->rd_curr = 0; vptr->td_curr = 0; memset(vptr->td_rings, 0, TX_DESC_DEF * sizeof(struct tx_desc)); memset(vptr->rd_ring, 0, RX_DESC_DEF * sizeof(struct rx_desc)); // memset(vptr->tx_buffs, 0, TX_DESC_DEF * PKT_BUF_SZ); for (idx = 0; idx < RX_DESC_DEF; idx++) { vptr->rd_ring[idx].rdesc0.RSR = 0; vptr->rd_ring[idx].rdesc0.len = 0; vptr->rd_ring[idx].rdesc0.reserved = 0; vptr->rd_ring[idx].rdesc0.owner = 0; vptr->rd_ring[idx].len = cpu_to_le32(vptr->rx_buf_sz); vptr->rd_ring[idx].inten = 1; vptr->rd_ring[idx].pa_low = virt_to_bus(vptr->rxb + (RX_DESC_DEF * idx)); vptr->rd_ring[idx].pa_high = 0; vptr->rd_ring[idx].rdesc0.owner = OWNED_BY_NIC; } /* for (i = 0; idx < TX_DESC_DEF; idx++ ) { vptr->td_rings[idx].tdesc1.TCPLS = TCPLS_NORMAL; vptr->td_rings[idx].tdesc1.TCR = TCR0_TIC; vptr->td_rings[idx].td_buf[0].queue = 0; vptr->td_rings[idx].tdesc0.owner = ~OWNED_BY_NIC; vptr->td_rings[idx].tdesc0.pktsize = 0; vptr->td_rings[idx].td_buf[0].pa_low = cpu_to_le32(virt_to_bus(vptr->txb + (idx * PKT_BUF_SZ))); vptr->td_rings[idx].td_buf[0].pa_high = 0; vptr->td_rings[idx].td_buf[0].bufsize = 0; vptr->td_rings[idx].tdesc1.CMDZ = 2; } */ return 0; } /** * velocity_open - interface activation callback * @dev: network layer device to open * * Called when the network layer brings the interface up. Returns * a negative posix error code on failure, or zero on success. 
* * All the ring allocation and set up is done on open for this * adapter to minimise memory usage when inactive */ #define PCI_BYTE_REG_BITS_ON(x,i,p) do{\ u8 byReg;\ pci_read_config_byte((p), (i), &(byReg));\ (byReg) |= (x);\ pci_write_config_byte((p), (i), (byReg));\ } while (0) // // Registers in the PCI configuration space // #define PCI_REG_COMMAND 0x04 // #define PCI_REG_MODE0 0x60 // #define PCI_REG_MODE1 0x61 // #define PCI_REG_MODE2 0x62 // #define PCI_REG_MODE3 0x63 // #define PCI_REG_DELAY_TIMER 0x64 // // Bits in the (MODE2, 0x62) register // #define MODE2_PCEROPT 0x80 // take PCI bus ERror as a fatal and shutdown from software control #define MODE2_TXQ16 0x40 // TX write-back Queue control. 0->32 entries available in Tx write-back queue, 1->16 entries #define MODE2_TXPOST 0x08 // (Not support in VT3119) #define MODE2_AUTOOPT 0x04 // (VT3119 GHCI without such behavior) #define MODE2_MODE10T 0x02 // used to control tx Threshold for 10M case #define MODE2_TCPLSOPT 0x01 // TCP large send field update disable, hardware will not update related fields, leave it to software. // // Bits in the MODE3 register // #define MODE3_MIION 0x04 // MII symbol codine error detect enable ?? 
// Bits in the (COMMAND, 0x04) register #define COMMAND_BUSM 0x04 #define COMMAND_WAIT 0x80 static int velocity_open(struct nic *nic, struct pci_device *pci __unused) { u8 diff; u32 TxPhyAddr, RxPhyAddr; u32 TxBufPhyAddr, RxBufPhyAddr; vptr->TxDescArrays = tx_ring; if (vptr->TxDescArrays == 0) printf("Allot Error"); /* Tx Descriptor needs 64 bytes alignment; */ TxPhyAddr = virt_to_bus(vptr->TxDescArrays); printf("Unaligned Address : %X\n", TxPhyAddr); diff = 64 - (TxPhyAddr - ((TxPhyAddr >> 6) << 6)); TxPhyAddr += diff; vptr->td_rings = (struct tx_desc *) (vptr->TxDescArrays + diff); printf("Aligned Address: %lX\n", virt_to_bus(vptr->td_rings)); vptr->tx_buffs = txb; /* Rx Buffer needs 64 bytes alignment; */ TxBufPhyAddr = virt_to_bus(vptr->tx_buffs); diff = 64 - (TxBufPhyAddr - ((TxBufPhyAddr >> 6) << 6)); TxBufPhyAddr += diff; vptr->txb = (unsigned char *) (vptr->tx_buffs + diff); vptr->RxDescArrays = rx_ring; /* Rx Descriptor needs 64 bytes alignment; */ RxPhyAddr = virt_to_bus(vptr->RxDescArrays); diff = 64 - (RxPhyAddr - ((RxPhyAddr >> 6) << 6)); RxPhyAddr += diff; vptr->rd_ring = (struct rx_desc *) (vptr->RxDescArrays + diff); vptr->rx_buffs = rxb; /* Rx Buffer needs 64 bytes alignment; */ RxBufPhyAddr = virt_to_bus(vptr->rx_buffs); diff = 64 - (RxBufPhyAddr - ((RxBufPhyAddr >> 6) << 6)); RxBufPhyAddr += diff; vptr->rxb = (unsigned char *) (vptr->rx_buffs + diff); if (vptr->RxDescArrays == NULL || vptr->RxDescArrays == NULL) { printf("Allocate tx_ring or rd_ring failed\n"); return 0; } vptr->rx_buf_sz = PKT_BUF_SZ; /* // turn this on to avoid retry forever PCI_BYTE_REG_BITS_ON(MODE2_PCEROPT, PCI_REG_MODE2, pci); // for some legacy BIOS and OS don't open BusM // bit in PCI configuration space. So, turn it on. 
PCI_BYTE_REG_BITS_ON(COMMAND_BUSM, PCI_REG_COMMAND, pci); // turn this on to detect MII coding error PCI_BYTE_REG_BITS_ON(MODE3_MIION, PCI_REG_MODE3, pci); */ velocity_init_rings(vptr); /* Ensure chip is running */ //FIXME: pci_set_power_state(vptr->pdev, PCI_D0); velocity_init_registers(nic, vptr, VELOCITY_INIT_COLD); mac_write_int_mask(0, vptr->mac_regs); // _int(vptr->mac_regs); //mac_enable_int(vptr->mac_regs); vptr->flags |= VELOCITY_FLAGS_OPENED; return 1; } /* * MII access , media link mode setting functions */ /** * mii_init - set up MII * @vptr: velocity adapter * @mii_status: links tatus * * Set up the PHY for the current link state. */ static void mii_init(struct velocity_info *vptr, u32 mii_status __unused) { u16 BMCR; switch (PHYID_GET_PHY_ID(vptr->phy_id)) { case PHYID_CICADA_CS8201: /* * Reset to hardware default */ MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); /* * Turn on ECHODIS bit in NWay-forced full mode and turn it * off it in NWay-forced half mode for NWay-forced v.s. * legacy-forced issue. */ if (vptr->mii_status & VELOCITY_DUPLEX_FULL) MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); else MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); /* * Turn on Link/Activity LED enable bit for CIS8201 */ MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs); break; case PHYID_VT3216_32BIT: case PHYID_VT3216_64BIT: /* * Reset to hardware default */ MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); /* * Turn on ECHODIS bit in NWay-forced full mode and turn it * off it in NWay-forced half mode for NWay-forced v.s. 
* legacy-forced issue */ if (vptr->mii_status & VELOCITY_DUPLEX_FULL) MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); else MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); break; case PHYID_MARVELL_1000: case PHYID_MARVELL_1000S: /* * Assert CRS on Transmit */ MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs); /* * Reset to hardware default */ MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); break; default: ; } velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR); if (BMCR & BMCR_ISO) { BMCR &= ~BMCR_ISO; velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR); } } /** * safe_disable_mii_autopoll - autopoll off * @regs: velocity registers * * Turn off the autopoll and wait for it to disable on the chip */ static void safe_disable_mii_autopoll(struct mac_regs *regs) { u16 ww; /* turn off MAUTO */ writeb(0, &regs->MIICR); for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { udelay(1); if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR)) break; } } /** * enable_mii_autopoll - turn on autopolling * @regs: velocity registers * * Enable the MII link status autopoll feature on the Velocity * hardware. Wait for it to enable. */ static void enable_mii_autopoll(struct mac_regs *regs) { unsigned int ii; writeb(0, &(regs->MIICR)); writeb(MIIADR_SWMPL, &regs->MIIADR); for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { udelay(1); if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR)) break; } writeb(MIICR_MAUTO, &regs->MIICR); for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { udelay(1); if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR)) break; } } /** * velocity_mii_read - read MII data * @regs: velocity registers * @index: MII register index * @data: buffer for received data * * Perform a single read of an MII 16bit register. Returns zero * on success or -ETIMEDOUT if the PHY did not respond. 
*/ static int velocity_mii_read(struct mac_regs *regs, u8 index, u16 * data) { u16 ww; /* * Disable MIICR_MAUTO, so that mii addr can be set normally */ safe_disable_mii_autopoll(regs); writeb(index, &regs->MIIADR); BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR); for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { if (!(readb(&regs->MIICR) & MIICR_RCMD)) break; } *data = readw(&regs->MIIDATA); enable_mii_autopoll(regs); if (ww == W_MAX_TIMEOUT) return -1; return 0; } /** * velocity_mii_write - write MII data * @regs: velocity registers * @index: MII register index * @data: 16bit data for the MII register * * Perform a single write to an MII 16bit register. Returns zero * on success or -ETIMEDOUT if the PHY did not respond. */ static int velocity_mii_write(struct mac_regs *regs, u8 mii_addr, u16 data) { u16 ww; /* * Disable MIICR_MAUTO, so that mii addr can be set normally */ safe_disable_mii_autopoll(regs); /* MII reg offset */ writeb(mii_addr, &regs->MIIADR); /* set MII data */ writew(data, &regs->MIIDATA); /* turn on MIICR_WCMD */ BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR); /* W_MAX_TIMEOUT is the timeout period */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { udelay(5); if (!(readb(&regs->MIICR) & MIICR_WCMD)) break; } enable_mii_autopoll(regs); if (ww == W_MAX_TIMEOUT) return -1; return 0; } /** * velocity_get_opt_media_mode - get media selection * @vptr: velocity adapter * * Get the media mode stored in EEPROM or module options and load * mii_status accordingly. The requested link state information * is also returned. 
*/ static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) { u32 status = 0; switch (vptr->options.spd_dpx) { case SPD_DPX_AUTO: status = VELOCITY_AUTONEG_ENABLE; break; case SPD_DPX_100_FULL: status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL; break; case SPD_DPX_10_FULL: status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL; break; case SPD_DPX_100_HALF: status = VELOCITY_SPEED_100; break; case SPD_DPX_10_HALF: status = VELOCITY_SPEED_10; break; } vptr->mii_status = status; return status; } /** * mii_set_auto_on - autonegotiate on * @vptr: velocity * * Enable autonegotation on this interface */ static void mii_set_auto_on(struct velocity_info *vptr) { if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs)) MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); else MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); } /* static void mii_set_auto_off(struct velocity_info * vptr) { MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); } */ /** * set_mii_flow_control - flow control setup * @vptr: velocity interface * * Set up the flow control on this interface according to * the supplied user/eeprom options. 
*/ static void set_mii_flow_control(struct velocity_info *vptr) { /*Enable or Disable PAUSE in ANAR */ switch (vptr->options.flow_cntl) { case FLOW_CNTL_TX: MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); break; case FLOW_CNTL_RX: MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); break; case FLOW_CNTL_TX_RX: MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); break; case FLOW_CNTL_DISABLE: MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); break; default: break; } } /** * velocity_set_media_mode - set media mode * @mii_status: old MII link state * * Check the media link state and configure the flow control * PHY and also velocity hardware setup accordingly. In particular * we need to set up CD polling and frame bursting. */ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) { struct mac_regs *regs = vptr->mac_regs; vptr->mii_status = mii_check_media_mode(vptr->mac_regs); /* Set mii link status */ set_mii_flow_control(vptr); if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) { MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); } /* * If connection type is AUTO */ if (mii_status & VELOCITY_AUTONEG_ENABLE) { printf("Velocity is AUTO mode\n"); /* clear force MAC mode bit */ BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR); /* set duplex mode of MAC according to duplex mode of MII */ MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs); MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); /* enable AUTO-NEGO mode */ mii_set_auto_on(vptr); } else { u16 ANAR; u8 CHIPGCR; /* * 1. 
if it's 3119, disable frame bursting in halfduplex mode * and enable it in fullduplex mode * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR * 3. only enable CD heart beat counter in 10HD mode */ /* set force MAC mode bit */ BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR); CHIPGCR = readb(&regs->CHIPGCR); CHIPGCR &= ~CHIPGCR_FCGMII; if (mii_status & VELOCITY_DUPLEX_FULL) { CHIPGCR |= CHIPGCR_FCFDX; writeb(CHIPGCR, &regs->CHIPGCR); printf ("DEBUG: set Velocity to forced full mode\n"); if (vptr->rev_id < REV_ID_VT3216_A0) BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR); } else { CHIPGCR &= ~CHIPGCR_FCFDX; printf ("DEBUG: set Velocity to forced half mode\n"); writeb(CHIPGCR, &regs->CHIPGCR); if (vptr->rev_id < REV_ID_VT3216_A0) BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR); } MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) { BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG); } else { BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG); } /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */ velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR); ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)); if (mii_status & VELOCITY_SPEED_100) { if (mii_status & VELOCITY_DUPLEX_FULL) ANAR |= ANAR_TXFD; else ANAR |= ANAR_TX; } else { if (mii_status & VELOCITY_DUPLEX_FULL) ANAR |= ANAR_10FD; else ANAR |= ANAR_10; } velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR); /* enable AUTO-NEGO mode */ mii_set_auto_on(vptr); /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */ } /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */ /* vptr->mii_status=check_connection_type(vptr->mac_regs); */ return VELOCITY_LINK_CHANGE; } /** * mii_check_media_mode - check media state * @regs: velocity registers * * Check the current MII status and determine the link status * accordingly */ static u32 mii_check_media_mode(struct mac_regs *regs) { u32 
status = 0; u16 ANAR; if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs)) status |= VELOCITY_LINK_FAIL; if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs)) status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs)) status |= (VELOCITY_SPEED_1000); else { velocity_mii_read(regs, MII_REG_ANAR, &ANAR); if (ANAR & ANAR_TXFD) status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL); else if (ANAR & ANAR_TX) status |= VELOCITY_SPEED_100; else if (ANAR & ANAR_10FD) status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL); else status |= (VELOCITY_SPEED_10); } if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { velocity_mii_read(regs, MII_REG_ANAR, &ANAR); if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { if (MII_REG_BITS_IS_ON (G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) status |= VELOCITY_AUTONEG_ENABLE; } } return status; } static u32 check_connection_type(struct mac_regs *regs) { u32 status = 0; u8 PHYSR0; u16 ANAR; PHYSR0 = readb(&regs->PHYSR0); /* if (!(PHYSR0 & PHYSR0_LINKGD)) status|=VELOCITY_LINK_FAIL; */ if (PHYSR0 & PHYSR0_FDPX) status |= VELOCITY_DUPLEX_FULL; if (PHYSR0 & PHYSR0_SPDG) status |= VELOCITY_SPEED_1000; if (PHYSR0 & PHYSR0_SPD10) status |= VELOCITY_SPEED_10; else status |= VELOCITY_SPEED_100; if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { velocity_mii_read(regs, MII_REG_ANAR, &ANAR); if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { if (MII_REG_BITS_IS_ON (G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) status |= VELOCITY_AUTONEG_ENABLE; } } return status; } /** * enable_flow_control_ability - flow control * @vptr: veloity to configure * * Set up flow control according to the flow control options * determined by the eeprom/configuration. 
*/ static void enable_flow_control_ability(struct velocity_info *vptr) { struct mac_regs *regs = vptr->mac_regs; switch (vptr->options.flow_cntl) { case FLOW_CNTL_DEFAULT: if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0)) writel(CR0_FDXRFCEN, &regs->CR0Set); else writel(CR0_FDXRFCEN, &regs->CR0Clr); if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0)) writel(CR0_FDXTFCEN, &regs->CR0Set); else writel(CR0_FDXTFCEN, &regs->CR0Clr); break; case FLOW_CNTL_TX: writel(CR0_FDXTFCEN, &regs->CR0Set); writel(CR0_FDXRFCEN, &regs->CR0Clr); break; case FLOW_CNTL_RX: writel(CR0_FDXRFCEN, &regs->CR0Set); writel(CR0_FDXTFCEN, &regs->CR0Clr); break; case FLOW_CNTL_TX_RX: writel(CR0_FDXTFCEN, &regs->CR0Set); writel(CR0_FDXRFCEN, &regs->CR0Set); break; case FLOW_CNTL_DISABLE: writel(CR0_FDXRFCEN, &regs->CR0Clr); writel(CR0_FDXTFCEN, &regs->CR0Clr); break; default: break; } } /* FIXME: Move to pci.c */ /** * pci_set_power_state - Set the power state of a PCI device * @dev: PCI device to be suspended * @state: Power state we're entering * * Transition a device to a new power state, using the Power Management * Capabilities in the device's config space. * * RETURN VALUE: * -EINVAL if trying to enter a lower state than we're already in. * 0 if we're already in the requested state. * -EIO if device does not support PCI PM. * 0 if we can successfully change the power state. 
*/ int pci_set_power_state(struct pci_device *dev, int state) { int pm; u16 pmcsr; int current_state = 0; /* bound the state we're entering */ if (state > 3) state = 3; /* Validate current state: * Can enter D0 from any state, but if we can only go deeper * to sleep if we're already in a low power state */ if (state > 0 && current_state > state) return -1; else if (current_state == state) return 0; /* we're already there */ /* find PCI PM capability in list */ pm = pci_find_capability(dev, PCI_CAP_ID_PM); /* abort if the device doesn't support PM capabilities */ if (!pm) return -2; /* check if this device supports the desired state */ if (state == 1 || state == 2) { u16 pmc; pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); if (state == 1 && !(pmc & PCI_PM_CAP_D1)) return -2; else if (state == 2 && !(pmc & PCI_PM_CAP_D2)) return -2; } /* If we're in D3, force entire word to 0. * This doesn't affect PME_Status, disables PME_En, and * sets PowerState to 0. */ if (current_state >= 3) pmcsr = 0; else { pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= state; } /* enter specified state */ pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr); /* Mandatory power management transition delays */ /* see PCI PM 1.1 5.6.1 table 18 */ if (state == 3 || current_state == 3) mdelay(10); else if (state == 2 || current_state == 2) udelay(200); current_state = state; return 0; } static struct pci_device_id velocity_nics[] = { PCI_ROM(0x1106, 0x3119, "via-velocity", "VIA Networking Velocity Family Gigabit Ethernet Adapter", 0), }; PCI_DRIVER ( velocity_driver, velocity_nics, PCI_NO_CLASS ); DRIVER ( "VIA-VELOCITY/PCI", nic_driver, pci_driver, velocity_driver, velocity_probe, velocity_disable );
gpl-2.0
allenloves/fmCalculator
JuceLibraryCode/modules/juce_audio_basics/midi/juce_MidiMessageSequence.cpp
20
10325
/* ============================================================================== This file is part of the JUCE library. Copyright (c) 2013 - Raw Material Software Ltd. Permission is granted to use this software under the terms of either: a) the GPL v2 (or any later version) b) the Affero GPL v3 Details of these licenses can be found at: www.gnu.org/licenses JUCE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. ------------------------------------------------------------------------------ To release a closed-source product which uses JUCE, commercial licenses are available: visit www.juce.com for more information. ============================================================================== */ MidiMessageSequence::MidiMessageSequence() { } MidiMessageSequence::MidiMessageSequence (const MidiMessageSequence& other) { list.addCopiesOf (other.list); updateMatchedPairs(); } MidiMessageSequence& MidiMessageSequence::operator= (const MidiMessageSequence& other) { MidiMessageSequence otherCopy (other); swapWith (otherCopy); return *this; } void MidiMessageSequence::swapWith (MidiMessageSequence& other) noexcept { list.swapWith (other.list); } MidiMessageSequence::~MidiMessageSequence() { } void MidiMessageSequence::clear() { list.clear(); } int MidiMessageSequence::getNumEvents() const noexcept { return list.size(); } MidiMessageSequence::MidiEventHolder* MidiMessageSequence::getEventPointer (const int index) const noexcept { return list [index]; } double MidiMessageSequence::getTimeOfMatchingKeyUp (const int index) const noexcept { if (const MidiEventHolder* const meh = list [index]) if (meh->noteOffObject != nullptr) return meh->noteOffObject->message.getTimeStamp(); return 0.0; } int MidiMessageSequence::getIndexOfMatchingKeyUp (const int index) const noexcept { if (const MidiEventHolder* const meh = 
list [index]) return list.indexOf (meh->noteOffObject); return -1; } int MidiMessageSequence::getIndexOf (MidiEventHolder* const event) const noexcept { return list.indexOf (event); } int MidiMessageSequence::getNextIndexAtTime (const double timeStamp) const noexcept { const int numEvents = list.size(); int i; for (i = 0; i < numEvents; ++i) if (list.getUnchecked(i)->message.getTimeStamp() >= timeStamp) break; return i; } //============================================================================== double MidiMessageSequence::getStartTime() const noexcept { return getEventTime (0); } double MidiMessageSequence::getEndTime() const noexcept { return getEventTime (list.size() - 1); } double MidiMessageSequence::getEventTime (const int index) const noexcept { if (const MidiEventHolder* const meh = list [index]) return meh->message.getTimeStamp(); return 0.0; } //============================================================================== MidiMessageSequence::MidiEventHolder* MidiMessageSequence::addEvent (const MidiMessage& newMessage, double timeAdjustment) { MidiEventHolder* const newOne = new MidiEventHolder (newMessage); timeAdjustment += newMessage.getTimeStamp(); newOne->message.setTimeStamp (timeAdjustment); int i; for (i = list.size(); --i >= 0;) if (list.getUnchecked(i)->message.getTimeStamp() <= timeAdjustment) break; list.insert (i + 1, newOne); return newOne; } void MidiMessageSequence::deleteEvent (const int index, const bool deleteMatchingNoteUp) { if (isPositiveAndBelow (index, list.size())) { if (deleteMatchingNoteUp) deleteEvent (getIndexOfMatchingKeyUp (index), false); list.remove (index); } } struct MidiMessageSequenceSorter { static int compareElements (const MidiMessageSequence::MidiEventHolder* const first, const MidiMessageSequence::MidiEventHolder* const second) noexcept { const double diff = first->message.getTimeStamp() - second->message.getTimeStamp(); return (diff > 0) - (diff < 0); } }; void MidiMessageSequence::addSequence (const 
MidiMessageSequence& other, double timeAdjustment, double firstAllowableTime, double endOfAllowableDestTimes) { firstAllowableTime -= timeAdjustment; endOfAllowableDestTimes -= timeAdjustment; for (int i = 0; i < other.list.size(); ++i) { const MidiMessage& m = other.list.getUnchecked(i)->message; const double t = m.getTimeStamp(); if (t >= firstAllowableTime && t < endOfAllowableDestTimes) { MidiEventHolder* const newOne = new MidiEventHolder (m); newOne->message.setTimeStamp (timeAdjustment + t); list.add (newOne); } } sort(); } //============================================================================== void MidiMessageSequence::sort() noexcept { MidiMessageSequenceSorter sorter; list.sort (sorter, true); } void MidiMessageSequence::updateMatchedPairs() noexcept { for (int i = 0; i < list.size(); ++i) { MidiEventHolder* const meh = list.getUnchecked(i); const MidiMessage& m1 = meh->message; if (m1.isNoteOn()) { meh->noteOffObject = nullptr; const int note = m1.getNoteNumber(); const int chan = m1.getChannel(); const int len = list.size(); for (int j = i + 1; j < len; ++j) { const MidiMessage& m = list.getUnchecked(j)->message; if (m.getNoteNumber() == note && m.getChannel() == chan) { if (m.isNoteOff()) { meh->noteOffObject = list[j]; break; } else if (m.isNoteOn()) { MidiEventHolder* const newEvent = new MidiEventHolder (MidiMessage::noteOff (chan, note)); list.insert (j, newEvent); newEvent->message.setTimeStamp (m.getTimeStamp()); meh->noteOffObject = newEvent; break; } } } } } } void MidiMessageSequence::addTimeToMessages (const double delta) noexcept { for (int i = list.size(); --i >= 0;) { MidiMessage& mm = list.getUnchecked(i)->message; mm.setTimeStamp (mm.getTimeStamp() + delta); } } //============================================================================== void MidiMessageSequence::extractMidiChannelMessages (const int channelNumberToExtract, MidiMessageSequence& destSequence, const bool alsoIncludeMetaEvents) const { for (int i = 0; i < 
list.size(); ++i) { const MidiMessage& mm = list.getUnchecked(i)->message; if (mm.isForChannel (channelNumberToExtract) || (alsoIncludeMetaEvents && mm.isMetaEvent())) destSequence.addEvent (mm); } } void MidiMessageSequence::extractSysExMessages (MidiMessageSequence& destSequence) const { for (int i = 0; i < list.size(); ++i) { const MidiMessage& mm = list.getUnchecked(i)->message; if (mm.isSysEx()) destSequence.addEvent (mm); } } void MidiMessageSequence::deleteMidiChannelMessages (const int channelNumberToRemove) { for (int i = list.size(); --i >= 0;) if (list.getUnchecked(i)->message.isForChannel (channelNumberToRemove)) list.remove(i); } void MidiMessageSequence::deleteSysExMessages() { for (int i = list.size(); --i >= 0;) if (list.getUnchecked(i)->message.isSysEx()) list.remove(i); } //============================================================================== void MidiMessageSequence::createControllerUpdatesForTime (const int channelNumber, const double time, OwnedArray<MidiMessage>& dest) { bool doneProg = false; bool donePitchWheel = false; Array<int> doneControllers; doneControllers.ensureStorageAllocated (32); for (int i = list.size(); --i >= 0;) { const MidiMessage& mm = list.getUnchecked(i)->message; if (mm.isForChannel (channelNumber) && mm.getTimeStamp() <= time) { if (mm.isProgramChange()) { if (! doneProg) { dest.add (new MidiMessage (mm, 0.0)); doneProg = true; } } else if (mm.isController()) { if (! doneControllers.contains (mm.getControllerNumber())) { dest.add (new MidiMessage (mm, 0.0)); doneControllers.add (mm.getControllerNumber()); } } else if (mm.isPitchWheel()) { if (! donePitchWheel) { dest.add (new MidiMessage (mm, 0.0)); donePitchWheel = true; } } } } } //============================================================================== MidiMessageSequence::MidiEventHolder::MidiEventHolder (const MidiMessage& mm) : message (mm), noteOffObject (nullptr) { } MidiMessageSequence::MidiEventHolder::~MidiEventHolder() { }
gpl-2.0
david-visteon/uboot-imx
board/netstal/mcu25/mcu25.c
20
5412
/*
 * (C) Copyright 2005-2008 Netstal Maschinen AG
 * Niklaus Giger (Niklaus.Giger@netstal.com)
 *
 * This source code is free software; you can redistribute it
 * and/or modify it in source code form under the terms of the GNU
 * General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */

#include <common.h>
#include <ppc4xx.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm-ppc/u-boot.h>
#include "../common/nm.h"

DECLARE_GLOBAL_DATA_PTR;

/* Board registers, memory-mapped through the external bus chip selects. */
#define MCU25_SLOT_ADDRESS (0x7A000000 + 0x0A)
#define MCU25_DIGITAL_IO_REGISTER (0x7A000000 + 0xc0)
#define MCU25_LED_REGISTER_ADDRESS (0x7C000000 + 0x10)
#define MCU25_VERSIONS_REGISTER (0x7C000000 + 0x0C)
#define MCU25_IO_CONFIGURATION (0x7C000000 + 0x0e)

/* Bit in MCU25_DIGITAL_IO_REGISTER signalling a software-install request. */
#define MCU_SW_INSTALL_REQUESTED 0x08

/*
 * NOTE(review): SDRAM_LEN says 32 MB but is unused in this file, and
 * initdram() below hard-codes 64 MB -- confirm which value matches the
 * actual hardware.
 */
#define SDRAM_LEN (32 << 20) /* 32 MB - RAM */

/*
 * This function is run very early, out of flash, and before devices are
 * initialized. It is called by lib_ppc/board.c:board_init_f by virtue
 * of being in the init_sequence array.
 *
 * The SDRAM has been initialized already -- start.S:start called
 * init.S:init_sdram early on -- but it is not yet being used for
 * anything, not even stack. So be careful.
 */

/* Attention: If you want 1 microsecs times from the external oscillator
 * 0x00004051 is okay for u-boot/linux, but different from old vxworks values
 * 0x00804051 causes problems with u-boot and linux!
 */
#define CPC0_CR0_VALUE 0x0007F03C
#define CPC0_CR1_VALUE 0x00004051

int board_early_init_f (void)
{
	/* Documented in A-1171
	 *
	 * Interrupt controller setup for the MCU25 board.
	 * Note: IRQ 0-15 405GP internally generated; high; level sensitive
	 * IRQ 16 405GP internally generated; low; level sensitive
	 * IRQ 17-24 RESERVED/UNUSED
	 * IRQ 31 (EXT IRQ 6) (unused)
	 */
	mtdcr(uicsr, 0xFFFFFFFF); /* clear all ints */
	mtdcr(uicer, 0x00000000); /* disable all ints */
	mtdcr(uiccr, 0x00000000); /* set all to be non-critical */
	mtdcr(uicpr, 0xFFFFE000); /* set int polarities */
	mtdcr(uictr, 0x00000000); /* set int trigger levels */
	mtdcr(uicsr, 0xFFFFFFFF); /* clear all ints */

	/* Clock/chip control: CPC0_CR1 value per oscillator note above. */
	mtdcr(cntrl1, CPC0_CR1_VALUE);
	/* NOTE(review): magic DCR values below presumably come from the
	 * A-1171 board documentation -- verify against it before changing. */
	mtdcr(ecr, 0x60606000);
	mtdcr(CPC0_EIRR, 0x7C000000);
	out32(GPIO0_OR, CONFIG_SYS_GPIO0_OR );
	out32(GPIO0_TCR, CONFIG_SYS_GPIO0_TCR);
	out32(GPIO0_ODR, CONFIG_SYS_GPIO0_ODR);
	mtspr(SPRN_CCR0, 0x00700000); /* core configuration register */
	return 0;
}

#ifdef CONFIG_BOARD_PRE_INIT
/* Legacy hook name: simply forward to board_early_init_f(). */
int board_pre_init (void)
{
	return board_early_init_f ();
}
#endif

/*
 * sys_install_requested - nonzero when the install-request bit is set
 * in the board's digital I/O register.
 */
int sys_install_requested(void)
{
	u16 ioValue = in_be16((u16 *)MCU25_DIGITAL_IO_REGISTER);
	return (ioValue & MCU_SW_INSTALL_REQUESTED) != 0;
}

/*
 * checkboard - print board generation/index/IO configuration via
 * nm_show_print() and finish setup that cannot be done earlier.
 */
int checkboard (void)
{
	u16 boardVersReg = in_be16((u16 *)MCU25_VERSIONS_REGISTER);
	u16 hwConfig = in_be16((u16 *)MCU25_IO_CONFIGURATION);
	u16 generation = boardVersReg & 0x0f; /* low nibble */
	u16 index = boardVersReg & 0xf0;      /* high nibble, left unshifted */

	/* Cannot be done in board_early_init */
	mtdcr(cntrl0, CPC0_CR0_VALUE);

	/* Force /RTS to active. The board is not wired quite
	 * correctly to use CTS/RTS flow control, so just force
	 * /RTS active and forget about it.
	 */
	writeb (readb (0xef600404) | 0x03, 0xef600404);
	nm_show_print(generation, index, hwConfig);

	return 0;
}

/* hcu_led_get - current LED register contents (masked to 10 bits). */
u32 hcu_led_get(void)
{
	return in_be16((u16 *)MCU25_LED_REGISTER_ADDRESS) & 0x3ff;
}

/*
 * hcu_led_set value to be placed into the LEDs (max 6 bit)
 * NOTE(review): original comment says 6 bits, but hcu_led_get() masks
 * 10 bits -- TODO confirm the real LED register width.
 */
void hcu_led_set(u32 value)
{
	out_be16((u16 *)MCU25_LED_REGISTER_ADDRESS, value);
}

/*
 * hcu_get_slot - backplane slot number (7 bits of the slot register).
 */
u32 hcu_get_slot(void)
{
	u16 slot = in_be16((u16 *)MCU25_SLOT_ADDRESS);
	return slot & 0x7f;
}

/*
 * get_serial_number - first word of flash; 0xffffffff (erased flash)
 * is reported as 0, i.e. "no serial number programmed".
 */
u32 get_serial_number(void)
{
	u32 serial = in_be32((u32 *)CONFIG_SYS_FLASH_BASE);
	if (serial == 0xffffffff)
		return 0;
	return serial;
}

/*
 * misc_init_r - post-relocation misc init: run the common Netstal init,
 * then arm a software installation if the hardware requests one.
 */
int misc_init_r(void)
{
	common_misc_init_r();
	set_params_for_sw_install( sys_install_requested(), "mcu25" );
	return 0;
}

/*
 * initdram - initialize the PPC405 SDRAM controller and report the
 * RAM size (hard-coded 64 MB; see NOTE at SDRAM_LEN above).
 */
phys_size_t initdram(int board_type)
{
	unsigned int dram_size = 64*1024*1024;
	init_ppc405_sdram(dram_size);

#ifdef DEBUG
	show_sdram_registers();
#endif
	return dram_size;
}

#if defined(CONFIG_POST)
/*
 * Returns 1 if keys pressed to start the power-on long-running tests
 * Called from board_init_f().
 */
int post_hotkeys_pressed(void)
{
	return 0; /* No hotkeys supported */
}
#endif /* CONFIG_POST */

#if defined(CONFIG_OF_LIBFDT) && defined(CONFIG_OF_BOARD_SETUP)
/* ft_board_setup - fix up the flattened device tree with CPU data. */
void ft_board_setup(void *blob, bd_t *bd)
{
	ft_cpu_setup(blob, bd);
}
#endif /* defined(CONFIG_OF_LIBFDT) && defined(CONFIG_OF_BOARD_SETUP) */

/*
 * Hardcoded flash setup:
 * Flash 0 is a non-CFI AMD AM29F040 flash, 8 bit flash / 8 bit bus.
 */
ulong board_flash_get_legacy (ulong base, int banknum, flash_info_t * info)
{
	if (banknum == 0) { /* non-CFI boot flash */
		info->portwidth = 1;
		info->chipwidth = 1;
		info->interface = FLASH_CFI_X8;
		return 1;
	} else
		return 0;
}
gpl-2.0
hzhuang1/linux
arch/arm/mach-shmobile/timer.c
276
2134
/*
 * SH-Mobile Timer
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <asm/mach/time.h>
#include <asm/smp_twd.h>

/*
 * Pre-seed loops_per_jiffy with a worst-case estimate so that early
 * __delay() calls are never too short.
 */
void __init shmobile_setup_delay(unsigned int max_cpu_core_mhz,
				 unsigned int mult, unsigned int div)
{
	/*
	 * Worst-case loops-per-jiffy per MHz, derived from the __delay()
	 * implementation in arch/arm/lib/delay.S.  Delays come out longer
	 * than requested whenever the core runs below max_cpu_core_mhz.
	 */
	unsigned int lpj_per_mhz = (1000000 * mult) / (HZ * div);

	if (preset_lpj)
		return;

	preset_lpj = max_cpu_core_mhz * lpj_per_mhz;
}

static void __init shmobile_late_time_init(void)
{
	/*
	 * Give every compiled-in early timer a chance to register, then
	 * probe two "earlytimer" devices: clockevents and clocksource.
	 * If only a clockevents device exists, the clocksource probe
	 * returns -ENODEV and the jiffies clocksource takes over
	 * transparently -- no error handling is needed here.
	 */
	early_platform_driver_register_all("earlytimer");
	early_platform_driver_probe("earlytimer", 2, 0);
}

void __init shmobile_earlytimer_init(void)
{
	/* Defer the actual timer probing until late_time_init time. */
	late_time_init = shmobile_late_time_init;
}

static void __init shmobile_timer_init(void)
{
	/* Intentionally empty: real timers register via "earlytimer". */
}

struct sys_timer shmobile_timer = {
	.init	= shmobile_timer_init,
};
gpl-2.0
JackWindows/OpenWRT-14.07-JS9331
package/libs/libnl-tiny/src/error.c
532
3538
/*
 * lib/error.c		Error Handling
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation version 2.1
 * of the License.
 *
 * Copyright (c) 2008 Thomas Graf <tgraf@suug.ch>
 */

#include <netlink-local.h>
#include <netlink/netlink.h>

/* One human-readable message per libnl error code. */
static const char *errmsg[NLE_MAX+1] = {
	[NLE_SUCCESS]		= "Success",
	[NLE_FAILURE]		= "Unspecific failure",
	[NLE_INTR]		= "Interrupted system call",
	[NLE_BAD_SOCK]		= "Bad socket",
	[NLE_AGAIN]		= "Try again",
	[NLE_NOMEM]		= "Out of memory",
	[NLE_EXIST]		= "Object exists",
	[NLE_INVAL]		= "Invalid input data or parameter",
	[NLE_RANGE]		= "Input data out of range",
	[NLE_MSGSIZE]		= "Message size not sufficient",
	[NLE_OPNOTSUPP]		= "Operation not supported",
	[NLE_AF_NOSUPPORT]	= "Address family not supported",
	[NLE_OBJ_NOTFOUND]	= "Object not found",
	[NLE_NOATTR]		= "Attribute not available",
	[NLE_MISSING_ATTR]	= "Missing attribute",
	[NLE_AF_MISMATCH]	= "Address family mismatch",
	[NLE_SEQ_MISMATCH]	= "Message sequence number mismatch",
	[NLE_MSG_OVERFLOW]	= "Kernel reported message overflow",
	[NLE_MSG_TRUNC]		= "Kernel reported truncated message",
	[NLE_NOADDR]		= "Invalid address for specified address family",
	[NLE_SRCRT_NOSUPPORT]	= "Source based routing not supported",
	[NLE_MSG_TOOSHORT]	= "Netlink message is too short",
	[NLE_MSGTYPE_NOSUPPORT]	= "Netlink message type is not supported",
	[NLE_OBJ_MISMATCH]	= "Object type does not match cache",
	[NLE_NOCACHE]		= "Unknown or invalid cache type",
	[NLE_BUSY]		= "Object busy",
	[NLE_PROTO_MISMATCH]	= "Protocol mismatch",
	[NLE_NOACCESS]		= "No Access",
	[NLE_PERM]		= "Operation not permitted",
	[NLE_PKTLOC_FILE]	= "Unable to open packet location file",
	[NLE_PARSE_ERR]		= "Unable to parse object",
	[NLE_NODEV]		= "No such device",
	[NLE_IMMUTABLE]		= "Immutable attribute",
	[NLE_DUMP_INTR]		= "Dump inconsistency detected, interrupted",
};

/**
 * Return error message for an error code
 *
 * Accepts either sign convention (positive or negative code); codes
 * outside the table collapse to the generic failure message.
 *
 * @return error message
 */
const char *nl_geterror(int error)
{
	int code = abs(error);

	if (code > NLE_MAX)
		code = NLE_FAILURE;

	return errmsg[code];
}

/**
 * Print a libnl error message
 * @arg s		error message prefix
 *
 * Prints the error message of the call that failed last.
 *
 * If s is not NULL and *s is not a null byte the argument
 * string is printed, followed by a colon and a blank. Then
 * the error message and a new-line.
 */
void nl_perror(int error, const char *s)
{
	const char *msg = nl_geterror(error);

	if (s && *s)
		fprintf(stderr, "%s: %s\n", s, msg);
	else
		fprintf(stderr, "%s\n", msg);
}

/*
 * Translate a system errno value (either sign) into the closest
 * libnl error code; unrecognized values become NLE_FAILURE.
 */
int nl_syserr2nlerr(int error)
{
	switch (abs(error)) {
	case EBADF:
	case ENOTSOCK:
		return NLE_BAD_SOCK;
	case EADDRINUSE:
	case EEXIST:
		return NLE_EXIST;
	case EADDRNOTAVAIL:
		return NLE_NOADDR;
	case ESRCH:
	case ENOENT:
		return NLE_OBJ_NOTFOUND;
	case EINTR:
		return NLE_INTR;
	case EAGAIN:
		return NLE_AGAIN;
	case ENOPROTOOPT:
	case EFAULT:
	case EINVAL:
		return NLE_INVAL;
	case EACCES:
		return NLE_NOACCESS;
	case ENOBUFS:
	case ENOMEM:
		return NLE_NOMEM;
	case EAFNOSUPPORT:
		return NLE_AF_NOSUPPORT;
	case EPROTONOSUPPORT:
		return NLE_PROTO_MISMATCH;
	case EOPNOTSUPP:
		return NLE_OPNOTSUPP;
	case EPERM:
		return NLE_PERM;
	case EBUSY:
		return NLE_BUSY;
	case ERANGE:
		return NLE_RANGE;
	case ENODEV:
		return NLE_NODEV;
	default:
		return NLE_FAILURE;
	}
}

/** @} */
gpl-2.0
JPG-Consulting/linux
arch/arm/mach-pxa/devices.c
532
24548
#include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/spi/pxa2xx_spi.h> #include <linux/i2c/pxa-i2c.h> #include <mach/udc.h> #include <linux/platform_data/usb-pxa3xx-ulpi.h> #include <linux/platform_data/video-pxafb.h> #include <linux/platform_data/mmc-pxamci.h> #include <linux/platform_data/irda-pxaficp.h> #include <mach/irqs.h> #include <linux/platform_data/usb-ohci-pxa27x.h> #include <linux/platform_data/keypad-pxa27x.h> #include <linux/platform_data/camera-pxa.h> #include <mach/audio.h> #include <mach/hardware.h> #include <linux/platform_data/mtd-nand-pxa3xx.h> #include "devices.h" #include "generic.h" void __init pxa_register_device(struct platform_device *dev, void *data) { int ret; dev->dev.platform_data = data; ret = platform_device_register(dev); if (ret) dev_err(&dev->dev, "unable to register device: %d\n", ret); } static struct resource pxa_resource_pmu = { .start = IRQ_PMU, .end = IRQ_PMU, .flags = IORESOURCE_IRQ, }; struct platform_device pxa_device_pmu = { .name = "xscale-pmu", .id = -1, .resource = &pxa_resource_pmu, .num_resources = 1, }; static struct resource pxamci_resources[] = { [0] = { .start = 0x41100000, .end = 0x41100fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_MMC, .end = IRQ_MMC, .flags = IORESOURCE_IRQ, }, [2] = { .start = 21, .end = 21, .flags = IORESOURCE_DMA, }, [3] = { .start = 22, .end = 22, .flags = IORESOURCE_DMA, }, }; static u64 pxamci_dmamask = 0xffffffffUL; struct platform_device pxa_device_mci = { .name = "pxa2xx-mci", .id = 0, .dev = { .dma_mask = &pxamci_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxamci_resources), .resource = pxamci_resources, }; void __init pxa_set_mci_info(struct pxamci_platform_data *info) { pxa_register_device(&pxa_device_mci, info); } static struct pxa2xx_udc_mach_info pxa_udc_info = { .gpio_pullup = -1, }; void __init pxa_set_udc_info(struct 
pxa2xx_udc_mach_info *info) { memcpy(&pxa_udc_info, info, sizeof *info); } static struct resource pxa2xx_udc_resources[] = { [0] = { .start = 0x40600000, .end = 0x4060ffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_USB, .end = IRQ_USB, .flags = IORESOURCE_IRQ, }, }; static u64 udc_dma_mask = ~(u32)0; struct platform_device pxa25x_device_udc = { .name = "pxa25x-udc", .id = -1, .resource = pxa2xx_udc_resources, .num_resources = ARRAY_SIZE(pxa2xx_udc_resources), .dev = { .platform_data = &pxa_udc_info, .dma_mask = &udc_dma_mask, } }; struct platform_device pxa27x_device_udc = { .name = "pxa27x-udc", .id = -1, .resource = pxa2xx_udc_resources, .num_resources = ARRAY_SIZE(pxa2xx_udc_resources), .dev = { .platform_data = &pxa_udc_info, .dma_mask = &udc_dma_mask, } }; #ifdef CONFIG_PXA3xx static struct resource pxa3xx_u2d_resources[] = { [0] = { .start = 0x54100000, .end = 0x54100fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_USB2, .end = IRQ_USB2, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa3xx_device_u2d = { .name = "pxa3xx-u2d", .id = -1, .resource = pxa3xx_u2d_resources, .num_resources = ARRAY_SIZE(pxa3xx_u2d_resources), }; void __init pxa3xx_set_u2d_info(struct pxa3xx_u2d_platform_data *info) { pxa_register_device(&pxa3xx_device_u2d, info); } #endif /* CONFIG_PXA3xx */ static struct resource pxafb_resources[] = { [0] = { .start = 0x44000000, .end = 0x4400ffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_LCD, .end = IRQ_LCD, .flags = IORESOURCE_IRQ, }, }; static u64 fb_dma_mask = ~(u64)0; struct platform_device pxa_device_fb = { .name = "pxa2xx-fb", .id = -1, .dev = { .dma_mask = &fb_dma_mask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxafb_resources), .resource = pxafb_resources, }; void __init pxa_set_fb_info(struct device *parent, struct pxafb_mach_info *info) { pxa_device_fb.dev.parent = parent; pxa_register_device(&pxa_device_fb, info); } static struct resource pxa_resource_ffuart[] = { { .start = 0x40100000, 
.end = 0x40100023, .flags = IORESOURCE_MEM, }, { .start = IRQ_FFUART, .end = IRQ_FFUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_ffuart = { .name = "pxa2xx-uart", .id = 0, .resource = pxa_resource_ffuart, .num_resources = ARRAY_SIZE(pxa_resource_ffuart), }; void __init pxa_set_ffuart_info(void *info) { pxa_register_device(&pxa_device_ffuart, info); } static struct resource pxa_resource_btuart[] = { { .start = 0x40200000, .end = 0x40200023, .flags = IORESOURCE_MEM, }, { .start = IRQ_BTUART, .end = IRQ_BTUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_btuart = { .name = "pxa2xx-uart", .id = 1, .resource = pxa_resource_btuart, .num_resources = ARRAY_SIZE(pxa_resource_btuart), }; void __init pxa_set_btuart_info(void *info) { pxa_register_device(&pxa_device_btuart, info); } static struct resource pxa_resource_stuart[] = { { .start = 0x40700000, .end = 0x40700023, .flags = IORESOURCE_MEM, }, { .start = IRQ_STUART, .end = IRQ_STUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_stuart = { .name = "pxa2xx-uart", .id = 2, .resource = pxa_resource_stuart, .num_resources = ARRAY_SIZE(pxa_resource_stuart), }; void __init pxa_set_stuart_info(void *info) { pxa_register_device(&pxa_device_stuart, info); } static struct resource pxa_resource_hwuart[] = { { .start = 0x41600000, .end = 0x4160002F, .flags = IORESOURCE_MEM, }, { .start = IRQ_HWUART, .end = IRQ_HWUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_hwuart = { .name = "pxa2xx-uart", .id = 3, .resource = pxa_resource_hwuart, .num_resources = ARRAY_SIZE(pxa_resource_hwuart), }; void __init pxa_set_hwuart_info(void *info) { if (cpu_is_pxa255()) pxa_register_device(&pxa_device_hwuart, info); else pr_info("UART: Ignoring attempt to register HWUART on non-PXA255 hardware"); } static struct resource pxai2c_resources[] = { { .start = 0x40301680, .end = 0x403016a3, .flags = IORESOURCE_MEM, }, { .start = IRQ_I2C, .end = IRQ_I2C, .flags = 
IORESOURCE_IRQ, }, }; struct platform_device pxa_device_i2c = { .name = "pxa2xx-i2c", .id = 0, .resource = pxai2c_resources, .num_resources = ARRAY_SIZE(pxai2c_resources), }; void __init pxa_set_i2c_info(struct i2c_pxa_platform_data *info) { pxa_register_device(&pxa_device_i2c, info); } #ifdef CONFIG_PXA27x static struct resource pxa27x_resources_i2c_power[] = { { .start = 0x40f00180, .end = 0x40f001a3, .flags = IORESOURCE_MEM, }, { .start = IRQ_PWRI2C, .end = IRQ_PWRI2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_i2c_power = { .name = "pxa2xx-i2c", .id = 1, .resource = pxa27x_resources_i2c_power, .num_resources = ARRAY_SIZE(pxa27x_resources_i2c_power), }; #endif static struct resource pxai2s_resources[] = { { .start = 0x40400000, .end = 0x40400083, .flags = IORESOURCE_MEM, }, { .start = IRQ_I2S, .end = IRQ_I2S, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_i2s = { .name = "pxa2xx-i2s", .id = -1, .resource = pxai2s_resources, .num_resources = ARRAY_SIZE(pxai2s_resources), }; struct platform_device pxa_device_asoc_ssp1 = { .name = "pxa-ssp-dai", .id = 0, }; struct platform_device pxa_device_asoc_ssp2= { .name = "pxa-ssp-dai", .id = 1, }; struct platform_device pxa_device_asoc_ssp3 = { .name = "pxa-ssp-dai", .id = 2, }; struct platform_device pxa_device_asoc_ssp4 = { .name = "pxa-ssp-dai", .id = 3, }; struct platform_device pxa_device_asoc_platform = { .name = "pxa-pcm-audio", .id = -1, }; static u64 pxaficp_dmamask = ~(u32)0; static struct resource pxa_ir_resources[] = { [0] = { .start = IRQ_STUART, .end = IRQ_STUART, .flags = IORESOURCE_IRQ, }, [1] = { .start = IRQ_ICP, .end = IRQ_ICP, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_ficp = { .name = "pxa2xx-ir", .id = -1, .num_resources = ARRAY_SIZE(pxa_ir_resources), .resource = pxa_ir_resources, .dev = { .dma_mask = &pxaficp_dmamask, .coherent_dma_mask = 0xffffffff, }, }; void __init pxa_set_ficp_info(struct pxaficp_platform_data *info) { 
pxa_register_device(&pxa_device_ficp, info); } static struct resource pxa_rtc_resources[] = { [0] = { .start = 0x40900000, .end = 0x40900000 + 0x3b, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_RTC1Hz, .end = IRQ_RTC1Hz, .name = "rtc 1Hz", .flags = IORESOURCE_IRQ, }, [2] = { .start = IRQ_RTCAlrm, .end = IRQ_RTCAlrm, .name = "rtc alarm", .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_rtc = { .name = "pxa-rtc", .id = -1, .num_resources = ARRAY_SIZE(pxa_rtc_resources), .resource = pxa_rtc_resources, }; static struct resource sa1100_rtc_resources[] = { { .start = IRQ_RTC1Hz, .end = IRQ_RTC1Hz, .name = "rtc 1Hz", .flags = IORESOURCE_IRQ, }, { .start = IRQ_RTCAlrm, .end = IRQ_RTCAlrm, .name = "rtc alarm", .flags = IORESOURCE_IRQ, }, }; struct platform_device sa1100_device_rtc = { .name = "sa1100-rtc", .id = -1, .num_resources = ARRAY_SIZE(sa1100_rtc_resources), .resource = sa1100_rtc_resources, }; static struct resource pxa_ac97_resources[] = { [0] = { .start = 0x40500000, .end = 0x40500000 + 0xfff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_AC97, .end = IRQ_AC97, .flags = IORESOURCE_IRQ, }, }; static u64 pxa_ac97_dmamask = 0xffffffffUL; struct platform_device pxa_device_ac97 = { .name = "pxa2xx-ac97", .id = -1, .dev = { .dma_mask = &pxa_ac97_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa_ac97_resources), .resource = pxa_ac97_resources, }; void __init pxa_set_ac97_info(pxa2xx_audio_ops_t *ops) { pxa_register_device(&pxa_device_ac97, ops); } #ifdef CONFIG_PXA25x static struct resource pxa25x_resource_pwm0[] = { [0] = { .start = 0x40b00000, .end = 0x40b0000f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa25x_device_pwm0 = { .name = "pxa25x-pwm", .id = 0, .resource = pxa25x_resource_pwm0, .num_resources = ARRAY_SIZE(pxa25x_resource_pwm0), }; static struct resource pxa25x_resource_pwm1[] = { [0] = { .start = 0x40c00000, .end = 0x40c0000f, .flags = IORESOURCE_MEM, }, }; struct platform_device 
pxa25x_device_pwm1 = { .name = "pxa25x-pwm", .id = 1, .resource = pxa25x_resource_pwm1, .num_resources = ARRAY_SIZE(pxa25x_resource_pwm1), }; static u64 pxa25x_ssp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_ssp[] = { [0] = { .start = 0x41000000, .end = 0x4100001f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP, .end = IRQ_SSP, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 13, .end = 13, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 14, .end = 14, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa25x_device_ssp = { .name = "pxa25x-ssp", .id = 0, .dev = { .dma_mask = &pxa25x_ssp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa25x_resource_ssp, .num_resources = ARRAY_SIZE(pxa25x_resource_ssp), }; static u64 pxa25x_nssp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_nssp[] = { [0] = { .start = 0x41400000, .end = 0x4140002f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_NSSP, .end = IRQ_NSSP, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 15, .end = 15, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 16, .end = 16, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa25x_device_nssp = { .name = "pxa25x-nssp", .id = 1, .dev = { .dma_mask = &pxa25x_nssp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa25x_resource_nssp, .num_resources = ARRAY_SIZE(pxa25x_resource_nssp), }; static u64 pxa25x_assp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_assp[] = { [0] = { .start = 0x41500000, .end = 0x4150002f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_ASSP, .end = IRQ_ASSP, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 23, .end = 23, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 24, .end = 24, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa25x_device_assp = { /* ASSP is basically equivalent to NSSP */ .name = "pxa25x-nssp", .id = 2, .dev = { 
.dma_mask = &pxa25x_assp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa25x_resource_assp, .num_resources = ARRAY_SIZE(pxa25x_resource_assp), }; #endif /* CONFIG_PXA25x */ #if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) static struct resource pxa27x_resource_camera[] = { [0] = { .start = 0x50000000, .end = 0x50000fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_CAMERA, .end = IRQ_CAMERA, .flags = IORESOURCE_IRQ, }, }; static u64 pxa27x_dma_mask_camera = DMA_BIT_MASK(32); static struct platform_device pxa27x_device_camera = { .name = "pxa27x-camera", .id = 0, /* This is used to put cameras on this interface */ .dev = { .dma_mask = &pxa27x_dma_mask_camera, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa27x_resource_camera), .resource = pxa27x_resource_camera, }; void __init pxa_set_camera_info(struct pxacamera_platform_data *info) { pxa_register_device(&pxa27x_device_camera, info); } static u64 pxa27x_ohci_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ohci[] = { [0] = { .start = 0x4C000000, .end = 0x4C00ff6f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_USBH1, .end = IRQ_USBH1, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_ohci = { .name = "pxa27x-ohci", .id = -1, .dev = { .dma_mask = &pxa27x_ohci_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(pxa27x_resource_ohci), .resource = pxa27x_resource_ohci, }; void __init pxa_set_ohci_info(struct pxaohci_platform_data *info) { pxa_register_device(&pxa27x_device_ohci, info); } #endif /* CONFIG_PXA27x || CONFIG_PXA3xx */ #if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) static struct resource pxa27x_resource_keypad[] = { [0] = { .start = 0x41500000, .end = 0x4150004c, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_KEYPAD, .end = IRQ_KEYPAD, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_keypad = { .name = "pxa27x-keypad", .id = -1, .resource = pxa27x_resource_keypad, 
.num_resources = ARRAY_SIZE(pxa27x_resource_keypad), }; void __init pxa_set_keypad_info(struct pxa27x_keypad_platform_data *info) { pxa_register_device(&pxa27x_device_keypad, info); } static u64 pxa27x_ssp1_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ssp1[] = { [0] = { .start = 0x41000000, .end = 0x4100003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP, .end = IRQ_SSP, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 13, .end = 13, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 14, .end = 14, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa27x_device_ssp1 = { .name = "pxa27x-ssp", .id = 0, .dev = { .dma_mask = &pxa27x_ssp1_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp1, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp1), }; static u64 pxa27x_ssp2_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ssp2[] = { [0] = { .start = 0x41700000, .end = 0x4170003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP2, .end = IRQ_SSP2, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 15, .end = 15, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 16, .end = 16, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa27x_device_ssp2 = { .name = "pxa27x-ssp", .id = 1, .dev = { .dma_mask = &pxa27x_ssp2_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp2, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp2), }; static u64 pxa27x_ssp3_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ssp3[] = { [0] = { .start = 0x41900000, .end = 0x4190003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP3, .end = IRQ_SSP3, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 66, .end = 66, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 67, .end = 67, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa27x_device_ssp3 = { .name = "pxa27x-ssp", .id = 2, .dev 
= { .dma_mask = &pxa27x_ssp3_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp3, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp3), }; static struct resource pxa27x_resource_pwm0[] = { [0] = { .start = 0x40b00000, .end = 0x40b0001f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa27x_device_pwm0 = { .name = "pxa27x-pwm", .id = 0, .resource = pxa27x_resource_pwm0, .num_resources = ARRAY_SIZE(pxa27x_resource_pwm0), }; static struct resource pxa27x_resource_pwm1[] = { [0] = { .start = 0x40c00000, .end = 0x40c0001f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa27x_device_pwm1 = { .name = "pxa27x-pwm", .id = 1, .resource = pxa27x_resource_pwm1, .num_resources = ARRAY_SIZE(pxa27x_resource_pwm1), }; #endif /* CONFIG_PXA27x || CONFIG_PXA3xx */ #ifdef CONFIG_PXA3xx static struct resource pxa3xx_resources_mci2[] = { [0] = { .start = 0x42000000, .end = 0x42000fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_MMC2, .end = IRQ_MMC2, .flags = IORESOURCE_IRQ, }, [2] = { .start = 93, .end = 93, .flags = IORESOURCE_DMA, }, [3] = { .start = 94, .end = 94, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa3xx_device_mci2 = { .name = "pxa2xx-mci", .id = 1, .dev = { .dma_mask = &pxamci_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa3xx_resources_mci2), .resource = pxa3xx_resources_mci2, }; void __init pxa3xx_set_mci2_info(struct pxamci_platform_data *info) { pxa_register_device(&pxa3xx_device_mci2, info); } static struct resource pxa3xx_resources_mci3[] = { [0] = { .start = 0x42500000, .end = 0x42500fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_MMC3, .end = IRQ_MMC3, .flags = IORESOURCE_IRQ, }, [2] = { .start = 100, .end = 100, .flags = IORESOURCE_DMA, }, [3] = { .start = 101, .end = 101, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa3xx_device_mci3 = { .name = "pxa2xx-mci", .id = 2, .dev = { .dma_mask = &pxamci_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = 
ARRAY_SIZE(pxa3xx_resources_mci3), .resource = pxa3xx_resources_mci3, }; void __init pxa3xx_set_mci3_info(struct pxamci_platform_data *info) { pxa_register_device(&pxa3xx_device_mci3, info); } static struct resource pxa3xx_resources_gcu[] = { { .start = 0x54000000, .end = 0x54000fff, .flags = IORESOURCE_MEM, }, { .start = IRQ_GCU, .end = IRQ_GCU, .flags = IORESOURCE_IRQ, }, }; static u64 pxa3xx_gcu_dmamask = DMA_BIT_MASK(32); struct platform_device pxa3xx_device_gcu = { .name = "pxa3xx-gcu", .id = -1, .num_resources = ARRAY_SIZE(pxa3xx_resources_gcu), .resource = pxa3xx_resources_gcu, .dev = { .dma_mask = &pxa3xx_gcu_dmamask, .coherent_dma_mask = 0xffffffff, }, }; #endif /* CONFIG_PXA3xx */ #if defined(CONFIG_PXA3xx) static struct resource pxa3xx_resources_i2c_power[] = { { .start = 0x40f500c0, .end = 0x40f500d3, .flags = IORESOURCE_MEM, }, { .start = IRQ_PWRI2C, .end = IRQ_PWRI2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa3xx_device_i2c_power = { .name = "pxa3xx-pwri2c", .id = 1, .resource = pxa3xx_resources_i2c_power, .num_resources = ARRAY_SIZE(pxa3xx_resources_i2c_power), }; static struct resource pxa3xx_resources_nand[] = { [0] = { .start = 0x43100000, .end = 0x43100053, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_NAND, .end = IRQ_NAND, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for Data DMA */ .start = 97, .end = 97, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for Command DMA */ .start = 99, .end = 99, .flags = IORESOURCE_DMA, }, }; static u64 pxa3xx_nand_dma_mask = DMA_BIT_MASK(32); struct platform_device pxa3xx_device_nand = { .name = "pxa3xx-nand", .id = -1, .dev = { .dma_mask = &pxa3xx_nand_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(pxa3xx_resources_nand), .resource = pxa3xx_resources_nand, }; void __init pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info) { pxa_register_device(&pxa3xx_device_nand, info); } static u64 pxa3xx_ssp4_dma_mask = DMA_BIT_MASK(32); static struct resource 
pxa3xx_resource_ssp4[] = { [0] = { .start = 0x41a00000, .end = 0x41a0003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP4, .end = IRQ_SSP4, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 2, .end = 2, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 3, .end = 3, .flags = IORESOURCE_DMA, }, }; /* * PXA3xx SSP is basically equivalent to PXA27x. * However, we need to register the device by the correct name in order to * make the driver set the correct internal type, hence we provide specific * platform_devices for each of them. */ struct platform_device pxa3xx_device_ssp1 = { .name = "pxa3xx-ssp", .id = 0, .dev = { .dma_mask = &pxa27x_ssp1_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp1, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp1), }; struct platform_device pxa3xx_device_ssp2 = { .name = "pxa3xx-ssp", .id = 1, .dev = { .dma_mask = &pxa27x_ssp2_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp2, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp2), }; struct platform_device pxa3xx_device_ssp3 = { .name = "pxa3xx-ssp", .id = 2, .dev = { .dma_mask = &pxa27x_ssp3_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp3, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp3), }; struct platform_device pxa3xx_device_ssp4 = { .name = "pxa3xx-ssp", .id = 3, .dev = { .dma_mask = &pxa3xx_ssp4_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa3xx_resource_ssp4, .num_resources = ARRAY_SIZE(pxa3xx_resource_ssp4), }; #endif /* CONFIG_PXA3xx */ struct resource pxa_resource_gpio[] = { { .start = 0x40e00000, .end = 0x40e0ffff, .flags = IORESOURCE_MEM, }, { .start = IRQ_GPIO0, .end = IRQ_GPIO0, .name = "gpio0", .flags = IORESOURCE_IRQ, }, { .start = IRQ_GPIO1, .end = IRQ_GPIO1, .name = "gpio1", .flags = IORESOURCE_IRQ, }, { .start = IRQ_GPIO_2_x, .end = IRQ_GPIO_2_x, .name = "gpio_mux", .flags = IORESOURCE_IRQ, }, }; struct 
platform_device pxa25x_device_gpio = { #ifdef CONFIG_CPU_PXA26x .name = "pxa26x-gpio", #else .name = "pxa25x-gpio", #endif .id = -1, .num_resources = ARRAY_SIZE(pxa_resource_gpio), .resource = pxa_resource_gpio, }; struct platform_device pxa27x_device_gpio = { .name = "pxa27x-gpio", .id = -1, .num_resources = ARRAY_SIZE(pxa_resource_gpio), .resource = pxa_resource_gpio, }; struct platform_device pxa3xx_device_gpio = { .name = "pxa3xx-gpio", .id = -1, .num_resources = ARRAY_SIZE(pxa_resource_gpio), .resource = pxa_resource_gpio, }; struct platform_device pxa93x_device_gpio = { .name = "pxa93x-gpio", .id = -1, .num_resources = ARRAY_SIZE(pxa_resource_gpio), .resource = pxa_resource_gpio, }; /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1. * See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */ void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info) { struct platform_device *pd; pd = platform_device_alloc("pxa2xx-spi", id); if (pd == NULL) { printk(KERN_ERR "pxa2xx-spi: failed to allocate device id %d\n", id); return; } pd->dev.platform_data = info; platform_device_add(pd); }
gpl-2.0
CyanogenMod/semc-kernel-msm7x30
arch/powerpc/sysdev/of_rtc.c
788
1573
/* * Instantiate mmio-mapped RTC chips based on device tree information * * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/of.h> #include <linux/init.h> #include <linux/of_platform.h> static __initdata struct { const char *compatible; char *plat_name; } of_rtc_table[] = { { "ds1743-nvram", "rtc-ds1742" }, }; void __init of_instantiate_rtc(void) { struct device_node *node; int err; int i; for (i = 0; i < ARRAY_SIZE(of_rtc_table); i++) { char *plat_name = of_rtc_table[i].plat_name; for_each_compatible_node(node, NULL, of_rtc_table[i].compatible) { struct resource *res; res = kmalloc(sizeof(*res), GFP_KERNEL); if (!res) { printk(KERN_ERR "OF RTC: Out of memory " "allocating resource structure for %s\n", node->full_name); continue; } err = of_address_to_resource(node, 0, res); if (err) { printk(KERN_ERR "OF RTC: Error " "translating resources for %s\n", node->full_name); continue; } printk(KERN_INFO "OF_RTC: %s is a %s @ 0x%llx-0x%llx\n", node->full_name, plat_name, (unsigned long long)res->start, (unsigned long long)res->end); platform_device_register_simple(plat_name, -1, res, 1); } } }
gpl-2.0
treejames/android_kernel_htc_msm7x30
drivers/media/video/pvrusb2/pvrusb2-ioread.c
1556
12501
/*
 *
 *
 *  Copyright (C) 2005 Mike Isely <isely@pobox.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include "pvrusb2-ioread.h"
#include "pvrusb2-debug.h"
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>

#define BUFFER_COUNT 32
#define BUFFER_SIZE PAGE_ALIGN(0x4000)

/* Per-reader state: wraps a pvr2_stream and serves read()-style
   consumption of its buffers, with optional sync-key filtering. */
struct pvr2_ioread {
	struct pvr2_stream *stream;
	char *buffer_storage[BUFFER_COUNT];
	char *sync_key_ptr;
	unsigned int sync_key_len;
	unsigned int sync_buf_offs;
	unsigned int sync_state;
	unsigned int sync_trashed_count;
	int enabled;         // Streaming is on
	int spigot_open;     // OK to pass data to client
	int stream_running;  // Passing data to client now
	/* State relevant to current buffer being read */
	struct pvr2_buffer *c_buf;
	char *c_data_ptr;
	unsigned int c_data_len;
	unsigned int c_data_offs;
	struct mutex mutex;
};

/* Initialize @cp and allocate its BUFFER_COUNT backing buffers.
   Returns 0 on success or -ENOMEM on failure (all buffers allocated so
   far are freed).  The cleanup loop's NULL checks rely on @cp having
   been zero-initialized by the caller (pvr2_ioread_create uses
   kzalloc), so never-allocated slots are NULL. */
static int pvr2_ioread_init(struct pvr2_ioread *cp)
{
	unsigned int idx;

	cp->stream = NULL;
	mutex_init(&cp->mutex);

	for (idx = 0; idx < BUFFER_COUNT; idx++) {
		cp->buffer_storage[idx] = kmalloc(BUFFER_SIZE,GFP_KERNEL);
		if (!(cp->buffer_storage[idx])) break;
	}

	if (idx < BUFFER_COUNT) {
		// An allocation appears to have failed
		for (idx = 0; idx < BUFFER_COUNT; idx++) {
			if (!(cp->buffer_storage[idx])) continue;
			kfree(cp->buffer_storage[idx]);
		}
		return -ENOMEM;
	}
	return 0;
}

static void
pvr2_ioread_done(struct pvr2_ioread *cp) { unsigned int idx; pvr2_ioread_setup(cp,NULL); for (idx = 0; idx < BUFFER_COUNT; idx++) { if (!(cp->buffer_storage[idx])) continue; kfree(cp->buffer_storage[idx]); } } struct pvr2_ioread *pvr2_ioread_create(void) { struct pvr2_ioread *cp; cp = kzalloc(sizeof(*cp),GFP_KERNEL); if (!cp) return NULL; pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_create id=%p",cp); if (pvr2_ioread_init(cp) < 0) { kfree(cp); return NULL; } return cp; } void pvr2_ioread_destroy(struct pvr2_ioread *cp) { if (!cp) return; pvr2_ioread_done(cp); pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_destroy id=%p",cp); if (cp->sync_key_ptr) { kfree(cp->sync_key_ptr); cp->sync_key_ptr = NULL; } kfree(cp); } void pvr2_ioread_set_sync_key(struct pvr2_ioread *cp, const char *sync_key_ptr, unsigned int sync_key_len) { if (!cp) return; if (!sync_key_ptr) sync_key_len = 0; if ((sync_key_len == cp->sync_key_len) && ((!sync_key_len) || (!memcmp(sync_key_ptr,cp->sync_key_ptr,sync_key_len)))) return; if (sync_key_len != cp->sync_key_len) { if (cp->sync_key_ptr) { kfree(cp->sync_key_ptr); cp->sync_key_ptr = NULL; } cp->sync_key_len = 0; if (sync_key_len) { cp->sync_key_ptr = kmalloc(sync_key_len,GFP_KERNEL); if (cp->sync_key_ptr) { cp->sync_key_len = sync_key_len; } } } if (!cp->sync_key_len) return; memcpy(cp->sync_key_ptr,sync_key_ptr,cp->sync_key_len); } static void pvr2_ioread_stop(struct pvr2_ioread *cp) { if (!(cp->enabled)) return; pvr2_trace(PVR2_TRACE_START_STOP, "/*---TRACE_READ---*/ pvr2_ioread_stop id=%p",cp); pvr2_stream_kill(cp->stream); cp->c_buf = NULL; cp->c_data_ptr = NULL; cp->c_data_len = 0; cp->c_data_offs = 0; cp->enabled = 0; cp->stream_running = 0; cp->spigot_open = 0; if (cp->sync_state) { pvr2_trace(PVR2_TRACE_DATA_FLOW, "/*---TRACE_READ---*/ sync_state <== 0"); cp->sync_state = 0; } } static int pvr2_ioread_start(struct pvr2_ioread *cp) { int stat; struct pvr2_buffer *bp; if (cp->enabled) return 0; if (!(cp->stream)) return 0; 
pvr2_trace(PVR2_TRACE_START_STOP, "/*---TRACE_READ---*/ pvr2_ioread_start id=%p",cp); while ((bp = pvr2_stream_get_idle_buffer(cp->stream)) != NULL) { stat = pvr2_buffer_queue(bp); if (stat < 0) { pvr2_trace(PVR2_TRACE_DATA_FLOW, "/*---TRACE_READ---*/" " pvr2_ioread_start id=%p" " error=%d", cp,stat); pvr2_ioread_stop(cp); return stat; } } cp->enabled = !0; cp->c_buf = NULL; cp->c_data_ptr = NULL; cp->c_data_len = 0; cp->c_data_offs = 0; cp->stream_running = 0; if (cp->sync_key_len) { pvr2_trace(PVR2_TRACE_DATA_FLOW, "/*---TRACE_READ---*/ sync_state <== 1"); cp->sync_state = 1; cp->sync_trashed_count = 0; cp->sync_buf_offs = 0; } cp->spigot_open = 0; return 0; } struct pvr2_stream *pvr2_ioread_get_stream(struct pvr2_ioread *cp) { return cp->stream; } int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp) { int ret; unsigned int idx; struct pvr2_buffer *bp; mutex_lock(&cp->mutex); do { if (cp->stream) { pvr2_trace(PVR2_TRACE_START_STOP, "/*---TRACE_READ---*/" " pvr2_ioread_setup (tear-down) id=%p",cp); pvr2_ioread_stop(cp); pvr2_stream_kill(cp->stream); if (pvr2_stream_get_buffer_count(cp->stream)) { pvr2_stream_set_buffer_count(cp->stream,0); } cp->stream = NULL; } if (sp) { pvr2_trace(PVR2_TRACE_START_STOP, "/*---TRACE_READ---*/" " pvr2_ioread_setup (setup) id=%p",cp); pvr2_stream_kill(sp); ret = pvr2_stream_set_buffer_count(sp,BUFFER_COUNT); if (ret < 0) return ret; for (idx = 0; idx < BUFFER_COUNT; idx++) { bp = pvr2_stream_get_buffer(sp,idx); pvr2_buffer_set_buffer(bp, cp->buffer_storage[idx], BUFFER_SIZE); } cp->stream = sp; } } while (0); mutex_unlock(&cp->mutex); return 0; } int pvr2_ioread_set_enabled(struct pvr2_ioread *cp,int fl) { int ret = 0; if ((!fl) == (!(cp->enabled))) return ret; mutex_lock(&cp->mutex); do { if (fl) { ret = pvr2_ioread_start(cp); } else { pvr2_ioread_stop(cp); } } while (0); mutex_unlock(&cp->mutex); return ret; } static int pvr2_ioread_get_buffer(struct pvr2_ioread *cp) { int stat; while (cp->c_data_len <= 
cp->c_data_offs) { if (cp->c_buf) { // Flush out current buffer first. stat = pvr2_buffer_queue(cp->c_buf); if (stat < 0) { // Streaming error... pvr2_trace(PVR2_TRACE_DATA_FLOW, "/*---TRACE_READ---*/" " pvr2_ioread_read id=%p" " queue_error=%d", cp,stat); pvr2_ioread_stop(cp); return 0; } cp->c_buf = NULL; cp->c_data_ptr = NULL; cp->c_data_len = 0; cp->c_data_offs = 0; } // Now get a freshly filled buffer. cp->c_buf = pvr2_stream_get_ready_buffer(cp->stream); if (!cp->c_buf) break; // Nothing ready; done. cp->c_data_len = pvr2_buffer_get_count(cp->c_buf); if (!cp->c_data_len) { // Nothing transferred. Was there an error? stat = pvr2_buffer_get_status(cp->c_buf); if (stat < 0) { // Streaming error... pvr2_trace(PVR2_TRACE_DATA_FLOW, "/*---TRACE_READ---*/" " pvr2_ioread_read id=%p" " buffer_error=%d", cp,stat); pvr2_ioread_stop(cp); // Give up. return 0; } // Start over... continue; } cp->c_data_offs = 0; cp->c_data_ptr = cp->buffer_storage[ pvr2_buffer_get_id(cp->c_buf)]; } return !0; } static void pvr2_ioread_filter(struct pvr2_ioread *cp) { unsigned int idx; if (!cp->enabled) return; if (cp->sync_state != 1) return; // Search the stream for our synchronization key. This is made // complicated by the fact that in order to be honest with // ourselves here we must search across buffer boundaries... mutex_lock(&cp->mutex); while (1) { // Ensure we have a buffer if (!pvr2_ioread_get_buffer(cp)) break; if (!cp->c_data_len) break; // Now walk the buffer contents until we match the key or // run out of buffer data. for (idx = cp->c_data_offs; idx < cp->c_data_len; idx++) { if (cp->sync_buf_offs >= cp->sync_key_len) break; if (cp->c_data_ptr[idx] == cp->sync_key_ptr[cp->sync_buf_offs]) { // Found the next key byte (cp->sync_buf_offs)++; } else { // Whoops, mismatched. Start key over... cp->sync_buf_offs = 0; } } // Consume what we've walked through cp->c_data_offs += idx; cp->sync_trashed_count += idx; // If we've found the key, then update state and get out. 
if (cp->sync_buf_offs >= cp->sync_key_len) { cp->sync_trashed_count -= cp->sync_key_len; pvr2_trace(PVR2_TRACE_DATA_FLOW, "/*---TRACE_READ---*/" " sync_state <== 2 (skipped %u bytes)", cp->sync_trashed_count); cp->sync_state = 2; cp->sync_buf_offs = 0; break; } if (cp->c_data_offs < cp->c_data_len) { // Sanity check - should NEVER get here pvr2_trace(PVR2_TRACE_ERROR_LEGS, "ERROR: pvr2_ioread filter sync problem" " len=%u offs=%u", cp->c_data_len,cp->c_data_offs); // Get out so we don't get stuck in an infinite // loop. break; } continue; // (for clarity) } mutex_unlock(&cp->mutex); } int pvr2_ioread_avail(struct pvr2_ioread *cp) { int ret; if (!(cp->enabled)) { // Stream is not enabled; so this is an I/O error return -EIO; } if (cp->sync_state == 1) { pvr2_ioread_filter(cp); if (cp->sync_state == 1) return -EAGAIN; } ret = 0; if (cp->stream_running) { if (!pvr2_stream_get_ready_count(cp->stream)) { // No data available at all right now. ret = -EAGAIN; } } else { if (pvr2_stream_get_ready_count(cp->stream) < BUFFER_COUNT/2) { // Haven't buffered up enough yet; try again later ret = -EAGAIN; } } if ((!(cp->spigot_open)) != (!(ret == 0))) { cp->spigot_open = (ret == 0); pvr2_trace(PVR2_TRACE_DATA_FLOW, "/*---TRACE_READ---*/ data is %s", cp->spigot_open ? "available" : "pending"); } return ret; } int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt) { unsigned int copied_cnt; unsigned int bcnt; const char *src; int stat; int ret = 0; unsigned int req_cnt = cnt; if (!cnt) { pvr2_trace(PVR2_TRACE_TRAP, "/*---TRACE_READ---*/ pvr2_ioread_read id=%p" " ZERO Request? 
Returning zero.",cp); return 0; } stat = pvr2_ioread_avail(cp); if (stat < 0) return stat; cp->stream_running = !0; mutex_lock(&cp->mutex); do { // Suck data out of the buffers and copy to the user copied_cnt = 0; if (!buf) cnt = 0; while (1) { if (!pvr2_ioread_get_buffer(cp)) { ret = -EIO; break; } if (!cnt) break; if (cp->sync_state == 2) { // We're repeating the sync key data into // the stream. src = cp->sync_key_ptr + cp->sync_buf_offs; bcnt = cp->sync_key_len - cp->sync_buf_offs; } else { // Normal buffer copy src = cp->c_data_ptr + cp->c_data_offs; bcnt = cp->c_data_len - cp->c_data_offs; } if (!bcnt) break; // Don't run past user's buffer if (bcnt > cnt) bcnt = cnt; if (copy_to_user(buf,src,bcnt)) { // User supplied a bad pointer? // Give up - this *will* cause data // to be lost. ret = -EFAULT; break; } cnt -= bcnt; buf += bcnt; copied_cnt += bcnt; if (cp->sync_state == 2) { // Update offset inside sync key that we're // repeating back out. cp->sync_buf_offs += bcnt; if (cp->sync_buf_offs >= cp->sync_key_len) { // Consumed entire key; switch mode // to normal. pvr2_trace(PVR2_TRACE_DATA_FLOW, "/*---TRACE_READ---*/" " sync_state <== 0"); cp->sync_state = 0; } } else { // Update buffer offset. cp->c_data_offs += bcnt; } } } while (0); mutex_unlock(&cp->mutex); if (!ret) { if (copied_cnt) { // If anything was copied, return that count ret = copied_cnt; } else { // Nothing copied; suggest to caller that another // attempt should be tried again later ret = -EAGAIN; } } pvr2_trace(PVR2_TRACE_DATA_FLOW, "/*---TRACE_READ---*/ pvr2_ioread_read" " id=%p request=%d result=%d", cp,req_cnt,ret); return ret; } /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 75 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
Clouded/linux-rt-rpi2
arch/arm/mach-ixp4xx/omixp-setup.c
2068
6342
/* * arch/arm/mach-ixp4xx/omixp-setup.c * * omicron ixp4xx board setup * Copyright (C) 2009 OMICRON electronics GmbH * * based nslu2-setup.c, ixdp425-setup.c: * Copyright (C) 2003-2004 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/serial.h> #include <linux/serial_8250.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/leds.h> #include <asm/setup.h> #include <asm/memory.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <mach/hardware.h> static struct resource omixp_flash_resources[] = { { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, }, }; static struct mtd_partition omixp_partitions[] = { { .name = "Recovery Bootloader", .size = 0x00020000, .offset = 0, }, { .name = "Calibration Data", .size = 0x00020000, .offset = 0x00020000, }, { .name = "Recovery FPGA", .size = 0x00020000, .offset = 0x00040000, }, { .name = "Release Bootloader", .size = 0x00020000, .offset = 0x00060000, }, { .name = "Release FPGA", .size = 0x00020000, .offset = 0x00080000, }, { .name = "Kernel", .size = 0x00160000, .offset = 0x000a0000, }, { .name = "Filesystem", .size = 0x00C00000, .offset = 0x00200000, }, { .name = "Persistent Storage", .size = 0x00200000, .offset = 0x00E00000, }, }; static struct flash_platform_data omixp_flash_data[] = { { .map_name = "cfi_probe", .parts = omixp_partitions, .nr_parts = ARRAY_SIZE(omixp_partitions), }, { .map_name = "cfi_probe", .parts = NULL, .nr_parts = 0, }, }; static struct platform_device omixp_flash_device[] = { { .name = "IXP4XX-Flash", .id = 0, .dev = { .platform_data = &omixp_flash_data[0], }, .resource = &omixp_flash_resources[0], .num_resources = 1, }, { .name = "IXP4XX-Flash", .id = 1, .dev = { .platform_data = &omixp_flash_data[1], }, .resource = 
&omixp_flash_resources[1], .num_resources = 1, }, }; /* Swap UART's - These boards have the console on UART2. The following * configuration is used: * ttyS0 .. UART2 * ttyS1 .. UART1 * This way standard images can be used with the kernel that expect * the console on ttyS0. */ static struct resource omixp_uart_resources[] = { { .start = IXP4XX_UART2_BASE_PHYS, .end = IXP4XX_UART2_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, }, { .start = IXP4XX_UART1_BASE_PHYS, .end = IXP4XX_UART1_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, }, }; static struct plat_serial8250_port omixp_uart_data[] = { { .mapbase = IXP4XX_UART2_BASE_PHYS, .membase = (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET, .irq = IRQ_IXP4XX_UART2, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { .mapbase = IXP4XX_UART1_BASE_PHYS, .membase = (char *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET, .irq = IRQ_IXP4XX_UART1, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { /* list termination */ } }; static struct platform_device omixp_uart = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev.platform_data = omixp_uart_data, .num_resources = 2, .resource = omixp_uart_resources, }; static struct gpio_led mic256_led_pins[] = { { .name = "LED-A", .gpio = 7, }, }; static struct gpio_led_platform_data mic256_led_data = { .num_leds = ARRAY_SIZE(mic256_led_pins), .leds = mic256_led_pins, }; static struct platform_device mic256_leds = { .name = "leds-gpio", .id = -1, .dev.platform_data = &mic256_led_data, }; /* Built-in 10/100 Ethernet MAC interfaces */ static struct eth_plat_info ixdp425_plat_eth[] = { { .phy = 0, .rxq = 3, .txreadyq = 20, }, { .phy = 1, .rxq = 4, .txreadyq = 21, }, }; static struct platform_device ixdp425_eth[] = { { .name = "ixp4xx_eth", .id = IXP4XX_ETH_NPEB, .dev.platform_data = ixdp425_plat_eth, }, { .name = "ixp4xx_eth", .id = IXP4XX_ETH_NPEC, .dev.platform_data = 
ixdp425_plat_eth + 1, }, }; static struct platform_device *devixp_pldev[] __initdata = { &omixp_uart, &omixp_flash_device[0], &ixdp425_eth[0], &ixdp425_eth[1], }; static struct platform_device *mic256_pldev[] __initdata = { &omixp_uart, &omixp_flash_device[0], &mic256_leds, &ixdp425_eth[0], &ixdp425_eth[1], }; static struct platform_device *miccpt_pldev[] __initdata = { &omixp_uart, &omixp_flash_device[0], &omixp_flash_device[1], &ixdp425_eth[0], &ixdp425_eth[1], }; static void __init omixp_init(void) { ixp4xx_sys_init(); /* 16MiB Boot Flash */ omixp_flash_resources[0].start = IXP4XX_EXP_BUS_BASE(0); omixp_flash_resources[0].end = IXP4XX_EXP_BUS_END(0); /* 32 MiB Data Flash */ omixp_flash_resources[1].start = IXP4XX_EXP_BUS_BASE(2); omixp_flash_resources[1].end = IXP4XX_EXP_BUS_END(2); if (machine_is_devixp()) platform_add_devices(devixp_pldev, ARRAY_SIZE(devixp_pldev)); else if (machine_is_miccpt()) platform_add_devices(miccpt_pldev, ARRAY_SIZE(miccpt_pldev)); else if (machine_is_mic256()) platform_add_devices(mic256_pldev, ARRAY_SIZE(mic256_pldev)); } #ifdef CONFIG_MACH_DEVIXP MACHINE_START(DEVIXP, "Omicron DEVIXP") .atag_offset = 0x100, .map_io = ixp4xx_map_io, .init_early = ixp4xx_init_early, .init_irq = ixp4xx_init_irq, .init_time = ixp4xx_timer_init, .init_machine = omixp_init, .restart = ixp4xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_MICCPT MACHINE_START(MICCPT, "Omicron MICCPT") .atag_offset = 0x100, .map_io = ixp4xx_map_io, .init_early = ixp4xx_init_early, .init_irq = ixp4xx_init_irq, .init_time = ixp4xx_timer_init, .init_machine = omixp_init, #if defined(CONFIG_PCI) .dma_zone_size = SZ_64M, #endif .restart = ixp4xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_MIC256 MACHINE_START(MIC256, "Omicron MIC256") .atag_offset = 0x100, .map_io = ixp4xx_map_io, .init_early = ixp4xx_init_early, .init_irq = ixp4xx_init_irq, .init_time = ixp4xx_timer_init, .init_machine = omixp_init, .restart = ixp4xx_restart, MACHINE_END #endif
gpl-2.0
theophile/amazon_fire_kffowi_bonus_modules
security/apparmor/audit.c
2324
4817
/* * AppArmor security module * * This file contains AppArmor auditing functions * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/audit.h> #include <linux/socket.h> #include "include/apparmor.h" #include "include/audit.h" #include "include/policy.h" const char *const op_table[] = { "null", "sysctl", "capable", "unlink", "mkdir", "rmdir", "mknod", "truncate", "link", "symlink", "rename_src", "rename_dest", "chmod", "chown", "getattr", "open", "file_perm", "file_lock", "file_mmap", "file_mprotect", "create", "post_create", "bind", "connect", "listen", "accept", "sendmsg", "recvmsg", "getsockname", "getpeername", "getsockopt", "setsockopt", "socket_shutdown", "ptrace", "exec", "change_hat", "change_profile", "change_onexec", "setprocattr", "setrlimit", "profile_replace", "profile_load", "profile_remove" }; const char *const audit_mode_names[] = { "normal", "quiet_denied", "quiet", "noquiet", "all" }; static const char *const aa_audit_type[] = { "AUDIT", "ALLOWED", "DENIED", "HINT", "STATUS", "ERROR", "KILLED" "AUTO" }; /* * Currently AppArmor auditing is fed straight into the audit framework. * * TODO: * netlink interface for complain mode * user auditing, - send user auditing to netlink interface * system control of whether user audit messages go to system log */ /** * audit_base - core AppArmor function. * @ab: audit buffer to fill (NOT NULL) * @ca: audit structure containing data to audit (NOT NULL) * * Record common AppArmor audit data from @sa */ static void audit_pre(struct audit_buffer *ab, void *ca) { struct common_audit_data *sa = ca; struct task_struct *tsk = sa->aad->tsk ? 
sa->aad->tsk : current; if (aa_g_audit_header) { audit_log_format(ab, "apparmor="); audit_log_string(ab, aa_audit_type[sa->aad->type]); } if (sa->aad->op) { audit_log_format(ab, " operation="); audit_log_string(ab, op_table[sa->aad->op]); } if (sa->aad->info) { audit_log_format(ab, " info="); audit_log_string(ab, sa->aad->info); if (sa->aad->error) audit_log_format(ab, " error=%d", sa->aad->error); } if (sa->aad->profile) { struct aa_profile *profile = sa->aad->profile; pid_t pid; rcu_read_lock(); pid = rcu_dereference(tsk->real_parent)->pid; rcu_read_unlock(); audit_log_format(ab, " parent=%d", pid); if (profile->ns != root_ns) { audit_log_format(ab, " namespace="); audit_log_untrustedstring(ab, profile->ns->base.hname); } audit_log_format(ab, " profile="); audit_log_untrustedstring(ab, profile->base.hname); } if (sa->aad->name) { audit_log_format(ab, " name="); audit_log_untrustedstring(ab, sa->aad->name); } if (sa->aad->tsk) { audit_log_format(ab, " pid=%d comm=", tsk->pid); audit_log_untrustedstring(ab, tsk->comm); } } /** * aa_audit_msg - Log a message to the audit subsystem * @sa: audit event structure (NOT NULL) * @cb: optional callback fn for type specific fields (MAYBE NULL) */ void aa_audit_msg(int type, struct common_audit_data *sa, void (*cb) (struct audit_buffer *, void *)) { sa->aad->type = type; common_lsm_audit(sa, audit_pre, cb); } /** * aa_audit - Log a profile based audit event to the audit subsystem * @type: audit type for the message * @profile: profile to check against (NOT NULL) * @gfp: allocation flags to use * @sa: audit event (NOT NULL) * @cb: optional callback fn for type specific fields (MAYBE NULL) * * Handle default message switching based off of audit mode flags * * Returns: error on failure */ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp, struct common_audit_data *sa, void (*cb) (struct audit_buffer *, void *)) { BUG_ON(!profile); if (type == AUDIT_APPARMOR_AUTO) { if (likely(!sa->aad->error)) { if 
(AUDIT_MODE(profile) != AUDIT_ALL) return 0; type = AUDIT_APPARMOR_AUDIT; } else if (COMPLAIN_MODE(profile)) type = AUDIT_APPARMOR_ALLOWED; else type = AUDIT_APPARMOR_DENIED; } if (AUDIT_MODE(profile) == AUDIT_QUIET || (type == AUDIT_APPARMOR_DENIED && AUDIT_MODE(profile) == AUDIT_QUIET)) return sa->aad->error; if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED) type = AUDIT_APPARMOR_KILL; if (!unconfined(profile)) sa->aad->profile = profile; aa_audit_msg(type, sa, cb); if (sa->aad->type == AUDIT_APPARMOR_KILL) (void)send_sig_info(SIGKILL, NULL, sa->aad->tsk ? sa->aad->tsk : current); if (sa->aad->type == AUDIT_APPARMOR_ALLOWED) return complain_error(sa->aad->error); return sa->aad->error; }
gpl-2.0
venue3x40-dev/android_kernel_dell_venue3x40
drivers/media/common/siano/smsdvb-debugfs.c
2580
17670
/*********************************************************************** * * Copyright(c) 2013 Mauro Carvalho Chehab <mchehab@redhat.com> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * ***********************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/debugfs.h> #include <linux/spinlock.h> #include <linux/usb.h> #include "dmxdev.h" #include "dvbdev.h" #include "dvb_demux.h" #include "dvb_frontend.h" #include "smscoreapi.h" #include "smsdvb.h" static struct dentry *smsdvb_debugfs_usb_root; struct smsdvb_debugfs { struct kref refcount; spinlock_t lock; char stats_data[PAGE_SIZE]; unsigned stats_count; bool stats_was_read; wait_queue_head_t stats_queue; }; static void smsdvb_print_dvb_stats(struct smsdvb_debugfs *debug_data, struct sms_stats *p) { int n = 0; char *buf; spin_lock(&debug_data->lock); if (debug_data->stats_count) { spin_unlock(&debug_data->lock); return; } buf = debug_data->stats_data; n += snprintf(&buf[n], PAGE_SIZE - n, "is_rf_locked = %d\n", p->is_rf_locked); n += snprintf(&buf[n], PAGE_SIZE - n, "is_demod_locked = %d\n", p->is_demod_locked); n += snprintf(&buf[n], PAGE_SIZE - n, "is_external_lna_on = %d\n", p->is_external_lna_on); n += snprintf(&buf[n], PAGE_SIZE - n, "SNR = %d\n", p->SNR); n += snprintf(&buf[n], PAGE_SIZE - n, "ber = 
%d\n", p->ber); n += snprintf(&buf[n], PAGE_SIZE - n, "FIB_CRC = %d\n", p->FIB_CRC); n += snprintf(&buf[n], PAGE_SIZE - n, "ts_per = %d\n", p->ts_per); n += snprintf(&buf[n], PAGE_SIZE - n, "MFER = %d\n", p->MFER); n += snprintf(&buf[n], PAGE_SIZE - n, "RSSI = %d\n", p->RSSI); n += snprintf(&buf[n], PAGE_SIZE - n, "in_band_pwr = %d\n", p->in_band_pwr); n += snprintf(&buf[n], PAGE_SIZE - n, "carrier_offset = %d\n", p->carrier_offset); n += snprintf(&buf[n], PAGE_SIZE - n, "modem_state = %d\n", p->modem_state); n += snprintf(&buf[n], PAGE_SIZE - n, "frequency = %d\n", p->frequency); n += snprintf(&buf[n], PAGE_SIZE - n, "bandwidth = %d\n", p->bandwidth); n += snprintf(&buf[n], PAGE_SIZE - n, "transmission_mode = %d\n", p->transmission_mode); n += snprintf(&buf[n], PAGE_SIZE - n, "modem_state = %d\n", p->modem_state); n += snprintf(&buf[n], PAGE_SIZE - n, "guard_interval = %d\n", p->guard_interval); n += snprintf(&buf[n], PAGE_SIZE - n, "code_rate = %d\n", p->code_rate); n += snprintf(&buf[n], PAGE_SIZE - n, "lp_code_rate = %d\n", p->lp_code_rate); n += snprintf(&buf[n], PAGE_SIZE - n, "hierarchy = %d\n", p->hierarchy); n += snprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n", p->constellation); n += snprintf(&buf[n], PAGE_SIZE - n, "burst_size = %d\n", p->burst_size); n += snprintf(&buf[n], PAGE_SIZE - n, "burst_duration = %d\n", p->burst_duration); n += snprintf(&buf[n], PAGE_SIZE - n, "burst_cycle_time = %d\n", p->burst_cycle_time); n += snprintf(&buf[n], PAGE_SIZE - n, "calc_burst_cycle_time = %d\n", p->calc_burst_cycle_time); n += snprintf(&buf[n], PAGE_SIZE - n, "num_of_rows = %d\n", p->num_of_rows); n += snprintf(&buf[n], PAGE_SIZE - n, "num_of_padd_cols = %d\n", p->num_of_padd_cols); n += snprintf(&buf[n], PAGE_SIZE - n, "num_of_punct_cols = %d\n", p->num_of_punct_cols); n += snprintf(&buf[n], PAGE_SIZE - n, "error_ts_packets = %d\n", p->error_ts_packets); n += snprintf(&buf[n], PAGE_SIZE - n, "total_ts_packets = %d\n", p->total_ts_packets); n += 
snprintf(&buf[n], PAGE_SIZE - n, "num_of_valid_mpe_tlbs = %d\n", p->num_of_valid_mpe_tlbs); n += snprintf(&buf[n], PAGE_SIZE - n, "num_of_invalid_mpe_tlbs = %d\n", p->num_of_invalid_mpe_tlbs); n += snprintf(&buf[n], PAGE_SIZE - n, "num_of_corrected_mpe_tlbs = %d\n", p->num_of_corrected_mpe_tlbs); n += snprintf(&buf[n], PAGE_SIZE - n, "ber_error_count = %d\n", p->ber_error_count); n += snprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %d\n", p->ber_bit_count); n += snprintf(&buf[n], PAGE_SIZE - n, "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors); n += snprintf(&buf[n], PAGE_SIZE - n, "pre_ber = %d\n", p->pre_ber); n += snprintf(&buf[n], PAGE_SIZE - n, "cell_id = %d\n", p->cell_id); n += snprintf(&buf[n], PAGE_SIZE - n, "dvbh_srv_ind_hp = %d\n", p->dvbh_srv_ind_hp); n += snprintf(&buf[n], PAGE_SIZE - n, "dvbh_srv_ind_lp = %d\n", p->dvbh_srv_ind_lp); n += snprintf(&buf[n], PAGE_SIZE - n, "num_mpe_received = %d\n", p->num_mpe_received); debug_data->stats_count = n; spin_unlock(&debug_data->lock); wake_up(&debug_data->stats_queue); } static void smsdvb_print_isdb_stats(struct smsdvb_debugfs *debug_data, struct sms_isdbt_stats *p) { int i, n = 0; char *buf; spin_lock(&debug_data->lock); if (debug_data->stats_count) { spin_unlock(&debug_data->lock); return; } buf = debug_data->stats_data; n += snprintf(&buf[n], PAGE_SIZE - n, "statistics_type = %d\t", p->statistics_type); n += snprintf(&buf[n], PAGE_SIZE - n, "full_size = %d\n", p->full_size); n += snprintf(&buf[n], PAGE_SIZE - n, "is_rf_locked = %d\t\t", p->is_rf_locked); n += snprintf(&buf[n], PAGE_SIZE - n, "is_demod_locked = %d\t", p->is_demod_locked); n += snprintf(&buf[n], PAGE_SIZE - n, "is_external_lna_on = %d\n", p->is_external_lna_on); n += snprintf(&buf[n], PAGE_SIZE - n, "SNR = %d dB\t\t", p->SNR); n += snprintf(&buf[n], PAGE_SIZE - n, "RSSI = %d dBm\t\t", p->RSSI); n += snprintf(&buf[n], PAGE_SIZE - n, "in_band_pwr = %d dBm\n", p->in_band_pwr); n += snprintf(&buf[n], PAGE_SIZE - n, "carrier_offset = 
%d\t", p->carrier_offset); n += snprintf(&buf[n], PAGE_SIZE - n, "bandwidth = %d\t\t", p->bandwidth); n += snprintf(&buf[n], PAGE_SIZE - n, "frequency = %d Hz\n", p->frequency); n += snprintf(&buf[n], PAGE_SIZE - n, "transmission_mode = %d\t", p->transmission_mode); n += snprintf(&buf[n], PAGE_SIZE - n, "modem_state = %d\t\t", p->modem_state); n += snprintf(&buf[n], PAGE_SIZE - n, "guard_interval = %d\n", p->guard_interval); n += snprintf(&buf[n], PAGE_SIZE - n, "system_type = %d\t\t", p->system_type); n += snprintf(&buf[n], PAGE_SIZE - n, "partial_reception = %d\t", p->partial_reception); n += snprintf(&buf[n], PAGE_SIZE - n, "num_of_layers = %d\n", p->num_of_layers); n += snprintf(&buf[n], PAGE_SIZE - n, "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors); for (i = 0; i < 3; i++) { if (p->layer_info[i].number_of_segments < 1 || p->layer_info[i].number_of_segments > 13) continue; n += snprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i); n += snprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t", p->layer_info[i].code_rate); n += snprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n", p->layer_info[i].constellation); n += snprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t", p->layer_info[i].ber); n += snprintf(&buf[n], PAGE_SIZE - n, "\tber_error_count = %-5d\t", p->layer_info[i].ber_error_count); n += snprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n", p->layer_info[i].ber_bit_count); n += snprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t", p->layer_info[i].pre_ber); n += snprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n", p->layer_info[i].ts_per); n += snprintf(&buf[n], PAGE_SIZE - n, "\terror_ts_packets = %-5d\t", p->layer_info[i].error_ts_packets); n += snprintf(&buf[n], PAGE_SIZE - n, "total_ts_packets = %-5d\t", p->layer_info[i].total_ts_packets); n += snprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n", p->layer_info[i].ti_ldepth_i); n += snprintf(&buf[n], PAGE_SIZE - n, "\tnumber_of_segments = %d\t", p->layer_info[i].number_of_segments); 
n += snprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n", p->layer_info[i].tmcc_errors); } debug_data->stats_count = n; spin_unlock(&debug_data->lock); wake_up(&debug_data->stats_queue); } static void smsdvb_print_isdb_stats_ex(struct smsdvb_debugfs *debug_data, struct sms_isdbt_stats_ex *p) { int i, n = 0; char *buf; spin_lock(&debug_data->lock); if (debug_data->stats_count) { spin_unlock(&debug_data->lock); return; } buf = debug_data->stats_data; n += snprintf(&buf[n], PAGE_SIZE - n, "statistics_type = %d\t", p->statistics_type); n += snprintf(&buf[n], PAGE_SIZE - n, "full_size = %d\n", p->full_size); n += snprintf(&buf[n], PAGE_SIZE - n, "is_rf_locked = %d\t\t", p->is_rf_locked); n += snprintf(&buf[n], PAGE_SIZE - n, "is_demod_locked = %d\t", p->is_demod_locked); n += snprintf(&buf[n], PAGE_SIZE - n, "is_external_lna_on = %d\n", p->is_external_lna_on); n += snprintf(&buf[n], PAGE_SIZE - n, "SNR = %d dB\t\t", p->SNR); n += snprintf(&buf[n], PAGE_SIZE - n, "RSSI = %d dBm\t\t", p->RSSI); n += snprintf(&buf[n], PAGE_SIZE - n, "in_band_pwr = %d dBm\n", p->in_band_pwr); n += snprintf(&buf[n], PAGE_SIZE - n, "carrier_offset = %d\t", p->carrier_offset); n += snprintf(&buf[n], PAGE_SIZE - n, "bandwidth = %d\t\t", p->bandwidth); n += snprintf(&buf[n], PAGE_SIZE - n, "frequency = %d Hz\n", p->frequency); n += snprintf(&buf[n], PAGE_SIZE - n, "transmission_mode = %d\t", p->transmission_mode); n += snprintf(&buf[n], PAGE_SIZE - n, "modem_state = %d\t\t", p->modem_state); n += snprintf(&buf[n], PAGE_SIZE - n, "guard_interval = %d\n", p->guard_interval); n += snprintf(&buf[n], PAGE_SIZE - n, "system_type = %d\t\t", p->system_type); n += snprintf(&buf[n], PAGE_SIZE - n, "partial_reception = %d\t", p->partial_reception); n += snprintf(&buf[n], PAGE_SIZE - n, "num_of_layers = %d\n", p->num_of_layers); n += snprintf(&buf[n], PAGE_SIZE - n, "segment_number = %d\t", p->segment_number); n += snprintf(&buf[n], PAGE_SIZE - n, "tune_bw = %d\n", p->tune_bw); for (i = 0; i < 3; i++) { 
if (p->layer_info[i].number_of_segments < 1 || p->layer_info[i].number_of_segments > 13) continue; n += snprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i); n += snprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t", p->layer_info[i].code_rate); n += snprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n", p->layer_info[i].constellation); n += snprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t", p->layer_info[i].ber); n += snprintf(&buf[n], PAGE_SIZE - n, "\tber_error_count = %-5d\t", p->layer_info[i].ber_error_count); n += snprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n", p->layer_info[i].ber_bit_count); n += snprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t", p->layer_info[i].pre_ber); n += snprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n", p->layer_info[i].ts_per); n += snprintf(&buf[n], PAGE_SIZE - n, "\terror_ts_packets = %-5d\t", p->layer_info[i].error_ts_packets); n += snprintf(&buf[n], PAGE_SIZE - n, "total_ts_packets = %-5d\t", p->layer_info[i].total_ts_packets); n += snprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n", p->layer_info[i].ti_ldepth_i); n += snprintf(&buf[n], PAGE_SIZE - n, "\tnumber_of_segments = %d\t", p->layer_info[i].number_of_segments); n += snprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n", p->layer_info[i].tmcc_errors); } debug_data->stats_count = n; spin_unlock(&debug_data->lock); wake_up(&debug_data->stats_queue); } static int smsdvb_stats_open(struct inode *inode, struct file *file) { struct smsdvb_client_t *client = inode->i_private; struct smsdvb_debugfs *debug_data = client->debug_data; kref_get(&debug_data->refcount); spin_lock(&debug_data->lock); debug_data->stats_count = 0; debug_data->stats_was_read = false; spin_unlock(&debug_data->lock); file->private_data = debug_data; return 0; } static void smsdvb_debugfs_data_release(struct kref *ref) { struct smsdvb_debugfs *debug_data; debug_data = container_of(ref, struct smsdvb_debugfs, refcount); kfree(debug_data); } static int smsdvb_stats_wait_read(struct 
smsdvb_debugfs *debug_data) { int rc = 1; spin_lock(&debug_data->lock); if (debug_data->stats_was_read) goto exit; rc = debug_data->stats_count; exit: spin_unlock(&debug_data->lock); return rc; } static unsigned int smsdvb_stats_poll(struct file *file, poll_table *wait) { struct smsdvb_debugfs *debug_data = file->private_data; int rc; kref_get(&debug_data->refcount); poll_wait(file, &debug_data->stats_queue, wait); rc = smsdvb_stats_wait_read(debug_data); if (rc > 0) rc = POLLIN | POLLRDNORM; kref_put(&debug_data->refcount, smsdvb_debugfs_data_release); return rc; } static ssize_t smsdvb_stats_read(struct file *file, char __user *user_buf, size_t nbytes, loff_t *ppos) { int rc = 0, len; struct smsdvb_debugfs *debug_data = file->private_data; kref_get(&debug_data->refcount); if (file->f_flags & O_NONBLOCK) { rc = smsdvb_stats_wait_read(debug_data); if (!rc) { rc = -EWOULDBLOCK; goto ret; } } else { rc = wait_event_interruptible(debug_data->stats_queue, smsdvb_stats_wait_read(debug_data)); if (rc < 0) goto ret; } if (debug_data->stats_was_read) { rc = 0; /* EOF */ goto ret; } len = debug_data->stats_count - *ppos; if (len >= 0) rc = simple_read_from_buffer(user_buf, nbytes, ppos, debug_data->stats_data, len); else rc = 0; if (*ppos >= debug_data->stats_count) { spin_lock(&debug_data->lock); debug_data->stats_was_read = true; spin_unlock(&debug_data->lock); } ret: kref_put(&debug_data->refcount, smsdvb_debugfs_data_release); return rc; } static int smsdvb_stats_release(struct inode *inode, struct file *file) { struct smsdvb_debugfs *debug_data = file->private_data; spin_lock(&debug_data->lock); debug_data->stats_was_read = true; /* return EOF to read() */ spin_unlock(&debug_data->lock); wake_up_interruptible_sync(&debug_data->stats_queue); kref_put(&debug_data->refcount, smsdvb_debugfs_data_release); file->private_data = NULL; return 0; } static const struct file_operations debugfs_stats_ops = { .open = smsdvb_stats_open, .poll = smsdvb_stats_poll, .read = 
smsdvb_stats_read, .release = smsdvb_stats_release, .llseek = generic_file_llseek, }; /* * Functions used by smsdvb, in order to create the interfaces */ int smsdvb_debugfs_create(struct smsdvb_client_t *client) { struct smscore_device_t *coredev = client->coredev; struct dentry *d; struct smsdvb_debugfs *debug_data; if (!smsdvb_debugfs_usb_root || !coredev->is_usb_device) return -ENODEV; client->debugfs = debugfs_create_dir(coredev->devpath, smsdvb_debugfs_usb_root); if (IS_ERR_OR_NULL(client->debugfs)) { pr_info("Unable to create debugfs %s directory.\n", coredev->devpath); return -ENODEV; } d = debugfs_create_file("stats", S_IRUGO | S_IWUSR, client->debugfs, client, &debugfs_stats_ops); if (!d) { debugfs_remove(client->debugfs); return -ENOMEM; } debug_data = kzalloc(sizeof(*client->debug_data), GFP_KERNEL); if (!debug_data) return -ENOMEM; client->debug_data = debug_data; client->prt_dvb_stats = smsdvb_print_dvb_stats; client->prt_isdb_stats = smsdvb_print_isdb_stats; client->prt_isdb_stats_ex = smsdvb_print_isdb_stats_ex; init_waitqueue_head(&debug_data->stats_queue); spin_lock_init(&debug_data->lock); kref_init(&debug_data->refcount); return 0; } void smsdvb_debugfs_release(struct smsdvb_client_t *client) { if (!client->debugfs) return; client->prt_dvb_stats = NULL; client->prt_isdb_stats = NULL; client->prt_isdb_stats_ex = NULL; debugfs_remove_recursive(client->debugfs); kref_put(&client->debug_data->refcount, smsdvb_debugfs_data_release); client->debug_data = NULL; client->debugfs = NULL; } int smsdvb_debugfs_register(void) { struct dentry *d; /* * FIXME: This was written to debug Siano USB devices. So, it creates * the debugfs node under <debugfs>/usb. * A similar logic would be needed for Siano sdio devices, but, in that * case, usb_debug_root is not a good choice. * * Perhaps the right fix here would be to create another sysfs root * node for sdio-based boards, but this may need some logic at sdio * subsystem. 
*/ d = debugfs_create_dir("smsdvb", usb_debug_root); if (IS_ERR_OR_NULL(d)) { sms_err("Couldn't create sysfs node for smsdvb"); return PTR_ERR(d); } else { smsdvb_debugfs_usb_root = d; } return 0; } void smsdvb_debugfs_unregister(void) { debugfs_remove_recursive(smsdvb_debugfs_usb_root); smsdvb_debugfs_usb_root = NULL; }
gpl-2.0
DSMKexec/kexec-kernel-g720n0
drivers/hwmon/sht21.c
2836
7475
/* Sensirion SHT21 humidity and temperature sensor driver
 *
 * Copyright (C) 2010 Urs Fleisch <urs.fleisch@sensirion.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Data sheet available (5/2010) at
 * http://www.sensirion.com/en/pdf/product_information/Datasheet-humidity-sensor-SHT21.pdf
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/jiffies.h>

/*
 * I2C command bytes: "hold master" (HM) measurement triggers.  The same
 * command both starts the conversion and returns the 16-bit result when
 * read back as an SMBus word.
 */
#define SHT21_TRIG_T_MEASUREMENT_HM 0xe3
#define SHT21_TRIG_RH_MEASUREMENT_HM 0xe5

/**
 * struct sht21 - SHT21 device specific data
 * @hwmon_dev: device registered with hwmon
 * @lock: mutex to protect measurement values
 * @valid: only 0 before first measurement is taken
 * @last_update: time of last update (jiffies)
 * @temperature: cached temperature measurement value (millicelsius)
 * @humidity: cached humidity measurement value (thousandths of %RH)
 */
struct sht21 {
	struct device *hwmon_dev;
	struct mutex lock;
	char valid;
	unsigned long last_update;
	int temperature;
	int humidity;
};

/**
 * sht21_temp_ticks_to_millicelsius() - convert raw temperature ticks to
 * milli celsius
 * @ticks: temperature ticks value received from sensor
 */
static inline int sht21_temp_ticks_to_millicelsius(int ticks)
{
	/* the two LSBs of a result word are status bits, not data */
	ticks &= ~0x0003; /* clear status bits */
	/*
	 * Formula T = -46.85 + 175.72 * ST / 2^16 from data sheet 6.2,
	 * optimized for integer fixed point (3 digits) arithmetic
	 */
	return ((21965 * ticks) >> 13) - 46850;
}

/**
 * sht21_rh_ticks_to_per_cent_mille() - convert raw humidity ticks to
 * one-thousandths of a percent relative humidity
 * @ticks: humidity ticks value received from sensor
 */
static inline int sht21_rh_ticks_to_per_cent_mille(int ticks)
{
	/* the two LSBs of a result word are status bits, not data */
	ticks &= ~0x0003; /* clear status bits */
	/*
	 * Formula RH = -6 + 125 * SRH / 2^16 from data sheet 6.1,
	 * optimized for integer fixed point (3 digits) arithmetic
	 */
	return ((15625 * ticks) >> 13) - 6000;
}

/**
 * sht21_update_measurements() - get updated measurements from device
 * @client: I2C client device
 *
 * Refreshes the cached temperature and humidity under @lock, but at most
 * every HZ/2 jiffies, so repeated sysfs reads do not hammer the sensor.
 *
 * Returns 0 on success, else negative errno.
 */
static int sht21_update_measurements(struct i2c_client *client)
{
	int ret = 0;
	struct sht21 *sht21 = i2c_get_clientdata(client);

	mutex_lock(&sht21->lock);
	/*
	 * Data sheet 2.4:
	 * SHT2x should not be active for more than 10% of the time - e.g.
	 * maximum two measurements per second at 12bit accuracy shall be made.
	 */
	if (time_after(jiffies, sht21->last_update + HZ / 2) || !sht21->valid) {
		/* sensor returns MSB first, hence the byte-swapped word read */
		ret = i2c_smbus_read_word_swapped(client,
						  SHT21_TRIG_T_MEASUREMENT_HM);
		if (ret < 0)
			goto out;
		sht21->temperature = sht21_temp_ticks_to_millicelsius(ret);
		ret = i2c_smbus_read_word_swapped(client,
						  SHT21_TRIG_RH_MEASUREMENT_HM);
		if (ret < 0)
			goto out;
		sht21->humidity = sht21_rh_ticks_to_per_cent_mille(ret);
		sht21->last_update = jiffies;
		sht21->valid = 1;
	}
out:
	mutex_unlock(&sht21->lock);
	return ret >= 0 ? 0 : ret;
}

/**
 * sht21_show_temperature() - show temperature measurement value in sysfs
 * @dev: device
 * @attr: device attribute
 * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
 *
 * Will be called on read access to temp1_input sysfs attribute.
 * Returns number of bytes written into buffer, negative errno on error.
 */
static ssize_t sht21_show_temperature(struct device *dev,
	struct device_attribute *attr,
	char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct sht21 *sht21 = i2c_get_clientdata(client);
	int ret = sht21_update_measurements(client);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", sht21->temperature);
}

/**
 * sht21_show_humidity() - show humidity measurement value in sysfs
 * @dev: device
 * @attr: device attribute
 * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
 *
 * Will be called on read access to humidity1_input sysfs attribute.
 * Returns number of bytes written into buffer, negative errno on error.
 */
static ssize_t sht21_show_humidity(struct device *dev,
	struct device_attribute *attr,
	char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct sht21 *sht21 = i2c_get_clientdata(client);
	int ret = sht21_update_measurements(client);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", sht21->humidity);
}

/* sysfs attributes */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sht21_show_temperature,
	NULL, 0);
static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, sht21_show_humidity,
	NULL, 0);

static struct attribute *sht21_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_humidity1_input.dev_attr.attr,
	NULL
};

static const struct attribute_group sht21_attr_group = {
	.attrs = sht21_attributes,
};

/**
 * sht21_probe() - probe device
 * @client: I2C client device
 * @id: device ID
 *
 * Called by the I2C core when an entry in the ID table matches a
 * device's name.
 * Returns 0 on success.
 */
static int sht21_probe(struct i2c_client *client,
	const struct i2c_device_id *id)
{
	struct sht21 *sht21;
	int err;

	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_WORD_DATA)) {
		dev_err(&client->dev,
			"adapter does not support SMBus word transactions\n");
		return -ENODEV;
	}

	/* devm allocation: freed automatically on probe failure or unbind */
	sht21 = devm_kzalloc(&client->dev, sizeof(*sht21), GFP_KERNEL);
	if (!sht21)
		return -ENOMEM;

	i2c_set_clientdata(client, sht21);

	mutex_init(&sht21->lock);

	err = sysfs_create_group(&client->dev.kobj, &sht21_attr_group);
	if (err) {
		dev_dbg(&client->dev, "could not create sysfs files\n");
		return err;
	}
	sht21->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(sht21->hwmon_dev)) {
		dev_dbg(&client->dev, "unable to register hwmon device\n");
		err = PTR_ERR(sht21->hwmon_dev);
		goto fail_remove_sysfs;
	}

	dev_info(&client->dev, "initialized\n");

	return 0;

fail_remove_sysfs:
	sysfs_remove_group(&client->dev.kobj, &sht21_attr_group);
	return err;
}

/**
 * sht21_remove() - remove device
 * @client: I2C client device
 *
 * Tears down in reverse order of probe: hwmon device first, then the
 * sysfs attribute group.
 */
static int sht21_remove(struct i2c_client *client)
{
	struct sht21 *sht21 = i2c_get_clientdata(client);

	hwmon_device_unregister(sht21->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &sht21_attr_group);

	return 0;
}

/* Device ID table */
static const struct i2c_device_id sht21_id[] = {
	{ "sht21", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, sht21_id);

static struct i2c_driver sht21_driver = {
	.driver.name = "sht21",
	.probe       = sht21_probe,
	.remove      = sht21_remove,
	.id_table    = sht21_id,
};

module_i2c_driver(sht21_driver);

MODULE_AUTHOR("Urs Fleisch <urs.fleisch@sensirion.com>");
MODULE_DESCRIPTION("Sensirion SHT21 humidity and temperature sensor driver");
MODULE_LICENSE("GPL");
gpl-2.0
sooorajjj/android_kernel_yu_msm8916-caf
drivers/staging/speakup/speakup_soft.c
3092
9712
/* speakup_soft.c - speakup driver to register and make available
 * a user space device for software synthesizers.  written by: Kirk
 * Reiser <kirk@braille.uwo.ca>
 *
 * Copyright (C) 2003  Kirk Reiser.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * this code is specificly written as a driver for the speakup screenreview
 * package and is not a general device driver.
 */

#include <linux/unistd.h>
#include <linux/miscdevice.h>	/* for misc_register, and SYNTH_MINOR */
#include <linux/poll.h>		/* for poll_wait() */
#include <linux/sched.h>	/* schedule(), signal_pending(), TASK_INTERRUPTIBLE */

#include "spk_priv.h"
#include "speakup.h"

#define DRV_VERSION "2.6"
#define SOFTSYNTH_MINOR 26 /* might as well give it one more than /dev/synth */
#define PROCSPEECH 0x0d
#define CLEAR_SYNTH 0x18

static int softsynth_probe(struct spk_synth *synth);
static void softsynth_release(void);
static int softsynth_is_alive(struct spk_synth *synth);
static unsigned char get_index(void);

static struct miscdevice synth_device;
/* position inside the init string still to be fed to the reader */
static int init_pos;
static int misc_registered;

/*
 * Synth variables: CAPS_START/CAPS_STOP are strings, the rest are
 * numeric settings rendered through their synth_fmt template.
 */
static struct var_t vars[] = {
	{ CAPS_START, .u.s = {"\x01+3p" } },
	{ CAPS_STOP, .u.s = {"\x01-3p" } },
	{ RATE, .u.n = {"\x01%ds", 2, 0, 9, 0, 0, NULL } },
	{ PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } },
	{ VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
	{ TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
	{ PUNCT, .u.n = {"\x01%db", 0, 0, 2, 0, 0, NULL } },
	{ VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } },
	{ FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } },
	{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
	V_LAST_VAR
};

/*
 * These attributes will appear in /sys/accessibility/speakup/soft.
 */
static struct kobj_attribute caps_start_attribute =
	__ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
	__ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute freq_attribute =
	__ATTR(freq, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
	__ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
	__ATTR(punct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
	__ATTR(rate, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
	__ATTR(tone, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
	__ATTR(voice, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
	__ATTR(vol, USER_RW, spk_var_show, spk_var_store);

/*
 * We should uncomment the following definition, when we agree on a
 * method of passing a language designation to the software synthesizer.
 * static struct kobj_attribute lang_attribute =
 *	__ATTR(lang, USER_RW, spk_var_show, spk_var_store);
 */

static struct kobj_attribute delay_time_attribute =
	__ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
	__ATTR(direct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
	__ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
	__ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
	__ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);

/*
 * Create a group of attributes so that we can create and destroy them all
 * at once.
 */
static struct attribute *synth_attrs[] = {
	&caps_start_attribute.attr,
	&caps_stop_attribute.attr,
	&freq_attribute.attr,
/*	&lang_attribute.attr, */
	&pitch_attribute.attr,
	&punct_attribute.attr,
	&rate_attribute.attr,
	&tone_attribute.attr,
	&voice_attribute.attr,
	&vol_attribute.attr,
	&delay_time_attribute.attr,
	&direct_attribute.attr,
	&full_time_attribute.attr,
	&jiffy_delta_attribute.attr,
	&trigger_time_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};

static struct spk_synth synth_soft = {
	.name = "soft",
	.version = DRV_VERSION,
	.long_name = "software synth",
	.init = "\01@\x01\x31y\n",
	.procspeech = PROCSPEECH,
	.delay = 0,
	.trigger = 0,
	.jiffies = 0,
	.full = 0,
	.startup = SYNTH_START,
	.checkval = SYNTH_CHECK,
	.vars = vars,
	.probe = softsynth_probe,
	.release = softsynth_release,
	.synth_immediate = NULL,
	.catch_up = NULL,
	.flush = NULL,
	.is_alive = softsynth_is_alive,
	.synth_adjust = NULL,
	.read_buff_add = NULL,
	.get_index = get_index,
	.indexing = {
		.command = "\x01%di",
		.lowindex = 1,
		.highindex = 5,
		.currindex = 1,
	},
	.attributes = {
		.attrs = synth_attrs,
		.name = "soft",
	},
};

/*
 * Build the init string sent to the user-space synthesizer before normal
 * buffered speech: every numeric var rendered through its synth_fmt,
 * terminated by a newline.  Returns a static buffer (not reentrant).
 */
static char *get_initstring(void)
{
	static char buf[40];
	char *cp;
	struct var_t *var;

	memset(buf, 0, sizeof(buf));
	cp = buf;
	var = synth_soft.vars;
	while (var->var_id != MAXVARS) {
		if (var->var_id != CAPS_START && var->var_id != CAPS_STOP
			&& var->var_id != DIRECT)
			cp = cp + sprintf(cp, var->u.n.synth_fmt,
					  var->u.n.value);
		var++;
	}
	cp = cp + sprintf(cp, "\n");
	return buf;
}

/* Only one reader at a time: "alive" doubles as the busy flag. */
static int softsynth_open(struct inode *inode, struct file *fp)
{
	unsigned long flags;
	/*if ((fp->f_flags & O_ACCMODE) != O_RDONLY) */
	/*	return -EPERM; */
	spk_lock(flags);
	if (synth_soft.alive) {
		spk_unlock(flags);
		return -EBUSY;
	}
	synth_soft.alive = 1;
	spk_unlock(flags);
	return 0;
}

static int softsynth_close(struct inode *inode, struct file *fp)
{
	unsigned long flags;
	spk_lock(flags);
	synth_soft.alive = 0;
	init_pos = 0;
	spk_unlock(flags);
	/* Make sure we let applications go before leaving */
	speakup_start_ttys();
	return 0;
}

/*
 * Feed speech data to the user-space synthesizer.  Blocks (unless
 * O_NONBLOCK) until there is buffered speech or a flush request; a flush
 * is delivered as a single 0x18 (CLEAR_SYNTH) byte.  The init string is
 * drained before the speech buffer.  Note the lock is dropped around
 * copy_to_user() since that may fault/sleep.
 */
static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
			      loff_t *pos)
{
	int chars_sent = 0;
	char *cp;
	char *init;
	char ch;
	int empty;
	unsigned long flags;
	DEFINE_WAIT(wait);

	spk_lock(flags);
	while (1) {
		prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
		if (!synth_buffer_empty() || speakup_info.flushing)
			break;
		spk_unlock(flags);
		if (fp->f_flags & O_NONBLOCK) {
			finish_wait(&speakup_event, &wait);
			return -EAGAIN;
		}
		if (signal_pending(current)) {
			finish_wait(&speakup_event, &wait);
			return -ERESTARTSYS;
		}
		schedule();
		spk_lock(flags);
	}
	finish_wait(&speakup_event, &wait);

	cp = buf;
	init = get_initstring();
	while (chars_sent < count) {
		if (speakup_info.flushing) {
			speakup_info.flushing = 0;
			ch = '\x18';
		} else if (synth_buffer_empty()) {
			break;
		} else if (init[init_pos]) {
			ch = init[init_pos++];
		} else {
			ch = synth_buffer_getc();
		}
		spk_unlock(flags);
		if (copy_to_user(cp, &ch, 1))
			return -EFAULT;
		spk_lock(flags);
		chars_sent++;
		cp++;
	}
	*pos += chars_sent;
	empty = synth_buffer_empty();
	spk_unlock(flags);
	if (empty) {
		speakup_start_ttys();
		*pos = 0;
	}
	return chars_sent;
}

static int last_index;

/*
 * Writes from the synthesizer report the index marker it has reached;
 * the value is parsed as an unsigned integer and cached for get_index().
 */
static ssize_t softsynth_write(struct file *fp, const char *buf,
			       size_t count, loff_t *pos)
{
	unsigned long supplied_index = 0;
	int converted;

	converted = kstrtoul_from_user(buf, count, 0, &supplied_index);
	if (converted < 0)
		return converted;

	last_index = supplied_index;
	return count;
}

static unsigned int softsynth_poll(struct file *fp,
		struct poll_table_struct *wait)
{
	unsigned long flags;
	int ret = 0;
	poll_wait(fp, &speakup_event, wait);

	spk_lock(flags);
	if (!synth_buffer_empty() || speakup_info.flushing)
		ret = POLLIN | POLLRDNORM;
	spk_unlock(flags);
	return ret;
}

/* Return the last reported index marker and reset it (consume-once). */
static unsigned char get_index(void)
{
	int rv;
	rv = last_index;
	last_index = 0;
	return rv;
}

static const struct file_operations softsynth_fops = {
	.owner = THIS_MODULE,
	.poll = softsynth_poll,
	.read = softsynth_read,
	.write = softsynth_write,
	.open = softsynth_open,
	.release = softsynth_close,
};

/* Register the /dev/softsynth misc device (idempotent). */
static int softsynth_probe(struct spk_synth *synth)
{
	if (misc_registered != 0)
		return 0;
	memset(&synth_device, 0, sizeof(synth_device));
	synth_device.minor = SOFTSYNTH_MINOR;
	synth_device.name = "softsynth";
	synth_device.fops = &softsynth_fops;
	if (misc_register(&synth_device)) {
		pr_warn("Couldn't initialize miscdevice /dev/softsynth.\n");
		return -ENODEV;
	}

	misc_registered = 1;
	pr_info("initialized device: /dev/softsynth, node (MAJOR 10, MINOR 26)\n");
	return 0;
}

static void softsynth_release(void)
{
	misc_deregister(&synth_device);
	misc_registered = 0;
	pr_info("unregistered /dev/softsynth\n");
}

static int softsynth_is_alive(struct spk_synth *synth)
{
	if (synth_soft.alive)
		return 1;
	return 0;
}

module_param_named(start, synth_soft.startup, short, S_IRUGO);

MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");

static int __init soft_init(void)
{
	return synth_add(&synth_soft);
}

static void __exit soft_exit(void)
{
	synth_remove(&synth_soft);
}

module_init(soft_init);
module_exit(soft_exit);

MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_DESCRIPTION("Speakup userspace software synthesizer support");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
gpl-2.0
mythos234/SimplKernel-5.1.1
lib/strnlen_user.c
3092
3717
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/uaccess.h>

#include <asm/word-at-a-time.h>

/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
#  define aligned_byte_mask(n) ((1ul << 8*(n))-1)
#else
#  define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
#endif

/*
 * Do a strnlen, return length of string *with* final '\0'.
 * 'count' is the user-supplied count, while 'max' is the
 * address space maximum.
 *
 * Return 0 for exceptions (which includes hitting the address
 * space maximum), or 'count+1' if hitting the user-supplied
 * maximum count.
 *
 * NOTE! We can sometimes overshoot the user-supplied maximum
 * if it fits in a aligned 'long'.  The caller needs to check
 * the return value against "> max".
 */
static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	long align, res = 0;
	unsigned long c;

	/*
	 * Truncate 'max' to the user-specified limit, so that
	 * we only have one limit we need to check in the loop
	 */
	if (max > count)
		max = count;

	/*
	 * Do everything aligned.  But that means that we
	 * need to also expand the maximum..
	 */
	align = (sizeof(long) - 1) & (unsigned long)src;
	src -= align;
	max += align;

	if (unlikely(__get_user(c,(unsigned long __user *)src)))
		return 0;
	/*
	 * Force the 'align' low bytes non-zero so a stray '\0' before the
	 * real string start (from the rounded-down load) is not counted.
	 */
	c |= aligned_byte_mask(align);

	for (;;) {
		unsigned long data;
		if (has_zero(c, &data, &constants)) {
			data = prep_zero_mask(c, data, &constants);
			data = create_zero_mask(data);
			/* length includes the NUL; undo the alignment shift */
			return res + find_zero(data) + 1 - align;
		}
		res += sizeof(unsigned long);
		if (unlikely(max < sizeof(unsigned long)))
			break;
		max -= sizeof(unsigned long);
		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
			return 0;
	}
	res -= align;

	/*
	 * Uhhuh. We hit 'max'. But was that the user-specified maximum
	 * too? If so, return the marker for "too long".
	 */
	if (res >= count)
		return count+1;

	/*
	 * Nope: we hit the address space limit, and we still had more
	 * characters the caller would have wanted. That's 0.
	 */
	return 0;
}

/**
 * strnlen_user: - Get the size of a user string INCLUDING final NUL.
 * @str: The string to measure.
 * @count: Maximum count (including NUL character)
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * If the string is too long, returns 'count+1'.
 * On exception (or invalid count), returns 0.
 */
long strnlen_user(const char __user *str, long count)
{
	unsigned long max_addr, src_addr;

	if (unlikely(count <= 0))
		return 0;

	max_addr = user_addr_max();
	src_addr = (unsigned long)str;
	if (likely(src_addr < max_addr)) {
		unsigned long max = max_addr - src_addr;
		return do_strnlen_user(str, count, max);
	}
	return 0;
}
EXPORT_SYMBOL(strnlen_user);

/**
 * strlen_user: - Get the size of a user string INCLUDING final NUL.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
long strlen_user(const char __user *str)
{
	unsigned long max_addr, src_addr;

	max_addr = user_addr_max();
	src_addr = (unsigned long)str;
	if (likely(src_addr < max_addr)) {
		unsigned long max = max_addr - src_addr;
		/* no caller-supplied limit: bounded only by the address space */
		return do_strnlen_user(str, ~0ul, max);
	}
	return 0;
}
EXPORT_SYMBOL(strlen_user);
gpl-2.0
bagnz0r/GT-I8160_Kernel
drivers/input/serio/rpckbd.c
3092
3980
/*
 * Copyright (c) 2000-2001 Vojtech Pavlik
 * Copyright (c) 2002 Russell King
 */

/*
 * Acorn RiscPC PS/2 keyboard controller driver for Linux/ARM
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/serio.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <mach/hardware.h>
#include <asm/hardware/iomd.h>
#include <asm/system.h>

MODULE_AUTHOR("Vojtech Pavlik, Russell King");
MODULE_DESCRIPTION("Acorn RiscPC PS/2 keyboard controller driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:kart");

/*
 * Transmit one byte to the keyboard: busy-wait until the IOMD controller
 * reports TX ready (KCTRL bit 7), then write the byte to the TX register.
 */
static int rpckbd_write(struct serio *port, unsigned char val)
{
	while (!(iomd_readb(IOMD_KCTRL) & (1 << 7)))
		cpu_relax();

	iomd_writeb(val, IOMD_KARTTX);

	return 0;
}

/*
 * RX interrupt: drain every byte the controller has pending (KCTRL bit 5)
 * and hand each one to the serio core.
 */
static irqreturn_t rpckbd_rx(int irq, void *dev_id)
{
	struct serio *port = dev_id;
	unsigned int byte;
	int handled = IRQ_NONE;

	while (iomd_readb(IOMD_KCTRL) & (1 << 5)) {
		byte = iomd_readb(IOMD_KARTRX);

		serio_interrupt(port, byte, 0);
		handled = IRQ_HANDLED;
	}

	return handled;
}

/* TX-complete interrupt: nothing to do, just acknowledge it. */
static irqreturn_t rpckbd_tx(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int rpckbd_open(struct serio *port)
{
	/* Reset the keyboard state machine. */
	iomd_writeb(0, IOMD_KCTRL);
	iomd_writeb(8, IOMD_KCTRL);
	iomd_readb(IOMD_KARTRX);

	if (request_irq(IRQ_KEYBOARDRX, rpckbd_rx, 0, "rpckbd", port) != 0) {
		printk(KERN_ERR "rpckbd.c: Could not allocate keyboard receive IRQ\n");
		return -EBUSY;
	}

	if (request_irq(IRQ_KEYBOARDTX, rpckbd_tx, 0, "rpckbd", port) != 0) {
		printk(KERN_ERR "rpckbd.c: Could not allocate keyboard transmit IRQ\n");
		free_irq(IRQ_KEYBOARDRX, port);
		return -EBUSY;
	}

	return 0;
}

static void rpckbd_close(struct serio *port)
{
	free_irq(IRQ_KEYBOARDRX, port);
	free_irq(IRQ_KEYBOARDTX, port);
}

/*
 * Allocate and initialize serio structure for subsequent registration
 * with serio core.
 */
static int __devinit rpckbd_probe(struct platform_device *dev)
{
	struct serio *serio;

	serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
	if (!serio)
		return -ENOMEM;

	serio->id.type		= SERIO_8042;
	serio->write		= rpckbd_write;
	serio->open		= rpckbd_open;
	serio->close		= rpckbd_close;
	serio->dev.parent	= &dev->dev;
	strlcpy(serio->name, "RiscPC PS/2 kbd port", sizeof(serio->name));
	strlcpy(serio->phys, "rpckbd/serio0", sizeof(serio->phys));

	platform_set_drvdata(dev, serio);
	/* serio core owns the port from here; freed via serio_unregister_port */
	serio_register_port(serio);
	return 0;
}

static int __devexit rpckbd_remove(struct platform_device *dev)
{
	struct serio *serio = platform_get_drvdata(dev);
	serio_unregister_port(serio);
	return 0;
}

static struct platform_driver rpckbd_driver = {
	.probe		= rpckbd_probe,
	.remove		= __devexit_p(rpckbd_remove),
	.driver		= {
		.name	= "kart",
		.owner	= THIS_MODULE,
	},
};

static int __init rpckbd_init(void)
{
	return platform_driver_register(&rpckbd_driver);
}

static void __exit rpckbd_exit(void)
{
	platform_driver_unregister(&rpckbd_driver);
}

module_init(rpckbd_init);
module_exit(rpckbd_exit);
gpl-2.0
andi34/kernel_oneplus_msm8974
arch/powerpc/kvm/book3s_hv.c
4372
34728
/* * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. * * Authors: * Paul Mackerras <paulus@au1.ibm.com> * Alexander Graf <agraf@suse.de> * Kevin Wolf <mail@kevin-wolf.de> * * Description: KVM functions specific to running on Book 3S * processors in hypervisor mode (specifically POWER7 and later). * * This file is derived from arch/powerpc/kvm/book3s.c, * by Alexander Graf <agraf@suse.de>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. */ #include <linux/kvm_host.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/preempt.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/fs.h> #include <linux/anon_inodes.h> #include <linux/cpumask.h> #include <linux/spinlock.h> #include <linux/page-flags.h> #include <asm/reg.h> #include <asm/cputable.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/mmu_context.h> #include <asm/lppaca.h> #include <asm/processor.h> #include <asm/cputhreads.h> #include <asm/page.h> #include <asm/hvcall.h> #include <asm/switch_to.h> #include <linux/gfp.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include <linux/hugetlb.h> /* #define EXIT_DEBUG */ /* #define EXIT_DEBUG_SIMPLE */ /* #define EXIT_DEBUG_INT */ static void kvmppc_end_cede(struct kvm_vcpu *vcpu); static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu); void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { local_paca->kvm_hstate.kvm_vcpu = vcpu; local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore; } void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) { } void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) { vcpu->arch.shregs.msr = msr; kvmppc_end_cede(vcpu); } void 
kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) { vcpu->arch.pvr = pvr; } void kvmppc_dump_regs(struct kvm_vcpu *vcpu) { int r; pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); pr_err("pc = %.16lx msr = %.16llx trap = %x\n", vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); for (r = 0; r < 16; ++r) pr_err("r%2d = %.16lx r%d = %.16lx\n", r, kvmppc_get_gpr(vcpu, r), r+16, kvmppc_get_gpr(vcpu, r+16)); pr_err("ctr = %.16lx lr = %.16lx\n", vcpu->arch.ctr, vcpu->arch.lr); pr_err("srr0 = %.16llx srr1 = %.16llx\n", vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); pr_err("sprg0 = %.16llx sprg1 = %.16llx\n", vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); pr_err("fault dar = %.16lx dsisr = %.8x\n", vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); for (r = 0; r < vcpu->arch.slb_max; ++r) pr_err(" ESID = %.16llx VSID = %.16llx\n", vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n", vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1, vcpu->arch.last_inst); } struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id) { int r; struct kvm_vcpu *v, *ret = NULL; mutex_lock(&kvm->lock); kvm_for_each_vcpu(r, v, kvm) { if (v->vcpu_id == id) { ret = v; break; } } mutex_unlock(&kvm->lock); return ret; } static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) { vpa->shared_proc = 1; vpa->yield_count = 1; } static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long vcpuid, unsigned long vpa) { struct kvm *kvm = vcpu->kvm; unsigned long len, nb; void *va; struct kvm_vcpu *tvcpu; int err = H_PARAMETER; tvcpu = kvmppc_find_vcpu(kvm, vcpuid); if (!tvcpu) return H_PARAMETER; flags >>= 63 - 18; flags &= 
7; if (flags == 0 || flags == 4) return H_PARAMETER; if (flags < 4) { if (vpa & 0x7f) return H_PARAMETER; if (flags >= 2 && !tvcpu->arch.vpa) return H_RESOURCE; /* registering new area; convert logical addr to real */ va = kvmppc_pin_guest_page(kvm, vpa, &nb); if (va == NULL) return H_PARAMETER; if (flags <= 1) len = *(unsigned short *)(va + 4); else len = *(unsigned int *)(va + 4); if (len > nb) goto out_unpin; switch (flags) { case 1: /* register VPA */ if (len < 640) goto out_unpin; if (tvcpu->arch.vpa) kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa); tvcpu->arch.vpa = va; init_vpa(vcpu, va); break; case 2: /* register DTL */ if (len < 48) goto out_unpin; len -= len % 48; if (tvcpu->arch.dtl) kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl); tvcpu->arch.dtl = va; tvcpu->arch.dtl_end = va + len; break; case 3: /* register SLB shadow buffer */ if (len < 16) goto out_unpin; if (tvcpu->arch.slb_shadow) kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow); tvcpu->arch.slb_shadow = va; break; } } else { switch (flags) { case 5: /* unregister VPA */ if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl) return H_RESOURCE; if (!tvcpu->arch.vpa) break; kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa); tvcpu->arch.vpa = NULL; break; case 6: /* unregister DTL */ if (!tvcpu->arch.dtl) break; kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl); tvcpu->arch.dtl = NULL; break; case 7: /* unregister SLB shadow buffer */ if (!tvcpu->arch.slb_shadow) break; kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow); tvcpu->arch.slb_shadow = NULL; break; } } return H_SUCCESS; out_unpin: kvmppc_unpin_guest_page(kvm, va); return err; } int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) { unsigned long req = kvmppc_get_gpr(vcpu, 3); unsigned long target, ret = H_SUCCESS; struct kvm_vcpu *tvcpu; switch (req) { case H_ENTER: ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6), kvmppc_get_gpr(vcpu, 7)); break; case H_CEDE: break; case H_PROD: target = 
kvmppc_get_gpr(vcpu, 4); tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); if (!tvcpu) { ret = H_PARAMETER; break; } tvcpu->arch.prodded = 1; smp_mb(); if (vcpu->arch.ceded) { if (waitqueue_active(&vcpu->wq)) { wake_up_interruptible(&vcpu->wq); vcpu->stat.halt_wakeup++; } } break; case H_CONFER: break; case H_REGISTER_VPA: ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); break; default: return RESUME_HOST; } kvmppc_set_gpr(vcpu, 3, ret); vcpu->arch.hcall_needed = 0; return RESUME_GUEST; } static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, struct task_struct *tsk) { int r = RESUME_HOST; vcpu->stat.sum_exits++; run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; switch (vcpu->arch.trap) { /* We're good on these - the host merely wanted to get our attention */ case BOOK3S_INTERRUPT_HV_DECREMENTER: vcpu->stat.dec_exits++; r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_EXTERNAL: vcpu->stat.ext_intr_exits++; r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_PERFMON: r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_PROGRAM: { ulong flags; /* * Normally program interrupts are delivered directly * to the guest by the hardware, but we can get here * as a result of a hypervisor emulation interrupt * (e40) getting turned into a 700 by BML RTAS. 
*/ flags = vcpu->arch.shregs.msr & 0x1f0000ull; kvmppc_core_queue_program(vcpu, flags); r = RESUME_GUEST; break; } case BOOK3S_INTERRUPT_SYSCALL: { /* hcall - punt to userspace */ int i; if (vcpu->arch.shregs.msr & MSR_PR) { /* sc 1 from userspace - reflect to guest syscall */ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL); r = RESUME_GUEST; break; } run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); for (i = 0; i < 9; ++i) run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); run->exit_reason = KVM_EXIT_PAPR_HCALL; vcpu->arch.hcall_needed = 1; r = RESUME_HOST; break; } /* * We get these next two if the guest accesses a page which it thinks * it has mapped but which is not actually present, either because * it is for an emulated I/O device or because the corresonding * host page has been paged out. Any other HDSI/HISI interrupts * have been handled already. */ case BOOK3S_INTERRUPT_H_DATA_STORAGE: r = kvmppc_book3s_hv_page_fault(run, vcpu, vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); break; case BOOK3S_INTERRUPT_H_INST_STORAGE: r = kvmppc_book3s_hv_page_fault(run, vcpu, kvmppc_get_pc(vcpu), 0); break; /* * This occurs if the guest executes an illegal instruction. * We just generate a program interrupt to the guest, since * we don't emulate any guest instructions at this stage. 
*/ case BOOK3S_INTERRUPT_H_EMUL_ASSIST: kvmppc_core_queue_program(vcpu, 0x80000); r = RESUME_GUEST; break; default: kvmppc_dump_regs(vcpu); printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", vcpu->arch.trap, kvmppc_get_pc(vcpu), vcpu->arch.shregs.msr); r = RESUME_HOST; BUG(); break; } return r; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int i; sregs->pvr = vcpu->arch.pvr; memset(sregs, 0, sizeof(struct kvm_sregs)); for (i = 0; i < vcpu->arch.slb_max; i++) { sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; } return 0; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int i, j; kvmppc_set_pvr(vcpu, sregs->pvr); j = 0; for (i = 0; i < vcpu->arch.slb_nr; i++) { if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) { vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; ++j; } } vcpu->arch.slb_max = j; return 0; } int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) { int r = -EINVAL; switch (reg->id) { case KVM_REG_PPC_HIOR: r = put_user(0, (u64 __user *)reg->addr); break; default: break; } return r; } int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) { int r = -EINVAL; switch (reg->id) { case KVM_REG_PPC_HIOR: { u64 hior; /* Only allow this to be set to zero */ r = get_user(hior, (u64 __user *)reg->addr); if (!r && (hior != 0)) r = -EINVAL; break; } default: break; } return r; } int kvmppc_core_check_processor_compat(void) { if (cpu_has_feature(CPU_FTR_HVMODE)) return 0; return -EIO; } struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; int err = -EINVAL; int core; struct kvmppc_vcore *vcore; core = id / threads_per_core; if (core >= KVM_MAX_VCORES) goto out; err = -ENOMEM; vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!vcpu) goto out; err = 
kvm_vcpu_init(vcpu, kvm, id); if (err) goto free_vcpu; vcpu->arch.shared = &vcpu->arch.shregs; vcpu->arch.last_cpu = -1; vcpu->arch.mmcr[0] = MMCR0_FC; vcpu->arch.ctrl = CTRL_RUNLATCH; /* default to host PVR, since we can't spoof it */ vcpu->arch.pvr = mfspr(SPRN_PVR); kvmppc_set_pvr(vcpu, vcpu->arch.pvr); kvmppc_mmu_book3s_hv_init(vcpu); /* * We consider the vcpu stopped until we see the first run ioctl for it. */ vcpu->arch.state = KVMPPC_VCPU_STOPPED; init_waitqueue_head(&vcpu->arch.cpu_run); mutex_lock(&kvm->lock); vcore = kvm->arch.vcores[core]; if (!vcore) { vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); if (vcore) { INIT_LIST_HEAD(&vcore->runnable_threads); spin_lock_init(&vcore->lock); init_waitqueue_head(&vcore->wq); } kvm->arch.vcores[core] = vcore; } mutex_unlock(&kvm->lock); if (!vcore) goto free_vcpu; spin_lock(&vcore->lock); ++vcore->num_threads; spin_unlock(&vcore->lock); vcpu->arch.vcore = vcore; vcpu->arch.cpu_type = KVM_CPU_3S_64; kvmppc_sanity_check(vcpu); return vcpu; free_vcpu: kmem_cache_free(kvm_vcpu_cache, vcpu); out: return ERR_PTR(err); } void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) { if (vcpu->arch.dtl) kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl); if (vcpu->arch.slb_shadow) kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow); if (vcpu->arch.vpa) kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vcpu); } static void kvmppc_set_timer(struct kvm_vcpu *vcpu) { unsigned long dec_nsec, now; now = get_tb(); if (now > vcpu->arch.dec_expires) { /* decrementer has already gone negative */ kvmppc_core_queue_dec(vcpu); kvmppc_core_prepare_to_enter(vcpu); return; } dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC / tb_ticks_per_sec; hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), HRTIMER_MODE_REL); vcpu->arch.timer_running = 1; } static void kvmppc_end_cede(struct kvm_vcpu *vcpu) { vcpu->arch.ceded = 0; if (vcpu->arch.timer_running) { 
hrtimer_try_to_cancel(&vcpu->arch.dec_timer); vcpu->arch.timer_running = 0; } } extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); extern void xics_wake_cpu(int cpu); static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, struct kvm_vcpu *vcpu) { struct kvm_vcpu *v; if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) return; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; --vc->n_runnable; ++vc->n_busy; /* decrement the physical thread id of each following vcpu */ v = vcpu; list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list) --v->arch.ptid; list_del(&vcpu->arch.run_list); } static void kvmppc_start_thread(struct kvm_vcpu *vcpu) { int cpu; struct paca_struct *tpaca; struct kvmppc_vcore *vc = vcpu->arch.vcore; if (vcpu->arch.timer_running) { hrtimer_try_to_cancel(&vcpu->arch.dec_timer); vcpu->arch.timer_running = 0; } cpu = vc->pcpu + vcpu->arch.ptid; tpaca = &paca[cpu]; tpaca->kvm_hstate.kvm_vcpu = vcpu; tpaca->kvm_hstate.kvm_vcore = vc; tpaca->kvm_hstate.napping = 0; vcpu->cpu = vc->pcpu; smp_wmb(); #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) if (vcpu->arch.ptid) { tpaca->cpu_start = 0x80; wmb(); xics_wake_cpu(cpu); ++vc->n_woken; } #endif } static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc) { int i; HMT_low(); i = 0; while (vc->nap_count < vc->n_woken) { if (++i >= 1000000) { pr_err("kvmppc_wait_for_nap timeout %d %d\n", vc->nap_count, vc->n_woken); break; } cpu_relax(); } HMT_medium(); } /* * Check that we are on thread 0 and that any other threads in * this core are off-line. */ static int on_primary_thread(void) { int cpu = smp_processor_id(); int thr = cpu_thread_in_core(cpu); if (thr) return 0; while (++thr < threads_per_core) if (cpu_online(cpu + thr)) return 0; return 1; } /* * Run a set of guest threads on a physical core. * Called with vc->lock held. 
*/ static int kvmppc_run_core(struct kvmppc_vcore *vc) { struct kvm_vcpu *vcpu, *vcpu0, *vnext; long ret; u64 now; int ptid; /* don't start if any threads have a signal pending */ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) if (signal_pending(vcpu->arch.run_task)) return 0; /* * Make sure we are running on thread 0, and that * secondary threads are offline. * XXX we should also block attempts to bring any * secondary threads online. */ if (threads_per_core > 1 && !on_primary_thread()) { list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) vcpu->arch.ret = -EBUSY; goto out; } /* * Assign physical thread IDs, first to non-ceded vcpus * and then to ceded ones. */ ptid = 0; vcpu0 = NULL; list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { if (!vcpu->arch.ceded) { if (!ptid) vcpu0 = vcpu; vcpu->arch.ptid = ptid++; } } if (!vcpu0) return 0; /* nothing to run */ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) if (vcpu->arch.ceded) vcpu->arch.ptid = ptid++; vc->n_woken = 0; vc->nap_count = 0; vc->entry_exit_count = 0; vc->vcore_state = VCORE_RUNNING; vc->in_guest = 0; vc->pcpu = smp_processor_id(); vc->napping_threads = 0; list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) kvmppc_start_thread(vcpu); preempt_disable(); spin_unlock(&vc->lock); kvm_guest_enter(); __kvmppc_vcore_entry(NULL, vcpu0); spin_lock(&vc->lock); /* disable sending of IPIs on virtual external irqs */ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) vcpu->cpu = -1; /* wait for secondary threads to finish writing their state to memory */ if (vc->nap_count < vc->n_woken) kvmppc_wait_for_nap(vc); /* prevent other vcpu threads from doing kvmppc_start_thread() now */ vc->vcore_state = VCORE_EXITING; spin_unlock(&vc->lock); /* make sure updates to secondary vcpu structs are visible now */ smp_mb(); kvm_guest_exit(); preempt_enable(); kvm_resched(vcpu); now = get_tb(); list_for_each_entry(vcpu, &vc->runnable_threads, 
arch.run_list) { /* cancel pending dec exception if dec is positive */ if (now < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu)) kvmppc_core_dequeue_dec(vcpu); ret = RESUME_GUEST; if (vcpu->arch.trap) ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu, vcpu->arch.run_task); vcpu->arch.ret = ret; vcpu->arch.trap = 0; if (vcpu->arch.ceded) { if (ret != RESUME_GUEST) kvmppc_end_cede(vcpu); else kvmppc_set_timer(vcpu); } } spin_lock(&vc->lock); out: vc->vcore_state = VCORE_INACTIVE; list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, arch.run_list) { if (vcpu->arch.ret != RESUME_GUEST) { kvmppc_remove_runnable(vc, vcpu); wake_up(&vcpu->arch.cpu_run); } } return 1; } /* * Wait for some other vcpu thread to execute us, and * wake us up when we need to handle something in the host. */ static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state) { DEFINE_WAIT(wait); prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) schedule(); finish_wait(&vcpu->arch.cpu_run, &wait); } /* * All the vcpus in this vcore are idle, so wait for a decrementer * or external interrupt to one of the vcpus. vc->lock is held. 
*/ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) { DEFINE_WAIT(wait); struct kvm_vcpu *v; int all_idle = 1; prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE); vc->vcore_state = VCORE_SLEEPING; spin_unlock(&vc->lock); list_for_each_entry(v, &vc->runnable_threads, arch.run_list) { if (!v->arch.ceded || v->arch.pending_exceptions) { all_idle = 0; break; } } if (all_idle) schedule(); finish_wait(&vc->wq, &wait); spin_lock(&vc->lock); vc->vcore_state = VCORE_INACTIVE; } static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int n_ceded; int prev_state; struct kvmppc_vcore *vc; struct kvm_vcpu *v, *vn; kvm_run->exit_reason = 0; vcpu->arch.ret = RESUME_GUEST; vcpu->arch.trap = 0; /* * Synchronize with other threads in this virtual core */ vc = vcpu->arch.vcore; spin_lock(&vc->lock); vcpu->arch.ceded = 0; vcpu->arch.run_task = current; vcpu->arch.kvm_run = kvm_run; prev_state = vcpu->arch.state; vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); ++vc->n_runnable; /* * This happens the first time this is called for a vcpu. * If the vcore is already running, we may be able to start * this thread straight away and have it join in. 
*/ if (prev_state == KVMPPC_VCPU_STOPPED) { if (vc->vcore_state == VCORE_RUNNING && VCORE_EXIT_COUNT(vc) == 0) { vcpu->arch.ptid = vc->n_runnable - 1; kvmppc_start_thread(vcpu); } } else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST) --vc->n_busy; while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && !signal_pending(current)) { if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) { spin_unlock(&vc->lock); kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE); spin_lock(&vc->lock); continue; } n_ceded = 0; list_for_each_entry(v, &vc->runnable_threads, arch.run_list) n_ceded += v->arch.ceded; if (n_ceded == vc->n_runnable) kvmppc_vcore_blocked(vc); else kvmppc_run_core(vc); list_for_each_entry_safe(v, vn, &vc->runnable_threads, arch.run_list) { kvmppc_core_prepare_to_enter(v); if (signal_pending(v->arch.run_task)) { kvmppc_remove_runnable(vc, v); v->stat.signal_exits++; v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; v->arch.ret = -EINTR; wake_up(&v->arch.cpu_run); } } } if (signal_pending(current)) { if (vc->vcore_state == VCORE_RUNNING || vc->vcore_state == VCORE_EXITING) { spin_unlock(&vc->lock); kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE); spin_lock(&vc->lock); } if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { kvmppc_remove_runnable(vc, vcpu); vcpu->stat.signal_exits++; kvm_run->exit_reason = KVM_EXIT_INTR; vcpu->arch.ret = -EINTR; } } spin_unlock(&vc->lock); return vcpu->arch.ret; } int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) { int r; if (!vcpu->arch.sane) { run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return -EINVAL; } kvmppc_core_prepare_to_enter(vcpu); /* No need to go into the guest when all we'll do is come back out */ if (signal_pending(current)) { run->exit_reason = KVM_EXIT_INTR; return -EINTR; } /* On the first time here, set up VRMA or RMA */ if (!vcpu->kvm->arch.rma_setup_done) { r = kvmppc_hv_setup_rma(vcpu); if (r) return r; } flush_fp_to_thread(current); flush_altivec_to_thread(current); flush_vsx_to_thread(current); vcpu->arch.wqp = 
&vcpu->arch.vcore->wq; vcpu->arch.pgdir = current->mm->pgd; do { r = kvmppc_run_vcpu(run, vcpu); if (run->exit_reason == KVM_EXIT_PAPR_HCALL && !(vcpu->arch.shregs.msr & MSR_PR)) { r = kvmppc_pseries_do_hcall(vcpu); kvmppc_core_prepare_to_enter(vcpu); } } while (r == RESUME_GUEST); return r; } static long kvmppc_stt_npages(unsigned long window_size) { return ALIGN((window_size >> SPAPR_TCE_SHIFT) * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; } static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt) { struct kvm *kvm = stt->kvm; int i; mutex_lock(&kvm->lock); list_del(&stt->list); for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++) __free_page(stt->pages[i]); kfree(stt); mutex_unlock(&kvm->lock); kvm_put_kvm(kvm); } static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data; struct page *page; if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size)) return VM_FAULT_SIGBUS; page = stt->pages[vmf->pgoff]; get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct kvm_spapr_tce_vm_ops = { .fault = kvm_spapr_tce_fault, }; static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma) { vma->vm_ops = &kvm_spapr_tce_vm_ops; return 0; } static int kvm_spapr_tce_release(struct inode *inode, struct file *filp) { struct kvmppc_spapr_tce_table *stt = filp->private_data; release_spapr_tce_table(stt); return 0; } static struct file_operations kvm_spapr_tce_fops = { .mmap = kvm_spapr_tce_mmap, .release = kvm_spapr_tce_release, }; long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce *args) { struct kvmppc_spapr_tce_table *stt = NULL; long npages; int ret = -ENOMEM; int i; /* Check this LIOBN hasn't been previously allocated */ list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) { if (stt->liobn == args->liobn) return -EBUSY; } npages = kvmppc_stt_npages(args->window_size); stt = kzalloc(sizeof(*stt) + 
npages* sizeof(struct page *), GFP_KERNEL); if (!stt) goto fail; stt->liobn = args->liobn; stt->window_size = args->window_size; stt->kvm = kvm; for (i = 0; i < npages; i++) { stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!stt->pages[i]) goto fail; } kvm_get_kvm(kvm); mutex_lock(&kvm->lock); list_add(&stt->list, &kvm->arch.spapr_tce_tables); mutex_unlock(&kvm->lock); return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, stt, O_RDWR); fail: if (stt) { for (i = 0; i < npages; i++) if (stt->pages[i]) __free_page(stt->pages[i]); kfree(stt); } return ret; } /* Work out RMLS (real mode limit selector) field value for a given RMA size. Assumes POWER7 or PPC970. */ static inline int lpcr_rmls(unsigned long rma_size) { switch (rma_size) { case 32ul << 20: /* 32 MB */ if (cpu_has_feature(CPU_FTR_ARCH_206)) return 8; /* only supported on POWER7 */ return -1; case 64ul << 20: /* 64 MB */ return 3; case 128ul << 20: /* 128 MB */ return 7; case 256ul << 20: /* 256 MB */ return 4; case 1ul << 30: /* 1 GB */ return 2; case 16ul << 30: /* 16 GB */ return 1; case 256ul << 30: /* 256 GB */ return 0; default: return -1; } } static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct kvmppc_linear_info *ri = vma->vm_file->private_data; struct page *page; if (vmf->pgoff >= ri->npages) return VM_FAULT_SIGBUS; page = pfn_to_page(ri->base_pfn + vmf->pgoff); get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct kvm_rma_vm_ops = { .fault = kvm_rma_fault, }; static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma) { vma->vm_flags |= VM_RESERVED; vma->vm_ops = &kvm_rma_vm_ops; return 0; } static int kvm_rma_release(struct inode *inode, struct file *filp) { struct kvmppc_linear_info *ri = filp->private_data; kvm_release_rma(ri); return 0; } static struct file_operations kvm_rma_fops = { .mmap = kvm_rma_mmap, .release = kvm_rma_release, }; long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct 
kvm_allocate_rma *ret) { struct kvmppc_linear_info *ri; long fd; ri = kvm_alloc_rma(); if (!ri) return -ENOMEM; fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR); if (fd < 0) kvm_release_rma(ri); ret->rma_size = ri->npages << PAGE_SHIFT; return fd; } /* * Get (and clear) the dirty memory log for a memory slot. */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { struct kvm_memory_slot *memslot; int r; unsigned long n; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_MEMORY_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); r = -ENOENT; if (!memslot->dirty_bitmap) goto out; n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); r = kvmppc_hv_get_dirty_log(kvm, memslot); if (r) goto out; r = -EFAULT; if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) goto out; r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } static unsigned long slb_pgsize_encoding(unsigned long psize) { unsigned long senc = 0; if (psize > 0x1000) { senc = SLB_VSID_L; if (psize == 0x10000) senc |= SLB_VSID_LP_01; } return senc; } int kvmppc_core_prepare_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem) { unsigned long npages; unsigned long *phys; /* Allocate a slot_phys array */ phys = kvm->arch.slot_phys[mem->slot]; if (!kvm->arch.using_mmu_notifiers && !phys) { npages = mem->memory_size >> PAGE_SHIFT; phys = vzalloc(npages * sizeof(unsigned long)); if (!phys) return -ENOMEM; kvm->arch.slot_phys[mem->slot] = phys; kvm->arch.slot_npages[mem->slot] = npages; } return 0; } static void unpin_slot(struct kvm *kvm, int slot_id) { unsigned long *physp; unsigned long j, npages, pfn; struct page *page; physp = kvm->arch.slot_phys[slot_id]; npages = kvm->arch.slot_npages[slot_id]; if (physp) { spin_lock(&kvm->arch.slot_phys_lock); for (j = 0; j < npages; j++) { if (!(physp[j] & KVMPPC_GOT_PAGE)) continue; pfn = physp[j] >> PAGE_SHIFT; page = pfn_to_page(pfn); SetPageDirty(page); 
put_page(page); } kvm->arch.slot_phys[slot_id] = NULL; spin_unlock(&kvm->arch.slot_phys_lock); vfree(physp); } } void kvmppc_core_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem) { } static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu) { int err = 0; struct kvm *kvm = vcpu->kvm; struct kvmppc_linear_info *ri = NULL; unsigned long hva; struct kvm_memory_slot *memslot; struct vm_area_struct *vma; unsigned long lpcr, senc; unsigned long psize, porder; unsigned long rma_size; unsigned long rmls; unsigned long *physp; unsigned long i, npages; mutex_lock(&kvm->lock); if (kvm->arch.rma_setup_done) goto out; /* another vcpu beat us to it */ /* Look up the memslot for guest physical address 0 */ memslot = gfn_to_memslot(kvm, 0); /* We must have some memory at 0 by now */ err = -EINVAL; if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) goto out; /* Look up the VMA for the start of this memory slot */ hva = memslot->userspace_addr; down_read(&current->mm->mmap_sem); vma = find_vma(current->mm, hva); if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO)) goto up_out; psize = vma_kernel_pagesize(vma); porder = __ilog2(psize); /* Is this one of our preallocated RMAs? 
*/ if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops && hva == vma->vm_start) ri = vma->vm_file->private_data; up_read(&current->mm->mmap_sem); if (!ri) { /* On POWER7, use VRMA; on PPC970, give up */ err = -EPERM; if (cpu_has_feature(CPU_FTR_ARCH_201)) { pr_err("KVM: CPU requires an RMO\n"); goto out; } /* We can handle 4k, 64k or 16M pages in the VRMA */ err = -EINVAL; if (!(psize == 0x1000 || psize == 0x10000 || psize == 0x1000000)) goto out; /* Update VRMASD field in the LPCR */ senc = slb_pgsize_encoding(psize); kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | (VRMA_VSID << SLB_VSID_SHIFT_1T); lpcr = kvm->arch.lpcr & ~LPCR_VRMASD; lpcr |= senc << (LPCR_VRMASD_SH - 4); kvm->arch.lpcr = lpcr; /* Create HPTEs in the hash page table for the VRMA */ kvmppc_map_vrma(vcpu, memslot, porder); } else { /* Set up to use an RMO region */ rma_size = ri->npages; if (rma_size > memslot->npages) rma_size = memslot->npages; rma_size <<= PAGE_SHIFT; rmls = lpcr_rmls(rma_size); err = -EINVAL; if (rmls < 0) { pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); goto out; } atomic_inc(&ri->use_count); kvm->arch.rma = ri; /* Update LPCR and RMOR */ lpcr = kvm->arch.lpcr; if (cpu_has_feature(CPU_FTR_ARCH_201)) { /* PPC970; insert RMLS value (split field) in HID4 */ lpcr &= ~((1ul << HID4_RMLS0_SH) | (3ul << HID4_RMLS2_SH)); lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) | ((rmls & 3) << HID4_RMLS2_SH); /* RMOR is also in HID4 */ lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff) << HID4_RMOR_SH; } else { /* POWER7 */ lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L); lpcr |= rmls << LPCR_RMLS_SH; kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT; } kvm->arch.lpcr = lpcr; pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n", ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); /* Initialize phys addrs of pages in RMO */ npages = ri->npages; porder = __ilog2(npages); physp = kvm->arch.slot_phys[memslot->id]; spin_lock(&kvm->arch.slot_phys_lock); for (i = 0; i < npages; ++i) physp[i] = 
((ri->base_pfn + i) << PAGE_SHIFT) + porder; spin_unlock(&kvm->arch.slot_phys_lock); } /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */ smp_wmb(); kvm->arch.rma_setup_done = 1; err = 0; out: mutex_unlock(&kvm->lock); return err; up_out: up_read(&current->mm->mmap_sem); goto out; } int kvmppc_core_init_vm(struct kvm *kvm) { long r; unsigned long lpcr; /* Allocate hashed page table */ r = kvmppc_alloc_hpt(kvm); if (r) return r; INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); kvm->arch.rma = NULL; kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); if (cpu_has_feature(CPU_FTR_ARCH_201)) { /* PPC970; HID4 is effectively the LPCR */ unsigned long lpid = kvm->arch.lpid; kvm->arch.host_lpid = 0; kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4); lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH)); lpcr |= ((lpid >> 4) << HID4_LPID1_SH) | ((lpid & 0xf) << HID4_LPID5_SH); } else { /* POWER7; init LPCR for virtual RMA mode */ kvm->arch.host_lpid = mfspr(SPRN_LPID); kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); lpcr &= LPCR_PECE | LPCR_LPES; lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | LPCR_VPM0 | LPCR_VPM1; kvm->arch.vrma_slb_v = SLB_VSID_B_1T | (VRMA_VSID << SLB_VSID_SHIFT_1T); } kvm->arch.lpcr = lpcr; kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206); spin_lock_init(&kvm->arch.slot_phys_lock); return 0; } void kvmppc_core_destroy_vm(struct kvm *kvm) { unsigned long i; if (!kvm->arch.using_mmu_notifiers) for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) unpin_slot(kvm, i); if (kvm->arch.rma) { kvm_release_rma(kvm->arch.rma); kvm->arch.rma = NULL; } kvmppc_free_hpt(kvm); WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); } /* These are stubs for now */ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) { } /* We don't need to emulate any privileged instructions or dcbz */ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { return EMULATE_FAIL; } int kvmppc_core_emulate_mtspr(struct 
kvm_vcpu *vcpu, int sprn, int rs) { return EMULATE_FAIL; } int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) { return EMULATE_FAIL; } static int kvmppc_book3s_hv_init(void) { int r; r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); if (r) return r; r = kvmppc_mmu_hv_init(); return r; } static void kvmppc_book3s_hv_exit(void) { kvm_exit(); } module_init(kvmppc_book3s_hv_init); module_exit(kvmppc_book3s_hv_exit);
gpl-2.0
nychitman1/android_kernel_samsung_manta
arch/microblaze/kernel/ftrace.c
7700
6208
/* * Ftrace support for Microblaze. * * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2009 PetaLogix * * Based on MIPS and PowerPC ftrace code * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <asm/cacheflush.h> #include <linux/ftrace.h> #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* * Hook the return address and push it in the stack of return addrs * in current thread info. */ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) { unsigned long old; int faulted, err; struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; if (unlikely(atomic_read(&current->tracing_graph_pause))) return; /* * Protect against fault, even if it shouldn't * happen. This tool is too much intrusive to * ignore such a protection. */ asm volatile(" 1: lwi %0, %2, 0; \ 2: swi %3, %2, 0; \ addik %1, r0, 0; \ 3: \ .section .fixup, \"ax\"; \ 4: brid 3b; \ addik %1, r0, 1; \ .previous; \ .section __ex_table,\"a\"; \ .word 1b,4b; \ .word 2b,4b; \ .previous;" \ : "=&r" (old), "=r" (faulted) : "r" (parent), "r" (return_hooker) ); flush_dcache_range((u32)parent, (u32)parent + 4); flush_icache_range((u32)parent, (u32)parent + 4); if (unlikely(faulted)) { ftrace_graph_stop(); WARN_ON(1); return; } err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0); if (err == -EBUSY) { *parent = old; return; } trace.func = self_addr; /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; *parent = old; } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_DYNAMIC_FTRACE /* save value to addr - it is save to do it in asm */ static int ftrace_modify_code(unsigned long addr, unsigned int value) { int faulted = 0; __asm__ __volatile__(" 1: swi %2, %1, 0; \ addik %0, r0, 0; \ 2: \ .section .fixup, \"ax\"; \ 3: brid 2b; \ addik %0, r0, 1; \ 
.previous; \ .section __ex_table,\"a\"; \ .word 1b,3b; \ .previous;" \ : "=r" (faulted) : "r" (addr), "r" (value) ); if (unlikely(faulted)) return -EFAULT; flush_dcache_range(addr, addr + 4); flush_icache_range(addr, addr + 4); return 0; } #define MICROBLAZE_NOP 0x80000000 #define MICROBLAZE_BRI 0xb800000C static unsigned int recorded; /* if save was or not */ static unsigned int imm; /* saving whole imm instruction */ /* There are two approaches howto solve ftrace_make nop function - look below */ #undef USE_FTRACE_NOP #ifdef USE_FTRACE_NOP static unsigned int bralid; /* saving whole bralid instruction */ #endif int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { /* we have this part of code which we are working with * b000c000 imm -16384 * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> * 80000000 or r0, r0, r0 * * The first solution (!USE_FTRACE_NOP-could be called branch solution) * b000c000 bri 12 (0xC - jump to any other instruction) * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> * 80000000 or r0, r0, r0 * any other instruction * * The second solution (USE_FTRACE_NOP) - no jump just nops * 80000000 or r0, r0, r0 * 80000000 or r0, r0, r0 * 80000000 or r0, r0, r0 */ int ret = 0; if (recorded == 0) { recorded = 1; imm = *(unsigned int *)rec->ip; pr_debug("%s: imm:0x%x\n", __func__, imm); #ifdef USE_FTRACE_NOP bralid = *(unsigned int *)(rec->ip + 4); pr_debug("%s: bralid 0x%x\n", __func__, bralid); #endif /* USE_FTRACE_NOP */ } #ifdef USE_FTRACE_NOP ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP); ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP); #else /* USE_FTRACE_NOP */ ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI); #endif /* USE_FTRACE_NOP */ return ret; } /* I believe that first is called ftrace_make_nop before this function */ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { int ret; pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", __func__, (unsigned int)addr, (unsigned int)rec->ip, 
imm); ret = ftrace_modify_code(rec->ip, imm); #ifdef USE_FTRACE_NOP pr_debug("%s: bralid:0x%x\n", __func__, bralid); ret += ftrace_modify_code(rec->ip + 4, bralid); #endif /* USE_FTRACE_NOP */ return ret; } int __init ftrace_dyn_arch_init(void *data) { /* The return code is retured via data */ *(unsigned long *)data = 0; return 0; } int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); unsigned int upper = (unsigned int)func; unsigned int lower = (unsigned int)func; int ret = 0; /* create proper saving to ftrace_call poll */ upper = 0xb0000000 + (upper >> 16); /* imm func_upper */ lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */ pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n", __func__, (unsigned int)func, (unsigned int)ip, upper, lower); /* save upper and lower code */ ret = ftrace_modify_code(ip, upper); ret += ftrace_modify_code(ip + 4, lower); /* We just need to replace the rtsd r15, 8 with NOP */ ret += ftrace_modify_code((unsigned long)&ftrace_caller, MICROBLAZE_NOP); return ret; } #ifdef CONFIG_FUNCTION_GRAPH_TRACER unsigned int old_jump; /* saving place for jump instruction */ int ftrace_enable_ftrace_graph_caller(void) { unsigned int ret; unsigned long ip = (unsigned long)(&ftrace_call_graph); old_jump = *(unsigned int *)ip; /* save jump over instruction */ ret = ftrace_modify_code(ip, MICROBLAZE_NOP); pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump); return ret; } int ftrace_disable_ftrace_graph_caller(void) { unsigned int ret; unsigned long ip = (unsigned long)(&ftrace_call_graph); ret = ftrace_modify_code(ip, old_jump); pr_debug("%s\n", __func__); return ret; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_DYNAMIC_FTRACE */
gpl-2.0
NikitaProAndroid/android_kernel_lge_msm8x26
fs/nls/nls_iso8859-2.c
12564
13181
/* * linux/fs/nls/nls_iso8859-2.c * * Charset iso8859-2 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f, /* 0x90*/ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f, /* 0xa0*/ 0x00a0, 0x0104, 0x02d8, 0x0141, 0x00a4, 0x013d, 0x015a, 0x00a7, 0x00a8, 0x0160, 0x015e, 0x0164, 0x0179, 0x00ad, 0x017d, 0x017b, /* 0xb0*/ 0x00b0, 0x0105, 0x02db, 0x0142, 0x00b4, 0x013e, 0x015b, 
0x02c7, 0x00b8, 0x0161, 0x015f, 0x0165, 0x017a, 0x02dd, 0x017e, 0x017c, /* 0xc0*/ 0x0154, 0x00c1, 0x00c2, 0x0102, 0x00c4, 0x0139, 0x0106, 0x00c7, 0x010c, 0x00c9, 0x0118, 0x00cb, 0x011a, 0x00cd, 0x00ce, 0x010e, /* 0xd0*/ 0x0110, 0x0143, 0x0147, 0x00d3, 0x00d4, 0x0150, 0x00d6, 0x00d7, 0x0158, 0x016e, 0x00da, 0x0170, 0x00dc, 0x00dd, 0x0162, 0x00df, /* 0xe0*/ 0x0155, 0x00e1, 0x00e2, 0x0103, 0x00e4, 0x013a, 0x0107, 0x00e7, 0x010d, 0x00e9, 0x0119, 0x00eb, 0x011b, 0x00ed, 0x00ee, 0x010f, /* 0xf0*/ 0x0111, 0x0144, 0x0148, 0x00f3, 0x00f4, 0x0151, 0x00f6, 0x00f7, 0x0159, 0x016f, 0x00fa, 0x0171, 0x00fc, 0x00fd, 0x0163, 0x02d9, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0xa7, 
/* 0xa0-0xa7 */ 0xa8, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */ 0xb0, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0xc1, 0xc2, 0x00, 0xc4, 0x00, 0x00, 0xc7, /* 0xc0-0xc7 */ 0x00, 0xc9, 0x00, 0xcb, 0x00, 0xcd, 0xce, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xd3, 0xd4, 0x00, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0x00, 0x00, 0xda, 0x00, 0xdc, 0xdd, 0x00, 0xdf, /* 0xd8-0xdf */ 0x00, 0xe1, 0xe2, 0x00, 0xe4, 0x00, 0x00, 0xe7, /* 0xe0-0xe7 */ 0x00, 0xe9, 0x00, 0xeb, 0x00, 0xed, 0xee, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0xf3, 0xf4, 0x00, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0x00, 0x00, 0xfa, 0x00, 0xfc, 0xfd, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0xc3, 0xe3, 0xa1, 0xb1, 0xc6, 0xe6, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0xcf, 0xef, /* 0x08-0x0f */ 0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xca, 0xea, 0xcc, 0xec, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0xc5, 0xe5, 0x00, 0x00, 0xa5, 0xb5, 0x00, /* 0x38-0x3f */ 0x00, 0xa3, 0xb3, 0xd1, 0xf1, 0x00, 0x00, 0xd2, /* 0x40-0x47 */ 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xd5, 0xf5, 0x00, 0x00, 0xc0, 0xe0, 0x00, 0x00, /* 0x50-0x57 */ 0xd8, 0xf8, 0xa6, 0xb6, 0x00, 0x00, 0xaa, 0xba, /* 0x58-0x5f */ 0xa9, 0xb9, 0xde, 0xfe, 0xab, 0xbb, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xf9, /* 0x68-0x6f */ 0xdb, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0xac, 0xbc, 0xaf, 0xbf, 0xae, 0xbe, 0x00, /* 0x78-0x7f */ }; static const unsigned char page02[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0xa2, 0xff, 0x00, 0xb2, 0x00, 0xbd, 0x00, 0x00, /* 0xd8-0xdf */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, page02, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xb1, 0xa2, 0xb3, 0xa4, 0xb5, 0xb6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xb9, 0xba, 0xbb, 0xbc, 0xad, 0xbe, 0xbf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 
0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xa1, 0xb2, 0xa3, 0xb4, 0xa5, 0xa6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xa9, 0xaa, 0xab, 0xac, 0xbd, 0xae, 0xaf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned 
char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "iso8859-2", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_iso8859_2(void) { return register_nls(&table); } static void __exit exit_nls_iso8859_2(void) { unregister_nls(&table); } module_init(init_nls_iso8859_2) module_exit(exit_nls_iso8859_2) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
ouya/ouya_1_1-kernel
scripts/unifdef.c
12564
35639
/* * Copyright (c) 2002 - 2011 Tony Finch <dot@dotat.at> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * unifdef - remove ifdef'ed lines * * This code was derived from software contributed to Berkeley by Dave Yost. * It was rewritten to support ANSI C by Tony Finch. The original version * of unifdef carried the 4-clause BSD copyright licence. None of its code * remains in this version (though some of the names remain) so it now * carries a more liberal licence. 
* * Wishlist: * provide an option which will append the name of the * appropriate symbol after #else's and #endif's * provide an option which will check symbols after * #else's and #endif's to see that they match their * corresponding #ifdef or #ifndef * * These require better buffer handling, which would also make * it possible to handle all "dodgy" directives correctly. */ #include <sys/types.h> #include <sys/stat.h> #include <ctype.h> #include <err.h> #include <errno.h> #include <stdarg.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> const char copyright[] = "@(#) $Version: unifdef-2.5 $\n" "@(#) $Author: Tony Finch (dot@dotat.at) $\n" "@(#) $URL: http://dotat.at/prog/unifdef $\n" ; /* types of input lines: */ typedef enum { LT_TRUEI, /* a true #if with ignore flag */ LT_FALSEI, /* a false #if with ignore flag */ LT_IF, /* an unknown #if */ LT_TRUE, /* a true #if */ LT_FALSE, /* a false #if */ LT_ELIF, /* an unknown #elif */ LT_ELTRUE, /* a true #elif */ LT_ELFALSE, /* a false #elif */ LT_ELSE, /* #else */ LT_ENDIF, /* #endif */ LT_DODGY, /* flag: directive is not on one line */ LT_DODGY_LAST = LT_DODGY + LT_ENDIF, LT_PLAIN, /* ordinary line */ LT_EOF, /* end of file */ LT_ERROR, /* unevaluable #if */ LT_COUNT } Linetype; static char const * const linetype_name[] = { "TRUEI", "FALSEI", "IF", "TRUE", "FALSE", "ELIF", "ELTRUE", "ELFALSE", "ELSE", "ENDIF", "DODGY TRUEI", "DODGY FALSEI", "DODGY IF", "DODGY TRUE", "DODGY FALSE", "DODGY ELIF", "DODGY ELTRUE", "DODGY ELFALSE", "DODGY ELSE", "DODGY ENDIF", "PLAIN", "EOF", "ERROR" }; /* state of #if processing */ typedef enum { IS_OUTSIDE, IS_FALSE_PREFIX, /* false #if followed by false #elifs */ IS_TRUE_PREFIX, /* first non-false #(el)if is true */ IS_PASS_MIDDLE, /* first non-false #(el)if is unknown */ IS_FALSE_MIDDLE, /* a false #elif after a pass state */ IS_TRUE_MIDDLE, /* a true #elif after a pass state */ IS_PASS_ELSE, /* an else after a pass state */ 
IS_FALSE_ELSE, /* an else after a true state */ IS_TRUE_ELSE, /* an else after only false states */ IS_FALSE_TRAILER, /* #elifs after a true are false */ IS_COUNT } Ifstate; static char const * const ifstate_name[] = { "OUTSIDE", "FALSE_PREFIX", "TRUE_PREFIX", "PASS_MIDDLE", "FALSE_MIDDLE", "TRUE_MIDDLE", "PASS_ELSE", "FALSE_ELSE", "TRUE_ELSE", "FALSE_TRAILER" }; /* state of comment parser */ typedef enum { NO_COMMENT = false, /* outside a comment */ C_COMMENT, /* in a comment like this one */ CXX_COMMENT, /* between // and end of line */ STARTING_COMMENT, /* just after slash-backslash-newline */ FINISHING_COMMENT, /* star-backslash-newline in a C comment */ CHAR_LITERAL, /* inside '' */ STRING_LITERAL /* inside "" */ } Comment_state; static char const * const comment_name[] = { "NO", "C", "CXX", "STARTING", "FINISHING", "CHAR", "STRING" }; /* state of preprocessor line parser */ typedef enum { LS_START, /* only space and comments on this line */ LS_HASH, /* only space, comments, and a hash */ LS_DIRTY /* this line can't be a preprocessor line */ } Line_state; static char const * const linestate_name[] = { "START", "HASH", "DIRTY" }; /* * Minimum translation limits from ISO/IEC 9899:1999 5.2.4.1 */ #define MAXDEPTH 64 /* maximum #if nesting */ #define MAXLINE 4096 /* maximum length of line */ #define MAXSYMS 4096 /* maximum number of symbols */ /* * Sometimes when editing a keyword the replacement text is longer, so * we leave some space at the end of the tline buffer to accommodate this. */ #define EDITSLOP 10 /* * For temporary filenames */ #define TEMPLATE "unifdef.XXXXXX" /* * Globals. 
*/ static bool compblank; /* -B: compress blank lines */ static bool lnblank; /* -b: blank deleted lines */ static bool complement; /* -c: do the complement */ static bool debugging; /* -d: debugging reports */ static bool iocccok; /* -e: fewer IOCCC errors */ static bool strictlogic; /* -K: keep ambiguous #ifs */ static bool killconsts; /* -k: eval constant #ifs */ static bool lnnum; /* -n: add #line directives */ static bool symlist; /* -s: output symbol list */ static bool symdepth; /* -S: output symbol depth */ static bool text; /* -t: this is a text file */ static const char *symname[MAXSYMS]; /* symbol name */ static const char *value[MAXSYMS]; /* -Dsym=value */ static bool ignore[MAXSYMS]; /* -iDsym or -iUsym */ static int nsyms; /* number of symbols */ static FILE *input; /* input file pointer */ static const char *filename; /* input file name */ static int linenum; /* current line number */ static FILE *output; /* output file pointer */ static const char *ofilename; /* output file name */ static bool overwriting; /* output overwrites input */ static char tempname[FILENAME_MAX]; /* used when overwriting */ static char tline[MAXLINE+EDITSLOP];/* input buffer plus space */ static char *keyword; /* used for editing #elif's */ static const char *newline; /* input file format */ static const char newline_unix[] = "\n"; static const char newline_crlf[] = "\r\n"; static Comment_state incomment; /* comment parser state */ static Line_state linestate; /* #if line parser state */ static Ifstate ifstate[MAXDEPTH]; /* #if processor state */ static bool ignoring[MAXDEPTH]; /* ignore comments state */ static int stifline[MAXDEPTH]; /* start of current #if */ static int depth; /* current #if nesting */ static int delcount; /* count of deleted lines */ static unsigned blankcount; /* count of blank lines */ static unsigned blankmax; /* maximum recent blankcount */ static bool constexpr; /* constant #if expression */ static bool zerosyms = true; /* to format symdepth output 
*/ static bool firstsym; /* ditto */ static int exitstat; /* program exit status */ static void addsym(bool, bool, char *); static void closeout(void); static void debug(const char *, ...); static void done(void); static void error(const char *); static int findsym(const char *); static void flushline(bool); static Linetype parseline(void); static Linetype ifeval(const char **); static void ignoreoff(void); static void ignoreon(void); static void keywordedit(const char *); static void nest(void); static void process(void); static const char *skipargs(const char *); static const char *skipcomment(const char *); static const char *skipsym(const char *); static void state(Ifstate); static int strlcmp(const char *, const char *, size_t); static void unnest(void); static void usage(void); static void version(void); #define endsym(c) (!isalnum((unsigned char)c) && c != '_') /* * The main program. */ int main(int argc, char *argv[]) { int opt; while ((opt = getopt(argc, argv, "i:D:U:I:o:bBcdeKklnsStV")) != -1) switch (opt) { case 'i': /* treat stuff controlled by these symbols as text */ /* * For strict backwards-compatibility the U or D * should be immediately after the -i but it doesn't * matter much if we relax that requirement. 
*/ opt = *optarg++; if (opt == 'D') addsym(true, true, optarg); else if (opt == 'U') addsym(true, false, optarg); else usage(); break; case 'D': /* define a symbol */ addsym(false, true, optarg); break; case 'U': /* undef a symbol */ addsym(false, false, optarg); break; case 'I': /* no-op for compatibility with cpp */ break; case 'b': /* blank deleted lines instead of omitting them */ case 'l': /* backwards compatibility */ lnblank = true; break; case 'B': /* compress blank lines around removed section */ compblank = true; break; case 'c': /* treat -D as -U and vice versa */ complement = true; break; case 'd': debugging = true; break; case 'e': /* fewer errors from dodgy lines */ iocccok = true; break; case 'K': /* keep ambiguous #ifs */ strictlogic = true; break; case 'k': /* process constant #ifs */ killconsts = true; break; case 'n': /* add #line directive after deleted lines */ lnnum = true; break; case 'o': /* output to a file */ ofilename = optarg; break; case 's': /* only output list of symbols that control #ifs */ symlist = true; break; case 'S': /* list symbols with their nesting depth */ symlist = symdepth = true; break; case 't': /* don't parse C comments */ text = true; break; case 'V': /* print version */ version(); default: usage(); } argc -= optind; argv += optind; if (compblank && lnblank) errx(2, "-B and -b are mutually exclusive"); if (argc > 1) { errx(2, "can only do one file"); } else if (argc == 1 && strcmp(*argv, "-") != 0) { filename = *argv; input = fopen(filename, "rb"); if (input == NULL) err(2, "can't open %s", filename); } else { filename = "[stdin]"; input = stdin; } if (ofilename == NULL) { ofilename = "[stdout]"; output = stdout; } else { struct stat ist, ost; if (stat(ofilename, &ost) == 0 && fstat(fileno(input), &ist) == 0) overwriting = (ist.st_dev == ost.st_dev && ist.st_ino == ost.st_ino); if (overwriting) { const char *dirsep; int ofd; dirsep = strrchr(ofilename, '/'); if (dirsep != NULL) snprintf(tempname, sizeof(tempname), 
"%.*s/" TEMPLATE, (int)(dirsep - ofilename), ofilename); else snprintf(tempname, sizeof(tempname), TEMPLATE); ofd = mkstemp(tempname); if (ofd != -1) output = fdopen(ofd, "wb+"); if (output == NULL) err(2, "can't create temporary file"); fchmod(ofd, ist.st_mode & (S_IRWXU|S_IRWXG|S_IRWXO)); } else { output = fopen(ofilename, "wb"); if (output == NULL) err(2, "can't open %s", ofilename); } } process(); abort(); /* bug */ } static void version(void) { const char *c = copyright; for (;;) { while (*++c != '$') if (*c == '\0') exit(0); while (*++c != '$') putc(*c, stderr); putc('\n', stderr); } } static void usage(void) { fprintf(stderr, "usage: unifdef [-bBcdeKknsStV] [-Ipath]" " [-Dsym[=val]] [-Usym] [-iDsym[=val]] [-iUsym] ... [file]\n"); exit(2); } /* * A state transition function alters the global #if processing state * in a particular way. The table below is indexed by the current * processing state and the type of the current line. * * Nesting is handled by keeping a stack of states; some transition * functions increase or decrease the depth. They also maintain the * ignore state on a stack. In some complicated cases they have to * alter the preprocessor directive, as follows. * * When we have processed a group that starts off with a known-false * #if/#elif sequence (which has therefore been deleted) followed by a * #elif that we don't understand and therefore must keep, we edit the * latter into a #if to keep the nesting correct. We use strncpy() to * overwrite the 4 byte token "elif" with "if " without a '\0' byte. * * When we find a true #elif in a group, the following block will * always be kept and the rest of the sequence after the next #elif or * #else will be discarded. We edit the #elif into a #else and the * following directive to #endif since this has the desired behaviour. * * "Dodgy" directives are split across multiple lines, the most common * example being a multi-line comment hanging off the right of the * directive. 
We can handle them correctly only if there is no change * from printing to dropping (or vice versa) caused by that directive. * If the directive is the first of a group we have a choice between * failing with an error, or passing it through unchanged instead of * evaluating it. The latter is not the default to avoid questions from * users about unifdef unexpectedly leaving behind preprocessor directives. */ typedef void state_fn(void); /* report an error */ static void Eelif (void) { error("Inappropriate #elif"); } static void Eelse (void) { error("Inappropriate #else"); } static void Eendif(void) { error("Inappropriate #endif"); } static void Eeof (void) { error("Premature EOF"); } static void Eioccc(void) { error("Obfuscated preprocessor control line"); } /* plain line handling */ static void print (void) { flushline(true); } static void drop (void) { flushline(false); } /* output lacks group's start line */ static void Strue (void) { drop(); ignoreoff(); state(IS_TRUE_PREFIX); } static void Sfalse(void) { drop(); ignoreoff(); state(IS_FALSE_PREFIX); } static void Selse (void) { drop(); state(IS_TRUE_ELSE); } /* print/pass this block */ static void Pelif (void) { print(); ignoreoff(); state(IS_PASS_MIDDLE); } static void Pelse (void) { print(); state(IS_PASS_ELSE); } static void Pendif(void) { print(); unnest(); } /* discard this block */ static void Dfalse(void) { drop(); ignoreoff(); state(IS_FALSE_TRAILER); } static void Delif (void) { drop(); ignoreoff(); state(IS_FALSE_MIDDLE); } static void Delse (void) { drop(); state(IS_FALSE_ELSE); } static void Dendif(void) { drop(); unnest(); } /* first line of group */ static void Fdrop (void) { nest(); Dfalse(); } static void Fpass (void) { nest(); Pelif(); } static void Ftrue (void) { nest(); Strue(); } static void Ffalse(void) { nest(); Sfalse(); } /* variable pedantry for obfuscated lines */ static void Oiffy (void) { if (!iocccok) Eioccc(); Fpass(); ignoreon(); } static void Oif (void) { if (!iocccok) Eioccc(); 
Fpass(); } static void Oelif (void) { if (!iocccok) Eioccc(); Pelif(); } /* ignore comments in this block */ static void Idrop (void) { Fdrop(); ignoreon(); } static void Itrue (void) { Ftrue(); ignoreon(); } static void Ifalse(void) { Ffalse(); ignoreon(); } /* modify this line */ static void Mpass (void) { strncpy(keyword, "if ", 4); Pelif(); } static void Mtrue (void) { keywordedit("else"); state(IS_TRUE_MIDDLE); } static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); } static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); } static state_fn * const trans_table[IS_COUNT][LT_COUNT] = { /* IS_OUTSIDE */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Eendif, Oiffy, Oiffy, Fpass, Oif, Oif, Eelif, Eelif, Eelif, Eelse, Eendif, print, done, abort }, /* IS_FALSE_PREFIX */ { Idrop, Idrop, Fdrop, Fdrop, Fdrop, Mpass, Strue, Sfalse,Selse, Dendif, Idrop, Idrop, Fdrop, Fdrop, Fdrop, Mpass, Eioccc,Eioccc,Eioccc,Eioccc, drop, Eeof, abort }, /* IS_TRUE_PREFIX */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Dfalse,Dfalse,Dfalse,Delse, Dendif, Oiffy, Oiffy, Fpass, Oif, Oif, Eioccc,Eioccc,Eioccc,Eioccc,Eioccc, print, Eeof, abort }, /* IS_PASS_MIDDLE */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Pelif, Mtrue, Delif, Pelse, Pendif, Oiffy, Oiffy, Fpass, Oif, Oif, Pelif, Oelif, Oelif, Pelse, Pendif, print, Eeof, abort }, /* IS_FALSE_MIDDLE */ { Idrop, Idrop, Fdrop, Fdrop, Fdrop, Pelif, Mtrue, Delif, Pelse, Pendif, Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eioccc,Eioccc,Eioccc,Eioccc,Eioccc, drop, Eeof, abort }, /* IS_TRUE_MIDDLE */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Melif, Melif, Melif, Melse, Pendif, Oiffy, Oiffy, Fpass, Oif, Oif, Eioccc,Eioccc,Eioccc,Eioccc,Pendif, print, Eeof, abort }, /* IS_PASS_ELSE */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Pendif, Oiffy, Oiffy, Fpass, Oif, Oif, Eelif, Eelif, Eelif, Eelse, Pendif, print, Eeof, abort }, /* IS_FALSE_ELSE */ { Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eelif, Eelif, Eelif, Eelse, 
Dendif, Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eelif, Eelif, Eelif, Eelse, Eioccc, drop, Eeof, abort }, /* IS_TRUE_ELSE */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Dendif, Oiffy, Oiffy, Fpass, Oif, Oif, Eelif, Eelif, Eelif, Eelse, Eioccc, print, Eeof, abort }, /* IS_FALSE_TRAILER */ { Idrop, Idrop, Fdrop, Fdrop, Fdrop, Dfalse,Dfalse,Dfalse,Delse, Dendif, Idrop, Idrop, Fdrop, Fdrop, Fdrop, Dfalse,Dfalse,Dfalse,Delse, Eioccc, drop, Eeof, abort } /*TRUEI FALSEI IF TRUE FALSE ELIF ELTRUE ELFALSE ELSE ENDIF TRUEI FALSEI IF TRUE FALSE ELIF ELTRUE ELFALSE ELSE ENDIF (DODGY) PLAIN EOF ERROR */ }; /* * State machine utility functions */ static void ignoreoff(void) { if (depth == 0) abort(); /* bug */ ignoring[depth] = ignoring[depth-1]; } static void ignoreon(void) { ignoring[depth] = true; } static void keywordedit(const char *replacement) { snprintf(keyword, tline + sizeof(tline) - keyword, "%s%s", replacement, newline); print(); } static void nest(void) { if (depth > MAXDEPTH-1) abort(); /* bug */ if (depth == MAXDEPTH-1) error("Too many levels of nesting"); depth += 1; stifline[depth] = linenum; } static void unnest(void) { if (depth == 0) abort(); /* bug */ depth -= 1; } static void state(Ifstate is) { ifstate[depth] = is; } /* * Write a line to the output or not, according to command line options. */ static void flushline(bool keep) { if (symlist) return; if (keep ^ complement) { bool blankline = tline[strspn(tline, " \t\r\n")] == '\0'; if (blankline && compblank && blankcount != blankmax) { delcount += 1; blankcount += 1; } else { if (lnnum && delcount > 0) printf("#line %d%s", linenum, newline); fputs(tline, output); delcount = 0; blankmax = blankcount = blankline ? blankcount + 1 : 0; } } else { if (lnblank) fputs(newline, output); exitstat = 1; delcount += 1; blankcount = 0; } if (debugging) fflush(output); } /* * The driver for the state machine. 
*/ static void process(void) { /* When compressing blank lines, act as if the file is preceded by a large number of blank lines. */ blankmax = blankcount = 1000; for (;;) { Linetype lineval = parseline(); trans_table[ifstate[depth]][lineval](); debug("process line %d %s -> %s depth %d", linenum, linetype_name[lineval], ifstate_name[ifstate[depth]], depth); } } /* * Flush the output and handle errors. */ static void closeout(void) { if (symdepth && !zerosyms) printf("\n"); if (fclose(output) == EOF) { warn("couldn't write to %s", ofilename); if (overwriting) { unlink(tempname); errx(2, "%s unchanged", filename); } else { exit(2); } } } /* * Clean up and exit. */ static void done(void) { if (incomment) error("EOF in comment"); closeout(); if (overwriting && rename(tempname, ofilename) == -1) { warn("couldn't rename temporary file"); unlink(tempname); errx(2, "%s unchanged", ofilename); } exit(exitstat); } /* * Parse a line and determine its type. We keep the preprocessor line * parser state between calls in the global variable linestate, with * help from skipcomment(). 
*/ static Linetype parseline(void) { const char *cp; int cursym; int kwlen; Linetype retval; Comment_state wascomment; linenum++; if (fgets(tline, MAXLINE, input) == NULL) return (LT_EOF); if (newline == NULL) { if (strrchr(tline, '\n') == strrchr(tline, '\r') + 1) newline = newline_crlf; else newline = newline_unix; } retval = LT_PLAIN; wascomment = incomment; cp = skipcomment(tline); if (linestate == LS_START) { if (*cp == '#') { linestate = LS_HASH; firstsym = true; cp = skipcomment(cp + 1); } else if (*cp != '\0') linestate = LS_DIRTY; } if (!incomment && linestate == LS_HASH) { keyword = tline + (cp - tline); cp = skipsym(cp); kwlen = cp - keyword; /* no way can we deal with a continuation inside a keyword */ if (strncmp(cp, "\\\r\n", 3) == 0 || strncmp(cp, "\\\n", 2) == 0) Eioccc(); if (strlcmp("ifdef", keyword, kwlen) == 0 || strlcmp("ifndef", keyword, kwlen) == 0) { cp = skipcomment(cp); if ((cursym = findsym(cp)) < 0) retval = LT_IF; else { retval = (keyword[2] == 'n') ? LT_FALSE : LT_TRUE; if (value[cursym] == NULL) retval = (retval == LT_TRUE) ? LT_FALSE : LT_TRUE; if (ignore[cursym]) retval = (retval == LT_TRUE) ? 
LT_TRUEI : LT_FALSEI; } cp = skipsym(cp); } else if (strlcmp("if", keyword, kwlen) == 0) retval = ifeval(&cp); else if (strlcmp("elif", keyword, kwlen) == 0) retval = ifeval(&cp) - LT_IF + LT_ELIF; else if (strlcmp("else", keyword, kwlen) == 0) retval = LT_ELSE; else if (strlcmp("endif", keyword, kwlen) == 0) retval = LT_ENDIF; else { linestate = LS_DIRTY; retval = LT_PLAIN; } cp = skipcomment(cp); if (*cp != '\0') { linestate = LS_DIRTY; if (retval == LT_TRUE || retval == LT_FALSE || retval == LT_TRUEI || retval == LT_FALSEI) retval = LT_IF; if (retval == LT_ELTRUE || retval == LT_ELFALSE) retval = LT_ELIF; } if (retval != LT_PLAIN && (wascomment || incomment)) { retval += LT_DODGY; if (incomment) linestate = LS_DIRTY; } /* skipcomment normally changes the state, except if the last line of the file lacks a newline, or if there is too much whitespace in a directive */ if (linestate == LS_HASH) { size_t len = cp - tline; if (fgets(tline + len, MAXLINE - len, input) == NULL) { /* append the missing newline */ strcpy(tline + len, newline); cp += strlen(newline); linestate = LS_START; } else { linestate = LS_DIRTY; } } } if (linestate == LS_DIRTY) { while (*cp != '\0') cp = skipcomment(cp + 1); } debug("parser line %d state %s comment %s line", linenum, comment_name[incomment], linestate_name[linestate]); return (retval); } /* * These are the binary operators that are supported by the expression * evaluator. */ static Linetype op_strict(int *p, int v, Linetype at, Linetype bt) { if(at == LT_IF || bt == LT_IF) return (LT_IF); return (*p = v, v ? 
LT_TRUE : LT_FALSE); } static Linetype op_lt(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a < b, at, bt); } static Linetype op_gt(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a > b, at, bt); } static Linetype op_le(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a <= b, at, bt); } static Linetype op_ge(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a >= b, at, bt); } static Linetype op_eq(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a == b, at, bt); } static Linetype op_ne(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a != b, at, bt); } static Linetype op_or(int *p, Linetype at, int a, Linetype bt, int b) { if (!strictlogic && (at == LT_TRUE || bt == LT_TRUE)) return (*p = 1, LT_TRUE); return op_strict(p, a || b, at, bt); } static Linetype op_and(int *p, Linetype at, int a, Linetype bt, int b) { if (!strictlogic && (at == LT_FALSE || bt == LT_FALSE)) return (*p = 0, LT_FALSE); return op_strict(p, a && b, at, bt); } /* * An evaluation function takes three arguments, as follows: (1) a pointer to * an element of the precedence table which lists the operators at the current * level of precedence; (2) a pointer to an integer which will receive the * value of the expression; and (3) a pointer to a char* that points to the * expression to be evaluated and that is updated to the end of the expression * when evaluation is complete. The function returns LT_FALSE if the value of * the expression is zero, LT_TRUE if it is non-zero, LT_IF if the expression * depends on an unknown symbol, or LT_ERROR if there is a parse failure. */ struct ops; typedef Linetype eval_fn(const struct ops *, int *, const char **); static eval_fn eval_table, eval_unary; /* * The precedence table. Expressions involving binary operators are evaluated * in a table-driven way by eval_table. 
When it evaluates a subexpression it * calls the inner function with its first argument pointing to the next * element of the table. Innermost expressions have special non-table-driven * handling. */ static const struct ops { eval_fn *inner; struct op { const char *str; Linetype (*fn)(int *, Linetype, int, Linetype, int); } op[5]; } eval_ops[] = { { eval_table, { { "||", op_or } } }, { eval_table, { { "&&", op_and } } }, { eval_table, { { "==", op_eq }, { "!=", op_ne } } }, { eval_unary, { { "<=", op_le }, { ">=", op_ge }, { "<", op_lt }, { ">", op_gt } } } }; /* * Function for evaluating the innermost parts of expressions, * viz. !expr (expr) number defined(symbol) symbol * We reset the constexpr flag in the last two cases. */ static Linetype eval_unary(const struct ops *ops, int *valp, const char **cpp) { const char *cp; char *ep; int sym; bool defparen; Linetype lt; cp = skipcomment(*cpp); if (*cp == '!') { debug("eval%d !", ops - eval_ops); cp++; lt = eval_unary(ops, valp, &cp); if (lt == LT_ERROR) return (LT_ERROR); if (lt != LT_IF) { *valp = !*valp; lt = *valp ? LT_TRUE : LT_FALSE; } } else if (*cp == '(') { cp++; debug("eval%d (", ops - eval_ops); lt = eval_table(eval_ops, valp, &cp); if (lt == LT_ERROR) return (LT_ERROR); cp = skipcomment(cp); if (*cp++ != ')') return (LT_ERROR); } else if (isdigit((unsigned char)*cp)) { debug("eval%d number", ops - eval_ops); *valp = strtol(cp, &ep, 0); if (ep == cp) return (LT_ERROR); lt = *valp ? LT_TRUE : LT_FALSE; cp = skipsym(cp); } else if (strncmp(cp, "defined", 7) == 0 && endsym(cp[7])) { cp = skipcomment(cp+7); debug("eval%d defined", ops - eval_ops); if (*cp == '(') { cp = skipcomment(cp+1); defparen = true; } else { defparen = false; } sym = findsym(cp); if (sym < 0) { lt = LT_IF; } else { *valp = (value[sym] != NULL); lt = *valp ? 
LT_TRUE : LT_FALSE; } cp = skipsym(cp); cp = skipcomment(cp); if (defparen && *cp++ != ')') return (LT_ERROR); constexpr = false; } else if (!endsym(*cp)) { debug("eval%d symbol", ops - eval_ops); sym = findsym(cp); cp = skipsym(cp); if (sym < 0) { lt = LT_IF; cp = skipargs(cp); } else if (value[sym] == NULL) { *valp = 0; lt = LT_FALSE; } else { *valp = strtol(value[sym], &ep, 0); if (*ep != '\0' || ep == value[sym]) return (LT_ERROR); lt = *valp ? LT_TRUE : LT_FALSE; cp = skipargs(cp); } constexpr = false; } else { debug("eval%d bad expr", ops - eval_ops); return (LT_ERROR); } *cpp = cp; debug("eval%d = %d", ops - eval_ops, *valp); return (lt); } /* * Table-driven evaluation of binary operators. */ static Linetype eval_table(const struct ops *ops, int *valp, const char **cpp) { const struct op *op; const char *cp; int val; Linetype lt, rt; debug("eval%d", ops - eval_ops); cp = *cpp; lt = ops->inner(ops+1, valp, &cp); if (lt == LT_ERROR) return (LT_ERROR); for (;;) { cp = skipcomment(cp); for (op = ops->op; op->str != NULL; op++) if (strncmp(cp, op->str, strlen(op->str)) == 0) break; if (op->str == NULL) break; cp += strlen(op->str); debug("eval%d %s", ops - eval_ops, op->str); rt = ops->inner(ops+1, &val, &cp); if (rt == LT_ERROR) return (LT_ERROR); lt = op->fn(valp, lt, *valp, rt, val); } *cpp = cp; debug("eval%d = %d", ops - eval_ops, *valp); debug("eval%d lt = %s", ops - eval_ops, linetype_name[lt]); return (lt); } /* * Evaluate the expression on a #if or #elif line. If we can work out * the result we return LT_TRUE or LT_FALSE accordingly, otherwise we * return just a generic LT_IF. */ static Linetype ifeval(const char **cpp) { int ret; int val = 0; debug("eval %s", *cpp); constexpr = killconsts ? false : true; ret = eval_table(eval_ops, &val, cpp); debug("eval = %d", val); return (constexpr ? LT_IF : ret == LT_ERROR ? LT_IF : ret); } /* * Skip over comments, strings, and character literals and stop at the * next character position that is not whitespace. 
Between calls we keep * the comment state in the global variable incomment, and we also adjust * the global variable linestate when we see a newline. * XXX: doesn't cope with the buffer splitting inside a state transition. */ static const char * skipcomment(const char *cp) { if (text || ignoring[depth]) { for (; isspace((unsigned char)*cp); cp++) if (*cp == '\n') linestate = LS_START; return (cp); } while (*cp != '\0') /* don't reset to LS_START after a line continuation */ if (strncmp(cp, "\\\r\n", 3) == 0) cp += 3; else if (strncmp(cp, "\\\n", 2) == 0) cp += 2; else switch (incomment) { case NO_COMMENT: if (strncmp(cp, "/\\\r\n", 4) == 0) { incomment = STARTING_COMMENT; cp += 4; } else if (strncmp(cp, "/\\\n", 3) == 0) { incomment = STARTING_COMMENT; cp += 3; } else if (strncmp(cp, "/*", 2) == 0) { incomment = C_COMMENT; cp += 2; } else if (strncmp(cp, "//", 2) == 0) { incomment = CXX_COMMENT; cp += 2; } else if (strncmp(cp, "\'", 1) == 0) { incomment = CHAR_LITERAL; linestate = LS_DIRTY; cp += 1; } else if (strncmp(cp, "\"", 1) == 0) { incomment = STRING_LITERAL; linestate = LS_DIRTY; cp += 1; } else if (strncmp(cp, "\n", 1) == 0) { linestate = LS_START; cp += 1; } else if (strchr(" \r\t", *cp) != NULL) { cp += 1; } else return (cp); continue; case CXX_COMMENT: if (strncmp(cp, "\n", 1) == 0) { incomment = NO_COMMENT; linestate = LS_START; } cp += 1; continue; case CHAR_LITERAL: case STRING_LITERAL: if ((incomment == CHAR_LITERAL && cp[0] == '\'') || (incomment == STRING_LITERAL && cp[0] == '\"')) { incomment = NO_COMMENT; cp += 1; } else if (cp[0] == '\\') { if (cp[1] == '\0') cp += 1; else cp += 2; } else if (strncmp(cp, "\n", 1) == 0) { if (incomment == CHAR_LITERAL) error("unterminated char literal"); else error("unterminated string literal"); } else cp += 1; continue; case C_COMMENT: if (strncmp(cp, "*\\\r\n", 4) == 0) { incomment = FINISHING_COMMENT; cp += 4; } else if (strncmp(cp, "*\\\n", 3) == 0) { incomment = FINISHING_COMMENT; cp += 3; } else if 
(strncmp(cp, "*/", 2) == 0) { incomment = NO_COMMENT; cp += 2; } else cp += 1; continue; case STARTING_COMMENT: if (*cp == '*') { incomment = C_COMMENT; cp += 1; } else if (*cp == '/') { incomment = CXX_COMMENT; cp += 1; } else { incomment = NO_COMMENT; linestate = LS_DIRTY; } continue; case FINISHING_COMMENT: if (*cp == '/') { incomment = NO_COMMENT; cp += 1; } else incomment = C_COMMENT; continue; default: abort(); /* bug */ } return (cp); } /* * Skip macro arguments. */ static const char * skipargs(const char *cp) { const char *ocp = cp; int level = 0; cp = skipcomment(cp); if (*cp != '(') return (cp); do { if (*cp == '(') level++; if (*cp == ')') level--; cp = skipcomment(cp+1); } while (level != 0 && *cp != '\0'); if (level == 0) return (cp); else /* Rewind and re-detect the syntax error later. */ return (ocp); } /* * Skip over an identifier. */ static const char * skipsym(const char *cp) { while (!endsym(*cp)) ++cp; return (cp); } /* * Look for the symbol in the symbol table. If it is found, we return * the symbol table index, else we return -1. */ static int findsym(const char *str) { const char *cp; int symind; cp = skipsym(str); if (cp == str) return (-1); if (symlist) { if (symdepth && firstsym) printf("%s%3d", zerosyms ? "" : "\n", depth); firstsym = zerosyms = false; printf("%s%.*s%s", symdepth ? " " : "", (int)(cp-str), str, symdepth ? "" : "\n"); /* we don't care about the value of the symbol */ return (0); } for (symind = 0; symind < nsyms; ++symind) { if (strlcmp(symname[symind], str, cp-str) == 0) { debug("findsym %s %s", symname[symind], value[symind] ? value[symind] : ""); return (symind); } } return (-1); } /* * Add a symbol to the symbol table. 
*/ static void addsym(bool ignorethis, bool definethis, char *sym) { int symind; char *val; symind = findsym(sym); if (symind < 0) { if (nsyms >= MAXSYMS) errx(2, "too many symbols"); symind = nsyms++; } symname[symind] = sym; ignore[symind] = ignorethis; val = sym + (skipsym(sym) - sym); if (definethis) { if (*val == '=') { value[symind] = val+1; *val = '\0'; } else if (*val == '\0') value[symind] = "1"; else usage(); } else { if (*val != '\0') usage(); value[symind] = NULL; } debug("addsym %s=%s", symname[symind], value[symind] ? value[symind] : "undef"); } /* * Compare s with n characters of t. * The same as strncmp() except that it checks that s[n] == '\0'. */ static int strlcmp(const char *s, const char *t, size_t n) { while (n-- && *t != '\0') if (*s != *t) return ((unsigned char)*s - (unsigned char)*t); else ++s, ++t; return ((unsigned char)*s); } /* * Diagnostics. */ static void debug(const char *msg, ...) { va_list ap; if (debugging) { va_start(ap, msg); vwarnx(msg, ap); va_end(ap); } } static void error(const char *msg) { if (depth == 0) warnx("%s: %d: %s", filename, linenum, msg); else warnx("%s: %d: %s (#if line %d depth %d)", filename, linenum, msg, stifline[depth], depth); closeout(); errx(2, "output may be truncated"); }
gpl-2.0
tzanussi/linux-yocto-micro-3.17
kernel/time/timeconv.c
13844
3580
/* * Copyright (C) 1993, 1994, 1995, 1996, 1997 Free Software Foundation, Inc. * This file is part of the GNU C Library. * Contributed by Paul Eggert (eggert@twinsun.com). * * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with the GNU C Library; see the file COPYING.LIB. If not, * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ /* * Converts the calendar time to broken-down time representation * Based on code from glibc-2.6 * * 2009-7-14: * Moved from glibc-2.6 to kernel by Zhaolei<zhaolei@cn.fujitsu.com> */ #include <linux/time.h> #include <linux/module.h> /* * Nonzero if YEAR is a leap year (every 4 years, * except every 100th isn't, and every 400th is). */ static int __isleap(long year) { return (year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0); } /* do a mathdiv for long type */ static long math_div(long a, long b) { return a / b - (a % b < 0); } /* How many leap years between y1 and y2, y1 must less or equal to y2 */ static long leaps_between(long y1, long y2) { long leaps1 = math_div(y1 - 1, 4) - math_div(y1 - 1, 100) + math_div(y1 - 1, 400); long leaps2 = math_div(y2 - 1, 4) - math_div(y2 - 1, 100) + math_div(y2 - 1, 400); return leaps2 - leaps1; } /* How many days come before each month (0-12). */ static const unsigned short __mon_yday[2][13] = { /* Normal years. */ {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}, /* Leap years. 
*/ {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366} }; #define SECS_PER_HOUR (60 * 60) #define SECS_PER_DAY (SECS_PER_HOUR * 24) /** * time_to_tm - converts the calendar time to local broken-down time * * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970, * Coordinated Universal Time (UTC). * @offset offset seconds adding to totalsecs. * @result pointer to struct tm variable to receive broken-down time */ void time_to_tm(time_t totalsecs, int offset, struct tm *result) { long days, rem, y; const unsigned short *ip; days = totalsecs / SECS_PER_DAY; rem = totalsecs % SECS_PER_DAY; rem += offset; while (rem < 0) { rem += SECS_PER_DAY; --days; } while (rem >= SECS_PER_DAY) { rem -= SECS_PER_DAY; ++days; } result->tm_hour = rem / SECS_PER_HOUR; rem %= SECS_PER_HOUR; result->tm_min = rem / 60; result->tm_sec = rem % 60; /* January 1, 1970 was a Thursday. */ result->tm_wday = (4 + days) % 7; if (result->tm_wday < 0) result->tm_wday += 7; y = 1970; while (days < 0 || days >= (__isleap(y) ? 366 : 365)) { /* Guess a corrected year, assuming 365 days per year. */ long yg = y + math_div(days, 365); /* Adjust DAYS and Y to match the guessed year. */ days -= (yg - y) * 365 + leaps_between(y, yg); y = yg; } result->tm_year = y - 1900; result->tm_yday = days; ip = __mon_yday[__isleap(y)]; for (y = 11; days < ip[y]; y--) continue; days -= ip[y]; result->tm_mon = y; result->tm_mday = days + 1; } EXPORT_SYMBOL(time_to_tm);
gpl-2.0
elelinux/pyramid_kernel_aosp
drivers/usb/core/message.c
21
60464
/* * message.c - synchronous message handling */ #include <linux/pci.h> /* for scatterlist macros */ #include <linux/usb.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/timer.h> #include <linux/ctype.h> #include <linux/nls.h> #include <linux/device.h> #include <linux/scatterlist.h> #include <linux/usb/quirks.h> #include <linux/usb/hcd.h> /* for usbcore internals */ #include <asm/byteorder.h> #include "usb.h" static void cancel_async_set_config(struct usb_device *udev); struct api_context { struct completion done; int status; }; static void usb_api_blocking_completion(struct urb *urb) { struct api_context *ctx = urb->context; ctx->status = urb->status; complete(&ctx->done); } /* * Starts urb and waits for completion or timeout. Note that this call * is NOT interruptible. Many device driver i/o requests should be * interruptible and therefore these drivers should implement their * own interruptible routines. */ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length) { struct api_context ctx; unsigned long expire; int retval; init_completion(&ctx.done); urb->context = &ctx; urb->actual_length = 0; retval = usb_submit_urb(urb, GFP_NOIO); if (unlikely(retval)) goto out; expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; if (!wait_for_completion_timeout(&ctx.done, expire)) { usb_kill_urb(urb); retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status); dev_dbg(&urb->dev->dev, "%s timed out on ep%d%s len=%u/%u\n", current->comm, usb_endpoint_num(&urb->ep->desc), usb_urb_dir_in(urb) ? 
"in" : "out", urb->actual_length, urb->transfer_buffer_length); } else retval = ctx.status; out: if (actual_length) *actual_length = urb->actual_length; usb_free_urb(urb); return retval; } /*-------------------------------------------------------------------*/ /* returns status (negative) or length (positive) */ static int usb_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe, struct usb_ctrlrequest *cmd, void *data, int len, int timeout) { struct urb *urb; int retv; int length; urb = usb_alloc_urb(0, GFP_NOIO); if (!urb) return -ENOMEM; usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data, len, usb_api_blocking_completion, NULL); retv = usb_start_wait_urb(urb, timeout, &length); if (retv < 0) return retv; else return length; } /** * usb_control_msg - Builds a control urb, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @data: pointer to the data to send * @size: length in bytes of the data to send * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * * Context: !in_interrupt () * * This function sends a simple control message to a specified endpoint and * waits for the message to complete, or timeout. * * If successful, it returns the number of bytes transferred, otherwise a * negative error number. * * Don't use this function from within an interrupt context, like a bottom half * handler. If you need an asynchronous message, or need to send a message * from within interrupt context, use usb_submit_urb(). * If a thread in your driver uses this call, make sure your disconnect() * method can wait for it to complete. Since you don't have a handle on the * URB used, you can't cancel the request. 
*/ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { struct usb_ctrlrequest *dr; int ret; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); if (!dr) return -ENOMEM; dr->bRequestType = requesttype; dr->bRequest = request; dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(size); /* dbg("usb_control_msg"); */ ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); kfree(dr); return ret; } EXPORT_SYMBOL_GPL(usb_control_msg); /** * usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @data: pointer to the data to send * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes * @timeout: time in msecs to wait for the message to complete before * timing out (if 0 the wait is forever) * * Context: !in_interrupt () * * This function sends a simple interrupt message to a specified endpoint and * waits for the message to complete, or timeout. * * If successful, it returns 0, otherwise a negative error number. The number * of actual bytes transferred will be stored in the actual_length paramater. * * Don't use this function from within an interrupt context, like a bottom half * handler. If you need an asynchronous message, or need to send a message * from within interrupt context, use usb_submit_urb() If a thread in your * driver uses this call, make sure your disconnect() method can wait for it to * complete. Since you don't have a handle on the URB used, you can't cancel * the request. 
*/ int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout) { return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout); } EXPORT_SYMBOL_GPL(usb_interrupt_msg); /** * usb_bulk_msg - Builds a bulk urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @data: pointer to the data to send * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes * @timeout: time in msecs to wait for the message to complete before * timing out (if 0 the wait is forever) * * Context: !in_interrupt () * * This function sends a simple bulk message to a specified endpoint * and waits for the message to complete, or timeout. * * If successful, it returns 0, otherwise a negative error number. The number * of actual bytes transferred will be stored in the actual_length paramater. * * Don't use this function from within an interrupt context, like a bottom half * handler. If you need an asynchronous message, or need to send a message * from within interrupt context, use usb_submit_urb() If a thread in your * driver uses this call, make sure your disconnect() method can wait for it to * complete. Since you don't have a handle on the URB used, you can't cancel * the request. * * Because there is no usb_interrupt_msg() and no USBDEVFS_INTERRUPT ioctl, * users are forced to abuse this routine by using it to submit URBs for * interrupt endpoints. We will take the liberty of creating an interrupt URB * (with the default interval) if the target is an interrupt endpoint. 
*/ int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout) { struct urb *urb; struct usb_host_endpoint *ep; ep = usb_pipe_endpoint(usb_dev, pipe); if (!ep || len < 0) return -EINVAL; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); usb_fill_int_urb(urb, usb_dev, pipe, data, len, usb_api_blocking_completion, NULL, ep->desc.bInterval); } else usb_fill_bulk_urb(urb, usb_dev, pipe, data, len, usb_api_blocking_completion, NULL); return usb_start_wait_urb(urb, timeout, actual_length); } EXPORT_SYMBOL_GPL(usb_bulk_msg); /*-------------------------------------------------------------------*/ static void sg_clean(struct usb_sg_request *io) { if (io->urbs) { while (io->entries--) usb_free_urb(io->urbs [io->entries]); kfree(io->urbs); io->urbs = NULL; } io->dev = NULL; } static void sg_complete(struct urb *urb) { struct usb_sg_request *io = urb->context; int status = urb->status; spin_lock(&io->lock); /* In 2.5 we require hcds' endpoint queues not to progress after fault * reports, until the completion callback (this!) returns. That lets * device driver code (like this routine) unlink queued urbs first, * if it needs to, since the HC won't work on them at all. So it's * not possible for page N+1 to overwrite page N, and so on. * * That's only for "hard" faults; "soft" faults (unlinks) sometimes * complete before the HCD can get requests away from hardware, * though never during cleanup after a hard fault. */ if (io->status && (io->status != -ECONNRESET || status != -ECONNRESET) && urb->actual_length) { dev_err(io->dev->bus->controller, "dev %s ep%d%s scatterlist error %d/%d\n", io->dev->devpath, usb_endpoint_num(&urb->ep->desc), usb_urb_dir_in(urb) ? 
"in" : "out", status, io->status); /* BUG (); */ } if (io->status == 0 && status && status != -ECONNRESET) { int i, found, retval; io->status = status; /* the previous urbs, and this one, completed already. * unlink pending urbs so they won't rx/tx bad data. * careful: unlink can sometimes be synchronous... */ spin_unlock(&io->lock); for (i = 0, found = 0; i < io->entries; i++) { if (!io->urbs [i] || !io->urbs [i]->dev) continue; if (found) { retval = usb_unlink_urb(io->urbs [i]); if (retval != -EINPROGRESS && retval != -ENODEV && retval != -EBUSY && retval != -EIDRM) dev_err(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } else if (urb == io->urbs [i]) found = 1; } spin_lock(&io->lock); } /* on the last completion, signal usb_sg_wait() */ io->bytes += urb->actual_length; io->count--; if (!io->count) complete(&io->complete); spin_unlock(&io->lock); } /** * usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request * @io: request block being initialized. until usb_sg_wait() returns, * treat this as a pointer to an opaque block of memory, * @dev: the usb device that will send or receive the data * @pipe: endpoint "pipe" used to transfer the data * @period: polling rate for interrupt endpoints, in frames or * (for high speed endpoints) microframes; ignored for bulk * @sg: scatterlist entries * @nents: how many entries in the scatterlist * @length: how many bytes to send from the scatterlist, or zero to * send every byte identified in the list. * @mem_flags: SLAB_* flags affecting memory allocations in this call * * Returns zero for success, else a negative errno value. This initializes a * scatter/gather request, allocating resources such as I/O mappings and urb * memory (except maybe memory used by USB controller drivers). * * The request must be issued using usb_sg_wait(), which waits for the I/O to * complete (or to be canceled) and then cleans up all resources allocated by * usb_sg_init(). 
 *
 * The request may be canceled with usb_sg_cancel(), either before or after
 * usb_sg_wait() is called.
 */
int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
		unsigned pipe, unsigned period, struct scatterlist *sg,
		int nents, size_t length, gfp_t mem_flags)
{
	int i;
	int urb_flags;
	int use_sg;

	/* reject control/iso pipes and obviously bad arguments up front */
	if (!io || !dev || !sg || usb_pipecontrol(pipe)
			|| usb_pipeisoc(pipe) || nents <= 0)
		return -EINVAL;

	spin_lock_init(&io->lock);
	io->dev = dev;
	io->pipe = pipe;

	/* HCs that understand scatterlists get ONE sg-aware URB for the
	 * whole list; others get one URB per scatterlist entry.
	 */
	if (dev->bus->sg_tablesize > 0) {
		use_sg = true;
		io->entries = 1;
	} else {
		use_sg = false;
		io->entries = nents;
	}

	/* initialize all the urbs we'll use */
	io->urbs = kmalloc(io->entries * sizeof(*io->urbs), mem_flags);
	if (!io->urbs)
		goto nomem;

	/* only the last URB reports completion (flag cleared below) */
	urb_flags = URB_NO_INTERRUPT;
	if (usb_pipein(pipe))
		urb_flags |= URB_SHORT_NOT_OK;

	for_each_sg(sg, sg, io->entries, i) {
		struct urb *urb;
		unsigned len;

		urb = usb_alloc_urb(0, mem_flags);
		if (!urb) {
			/* record how many URBs exist so sg_clean() can
			 * free exactly those in the nomem path
			 */
			io->entries = i;
			goto nomem;
		}
		io->urbs[i] = urb;

		/* urb->dev stays NULL until usb_sg_wait() submits it;
		 * sg_complete()/usb_sg_cancel() use that as an
		 * "unsubmitted" marker
		 */
		urb->dev = NULL;
		urb->pipe = pipe;
		urb->interval = period;
		urb->transfer_flags = urb_flags;
		urb->complete = sg_complete;
		urb->context = io;
		urb->sg = sg;

		if (use_sg) {
			/* There is no single transfer buffer */
			urb->transfer_buffer = NULL;
			urb->num_sgs = nents;

			/* A length of zero means transfer the whole sg list */
			len = length;
			if (len == 0) {
				struct scatterlist *sg2;
				int j;

				for_each_sg(sg, sg2, nents, j)
					len += sg2->length;
			}
		} else {
			/*
			 * Some systems can't use DMA; they use PIO instead.
			 * For their sakes, transfer_buffer is set whenever
			 * possible.
			 */
			if (!PageHighMem(sg_page(sg)))
				urb->transfer_buffer = sg_virt(sg);
			else
				urb->transfer_buffer = NULL;

			len = sg->length;
			if (length) {
				/* cap at the caller's total; stop early
				 * once the requested length is consumed
				 */
				len = min_t(unsigned, len, length);
				length -= len;
				if (length == 0)
					io->entries = i + 1;
			}
		}
		urb->transfer_buffer_length = len;
	}
	/* let only the last URB interrupt on completion */
	io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;

	/* transaction state */
	io->count = io->entries;
	io->status = 0;
	io->bytes = 0;
	init_completion(&io->complete);
	return 0;

nomem:
	sg_clean(io);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_sg_init);

/**
 * usb_sg_wait - synchronously execute scatter/gather request
 * @io: request block handle, as initialized with usb_sg_init().
 *	some fields become accessible when this call returns.
 * Context: !in_interrupt ()
 *
 * This function blocks until the specified I/O operation completes.  It
 * leverages the grouping of the related I/O requests to get good transfer
 * rates, by queueing the requests.  At higher speeds, such queuing can
 * significantly improve USB throughput.
 *
 * There are three kinds of completion for this function.
 * (1) success, where io->status is zero.  The number of io->bytes
 *     transferred is as requested.
 * (2) error, where io->status is a negative errno value.  The number
 *     of io->bytes transferred before the error is usually less
 *     than requested, and can be nonzero.
 * (3) cancellation, a type of error with status -ECONNRESET that
 *     is initiated by usb_sg_cancel().
 *
 * When this function returns, all memory allocated through usb_sg_init() or
 * this call will have been freed.  The request block parameter may still be
 * passed to usb_sg_cancel(), or it may be freed.  It could also be
 * reinitialized and then reused.
 *
 * Data Transfer Rates:
 *
 * Bulk transfers are valid for full or high speed endpoints.
 * The best full speed data rate is 19 packets of 64 bytes each
 * per frame, or 1216 bytes per millisecond.
 * The best high speed data rate is 13 packets of 512 bytes each
 * per microframe, or 52 KBytes per millisecond.
 *
 * The reason to use interrupt transfers through this API would most likely
 * be to reserve high speed bandwidth, where up to 24 KBytes per millisecond
 * could be transferred.  That capability is less useful for low or full
 * speed interrupt endpoints, which allow at most one packet per millisecond,
 * of at most 8 or 64 bytes (respectively).
 *
 * It is not necessary to call this function to reserve bandwidth for devices
 * under an xHCI host controller, as the bandwidth is reserved when the
 * configuration or interface alt setting is selected.
 */
void usb_sg_wait(struct usb_sg_request *io)
{
	int i;
	int entries = io->entries;

	/* queue the urbs. */
	spin_lock_irq(&io->lock);
	i = 0;
	while (i < entries && !io->status) {
		int retval;

		/* setting urb->dev marks this URB as submitted for
		 * sg_complete()/usb_sg_cancel()
		 */
		io->urbs[i]->dev = io->dev;
		retval = usb_submit_urb(io->urbs[i], GFP_ATOMIC);

		/* after we submit, let completions or cancelations fire;
		 * we handshake using io->status.
		 */
		spin_unlock_irq(&io->lock);
		switch (retval) {
			/* maybe retrying will recover */
		case -ENXIO:	/* hc didn't queue this one */
		case -EAGAIN:
		case -ENOMEM:
			retval = 0;
			yield();
			break;

			/* no error? continue immediately.
			 *
			 * NOTE: to work better with UHCI (4K I/O buffer may
			 * need 3K of TDs) it may be good to limit how many
			 * URBs are queued at once; N milliseconds?
			 */
		case 0:
			++i;
			cpu_relax();
			break;

			/* fail any uncompleted urbs */
		default:
			io->urbs[i]->status = retval;
			dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
					__func__, retval);
			usb_sg_cancel(io);
		}
		spin_lock_irq(&io->lock);
		if (retval && (io->status == 0 || io->status == -ECONNRESET))
			io->status = retval;
	}
	/* account for the URBs we never submitted, so the completion
	 * below (or in sg_complete) fires exactly once
	 */
	io->count -= entries - i;
	if (io->count == 0)
		complete(&io->complete);
	spin_unlock_irq(&io->lock);

	/* OK, yes, this could be packaged as non-blocking.
	 * So could the submit loop above ... but it's easier to
	 * solve neither problem than to solve both!
	 */
	wait_for_completion(&io->complete);
	sg_clean(io);
}
EXPORT_SYMBOL_GPL(usb_sg_wait);

/**
 * usb_sg_cancel - stop scatter/gather i/o issued by usb_sg_wait()
 * @io: request block, initialized with usb_sg_init()
 *
 * This stops a request after it has been started by usb_sg_wait().
 * It can also prevent one initialized by usb_sg_init() from starting,
 * so that call just frees resources allocated to the request.
 */
void usb_sg_cancel(struct usb_sg_request *io)
{
	unsigned long flags;

	spin_lock_irqsave(&io->lock, flags);

	/* shut everything down, if it didn't already */
	if (!io->status) {
		int i;

		io->status = -ECONNRESET;
		/* drop the lock while unlinking: unlink can sometimes
		 * complete synchronously and would deadlock otherwise
		 */
		spin_unlock(&io->lock);
		for (i = 0; i < io->entries; i++) {
			int retval;

			/* skip URBs that were never submitted */
			if (!io->urbs[i]->dev)
				continue;
			retval = usb_unlink_urb(io->urbs[i]);
			if (retval != -EINPROGRESS
					&& retval != -ENODEV
					&& retval != -EBUSY
					&& retval != -EIDRM)
				dev_warn(&io->dev->dev,
					"%s, unlink --> %d\n",
					__func__, retval);
		}
		spin_lock(&io->lock);
	}
	spin_unlock_irqrestore(&io->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_sg_cancel);

/*-------------------------------------------------------------------*/

/**
 * usb_get_descriptor - issues a generic GET_DESCRIPTOR request
 * @dev: the device whose descriptor is being retrieved
 * @type: the descriptor type (USB_DT_*)
 * @index: the number of the descriptor
 * @buf: where to put the descriptor
 * @size: how big is "buf"?
 * Context: !in_interrupt ()
 *
 * Gets a USB descriptor.  Convenience functions exist to simplify
 * getting some types of descriptors.  Use
 * usb_get_string() or usb_string() for USB_DT_STRING.
 * Device (USB_DT_DEVICE) and configuration descriptors (USB_DT_CONFIG)
 * are part of the device structure.
 * In addition to a number of USB-standard descriptors, some
 * devices also use class-specific or vendor-specific descriptors.
 *
 * This call is synchronous, and may not be used in an interrupt context.
 *
 * Returns the number of bytes received on success, or else the status code
 * returned by the underlying usb_control_msg() call.
*/ int usb_get_descriptor(struct usb_device *dev, unsigned char type, unsigned char index, void *buf, int size) { int i; int result; memset(buf, 0, size); /* Make sure we parse really received data */ for (i = 0; i < 3; ++i) { /* retry on length 0 or error; some devices are flakey */ result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, (type << 8) + index, 0, buf, size, USB_CTRL_GET_TIMEOUT); if (result <= 0 && result != -ETIMEDOUT) continue; if (result > 1 && ((u8 *)buf)[1] != type) { result = -ENODATA; continue; } break; } return result; } EXPORT_SYMBOL_GPL(usb_get_descriptor); /** * usb_get_string - gets a string descriptor * @dev: the device whose string descriptor is being retrieved * @langid: code for language chosen (from string descriptor zero) * @index: the number of the descriptor * @buf: where to put the string * @size: how big is "buf"? * Context: !in_interrupt () * * Retrieves a string, encoded using UTF-16LE (Unicode, 16 bits per character, * in little-endian byte order). * The usb_string() function will often be a convenient way to turn * these strings into kernel-printable form. * * Strings may be referenced in device, configuration, interface, or other * descriptors, and could also be used in vendor-specific ways. * * This call is synchronous, and may not be used in an interrupt context. * * Returns the number of bytes received on success, or else the status code * returned by the underlying usb_control_msg() call. 
 */
static int usb_get_string(struct usb_device *dev, unsigned short langid,
			  unsigned char index, void *buf, int size)
{
	int i;
	int result;

	for (i = 0; i < 3; ++i) {
		/* retry on length 0 or stall; some devices are flakey */
		result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
			USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
			(USB_DT_STRING << 8) + index, langid, buf, size,
			USB_CTRL_GET_TIMEOUT);
		if (result == 0 || result == -EPIPE)
			continue;
		if (result > 1 && ((u8 *) buf)[1] != USB_DT_STRING) {
			result = -ENODATA;
			continue;
		}
		break;
	}
	return result;
}

/*
 * Truncate a string descriptor at the first byte pair that doesn't look
 * like ASCII-in-UTF-16LE (non-printable low byte, or nonzero high byte).
 * Workaround for devices that report a bogus all-zero descriptor payload.
 */
static void usb_try_string_workarounds(unsigned char *buf, int *length)
{
	int newlength, oldlength = *length;

	for (newlength = 2; newlength + 1 < oldlength; newlength += 2)
		if (!isprint(buf[newlength]) || buf[newlength + 1])
			break;

	if (newlength > 2) {
		/* patch the descriptor's own bLength to match */
		buf[0] = newlength;
		*length = newlength;
	}
}

/*
 * Read one string descriptor into @buf (must hold at least 255 bytes).
 * Returns the usable descriptor length (>= 2, always even) or a negative
 * errno.
 */
static int usb_string_sub(struct usb_device *dev, unsigned int langid,
			  unsigned int index, unsigned char *buf)
{
	int rc;

	/* Try to read the string descriptor by asking for the maximum
	 * possible number of bytes */
	if (dev->quirks & USB_QUIRK_STRING_FETCH_255)
		rc = -EIO;
	else
		rc = usb_get_string(dev, langid, index, buf, 255);

	/* If that failed try to read the descriptor length, then
	 * ask for just that many bytes */
	if (rc < 2) {
		rc = usb_get_string(dev, langid, index, buf, 2);
		if (rc == 2)
			rc = usb_get_string(dev, langid, index, buf, buf[0]);
	}

	if (rc >= 2) {
		if (!buf[0] && !buf[1])
			usb_try_string_workarounds(buf, &rc);

		/* There might be extra junk at the end of the descriptor */
		if (buf[0] < rc)
			rc = buf[0];

		rc = rc - (rc & 1);	/* force a multiple of two */
	}

	if (rc < 2)
		rc = (rc < 0 ? rc : -EINVAL);

	return rc;
}

/*
 * Read string descriptor 0 and cache the first LANGID it lists in
 * dev->string_langid.  Falls back to US English (0x0409) when the
 * descriptor is malformed; marks the device stringless on hard errors.
 * @tbuf must hold at least 4 bytes.
 */
static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf)
{
	int err;

	if (dev->have_langid)
		return 0;

	/* -1 means a previous attempt already failed hard */
	if (dev->string_langid < 0)
		return -EPIPE;

	err = usb_string_sub(dev, 0, 0, tbuf);

	/* If the string was reported but is malformed, default to english
	 * (0x0409) */
	if (err == -ENODATA || (err > 0 && err < 4)) {
		dev->string_langid = 0x0409;
		dev->have_langid = 1;
		dev_err(&dev->dev,
			"string descriptor 0 malformed (err = %d), "
			"defaulting to 0x%04x\n",
			err, dev->string_langid);
		return 0;
	}

	/* In case of all other errors, we assume the device is not able to
	 * deal with strings at all. Set string_langid to -1 in order to
	 * prevent any string to be retrieved from the device */
	if (err < 0) {
		dev_err(&dev->dev, "string descriptor 0 read error: %d\n",
					err);
		dev->string_langid = -1;
		return -EPIPE;
	}

	/* always use the first langid listed */
	dev->string_langid = tbuf[2] | (tbuf[3] << 8);
	dev->have_langid = 1;
	dev_dbg(&dev->dev, "default language 0x%04x\n",
				dev->string_langid);
	return 0;
}

/**
 * usb_string - returns UTF-8 version of a string descriptor
 * @dev: the device whose string descriptor is being retrieved
 * @index: the number of the descriptor
 * @buf: where to put the string
 * @size: how big is "buf"?
 * Context: !in_interrupt ()
 *
 * This converts the UTF-16LE encoded strings returned by devices, from
 * usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones
 * that are more usable in most kernel contexts.  Note that this function
 * chooses strings in the first language supported by the device.
 *
 * This call is synchronous, and may not be used in an interrupt context.
 *
 * Returns length of the string (>= 0) or usb_control_msg status (< 0).
*/ int usb_string(struct usb_device *dev, int index, char *buf, size_t size) { unsigned char *tbuf; int err; if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; if (size <= 0 || !buf || !index) return -EINVAL; buf[0] = 0; tbuf = kmalloc(256, GFP_NOIO); if (!tbuf) return -ENOMEM; err = usb_get_langid(dev, tbuf); if (err < 0) goto errout; err = usb_string_sub(dev, dev->string_langid, index, tbuf); if (err < 0) goto errout; size--; /* leave room for trailing NULL char in output buffer */ err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2, UTF16_LITTLE_ENDIAN, buf, size); buf[err] = 0; if (tbuf[1] != USB_DT_STRING) dev_dbg(&dev->dev, "wrong descriptor type %02x for string %d (\"%s\")\n", tbuf[1], index, buf); errout: kfree(tbuf); return err; } EXPORT_SYMBOL_GPL(usb_string); /* one UTF-8-encoded 16-bit character has at most three bytes */ #define MAX_USB_STRING_SIZE (127 * 3 + 1) /** * usb_cache_string - read a string descriptor and cache it for later use * @udev: the device whose string descriptor is being read * @index: the descriptor index * * Returns a pointer to a kmalloc'ed buffer containing the descriptor string, * or NULL if the index is 0 or the string could not be read. */ char *usb_cache_string(struct usb_device *udev, int index) { char *buf; char *smallbuf = NULL; int len; if (index <= 0) return NULL; buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO); if (buf) { len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE); if (len > 0) { smallbuf = kmalloc(++len, GFP_NOIO); if (!smallbuf) return buf; memcpy(smallbuf, buf, len); } kfree(buf); } return smallbuf; } /* * usb_get_device_descriptor - (re)reads the device descriptor (usbcore) * @dev: the device whose device descriptor is being updated * @size: how much of the descriptor to read * Context: !in_interrupt () * * Updates the copy of the device descriptor stored in the device structure, * which dedicates space for this purpose. * * Not exported, only for use by the core. 
If drivers really want to read * the device descriptor directly, they can call usb_get_descriptor() with * type = USB_DT_DEVICE and index = 0. * * This call is synchronous, and may not be used in an interrupt context. * * Returns the number of bytes received on success, or else the status code * returned by the underlying usb_control_msg() call. */ int usb_get_device_descriptor(struct usb_device *dev, unsigned int size) { struct usb_device_descriptor *desc; int ret; if (size > sizeof(*desc)) return -EINVAL; desc = kmalloc(sizeof(*desc), GFP_NOIO); if (!desc) return -ENOMEM; ret = usb_get_descriptor(dev, USB_DT_DEVICE, 0, desc, size); if (ret >= 0) memcpy(&dev->descriptor, desc, size); kfree(desc); return ret; } /** * usb_get_status - issues a GET_STATUS call * @dev: the device whose status is being checked * @type: USB_RECIP_*; for device, interface, or endpoint * @target: zero (for device), else interface or endpoint number * @data: pointer to two bytes of bitmap data * Context: !in_interrupt () * * Returns device, interface, or endpoint status. Normally only of * interest to see if the device is self powered, or has enabled the * remote wakeup facility; or whether a bulk or interrupt endpoint * is halted ("stalled"). * * Bits in these status bitmaps are set using the SET_FEATURE request, * and cleared using the CLEAR_FEATURE request. The usb_clear_halt() * function should be used to clear halt ("stall") status. * * This call is synchronous, and may not be used in an interrupt context. * * Returns the number of bytes received on success, or else the status code * returned by the underlying usb_control_msg() call. 
*/ int usb_get_status(struct usb_device *dev, int type, int target, void *data) { int ret; u16 *status = kmalloc(sizeof(*status), GFP_KERNEL); if (!status) return -ENOMEM; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_STATUS, USB_DIR_IN | type, 0, target, status, sizeof(*status), USB_CTRL_GET_TIMEOUT); *(u16 *)data = *status; kfree(status); return ret; } EXPORT_SYMBOL_GPL(usb_get_status); /** * usb_clear_halt - tells device to clear endpoint halt/stall condition * @dev: device whose endpoint is halted * @pipe: endpoint "pipe" being cleared * Context: !in_interrupt () * * This is used to clear halt conditions for bulk and interrupt endpoints, * as reported by URB completion status. Endpoints that are halted are * sometimes referred to as being "stalled". Such endpoints are unable * to transmit or receive data until the halt status is cleared. Any URBs * queued for such an endpoint should normally be unlinked by the driver * before clearing the halt condition, as described in sections 5.7.5 * and 5.8.5 of the USB 2.0 spec. * * Note that control and isochronous endpoints don't halt, although control * endpoints report "protocol stall" (for unsupported requests) using the * same status code used to report a true stall. * * This call is synchronous, and may not be used in an interrupt context. * * Returns zero on success, or else the status code returned by the * underlying usb_control_msg() call. */ int usb_clear_halt(struct usb_device *dev, int pipe) { int result; int endp = usb_pipeendpoint(pipe); if (usb_pipein(pipe)) endp |= USB_DIR_IN; /* we don't care if it wasn't halted first. in fact some devices * (like some ibmcam model 1 units) seem to expect hosts to make * this request for iso endpoints, which can't halt! 
*/ result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT, USB_ENDPOINT_HALT, endp, NULL, 0, USB_CTRL_SET_TIMEOUT); /* don't un-halt or force to DATA0 except on success */ if (result < 0) return result; /* NOTE: seems like Microsoft and Apple don't bother verifying * the clear "took", so some devices could lock up if you check... * such as the Hagiwara FlashGate DUAL. So we won't bother. * * NOTE: make sure the logic here doesn't diverge much from * the copy in usb-storage, for as long as we need two copies. */ usb_reset_endpoint(dev, endp); return 0; } EXPORT_SYMBOL_GPL(usb_clear_halt); static int create_intf_ep_devs(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_interface *alt = intf->cur_altsetting; int i; if (intf->ep_devs_created || intf->unregistering) return 0; for (i = 0; i < alt->desc.bNumEndpoints; ++i) (void) usb_create_ep_devs(&intf->dev, &alt->endpoint[i], udev); intf->ep_devs_created = 1; return 0; } static void remove_intf_ep_devs(struct usb_interface *intf) { struct usb_host_interface *alt = intf->cur_altsetting; int i; if (!intf->ep_devs_created) return; for (i = 0; i < alt->desc.bNumEndpoints; ++i) usb_remove_ep_devs(&alt->endpoint[i]); intf->ep_devs_created = 0; } /** * usb_disable_endpoint -- Disable an endpoint by address * @dev: the device whose endpoint is being disabled * @epaddr: the endpoint's address. Endpoint number for output, * endpoint number + USB_DIR_IN for input * @reset_hardware: flag to erase any endpoint state stored in the * controller hardware * * Disables the endpoint for URB submission and nukes all pending URBs. * If @reset_hardware is set then also deallocates hcd/hardware state * for the endpoint. 
*/ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, bool reset_hardware) { unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; struct usb_host_endpoint *ep; if (!dev) return; if (usb_endpoint_out(epaddr)) { ep = dev->ep_out[epnum]; if (reset_hardware) dev->ep_out[epnum] = NULL; } else { ep = dev->ep_in[epnum]; if (reset_hardware) dev->ep_in[epnum] = NULL; } if (ep) { ep->enabled = 0; usb_hcd_flush_endpoint(dev, ep); if (reset_hardware) usb_hcd_disable_endpoint(dev, ep); } } /** * usb_reset_endpoint - Reset an endpoint's state. * @dev: the device whose endpoint is to be reset * @epaddr: the endpoint's address. Endpoint number for output, * endpoint number + USB_DIR_IN for input * * Resets any host-side endpoint state such as the toggle bit, * sequence number or current window. */ void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr) { unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; struct usb_host_endpoint *ep; if (usb_endpoint_out(epaddr)) ep = dev->ep_out[epnum]; else ep = dev->ep_in[epnum]; if (ep) usb_hcd_reset_endpoint(dev, ep); } EXPORT_SYMBOL_GPL(usb_reset_endpoint); /** * usb_disable_interface -- Disable all endpoints for an interface * @dev: the device whose interface is being disabled * @intf: pointer to the interface descriptor * @reset_hardware: flag to erase any endpoint state stored in the * controller hardware * * Disables all the endpoints for the interface's current altsetting. */ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, bool reset_hardware) { struct usb_host_interface *alt = intf->cur_altsetting; int i; for (i = 0; i < alt->desc.bNumEndpoints; ++i) { usb_disable_endpoint(dev, alt->endpoint[i].desc.bEndpointAddress, reset_hardware); } } /** * usb_disable_device - Disable all the endpoints for a USB device * @dev: the device whose endpoints are being disabled * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. 
 *
 * Disables all the device's endpoints, potentially including endpoint 0.
 * Deallocates hcd/hardware state for the endpoints (nuking all or most
 * pending urbs) and usbcore state for the interfaces, so that usbcore
 * must usb_set_configuration() before any interfaces could be used.
 */
void usb_disable_device(struct usb_device *dev, int skip_ep0)
{
	int i;
	struct usb_hcd *hcd = bus_to_hcd(dev->bus);

	/* getting rid of interfaces will disconnect
	 * any drivers bound to them (a key side effect)
	 */
	if (dev->actconfig) {
		/*
		 * FIXME: In order to avoid self-deadlock involving the
		 * bandwidth_mutex, we have to mark all the interfaces
		 * before unregistering any of them.
		 */
		for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++)
			dev->actconfig->interface[i]->unregistering = 1;

		for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
			struct usb_interface *interface;

			/* remove this interface if it has been registered */
			interface = dev->actconfig->interface[i];
			if (!device_is_registered(&interface->dev))
				continue;
			dev_dbg(&dev->dev, "unregistering interface %s\n",
				dev_name(&interface->dev));
			remove_intf_ep_devs(interface);
			device_del(&interface->dev);
		}

		/* Now that the interfaces are unbound, nobody should
		 * try to access them.
		 */
		for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
			put_device(&dev->actconfig->interface[i]->dev);
			dev->actconfig->interface[i] = NULL;
		}
		dev->actconfig = NULL;
		if (dev->state == USB_STATE_CONFIGURED)
			usb_set_device_state(dev, USB_STATE_ADDRESS);
	}

	dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
		skip_ep0 ? "non-ep0" : "all");
	if (hcd->driver->check_bandwidth) {
		/* First pass: Cancel URBs, leave endpoint pointers intact. */
		for (i = skip_ep0; i < 16; ++i) {
			usb_disable_endpoint(dev, i, false);
			usb_disable_endpoint(dev, i + USB_DIR_IN, false);
		}
		/* Remove endpoints from the host controller internal state */
		mutex_lock(hcd->bandwidth_mutex);
		usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
		mutex_unlock(hcd->bandwidth_mutex);
		/* Second pass: remove endpoint pointers */
	}
	for (i = skip_ep0; i < 16; ++i) {
		usb_disable_endpoint(dev, i, true);
		usb_disable_endpoint(dev, i + USB_DIR_IN, true);
	}
}

/**
 * usb_enable_endpoint - Enable an endpoint for USB communications
 * @dev: the device whose interface is being enabled
 * @ep: the endpoint
 * @reset_ep: flag to reset the endpoint state
 *
 * Resets the endpoint state if asked, and sets dev->ep_{in,out} pointers.
 * For control endpoints, both the input and output sides are handled.
 */
void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep,
		bool reset_ep)
{
	int epnum = usb_endpoint_num(&ep->desc);
	int is_out = usb_endpoint_dir_out(&ep->desc);
	int is_control = usb_endpoint_xfer_control(&ep->desc);

	if (reset_ep)
		usb_hcd_reset_endpoint(dev, ep);
	/* control endpoints are bidirectional: fill both tables */
	if (is_out || is_control)
		dev->ep_out[epnum] = ep;
	if (!is_out || is_control)
		dev->ep_in[epnum] = ep;
	ep->enabled = 1;
}

/**
 * usb_enable_interface - Enable all the endpoints for an interface
 * @dev: the device whose interface is being enabled
 * @intf: pointer to the interface descriptor
 * @reset_eps: flag to reset the endpoints' state
 *
 * Enables all the endpoints for the interface's current altsetting.
 */
void usb_enable_interface(struct usb_device *dev,
		struct usb_interface *intf, bool reset_eps)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; ++i)
		usb_enable_endpoint(dev, &alt->endpoint[i], reset_eps);
}

/**
 * usb_set_interface - Makes a particular alternate setting be current
 * @dev: the device whose interface is being updated
 * @interface: the interface being updated
 * @alternate: the setting being chosen.
* Context: !in_interrupt () * * This is used to enable data transfers on interfaces that may not * be enabled by default. Not all devices support such configurability. * Only the driver bound to an interface may change its setting. * * Within any given configuration, each interface may have several * alternative settings. These are often used to control levels of * bandwidth consumption. For example, the default setting for a high * speed interrupt endpoint may not send more than 64 bytes per microframe, * while interrupt transfers of up to 3KBytes per microframe are legal. * Also, isochronous endpoints may never be part of an * interface's default setting. To access such bandwidth, alternate * interface settings must be made current. * * Note that in the Linux USB subsystem, bandwidth associated with * an endpoint in a given alternate setting is not reserved until an URB * is submitted that needs that bandwidth. Some other operating systems * allocate bandwidth early, when a configuration is chosen. * * This call is synchronous, and may not be used in an interrupt context. * Also, drivers must not change altsettings while urbs are scheduled for * endpoints in that interface; all such urbs must first be completed * (perhaps forced by unlinking). * * Returns zero on success, or else the status code returned by the * underlying usb_control_msg() call. 
 */
int usb_set_interface(struct usb_device *dev, int interface, int alternate)
{
	struct usb_interface *iface;
	struct usb_host_interface *alt;
	struct usb_hcd *hcd = bus_to_hcd(dev->bus);
	int ret;
	int manual = 0;
	unsigned int epaddr;
	unsigned int pipe;

	if (dev->state == USB_STATE_SUSPENDED)
		return -EHOSTUNREACH;

	iface = usb_ifnum_to_if(dev, interface);
	if (!iface) {
		dev_dbg(&dev->dev, "selecting invalid interface %d\n",
			interface);
		return -EINVAL;
	}
	if (iface->unregistering)
		return -ENODEV;

	alt = usb_altnum_to_altsetting(iface, alternate);
	if (!alt) {
		dev_warn(&dev->dev, "selecting invalid altsetting %d\n",
			 alternate);
		return -EINVAL;
	}

	/* Make sure we have enough bandwidth for this alternate interface.
	 * Remove the current alt setting and add the new alt setting.
	 */
	mutex_lock(hcd->bandwidth_mutex);
	ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
	if (ret < 0) {
		dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
				alternate);
		mutex_unlock(hcd->bandwidth_mutex);
		return ret;
	}

	if (dev->quirks & USB_QUIRK_NO_SET_INTF)
		ret = -EPIPE;
	else
		ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
				   USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE,
				   alternate, interface, NULL, 0, 5000);

	/* 9.4.10 says devices don't need this and are free to STALL the
	 * request if the interface only has one alternate setting.
	 */
	if (ret == -EPIPE && iface->num_altsetting == 1) {
		dev_dbg(&dev->dev,
			"manual set_interface for iface %d, alt %d\n",
			interface, alternate);
		manual = 1;
	} else if (ret < 0) {
		/* Re-instate the old alt setting */
		usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting);
		mutex_unlock(hcd->bandwidth_mutex);
		return ret;
	}
	mutex_unlock(hcd->bandwidth_mutex);

	/* FIXME drivers shouldn't need to replicate/bugfix the logic here
	 * when they implement async or easily-killable versions of this or
	 * other "should-be-internal" functions (like clear_halt).
	 * should hcd+usbcore postprocess control requests?
	 */

	/* prevent submissions using previous endpoint settings */
	if (iface->cur_altsetting != alt) {
		remove_intf_ep_devs(iface);
		usb_remove_sysfs_intf_files(iface);
	}
	usb_disable_interface(dev, iface, true);

	iface->cur_altsetting = alt;

	/* If the interface only has one altsetting and the device didn't
	 * accept the request, we attempt to carry out the equivalent action
	 * by manually clearing the HALT feature for each endpoint in the
	 * new altsetting.
	 */
	if (manual) {
		int i;

		for (i = 0; i < alt->desc.bNumEndpoints; i++) {
			epaddr = alt->endpoint[i].desc.bEndpointAddress;
			pipe = __create_pipe(dev,
					USB_ENDPOINT_NUMBER_MASK & epaddr) |
					(usb_endpoint_out(epaddr) ?
					USB_DIR_OUT : USB_DIR_IN);

			usb_clear_halt(dev, pipe);
		}
	}

	/* 9.1.1.5: reset toggles for all endpoints in the new altsetting
	 *
	 * Note:
	 * Despite EP0 is always present in all interfaces/AS, the list of
	 * endpoints from the descriptor does not contain EP0.  Due to its
	 * omnipresence one might expect EP0 being considered "affected" by
	 * any SetInterface request and hence assume toggles need to be reset.
	 * However, EP0 toggles are re-synced for every individual transfer
	 * during the SETUP stage - hence EP0 toggles are "don't care" here.
	 * (Likewise, EP0 never "halts" on well designed devices.)
	 */
	usb_enable_interface(dev, iface, true);
	if (device_is_registered(&iface->dev)) {
		usb_create_sysfs_intf_files(iface);
		create_intf_ep_devs(iface);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usb_set_interface);

/**
 * usb_reset_configuration - lightweight device reset
 * @dev: the device whose configuration is being reset
 *
 * This issues a standard SET_CONFIGURATION request to the device using
 * the current configuration.  The effect is to reset most USB-related
 * state in the device, including interface altsettings (reset to zero),
 * endpoint halts (cleared), and endpoint state (only for bulk and interrupt
 * endpoints).  Other usbcore state is unchanged, including bindings of
 * usb device drivers to interfaces.
* * Because this affects multiple interfaces, avoid using this with composite * (multi-interface) devices. Instead, the driver for each interface may * use usb_set_interface() on the interfaces it claims. Be careful though; * some devices don't support the SET_INTERFACE request, and others won't * reset all the interface state (notably endpoint state). Resetting the whole * configuration would affect other drivers' interfaces. * * The caller must own the device lock. * * Returns zero on success, else a negative error code. */ int usb_reset_configuration(struct usb_device *dev) { int i, retval; struct usb_host_config *config; struct usb_hcd *hcd = bus_to_hcd(dev->bus); if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; /* caller must have locked the device and must own * the usb bus readlock (so driver bindings are stable); * calls during probe() are fine */ for (i = 1; i < 16; ++i) { usb_disable_endpoint(dev, i, true); usb_disable_endpoint(dev, i + USB_DIR_IN, true); } config = dev->actconfig; retval = 0; mutex_lock(hcd->bandwidth_mutex); /* Make sure we have enough bandwidth for each alternate setting 0 */ for (i = 0; i < config->desc.bNumInterfaces; i++) { struct usb_interface *intf = config->interface[i]; struct usb_host_interface *alt; alt = usb_altnum_to_altsetting(intf, 0); if (!alt) alt = &intf->altsetting[0]; if (alt != intf->cur_altsetting) retval = usb_hcd_alloc_bandwidth(dev, NULL, intf->cur_altsetting, alt); if (retval < 0) break; } /* If not, reinstate the old alternate settings */ if (retval < 0) { reset_old_alts: for (i--; i >= 0; i--) { struct usb_interface *intf = config->interface[i]; struct usb_host_interface *alt; alt = usb_altnum_to_altsetting(intf, 0); if (!alt) alt = &intf->altsetting[0]; if (alt != intf->cur_altsetting) usb_hcd_alloc_bandwidth(dev, NULL, alt, intf->cur_altsetting); } mutex_unlock(hcd->bandwidth_mutex); return retval; } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), USB_REQ_SET_CONFIGURATION, 0, 
config->desc.bConfigurationValue, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (retval < 0) goto reset_old_alts; mutex_unlock(hcd->bandwidth_mutex); /* re-init hc/hcd interface/endpoint state */ for (i = 0; i < config->desc.bNumInterfaces; i++) { struct usb_interface *intf = config->interface[i]; struct usb_host_interface *alt; alt = usb_altnum_to_altsetting(intf, 0); /* No altsetting 0? We'll assume the first altsetting. * We could use a GetInterface call, but if a device is * so non-compliant that it doesn't have altsetting 0 * then I wouldn't trust its reply anyway. */ if (!alt) alt = &intf->altsetting[0]; if (alt != intf->cur_altsetting) { remove_intf_ep_devs(intf); usb_remove_sysfs_intf_files(intf); } intf->cur_altsetting = alt; usb_enable_interface(dev, intf, true); if (device_is_registered(&intf->dev)) { usb_create_sysfs_intf_files(intf); create_intf_ep_devs(intf); } } return 0; } EXPORT_SYMBOL_GPL(usb_reset_configuration); static void usb_release_interface(struct device *dev) { struct usb_interface *intf = to_usb_interface(dev); struct usb_interface_cache *intfc = altsetting_to_usb_interface_cache(intf->altsetting); kref_put(&intfc->ref, usb_release_interface_cache); kfree(intf); } #ifdef CONFIG_HOTPLUG static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env) { struct usb_device *usb_dev; struct usb_interface *intf; struct usb_host_interface *alt; intf = to_usb_interface(dev); usb_dev = interface_to_usbdev(intf); alt = intf->cur_altsetting; if (add_uevent_var(env, "INTERFACE=%d/%d/%d", alt->desc.bInterfaceClass, alt->desc.bInterfaceSubClass, alt->desc.bInterfaceProtocol)) return -ENOMEM; if (add_uevent_var(env, "MODALIAS=usb:" "v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02X", le16_to_cpu(usb_dev->descriptor.idVendor), le16_to_cpu(usb_dev->descriptor.idProduct), le16_to_cpu(usb_dev->descriptor.bcdDevice), usb_dev->descriptor.bDeviceClass, usb_dev->descriptor.bDeviceSubClass, usb_dev->descriptor.bDeviceProtocol, alt->desc.bInterfaceClass, 
		   alt->desc.bInterfaceSubClass,
		   alt->desc.bInterfaceProtocol))
		return -ENOMEM;

	return 0;
}

#else

static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return -ENODEV;
}

#endif	/* CONFIG_HOTPLUG */

struct device_type usb_if_device_type = {
	.name =		"usb_interface",
	.release =	usb_release_interface,
	.uevent =	usb_if_uevent,
};

/* Locate the interface association descriptor (IAD), if any, whose
 * interface range covers interface number @inum within @config.
 * Returns NULL when no IAD claims @inum; logs an error (and keeps the
 * first match) if more than one IAD claims the same interface. */
static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
						       struct usb_host_config *config,
						       u8 inum)
{
	struct usb_interface_assoc_descriptor *retval = NULL;
	struct usb_interface_assoc_descriptor *intf_assoc;
	int first_intf;
	int last_intf;
	int i;

	for (i = 0; (i < USB_MAXIADS && config->intf_assoc[i]); i++) {
		intf_assoc = config->intf_assoc[i];
		/* An IAD with zero interfaces covers nothing */
		if (intf_assoc->bInterfaceCount == 0)
			continue;

		first_intf = intf_assoc->bFirstInterface;
		last_intf = first_intf + (intf_assoc->bInterfaceCount - 1);
		if (inum >= first_intf && inum <= last_intf) {
			if (!retval)
				retval = intf_assoc;
			else
				dev_err(&dev->dev, "Interface #%d referenced"
					" by multiple IADs\n", inum);
		}
	}

	return retval;
}

/*
 * Internal function to queue a device reset
 *
 * This is initialized into the workstruct in 'struct
 * usb_device->reset_ws' that is launched by
 * message.c:usb_set_configuration() when initializing each 'struct
 * usb_interface'.
 *
 * It is safe to get the USB device without reference counts because
 * the life cycle of @iface is bound to the life cycle of @udev. Then,
 * this function will be ran only if @iface is alive (and before
 * freeing it any scheduled instances of it will have been cancelled).
 *
 * We need to set a flag (usb_dev->reset_running) because when we call
 * the reset, the interfaces might be unbound. The current interface
 * cannot try to remove the queued work as it would cause a deadlock
 * (you cannot remove your work from within your executing
 * workqueue). This flag lets it know, so that
 * usb_cancel_queued_reset() doesn't try to do it.
 *
 * See usb_queue_reset_device() for more details
 */
static void __usb_queue_reset_device(struct work_struct *ws)
{
	int rc;
	struct usb_interface *iface =
		container_of(ws, struct usb_interface, reset_ws);
	struct usb_device *udev = interface_to_usbdev(iface);

	rc = usb_lock_device_for_reset(udev, iface);
	if (rc >= 0) {
		/* reset_running guards against usb_cancel_queued_reset()
		 * trying to cancel this work item from within the reset;
		 * see the block comment above. */
		iface->reset_running = 1;
		usb_reset_device(udev);
		iface->reset_running = 0;
		usb_unlock_device(udev);
	}
	/* NOTE(review): a negative rc (lock not obtained) is silently
	 * ignored here — presumably the device is going away; confirm
	 * against usb_lock_device_for_reset()'s contract. */
}


/*
 * usb_set_configuration - Makes a particular device setting be current
 * @dev: the device whose configuration is being updated
 * @configuration: the configuration being chosen.
 * Context: !in_interrupt(), caller owns the device lock
 *
 * This is used to enable non-default device modes.  Not all devices
 * use this kind of configurability; many devices only have one
 * configuration.
 *
 * @configuration is the value of the configuration to be installed.
 * According to the USB spec (e.g. section 9.1.1.5), configuration values
 * must be non-zero; a value of zero indicates that the device in
 * unconfigured.  However some devices erroneously use 0 as one of their
 * configuration values.  To help manage such devices, this routine will
 * accept @configuration = -1 as indicating the device should be put in
 * an unconfigured state.
 *
 * USB device configurations may affect Linux interoperability,
 * power consumption and the functionality available.  For example,
 * the default configuration is limited to using 100mA of bus power,
 * so that when certain device functionality requires more power,
 * and the device is bus powered, that functionality should be in some
 * non-default device configuration.  Other device modes may also be
 * reflected as configuration options, such as whether two ISDN
 * channels are available independently; and choosing between open
 * standard device protocols (like CDC) or proprietary ones.
 *
 * Note that a non-authorized device (dev->authorized == 0) will only
 * be put in unconfigured mode.
* * Note that USB has an additional level of device configurability, * associated with interfaces. That configurability is accessed using * usb_set_interface(). * * This call is synchronous. The calling context must be able to sleep, * must own the device lock, and must not hold the driver model's USB * bus mutex; usb interface driver probe() methods cannot use this routine. * * Returns zero on success, or else the status code returned by the * underlying call that failed. On successful completion, each interface * in the original device configuration has been destroyed, and each one * in the new configuration has been probed by all relevant usb device * drivers currently known to the kernel. */ int usb_set_configuration(struct usb_device *dev, int configuration) { int i, ret; struct usb_host_config *cp = NULL; struct usb_interface **new_interfaces = NULL; struct usb_hcd *hcd = bus_to_hcd(dev->bus); int n, nintf; if (dev->authorized == 0 || configuration == -1) configuration = 0; else { for (i = 0; i < dev->descriptor.bNumConfigurations; i++) { if (dev->config[i].desc.bConfigurationValue == configuration) { cp = &dev->config[i]; break; } } } if ((!cp && configuration != 0)) return -EINVAL; /* The USB spec says configuration 0 means unconfigured. * But if a device includes a configuration numbered 0, * we will accept it as a correctly configured state. * Use -1 if you really want to unconfigure the device. */ if (cp && configuration == 0) dev_warn(&dev->dev, "config 0 descriptor??\n"); /* Allocate memory for new interfaces before doing anything else, * so that if we run out then nothing will have changed. 
*/ n = nintf = 0; if (cp) { nintf = cp->desc.bNumInterfaces; new_interfaces = kmalloc(nintf * sizeof(*new_interfaces), GFP_NOIO); if (!new_interfaces) { dev_err(&dev->dev, "Out of memory\n"); return -ENOMEM; } for (; n < nintf; ++n) { new_interfaces[n] = kzalloc( sizeof(struct usb_interface), GFP_NOIO); if (!new_interfaces[n]) { dev_err(&dev->dev, "Out of memory\n"); ret = -ENOMEM; free_interfaces: while (--n >= 0) kfree(new_interfaces[n]); kfree(new_interfaces); return ret; } } i = dev->bus_mA - cp->desc.bMaxPower * 2; if (i < 0) dev_warn(&dev->dev, "new config #%d exceeds power " "limit by %dmA\n", configuration, -i); } /* Wake up the device so we can send it the Set-Config request */ ret = usb_autoresume_device(dev); if (ret) goto free_interfaces; /* if it's already configured, clear out old state first. * getting rid of old interfaces means unbinding their drivers. */ if (dev->state != USB_STATE_ADDRESS) usb_disable_device(dev, 1); /* Skip ep0 */ /* Get rid of pending async Set-Config requests for this device */ cancel_async_set_config(dev); /* Make sure we have bandwidth (and available HCD resources) for this * configuration. Remove endpoints from the schedule if we're dropping * this configuration to set configuration 0. After this point, the * host controller will not allow submissions to dropped endpoints. If * this call fails, the device state is unchanged. */ mutex_lock(hcd->bandwidth_mutex); ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); if (ret < 0) { mutex_unlock(hcd->bandwidth_mutex); usb_autosuspend_device(dev); goto free_interfaces; } dev->actconfig = cp; if (cp) usb_notify_config_device(dev); /* Initialize the new interface structures and the * hc/hcd/usbcore interface/endpoint state. 
*/ for (i = 0; i < nintf; ++i) { struct usb_interface_cache *intfc; struct usb_interface *intf; struct usb_host_interface *alt; cp->interface[i] = intf = new_interfaces[i]; intfc = cp->intf_cache[i]; intf->altsetting = intfc->altsetting; intf->num_altsetting = intfc->num_altsetting; kref_get(&intfc->ref); alt = usb_altnum_to_altsetting(intf, 0); /* No altsetting 0? We'll assume the first altsetting. * We could use a GetInterface call, but if a device is * so non-compliant that it doesn't have altsetting 0 * then I wouldn't trust its reply anyway. */ if (!alt) alt = &intf->altsetting[0]; intf->intf_assoc = find_iad(dev, cp, alt->desc.bInterfaceNumber); intf->cur_altsetting = alt; usb_enable_interface(dev, intf, true); intf->dev.parent = &dev->dev; intf->dev.driver = NULL; intf->dev.bus = &usb_bus_type; intf->dev.type = &usb_if_device_type; intf->dev.groups = usb_interface_groups; intf->dev.dma_mask = dev->dev.dma_mask; INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); intf->minor = -1; device_initialize(&intf->dev); pm_runtime_no_callbacks(&intf->dev); dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum, dev->devpath, configuration, alt->desc.bInterfaceNumber); } kfree(new_interfaces); ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), USB_REQ_SET_CONFIGURATION, 0, configuration, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (ret < 0 && cp) { /* All the old state is gone, so what else can we do? * The device is probably useless now anyway. */ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); for (i = 0; i < nintf; ++i) { usb_disable_interface(dev, cp->interface[i], true); put_device(&cp->interface[i]->dev); cp->interface[i] = NULL; } cp = NULL; } dev->actconfig = cp; mutex_unlock(hcd->bandwidth_mutex); if (!cp) { usb_set_device_state(dev, USB_STATE_ADDRESS); /* Leave LPM disabled while the device is unconfigured. 
*/ usb_autosuspend_device(dev); return ret; } usb_set_device_state(dev, USB_STATE_CONFIGURED); if (cp->string == NULL && !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS)) cp->string = usb_cache_string(dev, cp->desc.iConfiguration); /* Now that all the interfaces are set up, register them * to trigger binding of drivers to interfaces. probe() * routines may install different altsettings and may * claim() any interfaces not yet bound. Many class drivers * need that: CDC, audio, video, etc. */ for (i = 0; i < nintf; ++i) { struct usb_interface *intf = cp->interface[i]; dev_dbg(&dev->dev, "adding %s (config #%d, interface %d)\n", dev_name(&intf->dev), configuration, intf->cur_altsetting->desc.bInterfaceNumber); device_enable_async_suspend(&intf->dev); ret = device_add(&intf->dev); if (ret != 0) { dev_err(&dev->dev, "device_add(%s) --> %d\n", dev_name(&intf->dev), ret); continue; } create_intf_ep_devs(intf); } usb_autosuspend_device(dev); return 0; } static LIST_HEAD(set_config_list); static DEFINE_SPINLOCK(set_config_lock); struct set_config_request { struct usb_device *udev; int config; struct work_struct work; struct list_head node; }; /* Worker routine for usb_driver_set_configuration() */ static void driver_set_config_work(struct work_struct *work) { struct set_config_request *req = container_of(work, struct set_config_request, work); struct usb_device *udev = req->udev; usb_lock_device(udev); spin_lock(&set_config_lock); list_del(&req->node); spin_unlock(&set_config_lock); if (req->config >= -1) /* Is req still valid? 
 */
		usb_set_configuration(udev, req->config);
	usb_unlock_device(udev);
	usb_put_dev(udev);	/* drop the ref taken in usb_driver_set_configuration() */
	kfree(req);
}

/* Cancel pending Set-Config requests for a device whose configuration
 * was just changed */
static void cancel_async_set_config(struct usb_device *udev)
{
	struct set_config_request *req;

	spin_lock(&set_config_lock);
	list_for_each_entry(req, &set_config_list, node) {
		if (req->udev == udev)
			/* -999 never passes the "config >= -1" check in
			 * driver_set_config_work(), so the already-queued
			 * work item becomes a no-op. */
			req->config = -999;	/* Mark as cancelled */
	}
	spin_unlock(&set_config_lock);
}

/**
 * usb_driver_set_configuration - Provide a way for drivers to change device configurations
 * @udev: the device whose configuration is being updated
 * @config: the configuration being chosen.
 * Context: In process context, must be able to sleep
 *
 * Device interface drivers are not allowed to change device configurations.
 * This is because changing configurations will destroy the interface the
 * driver is bound to and create new ones; it would be like a floppy-disk
 * driver telling the computer to replace the floppy-disk drive with a
 * tape drive!
 *
 * Still, in certain specialized circumstances the need may arise.  This
 * routine gets around the normal restrictions by using a work thread to
 * submit the change-config request.
 *
 * Returns 0 if the request was successfully queued, error code otherwise.
 * The caller has no way to know whether the queued request will eventually
 * succeed.
 */
int usb_driver_set_configuration(struct usb_device *udev, int config)
{
	struct set_config_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	req->udev = udev;
	req->config = config;
	INIT_WORK(&req->work, driver_set_config_work);

	spin_lock(&set_config_lock);
	list_add(&req->node, &set_config_list);
	spin_unlock(&set_config_lock);

	/* Keep the device alive until the worker runs; the matching
	 * usb_put_dev() is in driver_set_config_work(). */
	usb_get_dev(udev);
	schedule_work(&req->work);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_driver_set_configuration);
gpl-2.0
just-another-one-timmy/wireshark
epan/dissectors/packet-adb_cs.c
21
18009
/* packet-adb_cs.c * Routines for Android Debug Bridge Client-Server Protocol * * Copyright 2014, Michal Labedzki for Tieto Corporation * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See thehf_class * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include <epan/packet.h> #include <epan/prefs.h> #include <epan/expert.h> #include <wiretap/wtap.h> #include "packet-adb_service.h" static int proto_adb_cs = -1; static int hf_role = -1; static int hf_hex_ascii_length = -1; static int hf_length = -1; static int hf_service = -1; static int hf_status = -1; static int hf_data = -1; static int hf_fail_reason = -1; static gint ett_adb_cs = -1; static gint ett_length = -1; static expert_field ei_incomplete_message = EI_INIT; static dissector_handle_t adb_cs_handle; static dissector_handle_t adb_service_handle; static dissector_handle_t data_handle; static wmem_tree_t *client_requests = NULL; static guint server_port = 5037; typedef struct _client_request_t { gint64 service_length; guint8 *service; guint32 first_in; gint64 service_in; gint64 response_frame; guint8 status; gint64 data_length; } client_request_t; static const value_string role_vals[] = { { 0x00, "Unknown" }, { 0x01, "Server" }, { 0x02, "Client" }, { 0, NULL } }; #define SERVICE_NONE NULL 
#define STATUS_UNKNOWN 0 #define STATUS_OKAY 1 #define STATUS_FAIL 2 void proto_register_adb_cs(void); void proto_reg_handoff_adb_cs(void); static gint dissect_adb_cs(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_) { proto_item *main_item; proto_tree *main_tree; proto_item *sub_item; proto_item *p_item; gint offset = 0; gint64 length = -1; gint direction; gboolean client_request_service = FALSE; tvbuff_t *next_tvb; adb_service_data_t adb_service_data; guint32 wireshark_interface_id = 0; col_set_str(pinfo->cinfo, COL_PROTOCOL, "ADB CS"); col_clear(pinfo->cinfo, COL_INFO); main_item = proto_tree_add_item(tree, proto_adb_cs, tvb, offset, -1, ENC_NA); main_tree = proto_item_add_subtree(main_item, ett_adb_cs); if (pinfo->phdr->presence_flags & WTAP_HAS_INTERFACE_ID) wireshark_interface_id = pinfo->phdr->interface_id; if (pinfo->destport == server_port) { /* Client sent to Server */ client_request_t *client_request; guint8 *service = SERVICE_NONE; wmem_tree_t *subtree; wmem_tree_key_t key[5]; direction = P2P_DIR_SENT; p_item = proto_tree_add_uint(main_tree, hf_role, tvb, offset, 0, 0x02); PROTO_ITEM_SET_GENERATED(p_item); col_add_fstr(pinfo->cinfo, COL_INFO, "Client"); if (pinfo->phdr->presence_flags & WTAP_HAS_INTERFACE_ID) wireshark_interface_id = pinfo->phdr->interface_id; key[0].length = 1; key[0].key = &wireshark_interface_id; key[1].length = 1; key[1].key = &pinfo->srcport; key[2].length = 1; key[2].key = &pinfo->destport; key[3].length = 0; key[3].key = NULL; subtree = (wmem_tree_t *) wmem_tree_lookup32_array(client_requests, key); client_request = (subtree) ? 
(client_request_t *) wmem_tree_lookup32_le(subtree, pinfo->fd->num) : NULL; if (client_request && client_request->service_in > -1 && client_request->service_in < pinfo->fd->num) { p_item = proto_tree_add_string(main_tree, hf_service, tvb, offset, 0, client_request->service); PROTO_ITEM_SET_GENERATED(p_item); service = client_request->service; client_request_service = TRUE; } else { if (client_request && client_request->service_in > -1 && client_request->service_in <= pinfo->fd->num) client_request_service = TRUE; client_request = NULL; } /* heuristic to recognize type of (partial) packet */ if (tvb_reported_length_remaining(tvb, offset) >= 4) { guint8 hex_ascii_length[5]; guint32 ulength; hex_ascii_length[4] = 0; tvb_memcpy(tvb, hex_ascii_length, offset, 4); if (g_ascii_xdigit_value(hex_ascii_length[0]) >= 0 && g_ascii_xdigit_value(hex_ascii_length[1]) >= 0 && g_ascii_xdigit_value(hex_ascii_length[2]) >= 0 && g_ascii_xdigit_value(hex_ascii_length[3]) >= 0) { /* probably 4 bytes ascii hex length field */ offset = dissect_ascii_uint32(main_tree, hf_hex_ascii_length, ett_length, hf_length, tvb, offset, &ulength); length = (gint64) ulength; col_append_fstr(pinfo->cinfo, COL_INFO, " Length=%u", ulength); } } if (length == -1 && service) { col_append_fstr(pinfo->cinfo, COL_INFO, " Service=<%s>", service); /* Decode services */ adb_service_data.service = service; adb_service_data.direction = direction; adb_service_data.session_key_length = 3; adb_service_data.session_key = (guint32 *) wmem_alloc(wmem_packet_scope(), adb_service_data.session_key_length * sizeof(guint32)); adb_service_data.session_key[0] = wireshark_interface_id; adb_service_data.session_key[1] = pinfo->destport; adb_service_data.session_key[2] = pinfo->srcport; next_tvb = tvb_new_subset(tvb, offset, tvb_captured_length_remaining(tvb, offset), tvb_captured_length_remaining(tvb, offset)); call_dissector_with_data(adb_service_handle, next_tvb, pinfo, tree, &adb_service_data); return tvb_captured_length(tvb); 
} if (!pinfo->fd->flags.visited && length > 0) { /* save Length to client_requests */ if (pinfo->phdr->presence_flags & WTAP_HAS_INTERFACE_ID) wireshark_interface_id = pinfo->phdr->interface_id; key[0].length = 1; key[0].key = &wireshark_interface_id; key[1].length = 1; key[1].key = &pinfo->srcport; key[2].length = 1; key[2].key = &pinfo->destport; key[3].length = 1; key[3].key = &pinfo->fd->num; key[4].length = 0; key[4].key = NULL; client_request = wmem_new(wmem_file_scope(), client_request_t); client_request->service_length = length; client_request->service = SERVICE_NONE; client_request->response_frame = -1; client_request->first_in = pinfo->fd->num; client_request->service_in = -1; client_request->data_length = -1; wmem_tree_insert32_array(client_requests, key, client_request); } if (!pinfo->fd->flags.visited && (length == -1 || (client_request && client_request->service_in == -1 && tvb_reported_length_remaining(tvb, offset) > 0))) { /* save Service to client_requests */ if (!client_request) { if (pinfo->phdr->presence_flags & WTAP_HAS_INTERFACE_ID) wireshark_interface_id = pinfo->phdr->interface_id; key[0].length = 1; key[0].key = &wireshark_interface_id; key[1].length = 1; key[1].key = &pinfo->srcport; key[2].length = 1; key[2].key = &pinfo->destport; key[3].length = 0; key[3].key = NULL; subtree = (wmem_tree_t *) wmem_tree_lookup32_array(client_requests, key); client_request = (subtree) ? 
(client_request_t *) wmem_tree_lookup32_le(subtree, pinfo->fd->num - 1) : NULL; } if (client_request) { client_request->service = (guint8 *) wmem_alloc(wmem_file_scope(), (const size_t)(client_request->service_length + 1)); tvb_memcpy(tvb, client_request->service, offset, (size_t) client_request->service_length); client_request->service[client_request->service_length] = '\0'; client_request->service_in = pinfo->fd->num; } } if (!client_request_service && tvb_reported_length_remaining(tvb, offset) > 0) { col_append_fstr(pinfo->cinfo, COL_INFO, " Unknown service"); proto_tree_add_item(main_tree, hf_data, tvb, offset, -1, ENC_NA); } else if (tvb_reported_length_remaining(tvb, offset) > 0) { proto_tree_add_item(main_tree, hf_service, tvb, offset, -1, ENC_NA | ENC_ASCII); service = (guint8 *) wmem_alloc(wmem_packet_scope(), tvb_reported_length_remaining(tvb, offset) + 1); tvb_memcpy(tvb, service, offset, tvb_reported_length_remaining(tvb, offset)); service[tvb_reported_length_remaining(tvb, offset)] = '\0'; col_append_fstr(pinfo->cinfo, COL_INFO, " Service=<%s>", service); } offset = tvb_captured_length(tvb); } else if (pinfo->srcport == server_port) { /* Server sent to Client */ guint8 *service = SERVICE_NONE; wmem_tree_t *subtree; wmem_tree_key_t key[5]; client_request_t *client_request; gint64 response_frame = -1; guint8 status = STATUS_UNKNOWN; direction = P2P_DIR_RECV; key[0].length = 1; key[0].key = &wireshark_interface_id; key[1].length = 1; key[1].key = &pinfo->destport; key[2].length = 1; key[2].key = &pinfo->srcport; key[3].length = 0; key[3].key = NULL; subtree = (wmem_tree_t *) wmem_tree_lookup32_array(client_requests, key); client_request = (subtree) ? 
(client_request_t *) wmem_tree_lookup32_le(subtree, pinfo->fd->num - 1) : NULL; if (client_request) { service = client_request->service; status = client_request->status; length = client_request->data_length; response_frame = client_request->response_frame; } p_item = proto_tree_add_uint(main_tree, hf_role, tvb, offset, 0, 0x01); PROTO_ITEM_SET_GENERATED(p_item); p_item = proto_tree_add_string(main_tree, hf_service, tvb, offset, 0, service); PROTO_ITEM_SET_GENERATED(p_item); col_add_fstr(pinfo->cinfo, COL_INFO, "Server"); if (!service) { col_append_fstr(pinfo->cinfo, COL_INFO, " Unknown service"); proto_tree_add_item(main_tree, hf_data, tvb, offset, -1, ENC_NA); return tvb_captured_length(tvb); } if (response_frame == -1 || response_frame == (gint64) pinfo->fd->num) { proto_tree_add_item(main_tree, hf_status, tvb, offset, 4, ENC_NA | ENC_ASCII); col_append_fstr(pinfo->cinfo, COL_INFO, " Status=%c%c%c%c", tvb_get_guint8(tvb, offset), tvb_get_guint8(tvb, offset + 1), tvb_get_guint8(tvb, offset + 2), tvb_get_guint8(tvb, offset + 3)); offset += 4; if (tvb_memeql(tvb, offset - 4, "FAIL", 4) == 0) { guint32 ulength; offset = dissect_ascii_uint32(main_tree, hf_hex_ascii_length, ett_length, hf_length, tvb, offset, &ulength); length = (gint64) ulength; status = STATUS_FAIL; } else if (tvb_memeql(tvb, offset - 4, "OKAY", 4) == 0) { status = STATUS_OKAY; length = -1; } if (!pinfo->fd->flags.visited && client_request) { client_request->response_frame = pinfo->fd->num; client_request->status = status; client_request->data_length = length; } } col_append_fstr(pinfo->cinfo, COL_INFO, " Service=<%s>", service); if (tvb_reported_length_remaining(tvb, offset) <= 0) return offset; if (status == STATUS_FAIL) { sub_item = proto_tree_add_item(main_tree, hf_fail_reason, tvb, offset, -1, ENC_NA | ENC_ASCII); if (length < tvb_reported_length_remaining(tvb, offset)) { expert_add_info(pinfo, sub_item, &ei_incomplete_message); } col_append_fstr(pinfo->cinfo, COL_INFO, " Fail=<%s>", 
tvb_get_string_enc(wmem_packet_scope(), tvb, offset, tvb_reported_length_remaining(tvb, offset), ENC_ASCII)); return tvb_captured_length(tvb); } /* Decode services */ adb_service_data.service = service; adb_service_data.direction = direction; adb_service_data.session_key_length = 3; adb_service_data.session_key = (guint32 *) wmem_alloc(wmem_packet_scope(), adb_service_data.session_key_length * sizeof(guint32)); adb_service_data.session_key[0] = wireshark_interface_id; adb_service_data.session_key[1] = pinfo->destport; adb_service_data.session_key[2] = pinfo->srcport; next_tvb = tvb_new_subset(tvb, offset, tvb_captured_length_remaining(tvb, offset), tvb_captured_length_remaining(tvb, offset)); call_dissector_with_data(adb_service_handle, next_tvb, pinfo, tree, &adb_service_data); offset = tvb_captured_length(tvb); } else { col_add_fstr(pinfo->cinfo, COL_INFO, "Unknown role"); p_item = proto_tree_add_uint(main_tree, hf_role, tvb, offset, 0, 0x00); PROTO_ITEM_SET_GENERATED(p_item); next_tvb = tvb_new_subset_remaining(tvb, offset); call_dissector(data_handle, next_tvb, pinfo, main_tree); offset += tvb_captured_length_remaining(tvb, offset); } return offset; } void proto_register_adb_cs(void) { module_t *module; expert_module_t *expert_module; static hf_register_info hf[] = { { &hf_role, { "Role", "adb_cs.role", FT_UINT8, BASE_HEX, VALS(role_vals), 0x00, NULL, HFILL } }, { &hf_hex_ascii_length, { "Hex ASCII Length", "adb_cs.hex_ascii_length", FT_STRING, STR_ASCII, NULL, 0x00, NULL, HFILL } }, { &hf_length, { "Length", "adb_cs.length", FT_UINT32, BASE_DEC_HEX, NULL, 0x00, NULL, HFILL } }, { &hf_service, { "Service", "adb_cs.service", FT_STRING, STR_ASCII, NULL, 0x00, NULL, HFILL } }, { &hf_fail_reason, { "Fail Reason", "adb_cs.fail_reason", FT_STRING, STR_ASCII, NULL, 0x00, NULL, HFILL } }, { &hf_status, { "Status", "adb_cs.status", FT_STRING, STR_ASCII, NULL, 0x00, NULL, HFILL } }, { &hf_data, { "Data", "adb_cs.data", FT_BYTES, BASE_NONE, NULL, 0x00, NULL, HFILL } }, }; 
static gint *ett[] = { &ett_adb_cs, &ett_length }; static ei_register_info ei[] = { { &ei_incomplete_message, { "adb_cs.expert.incomplete_message", PI_PROTOCOL, PI_WARN, "Incomplete message", EXPFILL }}, }; client_requests = wmem_tree_new_autoreset(wmem_epan_scope(), wmem_file_scope()); proto_adb_cs = proto_register_protocol("Android Debug Bridge Client-Server", "ADB CS", "adb_cs"); adb_cs_handle = new_register_dissector("adb_cs", dissect_adb_cs, proto_adb_cs); proto_register_field_array(proto_adb_cs, hf, array_length(hf)); proto_register_subtree_array(ett, array_length(ett)); expert_module = expert_register_protocol(proto_adb_cs); expert_register_field_array(expert_module, ei, array_length(ei)); module = prefs_register_protocol(proto_adb_cs, NULL); prefs_register_static_text_preference(module, "version", "ADB CS protocol version is compatibile pior to: adb 1.0.31", "Version of protocol supported by this dissector."); prefs_register_uint_preference(module, "server_port", "Server Port", "Server Port", 10, &server_port); } void proto_reg_handoff_adb_cs(void) { data_handle = find_dissector("data"); adb_service_handle = find_dissector("adb_service"); dissector_add_for_decode_as("tcp.port", adb_cs_handle); } /* * Editor modelines - http://www.wireshark.org/tools/modelines.html * * Local variables: * c-basic-offset: 4 * tab-width: 8 * indent-tabs-mode: nil * End: * * vi: set shiftwidth=4 tabstop=8 expandtab: * :indentSize=4:tabSize=8:noTabs=true: */
gpl-2.0
Pascal-TK/Beagleboard-xM-Kernel
drivers/misc/sgi-gru/grufile.c
21
11611
/* * SN Platform GRU Driver * * FILE OPERATIONS & DRIVER INITIALIZATION * * This file supports the user system call for file open, close, mmap, etc. * This also incudes the driver initialization code. * * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/uaccess.h> #include <asm/uv/uv.h> #include "gru.h" #include "grulib.h" #include "grutables.h" #include <asm/uv/uv_hub.h> #include <asm/uv/uv_mmrs.h> struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; unsigned long gru_start_paddr __read_mostly; void *gru_start_vaddr __read_mostly; unsigned long gru_end_paddr __read_mostly; unsigned int gru_max_gids __read_mostly; struct gru_stats_s gru_stats; /* Guaranteed user available resources on each node */ static int max_user_cbrs, max_user_dsr_bytes; static struct miscdevice gru_miscdev; /* * gru_vma_close * * Called when unmapping a device mapping. Frees all gru resources * and tables belonging to the vma. 
*/
/*
 * gru_vma_close
 *
 * vma close callback: detach the private data from the vma, unload any
 * GRU context still resident for each attached thread state, and free
 * the per-vma bookkeeping.
 */
static void gru_vma_close(struct vm_area_struct *vma)
{
	struct gru_vma_data *vdata;
	struct gru_thread_state *gts;
	struct list_head *entry, *next;

	/* Nothing was ever attached to this vma. */
	if (!vma->vm_private_data)
		return;

	vdata = vma->vm_private_data;
	vma->vm_private_data = NULL;
	gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file,
				vdata);
	/* Unload & drop every thread state hanging off this vma. */
	list_for_each_safe(entry, next, &vdata->vd_head) {
		gts = list_entry(entry, struct gru_thread_state, ts_next);
		list_del(&gts->ts_next);
		mutex_lock(&gts->ts_ctxlock);
		if (gts->ts_gru)
			gru_unload_context(gts, 0);
		mutex_unlock(&gts->ts_ctxlock);
		gts_drop(gts);
	}
	kfree(vdata);
	STAT(vdata_free);
}

/*
 * gru_file_mmap
 *
 * Called when mmapping the device.  Initializes the vma with a fault handler
 * and private data structure necessary to allocate, track, and free the
 * underlying pages.
 */
static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* GRU segments must be mapped shared and writable. */
	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
		return -EPERM;

	/* Both ends of the mapping must be GSEG-page aligned. */
	if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
				vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
		return -EINVAL;

	vma->vm_flags |= (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND |
			VM_PFNMAP | VM_RESERVED);
	vma->vm_page_prot = PAGE_SHARED;
	vma->vm_ops = &gru_vm_ops;

	vma->vm_private_data = gru_alloc_vma_data(vma, 0);
	if (!vma->vm_private_data)
		return -ENOMEM;

	gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n",
		file, vma->vm_start, vma, vma->vm_private_data);
	return 0;
}

/*
 * Create a new GRU context
 */
static int gru_create_new_context(unsigned long arg)
{
	struct gru_create_context_req req;
	struct vm_area_struct *vma;
	struct gru_vma_data *vdata;
	int ret = -EINVAL;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Validate requested resources against the system-wide maxima. */
	if (req.data_segment_bytes > max_user_dsr_bytes)
		return -EINVAL;
	if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count)
		return -EINVAL;

	/* Default the TLB-miss handling mode if the caller gave none. */
	if (!(req.options & GRU_OPT_MISS_MASK))
		req.options |= GRU_OPT_MISS_FMM_INTR;

	down_write(&current->mm->mmap_sem);
	vma = gru_find_vma(req.gseg);
	if (vma) {
		vdata = vma->vm_private_data;
		vdata->vd_user_options = req.options;
		vdata->vd_dsr_au_count =
		    GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
		vdata->vd_cbr_au_count =
		    GRU_CB_COUNT_TO_AU(req.control_blocks);
		ret = 0;
	}
	up_write(&current->mm->mmap_sem);

	return ret;
}

/*
 * Get GRU configuration info (temp - for emulator testing)
 */
static long gru_get_config_info(unsigned long arg)
{
	struct gru_config_info info;
	int nodesperblade;

	/*
	 * Two nodes mapping to the same blade id implies two nodes per
	 * blade; otherwise assume one.
	 */
	if (num_online_nodes() > 1 &&
			(uv_node_to_blade_id(1) == uv_node_to_blade_id(0)))
		nodesperblade = 2;
	else
		nodesperblade = 1;
	info.cpus = num_online_cpus();
	info.nodes = num_online_nodes();
	info.blades = info.nodes / nodesperblade;
	info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades;

	/*
	 * NOTE(review): info is not zeroed first, so any struct padding is
	 * copied to userspace uninitialized -- possible infoleak; confirm
	 * against struct gru_config_info layout.
	 */
	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * gru_file_unlocked_ioctl
 *
 * Called to update file attributes via IOCTL calls.
 */
static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
				    unsigned long arg)
{
	/* -EBADRQC is returned for any request code not handled below. */
	int err = -EBADRQC;

	gru_dbg(grudev, "file %p\n", file);

	switch (req) {
	case GRU_CREATE_CONTEXT:
		err = gru_create_new_context(arg);
		break;
	case GRU_SET_CONTEXT_OPTION:
		err = gru_set_context_option(arg);
		break;
	case GRU_USER_GET_EXCEPTION_DETAIL:
		err = gru_get_exception_detail(arg);
		break;
	case GRU_USER_UNLOAD_CONTEXT:
		err = gru_user_unload_context(arg);
		break;
	case GRU_USER_FLUSH_TLB:
		err = gru_user_flush_tlb(arg);
		break;
	case GRU_USER_CALL_OS:
		err = gru_handle_user_call_os(arg);
		break;
	case GRU_GET_GSEG_STATISTICS:
		err = gru_get_gseg_statistics(arg);
		break;
	case GRU_KTEST:
		err = gru_ktest(arg);
		break;
	case GRU_GET_CONFIG_INFO:
		err = gru_get_config_info(arg);
		break;
	case GRU_DUMP_CHIPLET_STATE:
		err = gru_dump_chiplet_request(arg);
		break;
	}
	return err;
}

/*
 * Called at init time to build tables for all GRUs that are present in the
 * system.
 */
static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
			     void *vaddr, int nid, int bid, int grunum)
{
	spin_lock_init(&gru->gs_lock);
	spin_lock_init(&gru->gs_asid_lock);
	gru->gs_gru_base_paddr = paddr;
	gru->gs_gru_base_vaddr = vaddr;
	/* Global gru id: blade number * chiplets-per-blade + chiplet. */
	gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum;
	gru->gs_blade = gru_base[bid];
	gru->gs_blade_id = bid;
	/* Bitmaps of free CBR / DSR allocation units (all free initially). */
	gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
	gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
	gru->gs_asid_limit = MAX_ASID;
	gru_tgh_flush_init(gru);
	/* Track the highest gid seen so far. */
	if (gru->gs_gid >= gru_max_gids)
		gru_max_gids = gru->gs_gid + 1;
	gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n",
		bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
		gru->gs_gru_base_paddr);
}

/*
 * Allocate & initialize the per-blade state for every online blade and
 * every chiplet on it.  Also computes the per-user CBR/DSR maxima as the
 * minimum over all chiplets.
 */
static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
{
	int pnode, nid, bid, chip;
	int cbrs, dsrbytes, n;
	int order = get_order(sizeof(struct gru_blade_state));
	struct page *page;
	struct gru_state *gru;
	unsigned long paddr;
	void *vaddr;

	max_user_cbrs = GRU_NUM_CB;
	max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
	for_each_online_node(nid) {
		bid = uv_node_to_blade_id(nid);
		pnode = uv_node_to_pnode(nid);
		/* Skip invalid blades and blades already initialized. */
		if (bid < 0 || gru_base[bid])
			continue;
		page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
		if (!page)
			goto fail;
		gru_base[bid] = page_address(page);
		memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
		gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
		spin_lock_init(&gru_base[bid]->bs_lock);
		init_rwsem(&gru_base[bid]->bs_kgts_sema);

		dsrbytes = 0;
		cbrs = 0;
		for (gru = gru_base[bid]->bs_grus, chip = 0;
				chip < GRU_CHIPLETS_PER_BLADE;
				chip++, gru++) {
			paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
			vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
			gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
			n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
			cbrs = max(cbrs, n);
			n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
			dsrbytes = max(dsrbytes, n);
		}
		max_user_cbrs = min(max_user_cbrs, cbrs);
		max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes);
	}

	return 0;

fail:
	/*
	 * NOTE(review): the unwind indexes gru_base[] by *nid*, but entries
	 * were populated by *bid*, and blades skipped via "continue" are not
	 * accounted for -- may free NULL/wrong entries; verify against the
	 * mainline fix for this driver.
	 */
	for (nid--; nid >= 0; nid--)
		free_pages((unsigned long)gru_base[nid], order);
	return -ENOMEM;
}

#ifdef CONFIG_IA64
static int get_base_irq(void)
{
	return IRQ_GRU;
}
#elif defined CONFIG_X86_64
/* No-op irq_chip callbacks: GRU interrupts need no mask/unmask/ack. */
static void noop(unsigned int irq)
{
}

static struct irq_chip gru_chip = {
	.name = "gru",
	.mask = noop,
	.unmask = noop,
	.ack = noop,
};

static int get_base_irq(void)
{
	set_irq_chip(IRQ_GRU, &gru_chip);
	set_irq_chip(IRQ_GRU + 1, &gru_chip);
	return IRQ_GRU;
}
#endif

/*
 * gru_init
 *
 * Called at boot or module load time to initialize the GRUs.
 */
static int __init gru_init(void)
{
	int ret, irq, chip;
	/*
	 * NOTE(review): id[] is never written before being passed to
	 * request_irq() as the irq name -- reads uninitialized stack
	 * memory; upstream formats a name into it first.  Confirm.
	 */
	char id[10];

	/* GRU hardware exists only on UV systems. */
	if (!is_uv_system())
		return 0;

#if defined CONFIG_IA64
	gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
#else
	gru_start_paddr =
	    uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
	    0x7fffffffffffUL;
#endif
	gru_start_vaddr = __va(gru_start_paddr);
	gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
	printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
	       gru_start_paddr, gru_end_paddr);
	irq = get_base_irq();
	for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
		ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
		/* TODO: fix irq handling on x86. For now ignore failure
		 * because interrupts are not required & not yet fully
		 * supported */
		if (ret) {
			printk(KERN_WARNING
			       "!!!WARNING: GRU ignoring request failure!!!\n");
			ret = 0;
		}
		/*
		 * NOTE(review): dead code -- ret was just forced to 0
		 * above, so this branch can never be taken.
		 */
		if (ret) {
			printk(KERN_ERR "%s: request_irq failed\n",
			       GRU_DRIVER_ID_STR);
			goto exit1;
		}
	}

	ret = misc_register(&gru_miscdev);
	if (ret) {
		printk(KERN_ERR "%s: misc_register failed\n",
		       GRU_DRIVER_ID_STR);
		goto exit1;
	}

	ret = gru_proc_init();
	if (ret) {
		printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
		goto exit2;
	}

	ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
	if (ret) {
		printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
		goto exit3;
	}
	gru_kservices_init();

	printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
	       GRU_DRIVER_VERSION_STR);
	return 0;

exit3:
	gru_proc_exit();
exit2:
	misc_deregister(&gru_miscdev);
exit1:
	for (--chip; chip >= 0; chip--)
		free_irq(irq + chip, NULL);
	return ret;
}

static void __exit gru_exit(void)
{
	int i, bid;
	/*
	 * NOTE(review): order is computed from sizeof(struct gru_state) *
	 * GRU_CHIPLETS_PER_BLADE, but gru_init_tables() allocated with
	 * get_order(sizeof(struct gru_blade_state)).  If these differ the
	 * free is mis-sized; confirm against the upstream fix.
	 */
	int order = get_order(sizeof(struct gru_state) *
			      GRU_CHIPLETS_PER_BLADE);

	if (!is_uv_system())
		return;

	for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
		free_irq(IRQ_GRU + i, NULL);
	gru_kservices_exit();
	/* free_pages(NULL-derived 0, ...) is a no-op for absent blades. */
	for (bid = 0; bid < GRU_MAX_BLADES; bid++)
		free_pages((unsigned long)gru_base[bid], order);

	misc_deregister(&gru_miscdev);
	gru_proc_exit();
}

static const struct file_operations gru_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= gru_file_unlocked_ioctl,
	.mmap		= gru_file_mmap,
};

static struct miscdevice gru_miscdev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "gru",
	.fops		= &gru_fops,
};

const struct vm_operations_struct gru_vm_ops = {
	.close		= gru_vma_close,
	.fault		= gru_fault,
};

#ifndef MODULE
fs_initcall(gru_init);
#else
module_init(gru_init);
#endif
module_exit(gru_exit);

module_param(gru_options, ulong, 0644);
MODULE_PARM_DESC(gru_options, "Various debug options");

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR);
MODULE_VERSION(GRU_DRIVER_VERSION_STR);
gpl-2.0
jwpi/glibc
sysdeps/sparc/sparc32/soft-fp/q_sqrt.c
21
1246
/* Software floating-point emulation.
   Return sqrtl(a)
   Copyright (C) 1997-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Richard Henderson (rth@cygnus.com) and
		  Jakub Jelinek (jj@ultra.linux.cz).

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include "soft-fp.h"
#include "quad.h"

/* Quad-precision square root via the soft-fp unpack/op/pack
   primitives: unpack the argument, take the square root in the raw
   representation, repack, and raise any accumulated FP exceptions
   before returning.  */
long double _Q_sqrt(const long double a)
{
  FP_DECL_EX;
  FP_DECL_Q(IN);
  FP_DECL_Q(ROOT);
  long double result;

  FP_INIT_ROUNDMODE;
  FP_UNPACK_Q(IN, a);
  FP_SQRT_Q(ROOT, IN);
  FP_PACK_Q(result, ROOT);
  FP_HANDLE_EXCEPTIONS;

  return result;
}

strong_alias (_Q_sqrt, __ieee754_sqrtl);
gpl-2.0
AndroidPrimou/android_kernel_htc_msm7x30
drivers/video/msm/mdp_vsync.c
21
11787
/* Copyright (c) 2008-2009, 2012-2013 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/vmalloc.h>
#include <linux/clk.h>
#include <mach/hardware.h>
#include <linux/io.h>

#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <mach/gpio.h>

#include "mdp.h"
#include "msm_fb.h"
#include "mddihost.h"

/* MDP register offsets differ between MDP 4.0 and earlier revisions. */
#ifdef CONFIG_FB_MSM_MDP40
#include "mdp4.h"
#define MDP_SYNC_CFG_0		0x100
#define MDP_SYNC_STATUS_0	0x10c
#define MDP_SYNC_CFG_1		0x104
#define MDP_SYNC_STATUS_1	0x110
#define MDP_PRIM_VSYNC_OUT_CTRL	0x118
#define MDP_SEC_VSYNC_OUT_CTRL	0x11C
#define MDP_VSYNC_SEL		0x124
#define MDP_PRIM_VSYNC_INIT_VAL	0x128
#define MDP_SEC_VSYNC_INIT_VAL	0x12C
#else
#define MDP_SYNC_CFG_0		0x300
#define MDP_SYNC_STATUS_0	0x30c
#define MDP_PRIM_VSYNC_OUT_CTRL	0x318
#define MDP_PRIM_VSYNC_INIT_VAL	0x328
#endif

extern mddi_lcd_type mddi_lcd_idx;
extern spinlock_t mdp_spin_lock;
extern struct workqueue_struct *mdp_vsync_wq;
extern int lcdc_mode;
extern int vsync_mode;

#ifdef MDP_HW_VSYNC
/* vsync line-counter thresholds programmed into the MDP. */
int vsync_above_th = 4;
int vsync_start_th = 1;
/* Line-counter value reloaded on each vsync pulse. */
int vsync_load_cnt;
/* 1 while the hw vsync clock is enabled; guarded by vsync_clk_lock. */
int vsync_clk_status;
DEFINE_MUTEX(vsync_clk_lock);
/* Serializes timer (re)arming against shutdown in mdp_vsync_clk_disable(). */
static DEFINE_SPINLOCK(vsync_timer_lock);

static struct clk *mdp_vsync_clk;
/* Last mfd that configured vsync; used by the clk enable/disable helpers. */
static struct msm_fb_data_type *vsync_mfd;
/* Set while the resync timer is being torn down to stop it re-arming. */
static unsigned char timer_shutdown_flag;
static uint32 vsync_cnt_cfg;

/* Enable the vsync clock if one was obtained at config time. */
void vsync_clk_prepare_enable(void)
{
	if (mdp_vsync_clk)
		clk_prepare_enable(mdp_vsync_clk);
}

/* Counterpart of vsync_clk_prepare_enable(). */
void vsync_clk_disable_unprepare(void)
{
	if (mdp_vsync_clk)
		clk_disable_unprepare(mdp_vsync_clk);
}

/* Turn on the hw vsync clock (idempotent; tracked in vsync_clk_status). */
void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd)
{
	if (vsync_clk_status == 1)
		return;
	mutex_lock(&vsync_clk_lock);
	if (mfd->use_mdp_vsync) {
		clk_prepare_enable(mdp_vsync_clk);
		vsync_clk_status = 1;
	}
	mutex_unlock(&vsync_clk_lock);
}

/* Turn off the hw vsync clock (idempotent). */
void mdp_hw_vsync_clk_disable(struct msm_fb_data_type *mfd)
{
	if (vsync_clk_status == 0)
		return;
	mutex_lock(&vsync_clk_lock);
	if (mfd->use_mdp_vsync) {
		clk_disable_unprepare(mdp_vsync_clk);
		vsync_clk_status = 0;
	}
	mutex_unlock(&vsync_clk_lock);
}

static void mdp_set_vsync(unsigned long data);

/* Re-enable the clock and restart the resync timer if it was stopped. */
void mdp_vsync_clk_enable(void)
{
	if (vsync_mfd) {
		mdp_hw_vsync_clk_enable(vsync_mfd);
		if (!vsync_mfd->vsync_resync_timer.function)
			mdp_set_vsync((unsigned long) vsync_mfd);
	}
}

/*
 * Stop the resync timer and disable the clock.  timer_shutdown_flag is
 * raised around del_timer_sync() so a concurrently running mdp_set_vsync()
 * cannot re-arm the timer.
 */
void mdp_vsync_clk_disable(void)
{
	if (vsync_mfd) {
		if (vsync_mfd->vsync_resync_timer.function) {
			spin_lock(&vsync_timer_lock);
			timer_shutdown_flag = 1;
			spin_unlock(&vsync_timer_lock);
			del_timer_sync(&vsync_mfd->vsync_resync_timer);
			spin_lock(&vsync_timer_lock);
			timer_shutdown_flag = 0;
			spin_unlock(&vsync_timer_lock);
			vsync_mfd->vsync_resync_timer.function = NULL;
		}
		mdp_hw_vsync_clk_disable(vsync_mfd);
	}
}
#endif

/*
 * Timer callback (also called directly to start the cycle): queue the
 * vsync resync work if the panel is on and no handler is pending, then
 * re-arm itself unless shutdown is in progress.
 */
static void mdp_set_vsync(unsigned long data)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
	struct msm_fb_panel_data *pdata = NULL;

	pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;

	vsync_mfd = mfd;
	init_timer(&mfd->vsync_resync_timer);

	/* Panel provides no vsync notifier hook -- nothing to resync. */
	if ((pdata) && (pdata->set_vsync_notifier == NULL))
		return;

	if ((mfd->panel_info.lcd.vsync_enable) && (mfd->panel_power_on)
	    && (!mfd->vsync_handler_pending)) {
		mfd->vsync_handler_pending = TRUE;
		if (!queue_work(mdp_vsync_wq, &mfd->vsync_resync_worker)) {
			MSM_FB_INFO
			    ("mdp_set_vsync: can't queue_work! -> needs to increase vsync_resync_timer_duration\n");
		}
	} else {
		MSM_FB_DEBUG
		    ("mdp_set_vsync failed! EN:%d PWR:%d PENDING:%d\n",
		     mfd->panel_info.lcd.vsync_enable, mfd->panel_power_on,
		     mfd->vsync_handler_pending);
	}

	/* Re-arm only if mdp_vsync_clk_disable() is not tearing us down. */
	spin_lock(&vsync_timer_lock);
	if (!timer_shutdown_flag) {
		mfd->vsync_resync_timer.function = mdp_set_vsync;
		mfd->vsync_resync_timer.data = data;
		mfd->vsync_resync_timer.expires =
			jiffies + mfd->panel_info.lcd.vsync_notifier_period;
		add_timer(&mfd->vsync_resync_timer);
	}
	spin_unlock(&vsync_timer_lock);
}

/*
 * Per-vsync callback: with hw vsync, reload the MDP line counter; in sw
 * mode just timestamp the tick.  Always clears the pending flag.
 */
static void mdp_vsync_handler(void *data)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;

	if (vsync_clk_status == 0) {
		pr_debug("Warning: vsync clk is disabled\n");
		mfd->vsync_handler_pending = FALSE;
		return;
	}

	if (mfd->use_mdp_vsync) {
#ifdef MDP_HW_VSYNC
		if (mfd->panel_power_on) {
			MDP_OUTP(MDP_BASE + MDP_SYNC_STATUS_0, vsync_load_cnt);
#ifdef CONFIG_FB_MSM_MDP40
			/* Older MDP4 revisions have a second sync engine. */
			if (mdp_hw_revision < MDP4_REVISION_V2_1)
				MDP_OUTP(MDP_BASE + MDP_SYNC_STATUS_1,
					 vsync_load_cnt);
#endif
		}
#endif
	} else {
		mfd->last_vsync_timetick = ktime_get_real();
	}

	mfd->vsync_handler_pending = FALSE;
}

irqreturn_t mdp_hw_vsync_handler_proxy(int irq, void *data)
{
	/*
	 * ToDo: tried enabling/disabling GPIO MDP HW VSYNC interrupt
	 * but getting inaccurate timing in mdp_vsync_handler()
	 * disable_irq(MDP_HW_VSYNC_IRQ);
	 */
	mdp_vsync_handler(data);

	return IRQ_HANDLED;
}

#ifdef MDP_HW_VSYNC
/* Program SYNC_CFG_0: total line count, ext-vsync mode and divider. */
static void mdp_set_sync_cfg_0(struct msm_fb_data_type *mfd, int vsync_cnt)
{
	unsigned long cfg;

	if (mfd->panel_info.lcd.total_lines)
		cfg = mfd->panel_info.lcd.total_lines;
	else
		cfg = mfd->total_lcd_lines - 1;
	cfg <<= MDP_SYNCFG_HGT_LOC;
	if (mfd->panel_info.lcd.hw_vsync_mode)
		cfg |= MDP_SYNCFG_VSYNC_EXT_EN;
	cfg |= (MDP_SYNCFG_VSYNC_INT_EN | vsync_cnt);
	MDP_OUTP(MDP_BASE + MDP_SYNC_CFG_0, cfg);
}

#ifdef CONFIG_FB_MSM_MDP40
/* Same as mdp_set_sync_cfg_0() but for the second sync engine. */
static void mdp_set_sync_cfg_1(struct msm_fb_data_type *mfd, int vsync_cnt)
{
	unsigned long cfg;

	if (mfd->panel_info.lcd.total_lines)
		cfg = mfd->panel_info.lcd.total_lines;
	else
		cfg = mfd->total_lcd_lines - 1;
	cfg <<= MDP_SYNCFG_HGT_LOC;
	if (mfd->panel_info.lcd.hw_vsync_mode)
		cfg |= MDP_SYNCFG_VSYNC_EXT_EN;
	cfg |= (MDP_SYNCFG_VSYNC_INT_EN | vsync_cnt);
	MDP_OUTP(MDP_BASE + MDP_SYNC_CFG_1, cfg);
}
#endif

/* Program all MDP vsync registers (counters, init values, thresholds). */
void mdp_vsync_cfg_regs(struct msm_fb_data_type *mfd, boolean first_time)
{
	/* MDP cmd block enable */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

	if (first_time)
		mdp_hw_vsync_clk_enable(mfd);

	mdp_set_sync_cfg_0(mfd, vsync_cnt_cfg);

#ifdef CONFIG_FB_MSM_MDP40
	if (mdp_hw_revision < MDP4_REVISION_V2_1)
		mdp_set_sync_cfg_1(mfd, vsync_cnt_cfg);
#endif

	/*
	 * load the last line + 1 to be in the
	 * safety zone
	 */
	vsync_load_cnt = mfd->panel_info.yres;

	/* line counter init value at the next pulse */
	MDP_OUTP(MDP_BASE + MDP_PRIM_VSYNC_INIT_VAL, vsync_load_cnt);
#ifdef CONFIG_FB_MSM_MDP40
	if (mdp_hw_revision < MDP4_REVISION_V2_1) {
		MDP_OUTP(MDP_BASE + MDP_SEC_VSYNC_INIT_VAL,
			 vsync_load_cnt);
	}
#endif

	/*
	 * external vsync source pulse width and
	 * polarity flip
	 */
	MDP_OUTP(MDP_BASE + MDP_PRIM_VSYNC_OUT_CTRL, BIT(0));
#ifdef CONFIG_FB_MSM_MDP40
	if (mdp_hw_revision < MDP4_REVISION_V2_1) {
		MDP_OUTP(MDP_BASE + MDP_SEC_VSYNC_OUT_CTRL, BIT(0));
		MDP_OUTP(MDP_BASE + MDP_VSYNC_SEL, 0x20);
	}
#endif

	/* threshold */
	MDP_OUTP(MDP_BASE + 0x200, (vsync_above_th << 16) |
		 (vsync_start_th));

	if (first_time)
		mdp_hw_vsync_clk_disable(mfd);

	/* MDP cmd block disable */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
#endif

/*
 * One-time vsync setup for a panel: compute line/timing totals, pick hw
 * vsync (MDP clock based) or sw vsync (hrtimer based), and kick off the
 * resync timer.
 */
void mdp_config_vsync(struct platform_device *pdev,
			struct msm_fb_data_type *mfd)
{
	/* vsync on primary lcd only for now */
	if ((mfd->dest != DISPLAY_LCD) || (mfd->panel_info.pdest != DISPLAY_1)
	    || (!vsync_mode)) {
		goto err_handle;
	}

	vsync_clk_status = 0;
	if (mfd->panel_info.lcd.vsync_enable) {
		mfd->total_porch_lines = mfd->panel_info.lcd.v_back_porch +
		    mfd->panel_info.lcd.v_front_porch +
		    mfd->panel_info.lcd.v_pulse_width;
		mfd->total_lcd_lines =
		    mfd->panel_info.yres + mfd->total_porch_lines;
		/* refx100 is refresh rate * 100, so this is usec/frame. */
		mfd->lcd_ref_usec_time =
		    100000000 / mfd->panel_info.lcd.refx100;
		mfd->vsync_handler_pending = FALSE;
		mfd->last_vsync_timetick.tv64 = 0;

#ifdef MDP_HW_VSYNC
		if (mdp_vsync_clk == NULL)
			mdp_vsync_clk = clk_get(&pdev->dev, "vsync_clk");
		if (IS_ERR(mdp_vsync_clk)) {
			printk(KERN_ERR "error: can't get mdp_vsync_clk!\n");
			mfd->use_mdp_vsync = 0;
		} else
			mfd->use_mdp_vsync = 1;

		if (mfd->use_mdp_vsync) {
			uint32 vsync_cnt_cfg_dem;
			uint32 mdp_vsync_clk_speed_hz;

			mdp_vsync_clk_speed_hz = clk_get_rate(mdp_vsync_clk);

			if (mdp_vsync_clk_speed_hz == 0) {
				mfd->use_mdp_vsync = 0;
			} else {
				/*
				 * Do this calculation in 2 steps for
				 * rounding uint32 properly.
				 */
				vsync_cnt_cfg_dem =
				    (mfd->panel_info.lcd.refx100 *
				     mfd->total_lcd_lines) / 100;
				vsync_cnt_cfg =
				    (mdp_vsync_clk_speed_hz) /
				    vsync_cnt_cfg_dem;
				mdp_vsync_cfg_regs(mfd, TRUE);
			}
		}
#else
		/* sw vsync: emulate the pulse with an hrtimer. */
		mfd->use_mdp_vsync = 0;
		hrtimer_init(&mfd->dma_hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		mfd->dma_hrtimer.function = mdp_dma2_vsync_hrtimer_handler;
		mfd->vsync_width_boundary = vmalloc(mfd->panel_info.xres * 4);
#endif
		mdp_hw_vsync_clk_enable(mfd);
		mdp_set_vsync((unsigned long)mfd);
	}

	return;

err_handle:
	if (mfd->vsync_width_boundary)
		vfree(mfd->vsync_width_boundary);
	mfd->panel_info.lcd.vsync_enable = FALSE;
	printk(KERN_ERR "%s: failed!\n", __func__);
}

/*
 * Workqueue handler queued from mdp_set_vsync(): (re)register the vsync
 * notifier with the panel if it is powered on; otherwise clear the
 * pending flag so the timer can try again.
 */
void mdp_vsync_resync_workqueue_handler(struct work_struct *work)
{
	struct msm_fb_data_type *mfd = NULL;
	int vsync_fnc_enabled = FALSE;
	struct msm_fb_panel_data *pdata = NULL;

	mfd = container_of(work, struct msm_fb_data_type,
			   vsync_resync_worker);

	if (mfd) {
		if (mfd->panel_power_on) {
			pdata =
			    (struct msm_fb_panel_data *)mfd->pdev->dev.
			    platform_data;
			if (pdata->set_vsync_notifier != NULL) {
				/* clk_func(2): bail if clocks unavailable. */
				if (pdata->clk_func && !pdata->clk_func(2)) {
					mfd->vsync_handler_pending = FALSE;
					return;
				}
				pdata->set_vsync_notifier(
						mdp_vsync_handler,
						(void *)mfd);
				vsync_fnc_enabled = TRUE;
			}
		}
	}

	if ((mfd) && (!vsync_fnc_enabled))
		mfd->vsync_handler_pending = FALSE;
}

boolean mdp_hw_vsync_set_handler(msm_fb_vsync_handler_type handler, void *data)
{
	/*
	 * ToDo: tried enabling/disabling GPIO MDP HW VSYNC interrupt
	 * but getting inaccurate timing in mdp_vsync_handler()
	 * enable_irq(MDP_HW_VSYNC_IRQ);
	 */
	return TRUE;
}

/*
 * Estimate the panel's current scan line from the time elapsed since the
 * last recorded vsync tick.  Returns 0 when vsync is disabled.
 */
uint32 mdp_get_lcd_line_counter(struct msm_fb_data_type *mfd)
{
	uint32 elapsed_usec_time;
	uint32 lcd_line;
	ktime_t last_vsync_timetick_local;
	ktime_t curr_time;
	unsigned long flag;

	if ((!mfd->panel_info.lcd.vsync_enable) || (!vsync_mode))
		return 0;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	last_vsync_timetick_local = mfd->last_vsync_timetick;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	curr_time = ktime_get_real();
	elapsed_usec_time = ktime_to_us(ktime_sub(curr_time,
						  last_vsync_timetick_local));

	/* Fold full frames away; keep position within the current frame. */
	elapsed_usec_time = elapsed_usec_time % mfd->lcd_ref_usec_time;

	/* lcd line calculation referencing to line counter = 0 */
	lcd_line =
	    (elapsed_usec_time * mfd->total_lcd_lines) / mfd->lcd_ref_usec_time;

	/* lcd line adjusment referencing to the actual line counter at vsync */
	lcd_line = (mfd->total_lcd_lines - mfd->panel_info.lcd.v_back_porch +
		    lcd_line) % (mfd->total_lcd_lines + 1);

	if (lcd_line > mfd->total_lcd_lines) {
		MSM_FB_INFO
		    ("mdp_get_lcd_line_counter: mdp_lcd_rd_cnt >= mfd->total_lcd_lines error!\n");
	}

	return lcd_line;
}
gpl-2.0
bgat/linux-multi-v7
arch/arm64/kernel/suspend.c
21
4796
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
/*
 * This is called by __cpu_suspend_enter() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 *
 * ptr: CPU context virtual address
 * save_ptr: address of the location where the context physical address
 *           must be saved
 */
void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
				phys_addr_t *save_ptr)
{
	*save_ptr = virt_to_phys(ptr);

	cpu_do_suspend(ptr);
	/*
	 * Only flush the context that must be retrieved with the MMU
	 * off. VA primitives ensure the flush is applied to all
	 * cache levels so context is pushed to DRAM.
	 */
	__flush_dcache_area(ptr, sizeof(*ptr));
	__flush_dcache_area(save_ptr, sizeof(*save_ptr));
}

/*
 * This hook is provided so that cpu_suspend code can restore HW
 * breakpoints as early as possible in the resume path, before reenabling
 * debug exceptions. Code cannot be run from a CPU PM notifier since by the
 * time the notifier runs debug exceptions might have been enabled already,
 * with HW breakpoints registers content still in an unknown state.
 */
static void (*hw_breakpoint_restore)(void *);

/* Register the (single) HW-breakpoint restore hook; later calls are
 * ignored with a warning. */
void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
	/* Prevent multiple restore hook initializations */
	if (WARN_ON(hw_breakpoint_restore))
		return;
	hw_breakpoint_restore = hw_bp_restore;
}

/*
 * cpu_suspend
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 *
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct mm_struct *mm = current->active_mm;
	int ret;
	unsigned long flags;

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	local_dbg_save(flags);

	/*
	 * Function graph tracer state gets incosistent when the kernel
	 * calls functions that never return (aka suspend finishers) hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * mm context saved on the stack, it will be restored when
	 * the cpu comes out of reset through the identity mapped
	 * page tables, so that the thread address space is properly
	 * set-up on function return.
	 */
	ret = __cpu_suspend_enter(arg, fn);
	if (ret == 0) {
		/*
		 * We are resuming from reset with TTBR0_EL1 set to the
		 * idmap to enable the MMU; set the TTBR0 to the reserved
		 * page tables to prevent speculative TLB allocations, flush
		 * the local tlb and set the default tcr_el1.t0sz so that
		 * the TTBR0 address space set-up is properly restored.
		 * If the current active_mm != &init_mm we entered cpu_suspend
		 * with mappings in TTBR0 that must be restored, so we switch
		 * them back to complete the address space configuration
		 * restoration before returning.
		 */
		cpu_set_reserved_ttbr0();
		local_flush_tlb_all();
		cpu_set_default_tcr_t0sz();

		if (mm != &init_mm)
			cpu_switch_mm(mm->pgd, mm);

		/*
		 * Restore per-cpu offset before any kernel
		 * subsystem relying on it has a chance to run.
		 */
		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

		/*
		 * PSTATE was not saved over suspend/resume, re-enable any
		 * detected features that might not have been set correctly.
		 */
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
				CONFIG_ARM64_PAN));

		/*
		 * Restore HW breakpoint registers to sane values
		 * before debug exceptions are possibly reenabled
		 * through local_dbg_restore.
		 */
		if (hw_breakpoint_restore)
			hw_breakpoint_restore(NULL);
	}
	unpause_graph_tracing();

	/*
	 * Restore pstate flags. OS lock and mdscr have been already
	 * restored, so from this point onwards, debugging is fully
	 * renabled if it was enabled when core started shutdown.
	 */
	local_dbg_restore(flags);

	return ret;
}

struct sleep_save_sp sleep_save_sp;

/*
 * Allocate the per-cpu stash of saved-context physical addresses and
 * flush it to DRAM so the early resume code can read it with the MMU
 * and caches off.
 */
static int __init cpu_suspend_init(void)
{
	void *ctx_ptr;

	/* ctx_ptr is an array of physical addresses */
	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);

	if (WARN_ON(!ctx_ptr))
		return -ENOMEM;

	sleep_save_sp.save_ptr_stash = ctx_ptr;
	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
	__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));

	return 0;
}
early_initcall(cpu_suspend_init);
gpl-2.0