repo_name
string
path
string
copies
string
size
string
content
string
license
string
dcsommer/linux-kernel-cwnd-issues
arch/cris/arch-v32/drivers/pci/dma.c
12630
1156
/* * Dynamic DMA mapping support. * * On cris there is no hardware dynamic DMA address translation, * so consistent alloc/free are merely page allocation/freeing. * The rest of the dynamic DMA mapping interface is implemented * in asm/pci.h. * * Borrowed from i386. */ #include <linux/types.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/gfp.h> #include <asm/io.h> void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { void *ret; int order = get_order(size); /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) return ret; if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) gfp |= GFP_DMA; ret = (void *)__get_free_pages(gfp, order); if (ret != NULL) { memset(ret, 0, size); *dma_handle = virt_to_phys(ret); } return ret; } void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { int order = get_order(size); if (!dma_release_from_coherent(dev, order, vaddr)) free_pages((unsigned long)vaddr, order); }
gpl-2.0
antmicro/enclustra_zynq_linux
drivers/gpu/drm/exynos/exynos_drm_g2d.c
87
23099
/* * Copyright (C) 2012 Samsung Electronics Co.Ltd * Authors: Joonyoung Shim <jy0922.shim@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundationr */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/workqueue.h> #include "drmP.h" #include "exynos_drm.h" #include "exynos_drm_drv.h" #include "exynos_drm_gem.h" #define G2D_HW_MAJOR_VER 4 #define G2D_HW_MINOR_VER 1 /* vaild register range set from user: 0x0104 ~ 0x0880 */ #define G2D_VALID_START 0x0104 #define G2D_VALID_END 0x0880 /* general registers */ #define G2D_SOFT_RESET 0x0000 #define G2D_INTEN 0x0004 #define G2D_INTC_PEND 0x000C #define G2D_DMA_SFR_BASE_ADDR 0x0080 #define G2D_DMA_COMMAND 0x0084 #define G2D_DMA_STATUS 0x008C #define G2D_DMA_HOLD_CMD 0x0090 /* command registers */ #define G2D_BITBLT_START 0x0100 /* registers for base address */ #define G2D_SRC_BASE_ADDR 0x0304 #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 #define G2D_DST_BASE_ADDR 0x0404 #define G2D_DST_PLANE2_BASE_ADDR 0x0418 #define G2D_PAT_BASE_ADDR 0x0500 #define G2D_MSK_BASE_ADDR 0x0520 /* G2D_SOFT_RESET */ #define G2D_SFRCLEAR (1 << 1) #define G2D_R (1 << 0) /* G2D_INTEN */ #define G2D_INTEN_ACF (1 << 3) #define G2D_INTEN_UCF (1 << 2) #define G2D_INTEN_GCF (1 << 1) #define G2D_INTEN_SCF (1 << 0) /* G2D_INTC_PEND */ #define G2D_INTP_ACMD_FIN (1 << 3) #define G2D_INTP_UCMD_FIN (1 << 2) #define G2D_INTP_GCMD_FIN (1 << 1) #define G2D_INTP_SCMD_FIN (1 << 0) /* G2D_DMA_COMMAND */ #define G2D_DMA_HALT (1 << 2) #define G2D_DMA_CONTINUE (1 << 1) #define G2D_DMA_START (1 << 0) /* G2D_DMA_STATUS */ #define G2D_DMA_LIST_DONE_COUNT (0xFF << 17) #define G2D_DMA_BITBLT_DONE_COUNT (0xFFFF << 1) #define G2D_DMA_DONE (1 << 
0) #define G2D_DMA_LIST_DONE_COUNT_OFFSET 17 /* G2D_DMA_HOLD_CMD */ #define G2D_USET_HOLD (1 << 2) #define G2D_LIST_HOLD (1 << 1) #define G2D_BITBLT_HOLD (1 << 0) /* G2D_BITBLT_START */ #define G2D_START_CASESEL (1 << 2) #define G2D_START_NHOLT (1 << 1) #define G2D_START_BITBLT (1 << 0) #define G2D_CMDLIST_SIZE (PAGE_SIZE / 4) #define G2D_CMDLIST_NUM 64 #define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) #define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) /* cmdlist data structure */ struct g2d_cmdlist { u32 head; u32 data[G2D_CMDLIST_DATA_NUM]; u32 last; /* last data offset */ }; struct drm_exynos_pending_g2d_event { struct drm_pending_event base; struct drm_exynos_g2d_event event; }; struct g2d_gem_node { struct list_head list; unsigned int handle; }; struct g2d_cmdlist_node { struct list_head list; struct g2d_cmdlist *cmdlist; unsigned int gem_nr; dma_addr_t dma_addr; struct drm_exynos_pending_g2d_event *event; }; struct g2d_runqueue_node { struct list_head list; struct list_head run_cmdlist; struct list_head event_list; struct completion complete; int async; }; struct g2d_data { struct device *dev; struct clk *gate_clk; struct resource *regs_res; void __iomem *regs; int irq; struct workqueue_struct *g2d_workq; struct work_struct runqueue_work; struct exynos_drm_subdrv subdrv; bool suspended; /* cmdlist */ struct g2d_cmdlist_node *cmdlist_node; struct list_head free_cmdlist; struct mutex cmdlist_mutex; dma_addr_t cmdlist_pool; void *cmdlist_pool_virt; /* runqueue*/ struct g2d_runqueue_node *runqueue_node; struct list_head runqueue; struct mutex runqueue_mutex; struct kmem_cache *runqueue_slab; }; static int g2d_init_cmdlist(struct g2d_data *g2d) { struct device *dev = g2d->dev; struct g2d_cmdlist_node *node = g2d->cmdlist_node; int nr; int ret; g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE, &g2d->cmdlist_pool, GFP_KERNEL); if (!g2d->cmdlist_pool_virt) { dev_err(dev, "failed to allocate dma memory\n"); return 
-ENOMEM; } node = kcalloc(G2D_CMDLIST_NUM, G2D_CMDLIST_NUM * sizeof(*node), GFP_KERNEL); if (!node) { dev_err(dev, "failed to allocate memory\n"); ret = -ENOMEM; goto err; } for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) { node[nr].cmdlist = g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE; node[nr].dma_addr = g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE; list_add_tail(&node[nr].list, &g2d->free_cmdlist); } return 0; err: dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, g2d->cmdlist_pool); return ret; } static void g2d_fini_cmdlist(struct g2d_data *g2d) { struct device *dev = g2d->dev; kfree(g2d->cmdlist_node); dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, g2d->cmdlist_pool); } static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d) { struct device *dev = g2d->dev; struct g2d_cmdlist_node *node; mutex_lock(&g2d->cmdlist_mutex); if (list_empty(&g2d->free_cmdlist)) { dev_err(dev, "there is no free cmdlist\n"); mutex_unlock(&g2d->cmdlist_mutex); return NULL; } node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node, list); list_del_init(&node->list); mutex_unlock(&g2d->cmdlist_mutex); return node; } static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node) { mutex_lock(&g2d->cmdlist_mutex); list_move_tail(&node->list, &g2d->free_cmdlist); mutex_unlock(&g2d->cmdlist_mutex); } static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv, struct g2d_cmdlist_node *node) { struct g2d_cmdlist_node *lnode; if (list_empty(&g2d_priv->inuse_cmdlist)) goto add_to_list; /* this links to base address of new cmdlist */ lnode = list_entry(g2d_priv->inuse_cmdlist.prev, struct g2d_cmdlist_node, list); lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr; add_to_list: list_add_tail(&node->list, &g2d_priv->inuse_cmdlist); if (node->event) list_add_tail(&node->event->base.link, &g2d_priv->event_list); } static int g2d_get_cmdlist_gem(struct drm_device *drm_dev, struct drm_file *file, 
struct g2d_cmdlist_node *node) { struct drm_exynos_file_private *file_priv = file->driver_priv; struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; struct g2d_cmdlist *cmdlist = node->cmdlist; dma_addr_t *addr; int offset; int i; for (i = 0; i < node->gem_nr; i++) { struct g2d_gem_node *gem_node; gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL); if (!gem_node) { dev_err(g2d_priv->dev, "failed to allocate gem node\n"); return -ENOMEM; } offset = cmdlist->last - (i * 2 + 1); gem_node->handle = cmdlist->data[offset]; addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle, file); if (IS_ERR(addr)) { node->gem_nr = i; kfree(gem_node); return PTR_ERR(addr); } cmdlist->data[offset] = *addr; list_add_tail(&gem_node->list, &g2d_priv->gem_list); g2d_priv->gem_nr++; } return 0; } static void g2d_put_cmdlist_gem(struct drm_device *drm_dev, struct drm_file *file, unsigned int nr) { struct drm_exynos_file_private *file_priv = file->driver_priv; struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; struct g2d_gem_node *node, *n; list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) { if (!nr) break; exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file); list_del_init(&node->list); kfree(node); nr--; } } static void g2d_dma_start(struct g2d_data *g2d, struct g2d_runqueue_node *runqueue_node) { struct g2d_cmdlist_node *node = list_first_entry(&runqueue_node->run_cmdlist, struct g2d_cmdlist_node, list); pm_runtime_get_sync(g2d->dev); clk_enable(g2d->gate_clk); /* interrupt enable */ writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF, g2d->regs + G2D_INTEN); writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); } static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d) { struct g2d_runqueue_node *runqueue_node; if (list_empty(&g2d->runqueue)) return NULL; runqueue_node = list_first_entry(&g2d->runqueue, struct g2d_runqueue_node, list); 
list_del_init(&runqueue_node->list); return runqueue_node; } static void g2d_free_runqueue_node(struct g2d_data *g2d, struct g2d_runqueue_node *runqueue_node) { if (!runqueue_node) return; mutex_lock(&g2d->cmdlist_mutex); list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist); mutex_unlock(&g2d->cmdlist_mutex); kmem_cache_free(g2d->runqueue_slab, runqueue_node); } static void g2d_exec_runqueue(struct g2d_data *g2d) { g2d->runqueue_node = g2d_get_runqueue_node(g2d); if (g2d->runqueue_node) g2d_dma_start(g2d, g2d->runqueue_node); } static void g2d_runqueue_worker(struct work_struct *work) { struct g2d_data *g2d = container_of(work, struct g2d_data, runqueue_work); mutex_lock(&g2d->runqueue_mutex); clk_disable(g2d->gate_clk); pm_runtime_put_sync(g2d->dev); complete(&g2d->runqueue_node->complete); if (g2d->runqueue_node->async) g2d_free_runqueue_node(g2d, g2d->runqueue_node); if (g2d->suspended) g2d->runqueue_node = NULL; else g2d_exec_runqueue(g2d); mutex_unlock(&g2d->runqueue_mutex); } static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) { struct drm_device *drm_dev = g2d->subdrv.drm_dev; struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; struct drm_exynos_pending_g2d_event *e; struct timeval now; unsigned long flags; if (list_empty(&runqueue_node->event_list)) return; e = list_first_entry(&runqueue_node->event_list, struct drm_exynos_pending_g2d_event, base.link); do_gettimeofday(&now); e->event.tv_sec = now.tv_sec; e->event.tv_usec = now.tv_usec; e->event.cmdlist_no = cmdlist_no; spin_lock_irqsave(&drm_dev->event_lock, flags); list_move_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); spin_unlock_irqrestore(&drm_dev->event_lock, flags); } static irqreturn_t g2d_irq_handler(int irq, void *dev_id) { struct g2d_data *g2d = dev_id; u32 pending; pending = readl_relaxed(g2d->regs + G2D_INTC_PEND); if (pending) writel_relaxed(pending, g2d->regs + G2D_INTC_PEND); if (pending 
& G2D_INTP_GCMD_FIN) { u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS); cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >> G2D_DMA_LIST_DONE_COUNT_OFFSET; g2d_finish_event(g2d, cmdlist_no); writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD); if (!(pending & G2D_INTP_ACMD_FIN)) { writel_relaxed(G2D_DMA_CONTINUE, g2d->regs + G2D_DMA_COMMAND); } } if (pending & G2D_INTP_ACMD_FIN) queue_work(g2d->g2d_workq, &g2d->runqueue_work); return IRQ_HANDLED; } static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist, int nr, bool for_addr) { int reg_offset; int index; int i; for (i = 0; i < nr; i++) { index = cmdlist->last - 2 * (i + 1); reg_offset = cmdlist->data[index] & ~0xfffff000; if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) goto err; if (reg_offset % 4) goto err; switch (reg_offset) { case G2D_SRC_BASE_ADDR: case G2D_SRC_PLANE2_BASE_ADDR: case G2D_DST_BASE_ADDR: case G2D_DST_PLANE2_BASE_ADDR: case G2D_PAT_BASE_ADDR: case G2D_MSK_BASE_ADDR: if (!for_addr) goto err; break; default: if (for_addr) goto err; break; } } return 0; err: dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]); return -EINVAL; } /* ioctl functions */ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) { struct drm_exynos_g2d_get_ver *ver = data; ver->major = G2D_HW_MAJOR_VER; ver->minor = G2D_HW_MINOR_VER; return 0; } EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl); int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; struct device *dev = g2d_priv->dev; struct g2d_data *g2d; struct drm_exynos_g2d_set_cmdlist *req = data; struct drm_exynos_g2d_cmd *cmd; struct drm_exynos_pending_g2d_event *e; struct g2d_cmdlist_node *node; struct g2d_cmdlist *cmdlist; unsigned long flags; int size; int ret; if (!dev) return -ENODEV; g2d = 
dev_get_drvdata(dev); if (!g2d) return -EFAULT; node = g2d_get_cmdlist(g2d); if (!node) return -ENOMEM; node->event = NULL; if (req->event_type != G2D_EVENT_NOT) { spin_lock_irqsave(&drm_dev->event_lock, flags); if (file->event_space < sizeof(e->event)) { spin_unlock_irqrestore(&drm_dev->event_lock, flags); ret = -ENOMEM; goto err; } file->event_space -= sizeof(e->event); spin_unlock_irqrestore(&drm_dev->event_lock, flags); e = kzalloc(sizeof(*node->event), GFP_KERNEL); if (!e) { dev_err(dev, "failed to allocate event\n"); spin_lock_irqsave(&drm_dev->event_lock, flags); file->event_space += sizeof(e->event); spin_unlock_irqrestore(&drm_dev->event_lock, flags); ret = -ENOMEM; goto err; } e->event.base.type = DRM_EXYNOS_G2D_EVENT; e->event.base.length = sizeof(e->event); e->event.user_data = req->user_data; e->base.event = &e->event.base; e->base.file_priv = file; e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; node->event = e; } cmdlist = node->cmdlist; cmdlist->last = 0; /* * If don't clear SFR registers, the cmdlist is affected by register * values of previous cmdlist. G2D hw executes SFR clear command and * a next command at the same time then the next command is ignored and * is executed rightly from next next command, so needs a dummy command * to next command of SFR clear command. 
*/ cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET; cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR; cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR; cmdlist->data[cmdlist->last++] = 0; if (node->event) { cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD; cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD; } /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2; if (size > G2D_CMDLIST_DATA_NUM) { dev_err(dev, "cmdlist size is too big\n"); ret = -EINVAL; goto err_free_event; } cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd; if (copy_from_user(cmdlist->data + cmdlist->last, (void __user *)cmd, sizeof(*cmd) * req->cmd_nr)) { ret = -EFAULT; goto err_free_event; } cmdlist->last += req->cmd_nr * 2; ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false); if (ret < 0) goto err_free_event; node->gem_nr = req->cmd_gem_nr; if (req->cmd_gem_nr) { struct drm_exynos_g2d_cmd *cmd_gem; cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem; if (copy_from_user(cmdlist->data + cmdlist->last, (void __user *)cmd_gem, sizeof(*cmd_gem) * req->cmd_gem_nr)) { ret = -EFAULT; goto err_free_event; } cmdlist->last += req->cmd_gem_nr * 2; ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true); if (ret < 0) goto err_free_event; ret = g2d_get_cmdlist_gem(drm_dev, file, node); if (ret < 0) goto err_unmap; } cmdlist->data[cmdlist->last++] = G2D_BITBLT_START; cmdlist->data[cmdlist->last++] = G2D_START_BITBLT; /* head */ cmdlist->head = cmdlist->last / 2; /* tail */ cmdlist->data[cmdlist->last] = 0; g2d_add_cmdlist_to_inuse(g2d_priv, node); return 0; err_unmap: g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr); err_free_event: if (node->event) { spin_lock_irqsave(&drm_dev->event_lock, flags); file->event_space += sizeof(e->event); spin_unlock_irqrestore(&drm_dev->event_lock, flags); kfree(node->event); } err: g2d_put_cmdlist(g2d, node); return ret; } EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl); int 
exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; struct device *dev = g2d_priv->dev; struct g2d_data *g2d; struct drm_exynos_g2d_exec *req = data; struct g2d_runqueue_node *runqueue_node; struct list_head *run_cmdlist; struct list_head *event_list; if (!dev) return -ENODEV; g2d = dev_get_drvdata(dev); if (!g2d) return -EFAULT; runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL); if (!runqueue_node) { dev_err(dev, "failed to allocate memory\n"); return -ENOMEM; } run_cmdlist = &runqueue_node->run_cmdlist; event_list = &runqueue_node->event_list; INIT_LIST_HEAD(run_cmdlist); INIT_LIST_HEAD(event_list); init_completion(&runqueue_node->complete); runqueue_node->async = req->async; list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist); list_splice_init(&g2d_priv->event_list, event_list); if (list_empty(run_cmdlist)) { dev_err(dev, "there is no inuse cmdlist\n"); kmem_cache_free(g2d->runqueue_slab, runqueue_node); return -EPERM; } mutex_lock(&g2d->runqueue_mutex); list_add_tail(&runqueue_node->list, &g2d->runqueue); if (!g2d->runqueue_node) g2d_exec_runqueue(g2d); mutex_unlock(&g2d->runqueue_mutex); if (runqueue_node->async) goto out; wait_for_completion(&runqueue_node->complete); g2d_free_runqueue_node(g2d, runqueue_node); out: return 0; } EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); static int g2d_open(struct drm_device *drm_dev, struct device *dev, struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; struct exynos_drm_g2d_private *g2d_priv; g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL); if (!g2d_priv) { dev_err(dev, "failed to allocate g2d private data\n"); return -ENOMEM; } g2d_priv->dev = dev; file_priv->g2d_priv = g2d_priv; INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist); INIT_LIST_HEAD(&g2d_priv->event_list); INIT_LIST_HEAD(&g2d_priv->gem_list); return 0; } 
static void g2d_close(struct drm_device *drm_dev, struct device *dev, struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; struct g2d_data *g2d; struct g2d_cmdlist_node *node, *n; if (!dev) return; g2d = dev_get_drvdata(dev); if (!g2d) return; mutex_lock(&g2d->cmdlist_mutex); list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) list_move_tail(&node->list, &g2d->free_cmdlist); mutex_unlock(&g2d->cmdlist_mutex); g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr); kfree(file_priv->g2d_priv); } static int __devinit g2d_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; struct g2d_data *g2d; struct exynos_drm_subdrv *subdrv; int ret; g2d = kzalloc(sizeof(*g2d), GFP_KERNEL); if (!g2d) { dev_err(dev, "failed to allocate driver data\n"); return -ENOMEM; } g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab", sizeof(struct g2d_runqueue_node), 0, 0, NULL); if (!g2d->runqueue_slab) { ret = -ENOMEM; goto err_free_mem; } g2d->dev = dev; g2d->g2d_workq = create_singlethread_workqueue("g2d"); if (!g2d->g2d_workq) { dev_err(dev, "failed to create workqueue\n"); ret = -EINVAL; goto err_destroy_slab; } INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker); INIT_LIST_HEAD(&g2d->free_cmdlist); INIT_LIST_HEAD(&g2d->runqueue); mutex_init(&g2d->cmdlist_mutex); mutex_init(&g2d->runqueue_mutex); ret = g2d_init_cmdlist(g2d); if (ret < 0) goto err_destroy_workqueue; g2d->gate_clk = clk_get(dev, "fimg2d"); if (IS_ERR(g2d->gate_clk)) { dev_err(dev, "failed to get gate clock\n"); ret = PTR_ERR(g2d->gate_clk); goto err_fini_cmdlist; } pm_runtime_enable(dev); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "failed to get I/O memory\n"); ret = -ENOENT; goto err_put_clk; } g2d->regs_res = request_mem_region(res->start, resource_size(res), dev_name(dev)); if (!g2d->regs_res) { dev_err(dev, "failed to request I/O 
memory\n"); ret = -ENOENT; goto err_put_clk; } g2d->regs = ioremap(res->start, resource_size(res)); if (!g2d->regs) { dev_err(dev, "failed to remap I/O memory\n"); ret = -ENXIO; goto err_release_res; } g2d->irq = platform_get_irq(pdev, 0); if (g2d->irq < 0) { dev_err(dev, "failed to get irq\n"); ret = g2d->irq; goto err_unmap_base; } ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d); if (ret < 0) { dev_err(dev, "irq request failed\n"); goto err_unmap_base; } platform_set_drvdata(pdev, g2d); subdrv = &g2d->subdrv; subdrv->dev = dev; subdrv->open = g2d_open; subdrv->close = g2d_close; ret = exynos_drm_subdrv_register(subdrv); if (ret < 0) { dev_err(dev, "failed to register drm g2d device\n"); goto err_free_irq; } dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n", G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER); return 0; err_free_irq: free_irq(g2d->irq, g2d); err_unmap_base: iounmap(g2d->regs); err_release_res: release_resource(g2d->regs_res); kfree(g2d->regs_res); err_put_clk: pm_runtime_disable(dev); clk_put(g2d->gate_clk); err_fini_cmdlist: g2d_fini_cmdlist(g2d); err_destroy_workqueue: destroy_workqueue(g2d->g2d_workq); err_destroy_slab: kmem_cache_destroy(g2d->runqueue_slab); err_free_mem: kfree(g2d); return ret; } static int __devexit g2d_remove(struct platform_device *pdev) { struct g2d_data *g2d = platform_get_drvdata(pdev); cancel_work_sync(&g2d->runqueue_work); exynos_drm_subdrv_unregister(&g2d->subdrv); free_irq(g2d->irq, g2d); while (g2d->runqueue_node) { g2d_free_runqueue_node(g2d, g2d->runqueue_node); g2d->runqueue_node = g2d_get_runqueue_node(g2d); } iounmap(g2d->regs); release_resource(g2d->regs_res); kfree(g2d->regs_res); pm_runtime_disable(&pdev->dev); clk_put(g2d->gate_clk); g2d_fini_cmdlist(g2d); destroy_workqueue(g2d->g2d_workq); kmem_cache_destroy(g2d->runqueue_slab); kfree(g2d); return 0; } #ifdef CONFIG_PM_SLEEP static int g2d_suspend(struct device *dev) { struct g2d_data *g2d = dev_get_drvdata(dev); 
mutex_lock(&g2d->runqueue_mutex); g2d->suspended = true; mutex_unlock(&g2d->runqueue_mutex); while (g2d->runqueue_node) /* FIXME: good range? */ usleep_range(500, 1000); flush_work_sync(&g2d->runqueue_work); return 0; } static int g2d_resume(struct device *dev) { struct g2d_data *g2d = dev_get_drvdata(dev); g2d->suspended = false; g2d_exec_runqueue(g2d); return 0; } #endif SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume); struct platform_driver g2d_driver = { .probe = g2d_probe, .remove = __devexit_p(g2d_remove), .driver = { .name = "s5p-g2d", .owner = THIS_MODULE, .pm = &g2d_pm_ops, }, };
gpl-2.0
clemsyn/asusOC
drivers/net/sunlance.c
87
41650
/* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $ * lance.c: Linux/Sparc/Lance driver * * Written 1995, 1996 by Miguel de Icaza * Sources: * The Linux depca driver * The Linux lance driver. * The Linux skeleton driver. * The NetBSD Sparc/Lance driver. * Theo de Raadt (deraadt@openbsd.org) * NCR92C990 Lan Controller manual * * 1.4: * Added support to run with a ledma on the Sun4m * * 1.5: * Added multiple card detection. * * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost * (ecd@skynet.be) * * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost * (ecd@skynet.be) * * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller * (davem@caip.rutgers.edu) * * 5/29/96: override option 'tpe-link-test?', if it is 'false', as * this disables auto carrier detection on sun4m. Eddie C. Dost * (ecd@skynet.be) * * 1.7: * 6/26/96: Bug fix for multiple ledmas, miguel. * * 1.8: * Stole multicast code from depca.c, fixed lance_tx. * * 1.9: * 8/21/96: Fixed the multicast code (Pedro Roque) * * 8/28/96: Send fake packet in lance_open() if auto_select is true, * so we can detect the carrier loss condition in time. * Eddie C. Dost (ecd@skynet.be) * * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an * MNA trap during chksum_partial_copy(). (ecd@skynet.be) * * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be) * * 12/22/96: Don't loop forever in lance_rx() on incomplete packets. * This was the sun4c killer. Shit, stupid bug. * (ecd@skynet.be) * * 1.10: * 1/26/97: Modularize driver. (ecd@skynet.be) * * 1.11: * 12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz) * * 1.12: * 11/3/99: Fixed SMP race in lance_start_xmit found by davem. * Anton Blanchard (anton@progsoc.uts.edu.au) * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces. * David S. 
Miller (davem@redhat.com) * 2.01: * 11/08/01: Use library crc32 functions (Matt_Domsch@dell.com) * */ #undef DEBUG_DRIVER static char lancestr[] = "LANCE"; #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/errno.h> #include <linux/socket.h> /* Used for the temporal inet entries and routing */ #include <linux/route.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/gfp.h> #include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/pgtable.h> #include <asm/byteorder.h> /* Used by the checksum routines */ #include <asm/idprom.h> #include <asm/prom.h> #include <asm/auxio.h> /* For tpe-link-test? setting */ #include <asm/irq.h> #define DRV_NAME "sunlance" #define DRV_VERSION "2.02" #define DRV_RELDATE "8/24/03" #define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)" static char version[] = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun Lance ethernet driver"); MODULE_LICENSE("GPL"); /* Define: 2^4 Tx buffers and 2^4 Rx buffers */ #ifndef LANCE_LOG_TX_BUFFERS #define LANCE_LOG_TX_BUFFERS 4 #define LANCE_LOG_RX_BUFFERS 4 #endif #define LE_CSR0 0 #define LE_CSR1 1 #define LE_CSR2 2 #define LE_CSR3 3 #define LE_MO_PROM 0x8000 /* Enable promiscuous mode */ #define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */ #define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. 
*/ #define LE_C0_CERR 0x2000 /* SQE: Signal quality error */ #define LE_C0_MISS 0x1000 /* MISS: Missed a packet */ #define LE_C0_MERR 0x0800 /* ME: Memory error */ #define LE_C0_RINT 0x0400 /* Received interrupt */ #define LE_C0_TINT 0x0200 /* Transmitter Interrupt */ #define LE_C0_IDON 0x0100 /* IFIN: Init finished. */ #define LE_C0_INTR 0x0080 /* Interrupt or error */ #define LE_C0_INEA 0x0040 /* Interrupt enable */ #define LE_C0_RXON 0x0020 /* Receiver on */ #define LE_C0_TXON 0x0010 /* Transmitter on */ #define LE_C0_TDMD 0x0008 /* Transmitter demand */ #define LE_C0_STOP 0x0004 /* Stop the card */ #define LE_C0_STRT 0x0002 /* Start the card */ #define LE_C0_INIT 0x0001 /* Init the card */ #define LE_C3_BSWP 0x4 /* SWAP */ #define LE_C3_ACON 0x2 /* ALE Control */ #define LE_C3_BCON 0x1 /* Byte control */ /* Receive message descriptor 1 */ #define LE_R1_OWN 0x80 /* Who owns the entry */ #define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */ #define LE_R1_FRA 0x20 /* FRA: Frame error */ #define LE_R1_OFL 0x10 /* OFL: Frame overflow */ #define LE_R1_CRC 0x08 /* CRC error */ #define LE_R1_BUF 0x04 /* BUF: Buffer error */ #define LE_R1_SOP 0x02 /* Start of packet */ #define LE_R1_EOP 0x01 /* End of packet */ #define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */ #define LE_T1_OWN 0x80 /* Lance owns the packet */ #define LE_T1_ERR 0x40 /* Error summary */ #define LE_T1_EMORE 0x10 /* Error: more than one retry needed */ #define LE_T1_EONE 0x08 /* Error: one retry needed */ #define LE_T1_EDEF 0x04 /* Error: deferred */ #define LE_T1_SOP 0x02 /* Start of packet */ #define LE_T1_EOP 0x01 /* End of packet */ #define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */ #define LE_T3_BUF 0x8000 /* Buffer error */ #define LE_T3_UFL 0x4000 /* Error underflow */ #define LE_T3_LCOL 0x1000 /* Error late collision */ #define LE_T3_CLOS 0x0800 /* Error carrier loss */ #define LE_T3_RTY 0x0400 /* Error retry */ #define LE_T3_TDR 0x03ff /* Time Domain Reflectometry 
counter */ #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) #define TX_RING_MOD_MASK (TX_RING_SIZE - 1) #define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29) #define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK) #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) #define RX_RING_MOD_MASK (RX_RING_SIZE - 1) #define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29) #define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK) #define PKT_BUF_SZ 1544 #define RX_BUFF_SIZE PKT_BUF_SZ #define TX_BUFF_SIZE PKT_BUF_SZ struct lance_rx_desc { u16 rmd0; /* low address of packet */ u8 rmd1_bits; /* descriptor bits */ u8 rmd1_hadr; /* high address of packet */ s16 length; /* This length is 2s complement (negative)! * Buffer length */ u16 mblength; /* This is the actual number of bytes received */ }; struct lance_tx_desc { u16 tmd0; /* low address of packet */ u8 tmd1_bits; /* descriptor bits */ u8 tmd1_hadr; /* high address of packet */ s16 length; /* Length is 2s complement (negative)! */ u16 misc; }; /* The LANCE initialization block, described in databook. */ /* On the Sparc, this block should be on a DMA region */ struct lance_init_block { u16 mode; /* Pre-set mode (reg. 15) */ u8 phys_addr[6]; /* Physical ethernet address */ u32 filter[2]; /* Multicast filter. */ /* Receive and transmit ring base, along with extra bits. */ u16 rx_ptr; /* receive descriptor addr */ u16 rx_len; /* receive len and high addr */ u16 tx_ptr; /* transmit descriptor addr */ u16 tx_len; /* transmit len and high addr */ /* The Tx and Rx ring entries must aligned on 8-byte boundaries. */ struct lance_rx_desc brx_ring[RX_RING_SIZE]; struct lance_tx_desc btx_ring[TX_RING_SIZE]; u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE]; u8 pad[2]; /* align rx_buf for copy_and_sum(). 
*/ u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE]; }; #define libdesc_offset(rt, elem) \ ((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem]))))) #define libbuff_offset(rt, elem) \ ((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0]))))) struct lance_private { void __iomem *lregs; /* Lance RAP/RDP regs. */ void __iomem *dregs; /* DMA controller regs. */ struct lance_init_block __iomem *init_block_iomem; struct lance_init_block *init_block_mem; spinlock_t lock; int rx_new, tx_new; int rx_old, tx_old; struct platform_device *ledma; /* If set this points to ledma */ char tpe; /* cable-selection is TPE */ char auto_select; /* cable-selection by carrier */ char burst_sizes; /* ledma SBus burst sizes */ char pio_buffer; /* init block in PIO space? */ unsigned short busmaster_regval; void (*init_ring)(struct net_device *); void (*rx)(struct net_device *); void (*tx)(struct net_device *); char *name; dma_addr_t init_block_dvma; struct net_device *dev; /* Backpointer */ struct platform_device *op; struct platform_device *lebuffer; struct timer_list multicast_timer; }; #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\ lp->tx_old - lp->tx_new-1) /* Lance registers. */ #define RDP 0x00UL /* register data port */ #define RAP 0x02UL /* register address port */ #define LANCE_REG_SIZE 0x04UL #define STOP_LANCE(__lp) \ do { void __iomem *__base = (__lp)->lregs; \ sbus_writew(LE_CSR0, __base + RAP); \ sbus_writew(LE_C0_STOP, __base + RDP); \ } while (0) int sparc_lance_debug = 2; /* The Lance uses 24 bit addresses */ /* On the Sun4c the DVMA will provide the remaining bytes for us */ /* On the Sun4m we have to instruct the ledma to provide them */ /* Even worse, on scsi/ether SBUS cards, the init block and the * transmit/receive buffers are addresses as offsets from absolute * zero on the lebuffer PIO area. 
-DaveM */ #define LANCE_ADDR(x) ((long)(x) & ~0xff000000) /* Load the CSR registers */ static void load_csrs(struct lance_private *lp) { u32 leptr; if (lp->pio_buffer) leptr = 0; else leptr = LANCE_ADDR(lp->init_block_dvma); sbus_writew(LE_CSR1, lp->lregs + RAP); sbus_writew(leptr & 0xffff, lp->lregs + RDP); sbus_writew(LE_CSR2, lp->lregs + RAP); sbus_writew(leptr >> 16, lp->lregs + RDP); sbus_writew(LE_CSR3, lp->lregs + RAP); sbus_writew(lp->busmaster_regval, lp->lregs + RDP); /* Point back to csr0 */ sbus_writew(LE_CSR0, lp->lregs + RAP); } /* Setup the Lance Rx and Tx rings */ static void lance_init_ring_dvma(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block *ib = lp->init_block_mem; dma_addr_t aib = lp->init_block_dvma; __u32 leptr; int i; /* Lock out other processes while setting up hardware */ netif_stop_queue(dev); lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; /* Copy the ethernet address to the lance init block * Note that on the sparc you need to swap the ethernet address. 
*/ ib->phys_addr [0] = dev->dev_addr [1]; ib->phys_addr [1] = dev->dev_addr [0]; ib->phys_addr [2] = dev->dev_addr [3]; ib->phys_addr [3] = dev->dev_addr [2]; ib->phys_addr [4] = dev->dev_addr [5]; ib->phys_addr [5] = dev->dev_addr [4]; /* Setup the Tx ring entries */ for (i = 0; i < TX_RING_SIZE; i++) { leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i)); ib->btx_ring [i].tmd0 = leptr; ib->btx_ring [i].tmd1_hadr = leptr >> 16; ib->btx_ring [i].tmd1_bits = 0; ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */ ib->btx_ring [i].misc = 0; } /* Setup the Rx ring entries */ for (i = 0; i < RX_RING_SIZE; i++) { leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i)); ib->brx_ring [i].rmd0 = leptr; ib->brx_ring [i].rmd1_hadr = leptr >> 16; ib->brx_ring [i].rmd1_bits = LE_R1_OWN; ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000; ib->brx_ring [i].mblength = 0; } /* Setup the initialization block */ /* Setup rx descriptor pointer */ leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0)); ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16); ib->rx_ptr = leptr; /* Setup tx descriptor pointer */ leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0)); ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16); ib->tx_ptr = leptr; } static void lance_init_ring_pio(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block __iomem *ib = lp->init_block_iomem; u32 leptr; int i; /* Lock out other processes while setting up hardware */ netif_stop_queue(dev); lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; /* Copy the ethernet address to the lance init block * Note that on the sparc you need to swap the ethernet address. 
*/ sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]); sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]); sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]); sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]); sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]); sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]); /* Setup the Tx ring entries */ for (i = 0; i < TX_RING_SIZE; i++) { leptr = libbuff_offset(tx_buf, i); sbus_writew(leptr, &ib->btx_ring [i].tmd0); sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr); sbus_writeb(0, &ib->btx_ring [i].tmd1_bits); /* The ones required by tmd2 */ sbus_writew(0xf000, &ib->btx_ring [i].length); sbus_writew(0, &ib->btx_ring [i].misc); } /* Setup the Rx ring entries */ for (i = 0; i < RX_RING_SIZE; i++) { leptr = libbuff_offset(rx_buf, i); sbus_writew(leptr, &ib->brx_ring [i].rmd0); sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr); sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits); sbus_writew(-RX_BUFF_SIZE|0xf000, &ib->brx_ring [i].length); sbus_writew(0, &ib->brx_ring [i].mblength); } /* Setup the initialization block */ /* Setup rx descriptor pointer */ leptr = libdesc_offset(brx_ring, 0); sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16), &ib->rx_len); sbus_writew(leptr, &ib->rx_ptr); /* Setup tx descriptor pointer */ leptr = libdesc_offset(btx_ring, 0); sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16), &ib->tx_len); sbus_writew(leptr, &ib->tx_ptr); } static void init_restart_ledma(struct lance_private *lp) { u32 csr = sbus_readl(lp->dregs + DMA_CSR); if (!(csr & DMA_HNDL_ERROR)) { /* E-Cache draining */ while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN) barrier(); } csr = sbus_readl(lp->dregs + DMA_CSR); csr &= ~DMA_E_BURSTS; if (lp->burst_sizes & DMA_BURST32) csr |= DMA_E_BURST32; else csr |= DMA_E_BURST16; csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV); if (lp->tpe) csr |= DMA_EN_ENETAUI; else csr &= ~DMA_EN_ENETAUI; udelay(20); sbus_writel(csr, lp->dregs + DMA_CSR); udelay(200); } static int 
init_restart_lance(struct lance_private *lp) { u16 regval = 0; int i; if (lp->dregs) init_restart_ledma(lp); sbus_writew(LE_CSR0, lp->lregs + RAP); sbus_writew(LE_C0_INIT, lp->lregs + RDP); /* Wait for the lance to complete initialization */ for (i = 0; i < 100; i++) { regval = sbus_readw(lp->lregs + RDP); if (regval & (LE_C0_ERR | LE_C0_IDON)) break; barrier(); } if (i == 100 || (regval & LE_C0_ERR)) { printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n", i, regval); if (lp->dregs) printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR)); return -1; } /* Clear IDON by writing a "1", enable interrupts and start lance */ sbus_writew(LE_C0_IDON, lp->lregs + RDP); sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP); if (lp->dregs) { u32 csr = sbus_readl(lp->dregs + DMA_CSR); csr |= DMA_INT_ENAB; sbus_writel(csr, lp->dregs + DMA_CSR); } return 0; } static void lance_rx_dvma(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block *ib = lp->init_block_mem; struct lance_rx_desc *rd; u8 bits; int len, entry = lp->rx_new; struct sk_buff *skb; for (rd = &ib->brx_ring [entry]; !((bits = rd->rmd1_bits) & LE_R1_OWN); rd = &ib->brx_ring [entry]) { /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { dev->stats.rx_over_errors++; dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (rd->mblength & 0xfff) - 4; skb = dev_alloc_skb(len + 2); if (skb == NULL) { printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", dev->name); dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = RX_NEXT(entry); return; } dev->stats.rx_bytes += len; skb_reserve(skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ skb_copy_to_linear_data(skb, (unsigned char *)&(ib->rx_buf [entry][0]), len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; } /* Return the packet to the pool */ rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; entry = RX_NEXT(entry); } lp->rx_new = entry; } static void lance_tx_dvma(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block *ib = lp->init_block_mem; int i, j; spin_lock(&lp->lock); j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { struct lance_tx_desc *td = &ib->btx_ring [i]; u8 bits = td->tmd1_bits; /* If we hit a packet not owned by us, stop */ if (bits & LE_T1_OWN) break; if (bits & LE_T1_ERR) { u16 status = td->misc; dev->stats.tx_errors++; if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", dev->name, lp->tpe?"TPE":"AUI"); STOP_LANCE(lp); lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); goto out; } } /* Buffer errors and underflows turn off the * 
transmitter, restart the adapter. */ if (status & (LE_T3_BUF|LE_T3_UFL)) { dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); STOP_LANCE(lp); lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); goto out; } } else if ((bits & LE_T1_POK) == LE_T1_POK) { /* * So we don't count the packet more than once. */ td->tmd1_bits = bits & ~(LE_T1_POK); /* One collision before packet was sent. */ if (bits & LE_T1_EONE) dev->stats.collisions++; /* More than one collision, be optimistic. */ if (bits & LE_T1_EMORE) dev->stats.collisions += 2; dev->stats.tx_packets++; } j = TX_NEXT(j); } lp->tx_old = j; out: if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0) netif_wake_queue(dev); spin_unlock(&lp->lock); } static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len) { u16 *p16 = (u16 *) skb->data; u32 *p32; u8 *p8; void __iomem *pbuf = piobuf; /* We know here that both src and dest are on a 16bit boundary. */ *p16++ = sbus_readw(pbuf); p32 = (u32 *) p16; pbuf += 2; len -= 2; while (len >= 4) { *p32++ = sbus_readl(pbuf); pbuf += 4; len -= 4; } p8 = (u8 *) p32; if (len >= 2) { p16 = (u16 *) p32; *p16++ = sbus_readw(pbuf); pbuf += 2; len -= 2; p8 = (u8 *) p16; } if (len >= 1) *p8 = sbus_readb(pbuf); } static void lance_rx_pio(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block __iomem *ib = lp->init_block_iomem; struct lance_rx_desc __iomem *rd; unsigned char bits; int len, entry; struct sk_buff *skb; entry = lp->rx_new; for (rd = &ib->brx_ring [entry]; !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN); rd = &ib->brx_ring [entry]) { /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { dev->stats.rx_over_errors++; dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (sbus_readw(&rd->mblength) & 0xfff) - 4; skb = dev_alloc_skb(len + 2); if (skb == NULL) { printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", dev->name); dev->stats.rx_dropped++; sbus_writew(0, &rd->mblength); sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); lp->rx_new = RX_NEXT(entry); return; } dev->stats.rx_bytes += len; skb_reserve (skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; } /* Return the packet to the pool */ sbus_writew(0, &rd->mblength); sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); entry = RX_NEXT(entry); } lp->rx_new = entry; } static void lance_tx_pio(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block __iomem *ib = lp->init_block_iomem; int i, j; spin_lock(&lp->lock); j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { struct lance_tx_desc __iomem *td = &ib->btx_ring [i]; u8 bits = sbus_readb(&td->tmd1_bits); /* If we hit a packet not owned by us, stop */ if (bits & LE_T1_OWN) break; if (bits & LE_T1_ERR) { u16 status = sbus_readw(&td->misc); dev->stats.tx_errors++; if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", dev->name, lp->tpe?"TPE":"AUI"); STOP_LANCE(lp); lp->init_ring(dev); 
load_csrs(lp); init_restart_lance(lp); goto out; } } /* Buffer errors and underflows turn off the * transmitter, restart the adapter. */ if (status & (LE_T3_BUF|LE_T3_UFL)) { dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); STOP_LANCE(lp); lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); goto out; } } else if ((bits & LE_T1_POK) == LE_T1_POK) { /* * So we don't count the packet more than once. */ sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits); /* One collision before packet was sent. */ if (bits & LE_T1_EONE) dev->stats.collisions++; /* More than one collision, be optimistic. */ if (bits & LE_T1_EMORE) dev->stats.collisions += 2; dev->stats.tx_packets++; } j = TX_NEXT(j); } lp->tx_old = j; if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0) netif_wake_queue(dev); out: spin_unlock(&lp->lock); } static irqreturn_t lance_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct lance_private *lp = netdev_priv(dev); int csr0; sbus_writew(LE_CSR0, lp->lregs + RAP); csr0 = sbus_readw(lp->lregs + RDP); /* Acknowledge all the interrupt sources ASAP */ sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT), lp->lregs + RDP); if ((csr0 & LE_C0_ERR) != 0) { /* Clear the error condition */ sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_CERR | LE_C0_MERR), lp->lregs + RDP); } if (csr0 & LE_C0_RINT) lp->rx(dev); if (csr0 & LE_C0_TINT) lp->tx(dev); if (csr0 & LE_C0_BABL) dev->stats.tx_errors++; if (csr0 & LE_C0_MISS) dev->stats.rx_errors++; if (csr0 & LE_C0_MERR) { if (lp->dregs) { u32 addr = sbus_readl(lp->dregs + DMA_ADDR); printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n", dev->name, csr0, addr & 0xffffff); } else { printk(KERN_ERR "%s: Memory error, status %04x\n", dev->name, csr0); } sbus_writew(LE_C0_STOP, lp->lregs + RDP); if (lp->dregs) { u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR); dma_csr |= DMA_FIFO_INV; sbus_writel(dma_csr, lp->dregs + DMA_CSR); } 
lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); netif_wake_queue(dev); } sbus_writew(LE_C0_INEA, lp->lregs + RDP); return IRQ_HANDLED; } /* Build a fake network packet and send it to ourselves. */ static void build_fake_packet(struct lance_private *lp) { struct net_device *dev = lp->dev; int i, entry; entry = lp->tx_new & TX_RING_MOD_MASK; if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]); struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet; for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++) sbus_writew(0, &packet[i]); for (i = 0; i < 6; i++) { sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]); sbus_writeb(dev->dev_addr[i], &eth->h_source[i]); } sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length); sbus_writew(0, &ib->btx_ring[entry].misc); sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits); } else { struct lance_init_block *ib = lp->init_block_mem; u16 *packet = (u16 *) &(ib->tx_buf[entry][0]); struct ethhdr *eth = (struct ethhdr *) packet; memset(packet, 0, ETH_ZLEN); for (i = 0; i < 6; i++) { eth->h_dest[i] = dev->dev_addr[i]; eth->h_source[i] = dev->dev_addr[i]; } ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000; ib->btx_ring[entry].misc = 0; ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); } lp->tx_new = TX_NEXT(entry); } static int lance_open(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int status = 0; STOP_LANCE(lp); if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED, lancestr, (void *) dev)) { printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq); return -EAGAIN; } /* On the 4m, setup the ledma to provide the upper bits for buffers */ if (lp->dregs) { u32 regval = lp->init_block_dvma & 0xff000000; sbus_writel(regval, lp->dregs + DMA_TEST); } /* Set mode and clear multicast filter only at device open, * so that lance_init_ring() called at any error will not * forget multicast filters. 
* * BTW it is common bug in all lance drivers! --ANK */ if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; sbus_writew(0, &ib->mode); sbus_writel(0, &ib->filter[0]); sbus_writel(0, &ib->filter[1]); } else { struct lance_init_block *ib = lp->init_block_mem; ib->mode = 0; ib->filter [0] = 0; ib->filter [1] = 0; } lp->init_ring(dev); load_csrs(lp); netif_start_queue(dev); status = init_restart_lance(lp); if (!status && lp->auto_select) { build_fake_packet(lp); sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); } return status; } static int lance_close(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); netif_stop_queue(dev); del_timer_sync(&lp->multicast_timer); STOP_LANCE(lp); free_irq(dev->irq, (void *) dev); return 0; } static int lance_reset(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int status; STOP_LANCE(lp); /* On the 4m, reset the dma too */ if (lp->dregs) { u32 csr, addr; printk(KERN_ERR "resetting ledma\n"); csr = sbus_readl(lp->dregs + DMA_CSR); sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR); udelay(200); sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR); addr = lp->init_block_dvma & 0xff000000; sbus_writel(addr, lp->dregs + DMA_TEST); } lp->init_ring(dev); load_csrs(lp); dev->trans_start = jiffies; /* prevent tx timeout */ status = init_restart_lance(lp); return status; } static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len) { void __iomem *piobuf = dest; u32 *p32; u16 *p16; u8 *p8; switch ((unsigned long)src & 0x3) { case 0: p32 = (u32 *) src; while (len >= 4) { sbus_writel(*p32, piobuf); p32++; piobuf += 4; len -= 4; } src = (char *) p32; break; case 1: case 3: p8 = (u8 *) src; while (len >= 4) { u32 val; val = p8[0] << 24; val |= p8[1] << 16; val |= p8[2] << 8; val |= p8[3]; sbus_writel(val, piobuf); p8 += 4; piobuf += 4; len -= 4; } src = (char *) p8; break; case 2: p16 = (u16 *) src; while (len >= 4) { u32 val = p16[0]<<16 | 
p16[1]; sbus_writel(val, piobuf); p16 += 2; piobuf += 4; len -= 4; } src = (char *) p16; break; } if (len >= 2) { u16 val = src[0] << 8 | src[1]; sbus_writew(val, piobuf); src += 2; piobuf += 2; len -= 2; } if (len >= 1) sbus_writeb(src[0], piobuf); } static void lance_piozero(void __iomem *dest, int len) { void __iomem *piobuf = dest; if ((unsigned long)piobuf & 1) { sbus_writeb(0, piobuf); piobuf += 1; len -= 1; if (len == 0) return; } if (len == 1) { sbus_writeb(0, piobuf); return; } if ((unsigned long)piobuf & 2) { sbus_writew(0, piobuf); piobuf += 2; len -= 2; if (len == 0) return; } while (len >= 4) { sbus_writel(0, piobuf); piobuf += 4; len -= 4; } if (len >= 2) { sbus_writew(0, piobuf); piobuf += 2; len -= 2; } if (len >= 1) sbus_writeb(0, piobuf); } static void lance_tx_timeout(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", dev->name, sbus_readw(lp->lregs + RDP)); lance_reset(dev); netif_wake_queue(dev); } static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, skblen, len; skblen = skb->len; len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; spin_lock_irq(&lp->lock); dev->stats.tx_bytes += len; entry = lp->tx_new & TX_RING_MOD_MASK; if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length); sbus_writew(0, &ib->btx_ring[entry].misc); lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen); if (len != skblen) lance_piozero(&ib->tx_buf[entry][skblen], len - skblen); sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits); } else { struct lance_init_block *ib = lp->init_block_mem; ib->btx_ring [entry].length = (-len) | 0xf000; ib->btx_ring [entry].misc = 0; skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); if (len != skblen) memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); } lp->tx_new = TX_NEXT(entry); if (TX_BUFFS_AVAIL <= 0) netif_stop_queue(dev); /* Kick the lance: transmit now */ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); /* Read back CSR to invalidate the E-Cache. * This is needed, because DMA_DSBL_WR_INV is set. */ if (lp->dregs) sbus_readw(lp->lregs + RDP); spin_unlock_irq(&lp->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } /* taken from the depca driver */ static void lance_load_multicast(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct netdev_hw_addr *ha; char *addrs; u32 crc; u32 val; /* set all multicast bits */ if (dev->flags & IFF_ALLMULTI) val = ~0; else val = 0; if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; sbus_writel(val, &ib->filter[0]); sbus_writel(val, &ib->filter[1]); } else { struct lance_init_block *ib = lp->init_block_mem; ib->filter [0] = val; ib->filter [1] = val; } if (dev->flags & IFF_ALLMULTI) return; /* Add addresses */ netdev_for_each_mc_addr(ha, dev) { addrs = ha->addr; /* multicast address? 
*/ if (!(*addrs & 1)) continue; crc = ether_crc_le(6, addrs); crc = crc >> 26; if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter; u16 tmp = sbus_readw(&mcast_table[crc>>4]); tmp |= 1 << (crc & 0xf); sbus_writew(tmp, &mcast_table[crc>>4]); } else { struct lance_init_block *ib = lp->init_block_mem; u16 *mcast_table = (u16 *) &ib->filter; mcast_table [crc >> 4] |= 1 << (crc & 0xf); } } } static void lance_set_multicast(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block *ib_mem = lp->init_block_mem; struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem; u16 mode; if (!netif_running(dev)) return; if (lp->tx_old != lp->tx_new) { mod_timer(&lp->multicast_timer, jiffies + 4); netif_wake_queue(dev); return; } netif_stop_queue(dev); STOP_LANCE(lp); lp->init_ring(dev); if (lp->pio_buffer) mode = sbus_readw(&ib_iomem->mode); else mode = ib_mem->mode; if (dev->flags & IFF_PROMISC) { mode |= LE_MO_PROM; if (lp->pio_buffer) sbus_writew(mode, &ib_iomem->mode); else ib_mem->mode = mode; } else { mode &= ~LE_MO_PROM; if (lp->pio_buffer) sbus_writew(mode, &ib_iomem->mode); else ib_mem->mode = mode; lance_load_multicast(dev); } load_csrs(lp); init_restart_lance(lp); netif_wake_queue(dev); } static void lance_set_multicast_retry(unsigned long _opaque) { struct net_device *dev = (struct net_device *) _opaque; lance_set_multicast(dev); } static void lance_free_hwresources(struct lance_private *lp) { if (lp->lregs) of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE); if (lp->dregs) { struct platform_device *ledma = lp->ledma; of_iounmap(&ledma->resource[0], lp->dregs, resource_size(&ledma->resource[0])); } if (lp->init_block_iomem) { of_iounmap(&lp->lebuffer->resource[0], lp->init_block_iomem, sizeof(struct lance_init_block)); } else if (lp->init_block_mem) { dma_free_coherent(&lp->op->dev, sizeof(struct lance_init_block), 
lp->init_block_mem, lp->init_block_dvma); } } /* Ethtool support... */ static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "sunlance"); strcpy(info->version, "2.02"); } static u32 sparc_lance_get_link(struct net_device *dev) { /* We really do not keep track of this, but this * is better than not reporting anything at all. */ return 1; } static const struct ethtool_ops sparc_lance_ethtool_ops = { .get_drvinfo = sparc_lance_get_drvinfo, .get_link = sparc_lance_get_link, }; static const struct net_device_ops sparc_lance_ops = { .ndo_open = lance_open, .ndo_stop = lance_close, .ndo_start_xmit = lance_start_xmit, .ndo_set_multicast_list = lance_set_multicast, .ndo_tx_timeout = lance_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int __devinit sparc_lance_probe_one(struct platform_device *op, struct platform_device *ledma, struct platform_device *lebuffer) { struct device_node *dp = op->dev.of_node; static unsigned version_printed; struct lance_private *lp; struct net_device *dev; int i; dev = alloc_etherdev(sizeof(struct lance_private) + 8); if (!dev) return -ENOMEM; lp = netdev_priv(dev); if (sparc_lance_debug && version_printed++ == 0) printk (KERN_INFO "%s", version); spin_lock_init(&lp->lock); /* Copy the IDPROM ethernet address to the device structure, later we * will copy the address in the device structure to the lance * initialization block. 
*/ for (i = 0; i < 6; i++) dev->dev_addr[i] = idprom->id_ethaddr[i]; /* Get the IO region */ lp->lregs = of_ioremap(&op->resource[0], 0, LANCE_REG_SIZE, lancestr); if (!lp->lregs) { printk(KERN_ERR "SunLance: Cannot map registers.\n"); goto fail; } lp->ledma = ledma; if (lp->ledma) { lp->dregs = of_ioremap(&ledma->resource[0], 0, resource_size(&ledma->resource[0]), "ledma"); if (!lp->dregs) { printk(KERN_ERR "SunLance: Cannot map " "ledma registers.\n"); goto fail; } } lp->op = op; lp->lebuffer = lebuffer; if (lebuffer) { /* sanity check */ if (lebuffer->resource[0].start & 7) { printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n"); goto fail; } lp->init_block_iomem = of_ioremap(&lebuffer->resource[0], 0, sizeof(struct lance_init_block), "lebuffer"); if (!lp->init_block_iomem) { printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n"); goto fail; } lp->init_block_dvma = 0; lp->pio_buffer = 1; lp->init_ring = lance_init_ring_pio; lp->rx = lance_rx_pio; lp->tx = lance_tx_pio; } else { lp->init_block_mem = dma_alloc_coherent(&op->dev, sizeof(struct lance_init_block), &lp->init_block_dvma, GFP_ATOMIC); if (!lp->init_block_mem) { printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n"); goto fail; } lp->pio_buffer = 0; lp->init_ring = lance_init_ring_dvma; lp->rx = lance_rx_dvma; lp->tx = lance_tx_dvma; } lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval", (LE_C3_BSWP | LE_C3_ACON | LE_C3_BCON)); lp->name = lancestr; lp->burst_sizes = 0; if (lp->ledma) { struct device_node *ledma_dp = ledma->dev.of_node; struct device_node *sbus_dp; unsigned int sbmask; const char *prop; u32 csr; /* Find burst-size property for ledma */ lp->burst_sizes = of_getintprop_default(ledma_dp, "burst-sizes", 0); /* ledma may be capable of fast bursts, but sbus may not. 
*/ sbus_dp = ledma_dp->parent; sbmask = of_getintprop_default(sbus_dp, "burst-sizes", DMA_BURSTBITS); lp->burst_sizes &= sbmask; /* Get the cable-selection property */ prop = of_get_property(ledma_dp, "cable-selection", NULL); if (!prop || prop[0] == '\0') { struct device_node *nd; printk(KERN_INFO "SunLance: using " "auto-carrier-detection.\n"); nd = of_find_node_by_path("/options"); if (!nd) goto no_link_test; prop = of_get_property(nd, "tpe-link-test?", NULL); if (!prop) goto no_link_test; if (strcmp(prop, "true")) { printk(KERN_NOTICE "SunLance: warning: overriding option " "'tpe-link-test?'\n"); printk(KERN_NOTICE "SunLance: warning: mail any problems " "to ecd@skynet.be\n"); auxio_set_lte(AUXIO_LTE_ON); } no_link_test: lp->auto_select = 1; lp->tpe = 0; } else if (!strcmp(prop, "aui")) { lp->auto_select = 0; lp->tpe = 0; } else { lp->auto_select = 0; lp->tpe = 1; } /* Reset ledma */ csr = sbus_readl(lp->dregs + DMA_CSR); sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR); udelay(200); sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR); } else lp->dregs = NULL; lp->dev = dev; SET_NETDEV_DEV(dev, &op->dev); dev->watchdog_timeo = 5*HZ; dev->ethtool_ops = &sparc_lance_ethtool_ops; dev->netdev_ops = &sparc_lance_ops; dev->irq = op->archdata.irqs[0]; /* We cannot sleep if the chip is busy during a * multicast list update event, because such events * can occur from interrupts (ex. IPv6). So we * use a timer to try again later when necessary. 
-DaveM */ init_timer(&lp->multicast_timer); lp->multicast_timer.data = (unsigned long) dev; lp->multicast_timer.function = &lance_set_multicast_retry; if (register_netdev(dev)) { printk(KERN_ERR "SunLance: Cannot register device.\n"); goto fail; } dev_set_drvdata(&op->dev, lp); printk(KERN_INFO "%s: LANCE %pM\n", dev->name, dev->dev_addr); return 0; fail: lance_free_hwresources(lp); free_netdev(dev); return -ENODEV; } static int __devinit sunlance_sbus_probe(struct platform_device *op, const struct of_device_id *match) { struct platform_device *parent = to_platform_device(op->dev.parent); struct device_node *parent_dp = parent->dev.of_node; int err; if (!strcmp(parent_dp->name, "ledma")) { err = sparc_lance_probe_one(op, parent, NULL); } else if (!strcmp(parent_dp->name, "lebuffer")) { err = sparc_lance_probe_one(op, NULL, parent); } else err = sparc_lance_probe_one(op, NULL, NULL); return err; } static int __devexit sunlance_sbus_remove(struct platform_device *op) { struct lance_private *lp = dev_get_drvdata(&op->dev); struct net_device *net_dev = lp->dev; unregister_netdev(net_dev); lance_free_hwresources(lp); free_netdev(net_dev); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id sunlance_sbus_match[] = { { .name = "le", }, {}, }; MODULE_DEVICE_TABLE(of, sunlance_sbus_match); static struct of_platform_driver sunlance_sbus_driver = { .driver = { .name = "sunlance", .owner = THIS_MODULE, .of_match_table = sunlance_sbus_match, }, .probe = sunlance_sbus_probe, .remove = __devexit_p(sunlance_sbus_remove), }; /* Find all the lance cards on the system and initialize them */ static int __init sparc_lance_init(void) { return of_register_platform_driver(&sunlance_sbus_driver); } static void __exit sparc_lance_exit(void) { of_unregister_platform_driver(&sunlance_sbus_driver); } module_init(sparc_lance_init); module_exit(sparc_lance_exit);
gpl-2.0
efargas/linux
net/nfc/hci/command.c
87
9330
/* * Copyright (C) 2012 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define pr_fmt(fmt) "hci: %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/module.h> #include <net/nfc/hci.h> #include "hci.h" static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, const u8 *param, size_t param_len, data_exchange_cb_t cb, void *cb_context) { pr_debug("exec cmd async through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len); /* TODO: Define hci cmd execution delay. Should it be the same * for all commands? */ return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd, param, param_len, cb, cb_context, 3000); } /* * HCI command execution completion callback. 
* err will be a standard linux error (may be converted from HCI response) * skb contains the response data and must be disposed, or may be NULL if * an error occured */ static void nfc_hci_execute_cb(void *context, struct sk_buff *skb, int err) { struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)context; pr_debug("HCI Cmd completed with result=%d\n", err); hcp_ew->exec_result = err; if (hcp_ew->exec_result == 0) hcp_ew->result_skb = skb; else kfree_skb(skb); hcp_ew->exec_complete = true; wake_up(hcp_ew->wq); } static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, const u8 *param, size_t param_len, struct sk_buff **skb) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(ew_wq); struct hcp_exec_waiter hcp_ew; hcp_ew.wq = &ew_wq; hcp_ew.exec_complete = false; hcp_ew.result_skb = NULL; pr_debug("exec cmd sync through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len); /* TODO: Define hci cmd execution delay. Should it be the same * for all commands? */ hcp_ew.exec_result = nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd, param, param_len, nfc_hci_execute_cb, &hcp_ew, 3000); if (hcp_ew.exec_result < 0) return hcp_ew.exec_result; wait_event(ew_wq, hcp_ew.exec_complete == true); if (hcp_ew.exec_result == 0) { if (skb) *skb = hcp_ew.result_skb; else kfree_skb(hcp_ew.result_skb); } return hcp_ew.exec_result; } int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event, const u8 *param, size_t param_len) { u8 pipe; pr_debug("%d to gate %d\n", event, gate); pipe = hdev->gate2pipe[gate]; if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_EVENT, event, param, param_len, NULL, NULL, 0); } EXPORT_SYMBOL(nfc_hci_send_event); int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response, const u8 *param, size_t param_len) { u8 pipe; pr_debug("\n"); pipe = hdev->gate2pipe[gate]; if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; return nfc_hci_hcp_message_tx(hdev, pipe, 
NFC_HCI_HCP_RESPONSE, response, param, param_len, NULL, NULL, 0); } EXPORT_SYMBOL(nfc_hci_send_response); /* * Execute an hci command sent to gate. * skb will contain response data if success. skb can be NULL if you are not * interested by the response. */ int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, const u8 *param, size_t param_len, struct sk_buff **skb) { u8 pipe; pr_debug("\n"); pipe = hdev->gate2pipe[gate]; if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; return nfc_hci_execute_cmd(hdev, pipe, cmd, param, param_len, skb); } EXPORT_SYMBOL(nfc_hci_send_cmd); int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, const u8 *param, size_t param_len, data_exchange_cb_t cb, void *cb_context) { u8 pipe; pr_debug("\n"); pipe = hdev->gate2pipe[gate]; if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; return nfc_hci_execute_cmd_async(hdev, pipe, cmd, param, param_len, cb, cb_context); } EXPORT_SYMBOL(nfc_hci_send_cmd_async); int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, const u8 *param, size_t param_len) { int r; u8 *tmp; /* TODO ELa: reg idx must be inserted before param, but we don't want * to ask the caller to do it to keep a simpler API. * For now, just create a new temporary param buffer. This is far from * optimal though, and the plan is to modify APIs to pass idx down to * nfc_hci_hcp_message_tx where the frame is actually built, thereby * eliminating the need for the temp allocation-copy here. 
*/ pr_debug("idx=%d to gate %d\n", idx, gate); tmp = kmalloc(1 + param_len, GFP_KERNEL); if (tmp == NULL) return -ENOMEM; *tmp = idx; memcpy(tmp + 1, param, param_len); r = nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_SET_PARAMETER, tmp, param_len + 1, NULL); kfree(tmp); return r; } EXPORT_SYMBOL(nfc_hci_set_param); int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, struct sk_buff **skb) { pr_debug("gate=%d regidx=%d\n", gate, idx); return nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_GET_PARAMETER, &idx, 1, skb); } EXPORT_SYMBOL(nfc_hci_get_param); static int nfc_hci_open_pipe(struct nfc_hci_dev *hdev, u8 pipe) { struct sk_buff *skb; int r; pr_debug("pipe=%d\n", pipe); r = nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_OPEN_PIPE, NULL, 0, &skb); if (r == 0) { /* dest host other than host controller will send * number of pipes already open on this gate before * execution. The number can be found in skb->data[0] */ kfree_skb(skb); } return r; } static int nfc_hci_close_pipe(struct nfc_hci_dev *hdev, u8 pipe) { pr_debug("\n"); return nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_CLOSE_PIPE, NULL, 0, NULL); } static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate, int *result) { struct sk_buff *skb; struct hci_create_pipe_params params; struct hci_create_pipe_resp *resp; u8 pipe; pr_debug("gate=%d\n", dest_gate); params.src_gate = NFC_HCI_ADMIN_GATE; params.dest_host = dest_host; params.dest_gate = dest_gate; *result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, NFC_HCI_ADM_CREATE_PIPE, (u8 *) &params, sizeof(params), &skb); if (*result < 0) return NFC_HCI_INVALID_PIPE; resp = (struct hci_create_pipe_resp *)skb->data; pipe = resp->pipe; kfree_skb(skb); pr_debug("pipe created=%d\n", pipe); return pipe; } static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe) { pr_debug("\n"); return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, NFC_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL); } static int nfc_hci_clear_all_pipes(struct nfc_hci_dev 
*hdev) { u8 param[2]; /* TODO: Find out what the identity reference data is * and fill param with it. HCI spec 6.1.3.5 */ pr_debug("\n"); return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL); } int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate) { int r; u8 pipe = hdev->gate2pipe[gate]; pr_debug("\n"); if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; r = nfc_hci_close_pipe(hdev, pipe); if (r < 0) return r; if (pipe != NFC_HCI_LINK_MGMT_PIPE && pipe != NFC_HCI_ADMIN_PIPE) { r = nfc_hci_delete_pipe(hdev, pipe); if (r < 0) return r; } hdev->gate2pipe[gate] = NFC_HCI_INVALID_PIPE; return 0; } EXPORT_SYMBOL(nfc_hci_disconnect_gate); int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev) { int r; pr_debug("\n"); r = nfc_hci_clear_all_pipes(hdev); if (r < 0) return r; memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); return 0; } EXPORT_SYMBOL(nfc_hci_disconnect_all_gates); int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate, u8 pipe) { bool pipe_created = false; int r; pr_debug("\n"); if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE) return -EADDRINUSE; if (pipe != NFC_HCI_INVALID_PIPE) goto open_pipe; switch (dest_gate) { case NFC_HCI_LINK_MGMT_GATE: pipe = NFC_HCI_LINK_MGMT_PIPE; break; case NFC_HCI_ADMIN_GATE: pipe = NFC_HCI_ADMIN_PIPE; break; default: pipe = nfc_hci_create_pipe(hdev, dest_host, dest_gate, &r); if (pipe == NFC_HCI_INVALID_PIPE) return r; pipe_created = true; break; } open_pipe: r = nfc_hci_open_pipe(hdev, pipe); if (r < 0) { if (pipe_created) if (nfc_hci_delete_pipe(hdev, pipe) < 0) { /* TODO: Cannot clean by deleting pipe... * -> inconsistent state */ } return r; } hdev->gate2pipe[dest_gate] = pipe; return 0; } EXPORT_SYMBOL(nfc_hci_connect_gate);
gpl-2.0
wylazy/mysql
storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp
87
10751
/*
   Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
*/

#include <ndb_global.h>

#include "Win32AsyncFile.hpp"

#include <signaldata/FsRef.hpp>
#include <signaldata/FsOpenReq.hpp>
#include <signaldata/FsReadWriteReq.hpp>

Win32AsyncFile::Win32AsyncFile(SimulatedBlock& fs) :
  AsyncFile(fs),hFile(INVALID_HANDLE_VALUE)
{
}

Win32AsyncFile::~Win32AsyncFile()
{
}

int
Win32AsyncFile::init()
{
  return 0;
}

/*
 * Open (and optionally pre-allocate / pre-initialize) the file described
 * by theFileName according to request->par.open.flags.
 * On failure, request->error holds the Win32 error code (or 1000 for an
 * invalid access-mode combination).
 */
void
Win32AsyncFile::openReq(Request* request)
{
  m_auto_sync_freq = 0;
  m_write_wo_sync = 0;
  m_open_flags = request->par.open.flags;

  // for open.flags, see signal FSOPENREQ
  DWORD dwCreationDisposition;
  DWORD dwDesiredAccess = 0;
  DWORD dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE;
  /**
   * FIXME:
   * Previously we had FILE_FLAG_NO_BUFFERING also set here.
   * This has similar alignment rules to O_DIRECT on 2.4 kernels.
   * which means we should obey the directio req as we can't do it
   * everywhere (this seemingly "worked" in 5.0 though), e.g. by default
   * LCP isn't aligned IO.
   */
  DWORD dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS;
  Uint32 flags = request->par.open.flags;

  // Convert file open flags from Solaris to Windows
  if ((flags & FsOpenReq::OM_CREATE) && (flags & FsOpenReq::OM_TRUNCATE)){
    dwCreationDisposition = CREATE_ALWAYS;
  } else if (flags & FsOpenReq::OM_TRUNCATE){
    dwCreationDisposition = TRUNCATE_EXISTING;
  } else if (flags & (FsOpenReq::OM_CREATE|FsOpenReq::OM_CREATE_IF_NONE)){
    dwCreationDisposition = CREATE_NEW;
  } else {
    dwCreationDisposition = OPEN_EXISTING;
  }

  switch(flags & 3){
  case FsOpenReq::OM_READONLY:
    dwDesiredAccess = GENERIC_READ;
    break;
  case FsOpenReq::OM_WRITEONLY:
    dwDesiredAccess = GENERIC_WRITE;
    break;
  case FsOpenReq::OM_READWRITE:
    dwDesiredAccess = GENERIC_READ | GENERIC_WRITE;
    break;
  default:
    /* Fix: previously this was "break; return;" -- the return was dead
     * code, so an invalid mode fell through to CreateFile with
     * dwDesiredAccess == 0 despite error being set. Bail out here.
     */
    request->error = 1000;
    return;
  }

  hFile = CreateFile(theFileName.c_str(), dwDesiredAccess, dwShareMode,
                     0, dwCreationDisposition, dwFlagsAndAttributes, 0);

  if(INVALID_HANDLE_VALUE == hFile) {
    request->error = GetLastError();
    if(((ERROR_PATH_NOT_FOUND == request->error) ||
        (ERROR_INVALID_NAME == request->error)) &&
       (flags & (FsOpenReq::OM_CREATE|FsOpenReq::OM_CREATE_IF_NONE))) {
      // Missing parent directories: create them and retry once.
      createDirectories();
      hFile = CreateFile(theFileName.c_str(), dwDesiredAccess, dwShareMode,
                         0, dwCreationDisposition, dwFlagsAndAttributes, 0);

      if(INVALID_HANDLE_VALUE == hFile)
        request->error = GetLastError();
      else
        request->error = 0;
    }
  } else {
    request->error = 0;
  }

  if (flags & FsOpenReq::OM_INIT)
  {
    /* Pre-extend the file with zeroes up to file_size. */
    LARGE_INTEGER off;
    off.QuadPart= 0;
    LARGE_INTEGER sz;
    sz.QuadPart= request->par.open.file_size;
    char buf[4096];
    bzero(buf,sizeof(buf));
    while(off.QuadPart < sz.QuadPart)
    {
      BOOL r= SetFilePointerEx(hFile, off, NULL, FILE_BEGIN);
      if(r==0)
      {
        request->error= GetLastError();
        return;
      }
      DWORD dwWritten;
      BOOL bWrite= WriteFile(hFile, buf, sizeof(buf), &dwWritten, 0);
      if(!bWrite || dwWritten!=sizeof(buf))
      {
        /* Fix: previously the loop kept running after a failed write;
         * abort immediately like the other error paths here. */
        request->error= GetLastError();
        return;
      }
      off.QuadPart+=sizeof(buf);
    }
    off.QuadPart= 0;
    BOOL r= SetFilePointerEx(hFile, off, NULL, FILE_BEGIN);
    if(r==0)
    {
      request->error= GetLastError();
      return;
    }

    /* Write initial data: ask the owning block to fill m_page_ptr for
     * each page, then write it out. */
    SignalT<25> tmp;
    Signal * signal = (Signal*)(&tmp);
    bzero(signal, sizeof(tmp));
    FsReadWriteReq* req = (FsReadWriteReq*)signal->getDataPtrSend();

    Uint32 index = 0;
    Uint32 block = refToMain(request->theUserReference);
    Uint32 instance = refToInstance(request->theUserReference);

    off.QuadPart= 0;
    sz.QuadPart= request->par.open.file_size;
    while(off.QuadPart < sz.QuadPart)
    {
      req->filePointer = 0;          // DATA 0
      req->userPointer = request->theUserPointer;          // DATA 2
      req->numberOfPages = 1;        // DATA 5
      req->varIndex = index++;
      req->data.pageData[0] = m_page_ptr.i;
      m_fs.EXECUTE_DIRECT(block, GSN_FSWRITEREQ, signal,
			  FsReadWriteReq::FixedLength + 1,
                          instance // wl4391_todo This EXECUTE_DIRECT is thread safe
                          );

      Uint32 size = request->par.open.page_size;
      char* buf = (char*)m_page_ptr.p;
      DWORD dwWritten;
      while(size > 0){
	BOOL bWrite= WriteFile(hFile, buf, size, &dwWritten, 0);
	if(!bWrite)
        {
          /* Fix: on a failed WriteFile dwWritten may be 0, which made
           * this loop spin forever while repeatedly setting error.
           * Report the Win32 error (the old recovery block wrongly read
           * POSIX errno) and abort. */
	  request->error= GetLastError();
          return;
	}
	size -= dwWritten;
	buf += dwWritten;
      }
      off.QuadPart += request->par.open.page_size;
    }

    off.QuadPart= 0;
    r= SetFilePointerEx(hFile, off, NULL, FILE_BEGIN);
    if(r==0)
    {
      request->error= GetLastError();
      return;
    }
  }

  return;
}

/*
 * Read up to `size` bytes at `offset` into `buf`, looping over partial
 * reads.  Returns 0 on success (or acceptable EOF for readPartial),
 * otherwise a Win32/NDB error code.  Bytes actually read are accumulated
 * into req->par.readWrite.pages[0].size.
 */
int
Win32AsyncFile::readBuffer(Request* req, char * buf, size_t size, off_t offset)
{
  req->par.readWrite.pages[0].size = 0;

  while (size > 0) {
    size_t bytes_read = 0;

    OVERLAPPED ov;
    bzero(&ov, sizeof(ov));

    LARGE_INTEGER li;
    li.QuadPart = offset;
    ov.Offset = li.LowPart;
    ov.OffsetHigh = li.HighPart;

    DWORD dwBytesRead;
    BOOL bRead = ReadFile(hFile,
                          buf,
                          size,
                          &dwBytesRead,
                          &ov);
    if(!bRead){
      int err = GetLastError();
      if (err == ERROR_HANDLE_EOF && req->action == Request::readPartial)
      {
	return 0;
      }
      return err;
    }
    bytes_read = dwBytesRead;

    req->par.readWrite.pages[0].size += bytes_read;
    if(bytes_read == 0){
      if(req->action == Request::readPartial)
      {
	return 0;
      }
      /* Fix: the old DEBUG statement referenced an undefined identifier
       * `return_value`, breaking DEBUG builds. */
      DEBUG(ndbout_c("Read underflow %d %d\n %x\n%d",
		     size, offset, buf, bytes_read));
      return ERR_ReadUnderflow;
    }

    if(bytes_read != size){
      DEBUG(ndbout_c("Warning partial read %d != %d",
		     bytes_read, size));
    }

    buf += bytes_read;
    size -= bytes_read;
    offset += bytes_read;
  }
  return 0;
}

/*
 * Write `size` bytes from `buf` at `offset` in chunks of at most 256 KiB,
 * looping over partial writes.  Returns 0 on success or a Win32 error code.
 */
int
Win32AsyncFile::writeBuffer(const char * buf, size_t size, off_t offset)
{
  size_t chunk_size = 256 * 1024;
  size_t bytes_to_write = chunk_size;

  m_write_wo_sync += size;

  while (size > 0) {
    OVERLAPPED ov;
    bzero(&ov, sizeof(ov));

    LARGE_INTEGER li;
    li.QuadPart = offset;
    ov.Offset = li.LowPart;
    ov.OffsetHigh = li.HighPart;

    if (size < bytes_to_write){
      // We are at the last chunk
      bytes_to_write = size;
    }
    size_t bytes_written = 0;

    DWORD dwWritten;
    BOOL bWrite = WriteFile(hFile, buf, bytes_to_write, &dwWritten, &ov);
    if(!bWrite) {
      return GetLastError();
    }
    bytes_written = dwWritten;
    if (bytes_written != bytes_to_write) {
      DEBUG(ndbout_c("Warning partial write %d != %d",
		     bytes_written, bytes_to_write));
    }

    buf += bytes_written;
    size -= bytes_written;
    offset += bytes_written;
  }
  return 0;
}

/* Flush (if the file was writable) and close the handle. */
void
Win32AsyncFile::closeReq(Request * request)
{
  if (m_open_flags & (
      FsOpenReq::OM_WRITEONLY |
      FsOpenReq::OM_READWRITE |
      FsOpenReq::OM_APPEND )) {
    syncReq(request);
  }

  if(!CloseHandle(hFile)) {
    request->error = GetLastError();
  }
  hFile = INVALID_HANDLE_VALUE;
}

bool Win32AsyncFile::isOpen(){
  return (hFile != INVALID_HANDLE_VALUE);
}

/* Flush buffered writes to disk, unless auto-sync says there is nothing
 * outstanding. */
void
Win32AsyncFile::syncReq(Request * request)
{
  if(m_auto_sync_freq && m_write_wo_sync == 0){
    return;
  }
  if(!FlushFileBuffers(hFile)) {
    request->error = GetLastError();
    return;
  }
  m_write_wo_sync = 0;
}

/* Append par.append.buf to the file, honouring the auto-sync threshold. */
void
Win32AsyncFile::appendReq(Request * request){

  const char * buf = request->par.append.buf;
  Uint32 size = Uint32(request->par.append.size);

  m_write_wo_sync += size;

  DWORD dwWritten = 0;
  while(size > 0){
    if(!WriteFile(hFile, buf, size, &dwWritten, 0)){
      request->error = GetLastError();
      return ;
    }

    buf += dwWritten;
    size -= dwWritten;
  }

  if(m_auto_sync_freq && m_write_wo_sync > m_auto_sync_freq){
    syncReq(request);
  }
}

void
Win32AsyncFile::removeReq(Request * request)
{
  if(!DeleteFile(theFileName.c_str())) {
    request->error = GetLastError();
  }
}

/*
 * Recursively delete `src` (file or directory tree, per par.rmrf).
 * Uses an iterative goto-based walk: descends into subdirectories by
 * re-running FindFirstFile on a deeper path, and climbs back up by
 * truncating the path at the last separator.
 */
void
Win32AsyncFile::rmrfReq(Request * request, const char * src, bool removePath){
  if (!request->par.rmrf.directory)
  {
    // Remove file
    if (!DeleteFile(src))
    {
      DWORD dwError = GetLastError();
      if (dwError != ERROR_FILE_NOT_FOUND)
	request->error = dwError;
    }
    return;
  }

  char path[PATH_MAX];
  strcpy(path, src);
  strcat(path, "\\*");

  WIN32_FIND_DATA ffd;
  HANDLE hFindFile;
loop:
  hFindFile = FindFirstFile(path, &ffd);
  if (INVALID_HANDLE_VALUE == hFindFile)
  {
    DWORD dwError = GetLastError();
    if (dwError != ERROR_PATH_NOT_FOUND)
      request->error = dwError;
    return;
  }
  path[strlen(path) - 1] = 0; // remove '*'

  do {
    if (0 != strcmp(".", ffd.cFileName) && 0 != strcmp("..", ffd.cFileName))
    {
      int len = strlen(path);
      strcat(path, ffd.cFileName);
      if(DeleteFile(path) || RemoveDirectory(path))
      {
        path[len] = 0;
	continue;
      }//if

      FindClose(hFindFile);
      strcat(path, "\\*");
      goto loop;
    }
  } while(FindNextFile(hFindFile, &ffd));

  FindClose(hFindFile);
  path[strlen(path)-1] = 0; // remove '\'
  if (strcmp(src, path) != 0)
  {
    // Climb one level up and continue scanning the parent.
    char * t = strrchr(path, '\\');
    t[1] = '*';
    t[2] = 0;
    goto loop;
  }

  if(removePath && !RemoveDirectory(src))
    request->error = GetLastError();
}

/* Create every missing parent directory of theFileName. */
void Win32AsyncFile::createDirectories()
{
  char* tmp;
  const char * name = theFileName.c_str();
  const char * base = theFileName.get_base_name();
  while((tmp = (char *)strstr(base, DIR_SEPARATOR)))
  {
    char t = tmp[0];
    tmp[0] = 0;
    CreateDirectory(name, 0);
    tmp[0] = t;
    base = tmp + sizeof(DIR_SEPARATOR);
  }
}
gpl-2.0
artagnon/git
compat/regex/regex_internal.c
343
47665
/* Extended regular expression matching and search library. Copyright (C) 2002-2006, 2010 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Isamu Hasegawa <isamu@yamato.ibm.com>. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ static void re_string_construct_common (const char *str, int len, re_string_t *pstr, RE_TRANSLATE_TYPE trans, int icase, const re_dfa_t *dfa) internal_function; static re_dfastate_t *create_ci_newstate (const re_dfa_t *dfa, const re_node_set *nodes, unsigned int hash) internal_function; static re_dfastate_t *create_cd_newstate (const re_dfa_t *dfa, const re_node_set *nodes, unsigned int context, unsigned int hash) internal_function; #ifdef GAWK #undef MAX /* safety */ static int MAX(size_t a, size_t b) { return (a > b ? a : b); } #endif /* Functions for string operation. */ /* This function allocate the buffers. It is necessary to call re_string_reconstruct before using the object. */ static reg_errcode_t internal_function re_string_allocate (re_string_t *pstr, const char *str, int len, int init_len, RE_TRANSLATE_TYPE trans, int icase, const re_dfa_t *dfa) { reg_errcode_t ret; int init_buf_len; /* Ensure at least one character fits into the buffers. */ if (init_len < dfa->mb_cur_max) init_len = dfa->mb_cur_max; init_buf_len = (len + 1 < init_len) ? 
len + 1: init_len; re_string_construct_common (str, len, pstr, trans, icase, dfa); ret = re_string_realloc_buffers (pstr, init_buf_len); if (BE (ret != REG_NOERROR, 0)) return ret; pstr->word_char = dfa->word_char; pstr->word_ops_used = dfa->word_ops_used; pstr->mbs = pstr->mbs_allocated ? pstr->mbs : (unsigned char *) str; pstr->valid_len = (pstr->mbs_allocated || dfa->mb_cur_max > 1) ? 0 : len; pstr->valid_raw_len = pstr->valid_len; return REG_NOERROR; } /* This function allocate the buffers, and initialize them. */ static reg_errcode_t internal_function re_string_construct (re_string_t *pstr, const char *str, int len, RE_TRANSLATE_TYPE trans, int icase, const re_dfa_t *dfa) { reg_errcode_t ret; memset (pstr, '\0', sizeof (re_string_t)); re_string_construct_common (str, len, pstr, trans, icase, dfa); if (len > 0) { ret = re_string_realloc_buffers (pstr, len + 1); if (BE (ret != REG_NOERROR, 0)) return ret; } pstr->mbs = pstr->mbs_allocated ? pstr->mbs : (unsigned char *) str; if (icase) { #ifdef RE_ENABLE_I18N if (dfa->mb_cur_max > 1) { while (1) { ret = build_wcs_upper_buffer (pstr); if (BE (ret != REG_NOERROR, 0)) return ret; if (pstr->valid_raw_len >= len) break; if (pstr->bufs_len > pstr->valid_len + dfa->mb_cur_max) break; ret = re_string_realloc_buffers (pstr, pstr->bufs_len * 2); if (BE (ret != REG_NOERROR, 0)) return ret; } } else #endif /* RE_ENABLE_I18N */ build_upper_buffer (pstr); } else { #ifdef RE_ENABLE_I18N if (dfa->mb_cur_max > 1) build_wcs_buffer (pstr); else #endif /* RE_ENABLE_I18N */ { if (trans != NULL) re_string_translate_buffer (pstr); else { pstr->valid_len = pstr->bufs_len; pstr->valid_raw_len = pstr->bufs_len; } } } return REG_NOERROR; } /* Helper functions for re_string_allocate, and re_string_construct. */ static reg_errcode_t internal_function re_string_realloc_buffers (re_string_t *pstr, int new_buf_len) { #ifdef RE_ENABLE_I18N if (pstr->mb_cur_max > 1) { wint_t *new_wcs; /* Avoid overflow in realloc. 
*/ const size_t max_object_size = MAX (sizeof (wint_t), sizeof (int)); if (BE (SIZE_MAX / max_object_size < new_buf_len, 0)) return REG_ESPACE; new_wcs = re_realloc (pstr->wcs, wint_t, new_buf_len); if (BE (new_wcs == NULL, 0)) return REG_ESPACE; pstr->wcs = new_wcs; if (pstr->offsets != NULL) { int *new_offsets = re_realloc (pstr->offsets, int, new_buf_len); if (BE (new_offsets == NULL, 0)) return REG_ESPACE; pstr->offsets = new_offsets; } } #endif /* RE_ENABLE_I18N */ if (pstr->mbs_allocated) { unsigned char *new_mbs = re_realloc (pstr->mbs, unsigned char, new_buf_len); if (BE (new_mbs == NULL, 0)) return REG_ESPACE; pstr->mbs = new_mbs; } pstr->bufs_len = new_buf_len; return REG_NOERROR; } static void internal_function re_string_construct_common (const char *str, int len, re_string_t *pstr, RE_TRANSLATE_TYPE trans, int icase, const re_dfa_t *dfa) { pstr->raw_mbs = (const unsigned char *) str; pstr->len = len; pstr->raw_len = len; pstr->trans = trans; pstr->icase = icase ? 1 : 0; pstr->mbs_allocated = (trans != NULL || icase); pstr->mb_cur_max = dfa->mb_cur_max; pstr->is_utf8 = dfa->is_utf8; pstr->map_notascii = dfa->map_notascii; pstr->stop = pstr->len; pstr->raw_stop = pstr->stop; } #ifdef RE_ENABLE_I18N /* Build wide character buffer PSTR->WCS. If the byte sequence of the string are: <mb1>(0), <mb1>(1), <mb2>(0), <mb2>(1), <sb3> Then wide character buffer will be: <wc1> , WEOF , <wc2> , WEOF , <wc3> We use WEOF for padding, they indicate that the position isn't a first byte of a multibyte character. Note that this function assumes PSTR->VALID_LEN elements are already built and starts from PSTR->VALID_LEN. */ static void internal_function build_wcs_buffer (re_string_t *pstr) { #ifdef _LIBC unsigned char buf[MB_LEN_MAX]; assert (MB_LEN_MAX >= pstr->mb_cur_max); #else unsigned char buf[64]; #endif mbstate_t prev_st; int byte_idx, end_idx, remain_len; size_t mbclen; /* Build the buffers from pstr->valid_len to either pstr->len or pstr->bufs_len. 
*/ end_idx = (pstr->bufs_len > pstr->len) ? pstr->len : pstr->bufs_len; for (byte_idx = pstr->valid_len; byte_idx < end_idx;) { wchar_t wc; const char *p; remain_len = end_idx - byte_idx; prev_st = pstr->cur_state; /* Apply the translation if we need. */ if (BE (pstr->trans != NULL, 0)) { int i, ch; for (i = 0; i < pstr->mb_cur_max && i < remain_len; ++i) { ch = pstr->raw_mbs [pstr->raw_mbs_idx + byte_idx + i]; buf[i] = pstr->mbs[byte_idx + i] = pstr->trans[ch]; } p = (const char *) buf; } else p = (const char *) pstr->raw_mbs + pstr->raw_mbs_idx + byte_idx; mbclen = __mbrtowc (&wc, p, remain_len, &pstr->cur_state); if (BE (mbclen == (size_t) -2, 0)) { /* The buffer doesn't have enough space, finish to build. */ pstr->cur_state = prev_st; break; } else if (BE (mbclen == (size_t) -1 || mbclen == 0, 0)) { /* We treat these cases as a singlebyte character. */ mbclen = 1; wc = (wchar_t) pstr->raw_mbs[pstr->raw_mbs_idx + byte_idx]; if (BE (pstr->trans != NULL, 0)) wc = pstr->trans[wc]; pstr->cur_state = prev_st; } /* Write wide character and padding. */ pstr->wcs[byte_idx++] = wc; /* Write paddings. */ for (remain_len = byte_idx + mbclen - 1; byte_idx < remain_len ;) pstr->wcs[byte_idx++] = WEOF; } pstr->valid_len = byte_idx; pstr->valid_raw_len = byte_idx; } /* Build wide character buffer PSTR->WCS like build_wcs_buffer, but for REG_ICASE. */ static reg_errcode_t internal_function build_wcs_upper_buffer (re_string_t *pstr) { mbstate_t prev_st; int src_idx, byte_idx, end_idx, remain_len; size_t mbclen; #ifdef _LIBC char buf[MB_LEN_MAX]; assert (MB_LEN_MAX >= pstr->mb_cur_max); #else char buf[64]; #endif byte_idx = pstr->valid_len; end_idx = (pstr->bufs_len > pstr->len) ? pstr->len : pstr->bufs_len; /* The following optimization assumes that ASCII characters can be mapped to wide characters with a simple cast. */ if (! 
pstr->map_notascii && pstr->trans == NULL && !pstr->offsets_needed) { while (byte_idx < end_idx) { wchar_t wc; if (isascii (pstr->raw_mbs[pstr->raw_mbs_idx + byte_idx]) && mbsinit (&pstr->cur_state)) { /* In case of a singlebyte character. */ pstr->mbs[byte_idx] = toupper (pstr->raw_mbs[pstr->raw_mbs_idx + byte_idx]); /* The next step uses the assumption that wchar_t is encoded ASCII-safe: all ASCII values can be converted like this. */ pstr->wcs[byte_idx] = (wchar_t) pstr->mbs[byte_idx]; ++byte_idx; continue; } remain_len = end_idx - byte_idx; prev_st = pstr->cur_state; mbclen = __mbrtowc (&wc, ((const char *) pstr->raw_mbs + pstr->raw_mbs_idx + byte_idx), remain_len, &pstr->cur_state); if (BE (mbclen + 2 > 2, 1)) { wchar_t wcu = wc; if (iswlower (wc)) { size_t mbcdlen; wcu = towupper (wc); mbcdlen = wcrtomb (buf, wcu, &prev_st); if (BE (mbclen == mbcdlen, 1)) memcpy (pstr->mbs + byte_idx, buf, mbclen); else { src_idx = byte_idx; goto offsets_needed; } } else memcpy (pstr->mbs + byte_idx, pstr->raw_mbs + pstr->raw_mbs_idx + byte_idx, mbclen); pstr->wcs[byte_idx++] = wcu; /* Write paddings. */ for (remain_len = byte_idx + mbclen - 1; byte_idx < remain_len ;) pstr->wcs[byte_idx++] = WEOF; } else if (mbclen == (size_t) -1 || mbclen == 0) { /* It is an invalid character or '\0'. Just use the byte. */ int ch = pstr->raw_mbs[pstr->raw_mbs_idx + byte_idx]; pstr->mbs[byte_idx] = ch; /* And also cast it to wide char. */ pstr->wcs[byte_idx++] = (wchar_t) ch; if (BE (mbclen == (size_t) -1, 0)) pstr->cur_state = prev_st; } else { /* The buffer doesn't have enough space, finish to build. 
*/ pstr->cur_state = prev_st; break; } } pstr->valid_len = byte_idx; pstr->valid_raw_len = byte_idx; return REG_NOERROR; } else for (src_idx = pstr->valid_raw_len; byte_idx < end_idx;) { wchar_t wc; const char *p; offsets_needed: remain_len = end_idx - byte_idx; prev_st = pstr->cur_state; if (BE (pstr->trans != NULL, 0)) { int i, ch; for (i = 0; i < pstr->mb_cur_max && i < remain_len; ++i) { ch = pstr->raw_mbs [pstr->raw_mbs_idx + src_idx + i]; buf[i] = pstr->trans[ch]; } p = (const char *) buf; } else p = (const char *) pstr->raw_mbs + pstr->raw_mbs_idx + src_idx; mbclen = __mbrtowc (&wc, p, remain_len, &pstr->cur_state); if (BE (mbclen + 2 > 2, 1)) { wchar_t wcu = wc; if (iswlower (wc)) { size_t mbcdlen; wcu = towupper (wc); mbcdlen = wcrtomb ((char *) buf, wcu, &prev_st); if (BE (mbclen == mbcdlen, 1)) memcpy (pstr->mbs + byte_idx, buf, mbclen); else if (mbcdlen != (size_t) -1) { size_t i; if (byte_idx + mbcdlen > pstr->bufs_len) { pstr->cur_state = prev_st; break; } if (pstr->offsets == NULL) { pstr->offsets = re_malloc (int, pstr->bufs_len); if (pstr->offsets == NULL) return REG_ESPACE; } if (!pstr->offsets_needed) { for (i = 0; i < (size_t) byte_idx; ++i) pstr->offsets[i] = i; pstr->offsets_needed = 1; } memcpy (pstr->mbs + byte_idx, buf, mbcdlen); pstr->wcs[byte_idx] = wcu; pstr->offsets[byte_idx] = src_idx; for (i = 1; i < mbcdlen; ++i) { pstr->offsets[byte_idx + i] = src_idx + (i < mbclen ? i : mbclen - 1); pstr->wcs[byte_idx + i] = WEOF; } pstr->len += mbcdlen - mbclen; if (pstr->raw_stop > src_idx) pstr->stop += mbcdlen - mbclen; end_idx = (pstr->bufs_len > pstr->len) ? pstr->len : pstr->bufs_len; byte_idx += mbcdlen; src_idx += mbclen; continue; } else memcpy (pstr->mbs + byte_idx, p, mbclen); } else memcpy (pstr->mbs + byte_idx, p, mbclen); if (BE (pstr->offsets_needed != 0, 0)) { size_t i; for (i = 0; i < mbclen; ++i) pstr->offsets[byte_idx + i] = src_idx + i; } src_idx += mbclen; pstr->wcs[byte_idx++] = wcu; /* Write paddings. 
*/ for (remain_len = byte_idx + mbclen - 1; byte_idx < remain_len ;) pstr->wcs[byte_idx++] = WEOF; } else if (mbclen == (size_t) -1 || mbclen == 0) { /* It is an invalid character or '\0'. Just use the byte. */ int ch = pstr->raw_mbs[pstr->raw_mbs_idx + src_idx]; if (BE (pstr->trans != NULL, 0)) ch = pstr->trans [ch]; pstr->mbs[byte_idx] = ch; if (BE (pstr->offsets_needed != 0, 0)) pstr->offsets[byte_idx] = src_idx; ++src_idx; /* And also cast it to wide char. */ pstr->wcs[byte_idx++] = (wchar_t) ch; if (BE (mbclen == (size_t) -1, 0)) pstr->cur_state = prev_st; } else { /* The buffer doesn't have enough space, finish to build. */ pstr->cur_state = prev_st; break; } } pstr->valid_len = byte_idx; pstr->valid_raw_len = src_idx; return REG_NOERROR; } /* Skip characters until the index becomes greater than NEW_RAW_IDX. Return the index. */ static int internal_function re_string_skip_chars (re_string_t *pstr, int new_raw_idx, wint_t *last_wc) { mbstate_t prev_st; int rawbuf_idx; size_t mbclen; wint_t wc = WEOF; /* Skip the characters which are not necessary to check. */ for (rawbuf_idx = pstr->raw_mbs_idx + pstr->valid_raw_len; rawbuf_idx < new_raw_idx;) { wchar_t wc2; int remain_len = pstr->len - rawbuf_idx; prev_st = pstr->cur_state; mbclen = __mbrtowc (&wc2, (const char *) pstr->raw_mbs + rawbuf_idx, remain_len, &pstr->cur_state); if (BE (mbclen == (size_t) -2 || mbclen == (size_t) -1 || mbclen == 0, 0)) { /* We treat these cases as a single byte character. */ if (mbclen == 0 || remain_len == 0) wc = L'\0'; else wc = *(unsigned char *) (pstr->raw_mbs + rawbuf_idx); mbclen = 1; pstr->cur_state = prev_st; } else wc = (wint_t) wc2; /* Then proceed the next character. */ rawbuf_idx += mbclen; } *last_wc = (wint_t) wc; return rawbuf_idx; } #endif /* RE_ENABLE_I18N */ /* Build the buffer PSTR->MBS, and apply the translation if we need. This function is used in case of REG_ICASE. 
*/ static void internal_function build_upper_buffer (re_string_t *pstr) { int char_idx, end_idx; end_idx = (pstr->bufs_len > pstr->len) ? pstr->len : pstr->bufs_len; for (char_idx = pstr->valid_len; char_idx < end_idx; ++char_idx) { int ch = pstr->raw_mbs[pstr->raw_mbs_idx + char_idx]; if (BE (pstr->trans != NULL, 0)) ch = pstr->trans[ch]; if (islower (ch)) pstr->mbs[char_idx] = toupper (ch); else pstr->mbs[char_idx] = ch; } pstr->valid_len = char_idx; pstr->valid_raw_len = char_idx; } /* Apply TRANS to the buffer in PSTR. */ static void internal_function re_string_translate_buffer (re_string_t *pstr) { int buf_idx, end_idx; end_idx = (pstr->bufs_len > pstr->len) ? pstr->len : pstr->bufs_len; for (buf_idx = pstr->valid_len; buf_idx < end_idx; ++buf_idx) { int ch = pstr->raw_mbs[pstr->raw_mbs_idx + buf_idx]; pstr->mbs[buf_idx] = pstr->trans[ch]; } pstr->valid_len = buf_idx; pstr->valid_raw_len = buf_idx; } /* This function re-construct the buffers. Concretely, convert to wide character in case of pstr->mb_cur_max > 1, convert to upper case in case of REG_ICASE, apply translation. */ static reg_errcode_t internal_function re_string_reconstruct (re_string_t *pstr, int idx, int eflags) { int offset = idx - pstr->raw_mbs_idx; if (BE (offset < 0, 0)) { /* Reset buffer. */ #ifdef RE_ENABLE_I18N if (pstr->mb_cur_max > 1) memset (&pstr->cur_state, '\0', sizeof (mbstate_t)); #endif /* RE_ENABLE_I18N */ pstr->len = pstr->raw_len; pstr->stop = pstr->raw_stop; pstr->valid_len = 0; pstr->raw_mbs_idx = 0; pstr->valid_raw_len = 0; pstr->offsets_needed = 0; pstr->tip_context = ((eflags & REG_NOTBOL) ? CONTEXT_BEGBUF : CONTEXT_NEWLINE | CONTEXT_BEGBUF); if (!pstr->mbs_allocated) pstr->mbs = (unsigned char *) pstr->raw_mbs; offset = idx; } if (BE (offset != 0, 1)) { /* Should the already checked characters be kept? */ if (BE (offset < pstr->valid_raw_len, 1)) { /* Yes, move them to the front of the buffer. 
*/ #ifdef RE_ENABLE_I18N if (BE (pstr->offsets_needed, 0)) { int low = 0, high = pstr->valid_len, mid; do { mid = (high + low) / 2; if (pstr->offsets[mid] > offset) high = mid; else if (pstr->offsets[mid] < offset) low = mid + 1; else break; } while (low < high); if (pstr->offsets[mid] < offset) ++mid; pstr->tip_context = re_string_context_at (pstr, mid - 1, eflags); /* This can be quite complicated, so handle specially only the common and easy case where the character with different length representation of lower and upper case is present at or after offset. */ if (pstr->valid_len > offset && mid == offset && pstr->offsets[mid] == offset) { memmove (pstr->wcs, pstr->wcs + offset, (pstr->valid_len - offset) * sizeof (wint_t)); memmove (pstr->mbs, pstr->mbs + offset, pstr->valid_len - offset); pstr->valid_len -= offset; pstr->valid_raw_len -= offset; for (low = 0; low < pstr->valid_len; low++) pstr->offsets[low] = pstr->offsets[low + offset] - offset; } else { /* Otherwise, just find out how long the partial multibyte character at offset is and fill it with WEOF/255. 
*/ pstr->len = pstr->raw_len - idx + offset; pstr->stop = pstr->raw_stop - idx + offset; pstr->offsets_needed = 0; while (mid > 0 && pstr->offsets[mid - 1] == offset) --mid; while (mid < pstr->valid_len) if (pstr->wcs[mid] != WEOF) break; else ++mid; if (mid == pstr->valid_len) pstr->valid_len = 0; else { pstr->valid_len = pstr->offsets[mid] - offset; if (pstr->valid_len) { for (low = 0; low < pstr->valid_len; ++low) pstr->wcs[low] = WEOF; memset (pstr->mbs, 255, pstr->valid_len); } } pstr->valid_raw_len = pstr->valid_len; } } else #endif { pstr->tip_context = re_string_context_at (pstr, offset - 1, eflags); #ifdef RE_ENABLE_I18N if (pstr->mb_cur_max > 1) memmove (pstr->wcs, pstr->wcs + offset, (pstr->valid_len - offset) * sizeof (wint_t)); #endif /* RE_ENABLE_I18N */ if (BE (pstr->mbs_allocated, 0)) memmove (pstr->mbs, pstr->mbs + offset, pstr->valid_len - offset); pstr->valid_len -= offset; pstr->valid_raw_len -= offset; #if DEBUG assert (pstr->valid_len > 0); #endif } } else { #ifdef RE_ENABLE_I18N /* No, skip all characters until IDX. */ int prev_valid_len = pstr->valid_len; if (BE (pstr->offsets_needed, 0)) { pstr->len = pstr->raw_len - idx + offset; pstr->stop = pstr->raw_stop - idx + offset; pstr->offsets_needed = 0; } #endif pstr->valid_len = 0; #ifdef RE_ENABLE_I18N if (pstr->mb_cur_max > 1) { int wcs_idx; wint_t wc = WEOF; if (pstr->is_utf8) { const unsigned char *raw, *p, *end; /* Special case UTF-8. Multi-byte chars start with any byte other than 0x80 - 0xbf. */ raw = pstr->raw_mbs + pstr->raw_mbs_idx; end = raw + (offset - pstr->mb_cur_max); if (end < pstr->raw_mbs) end = pstr->raw_mbs; p = raw + offset - 1; #ifdef _LIBC /* We know the wchar_t encoding is UCS4, so for the simple case, ASCII characters, skip the conversion step. 
*/ if (isascii (*p) && BE (pstr->trans == NULL, 1)) { memset (&pstr->cur_state, '\0', sizeof (mbstate_t)); /* pstr->valid_len = 0; */ wc = (wchar_t) *p; } else #endif for (; p >= end; --p) if ((*p & 0xc0) != 0x80) { mbstate_t cur_state; wchar_t wc2; int mlen = raw + pstr->len - p; unsigned char buf[6]; size_t mbclen; if (BE (pstr->trans != NULL, 0)) { int i = mlen < 6 ? mlen : 6; while (--i >= 0) buf[i] = pstr->trans[p[i]]; } /* XXX Don't use mbrtowc, we know which conversion to use (UTF-8 -> UCS4). */ memset (&cur_state, 0, sizeof (cur_state)); mbclen = __mbrtowc (&wc2, (const char *) p, mlen, &cur_state); if (raw + offset - p <= mbclen && mbclen < (size_t) -2) { memset (&pstr->cur_state, '\0', sizeof (mbstate_t)); pstr->valid_len = mbclen - (raw + offset - p); wc = wc2; } break; } } if (wc == WEOF) pstr->valid_len = re_string_skip_chars (pstr, idx, &wc) - idx; if (wc == WEOF) pstr->tip_context = re_string_context_at (pstr, prev_valid_len - 1, eflags); else pstr->tip_context = ((BE (pstr->word_ops_used != 0, 0) && IS_WIDE_WORD_CHAR (wc)) ? CONTEXT_WORD : ((IS_WIDE_NEWLINE (wc) && pstr->newline_anchor) ? CONTEXT_NEWLINE : 0)); if (BE (pstr->valid_len, 0)) { for (wcs_idx = 0; wcs_idx < pstr->valid_len; ++wcs_idx) pstr->wcs[wcs_idx] = WEOF; if (pstr->mbs_allocated) memset (pstr->mbs, 255, pstr->valid_len); } pstr->valid_raw_len = pstr->valid_len; } else #endif /* RE_ENABLE_I18N */ { int c = pstr->raw_mbs[pstr->raw_mbs_idx + offset - 1]; pstr->valid_raw_len = 0; if (pstr->trans) c = pstr->trans[c]; pstr->tip_context = (bitset_contain (pstr->word_char, c) ? CONTEXT_WORD : ((IS_NEWLINE (c) && pstr->newline_anchor) ? CONTEXT_NEWLINE : 0)); } } if (!BE (pstr->mbs_allocated, 0)) pstr->mbs += offset; } pstr->raw_mbs_idx = idx; pstr->len -= offset; pstr->stop -= offset; /* Then build the buffers. 
*/ #ifdef RE_ENABLE_I18N if (pstr->mb_cur_max > 1) { if (pstr->icase) { reg_errcode_t ret = build_wcs_upper_buffer (pstr); if (BE (ret != REG_NOERROR, 0)) return ret; } else build_wcs_buffer (pstr); } else #endif /* RE_ENABLE_I18N */ if (BE (pstr->mbs_allocated, 0)) { if (pstr->icase) build_upper_buffer (pstr); else if (pstr->trans != NULL) re_string_translate_buffer (pstr); } else pstr->valid_len = pstr->len; pstr->cur_idx = 0; return REG_NOERROR; } static unsigned char internal_function __attribute ((pure)) re_string_peek_byte_case (const re_string_t *pstr, int idx) { int ch, off; /* Handle the common (easiest) cases first. */ if (BE (!pstr->mbs_allocated, 1)) return re_string_peek_byte (pstr, idx); #ifdef RE_ENABLE_I18N if (pstr->mb_cur_max > 1 && ! re_string_is_single_byte_char (pstr, pstr->cur_idx + idx)) return re_string_peek_byte (pstr, idx); #endif off = pstr->cur_idx + idx; #ifdef RE_ENABLE_I18N if (pstr->offsets_needed) off = pstr->offsets[off]; #endif ch = pstr->raw_mbs[pstr->raw_mbs_idx + off]; #ifdef RE_ENABLE_I18N /* Ensure that e.g. for tr_TR.UTF-8 BACKSLASH DOTLESS SMALL LETTER I this function returns CAPITAL LETTER I instead of first byte of DOTLESS SMALL LETTER I. The latter would confuse the parser, since peek_byte_case doesn't advance cur_idx in any way. */ if (pstr->offsets_needed && !isascii (ch)) return re_string_peek_byte (pstr, idx); #endif return ch; } static unsigned char internal_function __attribute ((pure)) re_string_fetch_byte_case (re_string_t *pstr) { if (BE (!pstr->mbs_allocated, 1)) return re_string_fetch_byte (pstr); #ifdef RE_ENABLE_I18N if (pstr->offsets_needed) { int off, ch; /* For tr_TR.UTF-8 [[:islower:]] there is [[: CAPITAL LETTER I WITH DOT lower:]] in mbs. Skip in that case the whole multi-byte character and return the original letter. On the other side, with [[: DOTLESS SMALL LETTER I return [[:I, as doing anything else would complicate things too much. 
*/ if (!re_string_first_byte (pstr, pstr->cur_idx)) return re_string_fetch_byte (pstr); off = pstr->offsets[pstr->cur_idx]; ch = pstr->raw_mbs[pstr->raw_mbs_idx + off]; if (! isascii (ch)) return re_string_fetch_byte (pstr); re_string_skip_bytes (pstr, re_string_char_size_at (pstr, pstr->cur_idx)); return ch; } #endif return pstr->raw_mbs[pstr->raw_mbs_idx + pstr->cur_idx++]; } static void internal_function re_string_destruct (re_string_t *pstr) { #ifdef RE_ENABLE_I18N re_free (pstr->wcs); re_free (pstr->offsets); #endif /* RE_ENABLE_I18N */ if (pstr->mbs_allocated) re_free (pstr->mbs); } /* Return the context at IDX in INPUT. */ static unsigned int internal_function re_string_context_at (const re_string_t *input, int idx, int eflags) { int c; if (BE (idx < 0, 0)) /* In this case, we use the value stored in input->tip_context, since we can't know the character in input->mbs[-1] here. */ return input->tip_context; if (BE (idx == input->len, 0)) return ((eflags & REG_NOTEOL) ? CONTEXT_ENDBUF : CONTEXT_NEWLINE | CONTEXT_ENDBUF); #ifdef RE_ENABLE_I18N if (input->mb_cur_max > 1) { wint_t wc; int wc_idx = idx; while(input->wcs[wc_idx] == WEOF) { #ifdef DEBUG /* It must not happen. */ assert (wc_idx >= 0); #endif --wc_idx; if (wc_idx < 0) return input->tip_context; } wc = input->wcs[wc_idx]; if (BE (input->word_ops_used != 0, 0) && IS_WIDE_WORD_CHAR (wc)) return CONTEXT_WORD; return (IS_WIDE_NEWLINE (wc) && input->newline_anchor ? CONTEXT_NEWLINE : 0); } else #endif { c = re_string_byte_at (input, idx); if (bitset_contain (input->word_char, c)) return CONTEXT_WORD; return IS_NEWLINE (c) && input->newline_anchor ? CONTEXT_NEWLINE : 0; } } /* Functions for set operation. */ static reg_errcode_t internal_function re_node_set_alloc (re_node_set *set, int size) { /* * ADR: valgrind says size can be 0, which then doesn't * free the block of size 0. Harumph. This seems * to work ok, though. 
*/ if (size == 0) { memset(set, 0, sizeof(*set)); return REG_NOERROR; } set->alloc = size; set->nelem = 0; set->elems = re_malloc (int, size); if (BE (set->elems == NULL, 0)) return REG_ESPACE; return REG_NOERROR; } static reg_errcode_t internal_function re_node_set_init_1 (re_node_set *set, int elem) { set->alloc = 1; set->nelem = 1; set->elems = re_malloc (int, 1); if (BE (set->elems == NULL, 0)) { set->alloc = set->nelem = 0; return REG_ESPACE; } set->elems[0] = elem; return REG_NOERROR; } static reg_errcode_t internal_function re_node_set_init_2 (re_node_set *set, int elem1, int elem2) { set->alloc = 2; set->elems = re_malloc (int, 2); if (BE (set->elems == NULL, 0)) return REG_ESPACE; if (elem1 == elem2) { set->nelem = 1; set->elems[0] = elem1; } else { set->nelem = 2; if (elem1 < elem2) { set->elems[0] = elem1; set->elems[1] = elem2; } else { set->elems[0] = elem2; set->elems[1] = elem1; } } return REG_NOERROR; } static reg_errcode_t internal_function re_node_set_init_copy (re_node_set *dest, const re_node_set *src) { dest->nelem = src->nelem; if (src->nelem > 0) { dest->alloc = dest->nelem; dest->elems = re_malloc (int, dest->alloc); if (BE (dest->elems == NULL, 0)) { dest->alloc = dest->nelem = 0; return REG_ESPACE; } memcpy (dest->elems, src->elems, src->nelem * sizeof (int)); } else re_node_set_init_empty (dest); return REG_NOERROR; } /* Calculate the intersection of the sets SRC1 and SRC2. And merge it to DEST. Return value indicate the error code or REG_NOERROR if succeeded. Note: We assume dest->elems is NULL, when dest->alloc is 0. */ static reg_errcode_t internal_function re_node_set_add_intersect (re_node_set *dest, const re_node_set *src1, const re_node_set *src2) { int i1, i2, is, id, delta, sbase; if (src1->nelem == 0 || src2->nelem == 0) return REG_NOERROR; /* We need dest->nelem + 2 * elems_in_intersection; this is a conservative estimate. 
*/ if (src1->nelem + src2->nelem + dest->nelem > dest->alloc) { int new_alloc = src1->nelem + src2->nelem + dest->alloc; int *new_elems = re_realloc (dest->elems, int, new_alloc); if (BE (new_elems == NULL, 0)) return REG_ESPACE; dest->elems = new_elems; dest->alloc = new_alloc; } /* Find the items in the intersection of SRC1 and SRC2, and copy into the top of DEST those that are not already in DEST itself. */ sbase = dest->nelem + src1->nelem + src2->nelem; i1 = src1->nelem - 1; i2 = src2->nelem - 1; id = dest->nelem - 1; for (;;) { if (src1->elems[i1] == src2->elems[i2]) { /* Try to find the item in DEST. Maybe we could binary search? */ while (id >= 0 && dest->elems[id] > src1->elems[i1]) --id; if (id < 0 || dest->elems[id] != src1->elems[i1]) dest->elems[--sbase] = src1->elems[i1]; if (--i1 < 0 || --i2 < 0) break; } /* Lower the highest of the two items. */ else if (src1->elems[i1] < src2->elems[i2]) { if (--i2 < 0) break; } else { if (--i1 < 0) break; } } id = dest->nelem - 1; is = dest->nelem + src1->nelem + src2->nelem - 1; delta = is - sbase + 1; /* Now copy. When DELTA becomes zero, the remaining DEST elements are already in place; this is more or less the same loop that is in re_node_set_merge. */ dest->nelem += delta; if (delta > 0 && id >= 0) for (;;) { if (dest->elems[is] > dest->elems[id]) { /* Copy from the top. */ dest->elems[id + delta--] = dest->elems[is--]; if (delta == 0) break; } else { /* Slide from the bottom. */ dest->elems[id + delta] = dest->elems[id]; if (--id < 0) break; } } /* Copy remaining SRC elements. */ memcpy (dest->elems, dest->elems + sbase, delta * sizeof (int)); return REG_NOERROR; } /* Calculate the union set of the sets SRC1 and SRC2. And store it to DEST. Return value indicate the error code or REG_NOERROR if succeeded. 
*/ static reg_errcode_t internal_function re_node_set_init_union (re_node_set *dest, const re_node_set *src1, const re_node_set *src2) { int i1, i2, id; if (src1 != NULL && src1->nelem > 0 && src2 != NULL && src2->nelem > 0) { dest->alloc = src1->nelem + src2->nelem; dest->elems = re_malloc (int, dest->alloc); if (BE (dest->elems == NULL, 0)) return REG_ESPACE; } else { if (src1 != NULL && src1->nelem > 0) return re_node_set_init_copy (dest, src1); else if (src2 != NULL && src2->nelem > 0) return re_node_set_init_copy (dest, src2); else re_node_set_init_empty (dest); return REG_NOERROR; } for (i1 = i2 = id = 0 ; i1 < src1->nelem && i2 < src2->nelem ;) { if (src1->elems[i1] > src2->elems[i2]) { dest->elems[id++] = src2->elems[i2++]; continue; } if (src1->elems[i1] == src2->elems[i2]) ++i2; dest->elems[id++] = src1->elems[i1++]; } if (i1 < src1->nelem) { memcpy (dest->elems + id, src1->elems + i1, (src1->nelem - i1) * sizeof (int)); id += src1->nelem - i1; } else if (i2 < src2->nelem) { memcpy (dest->elems + id, src2->elems + i2, (src2->nelem - i2) * sizeof (int)); id += src2->nelem - i2; } dest->nelem = id; return REG_NOERROR; } /* Calculate the union set of the sets DEST and SRC. And store it to DEST. Return value indicate the error code or REG_NOERROR if succeeded. */ static reg_errcode_t internal_function re_node_set_merge (re_node_set *dest, const re_node_set *src) { int is, id, sbase, delta; if (src == NULL || src->nelem == 0) return REG_NOERROR; if (dest->alloc < 2 * src->nelem + dest->nelem) { int new_alloc = 2 * (src->nelem + dest->alloc); int *new_buffer = re_realloc (dest->elems, int, new_alloc); if (BE (new_buffer == NULL, 0)) return REG_ESPACE; dest->elems = new_buffer; dest->alloc = new_alloc; } if (BE (dest->nelem == 0, 0)) { dest->nelem = src->nelem; memcpy (dest->elems, src->elems, src->nelem * sizeof (int)); return REG_NOERROR; } /* Copy into the top of DEST the items of SRC that are not found in DEST. Maybe we could binary search in DEST? 
*/ for (sbase = dest->nelem + 2 * src->nelem, is = src->nelem - 1, id = dest->nelem - 1; is >= 0 && id >= 0; ) { if (dest->elems[id] == src->elems[is]) is--, id--; else if (dest->elems[id] < src->elems[is]) dest->elems[--sbase] = src->elems[is--]; else /* if (dest->elems[id] > src->elems[is]) */ --id; } if (is >= 0) { /* If DEST is exhausted, the remaining items of SRC must be unique. */ sbase -= is + 1; memcpy (dest->elems + sbase, src->elems, (is + 1) * sizeof (int)); } id = dest->nelem - 1; is = dest->nelem + 2 * src->nelem - 1; delta = is - sbase + 1; if (delta == 0) return REG_NOERROR; /* Now copy. When DELTA becomes zero, the remaining DEST elements are already in place. */ dest->nelem += delta; for (;;) { if (dest->elems[is] > dest->elems[id]) { /* Copy from the top. */ dest->elems[id + delta--] = dest->elems[is--]; if (delta == 0) break; } else { /* Slide from the bottom. */ dest->elems[id + delta] = dest->elems[id]; if (--id < 0) { /* Copy remaining SRC elements. */ memcpy (dest->elems, dest->elems + sbase, delta * sizeof (int)); break; } } } return REG_NOERROR; } /* Insert the new element ELEM to the re_node_set* SET. SET should not already have ELEM. return -1 if an error has occurred, return 1 otherwise. */ static int internal_function re_node_set_insert (re_node_set *set, int elem) { int idx; /* In case the set is empty. */ if (set->alloc == 0) { if (BE (re_node_set_init_1 (set, elem) == REG_NOERROR, 1)) return 1; else return -1; } if (BE (set->nelem, 0) == 0) { /* We already guaranteed above that set->alloc != 0. */ set->elems[0] = elem; ++set->nelem; return 1; } /* Realloc if we need. */ if (set->alloc == set->nelem) { int *new_elems; set->alloc = set->alloc * 2; new_elems = re_realloc (set->elems, int, set->alloc); if (BE (new_elems == NULL, 0)) return -1; set->elems = new_elems; } /* Move the elements which follows the new element. Test the first element separately to skip a check in the inner loop. 
*/ if (elem < set->elems[0]) { idx = 0; for (idx = set->nelem; idx > 0; idx--) set->elems[idx] = set->elems[idx - 1]; } else { for (idx = set->nelem; set->elems[idx - 1] > elem; idx--) set->elems[idx] = set->elems[idx - 1]; } /* Insert the new element. */ set->elems[idx] = elem; ++set->nelem; return 1; } /* Insert the new element ELEM to the re_node_set* SET. SET should not already have any element greater than or equal to ELEM. Return -1 if an error has occurred, return 1 otherwise. */ static int internal_function re_node_set_insert_last (re_node_set *set, int elem) { /* Realloc if we need. */ if (set->alloc == set->nelem) { int *new_elems; set->alloc = (set->alloc + 1) * 2; new_elems = re_realloc (set->elems, int, set->alloc); if (BE (new_elems == NULL, 0)) return -1; set->elems = new_elems; } /* Insert the new element. */ set->elems[set->nelem++] = elem; return 1; } /* Compare two node sets SET1 and SET2. return 1 if SET1 and SET2 are equivalent, return 0 otherwise. */ static int internal_function __attribute ((pure)) re_node_set_compare (const re_node_set *set1, const re_node_set *set2) { int i; if (set1 == NULL || set2 == NULL || set1->nelem != set2->nelem) return 0; for (i = set1->nelem ; --i >= 0 ; ) if (set1->elems[i] != set2->elems[i]) return 0; return 1; } /* Return (idx + 1) if SET contains the element ELEM, return 0 otherwise. */ static int internal_function __attribute ((pure)) re_node_set_contains (const re_node_set *set, int elem) { unsigned int idx, right, mid; if (set->nelem <= 0) return 0; /* Binary search the element. */ idx = 0; right = set->nelem - 1; while (idx < right) { mid = (idx + right) / 2; if (set->elems[mid] < elem) idx = mid + 1; else right = mid; } return set->elems[idx] == elem ? 
idx + 1 : 0; } static void internal_function re_node_set_remove_at (re_node_set *set, int idx) { if (idx < 0 || idx >= set->nelem) return; --set->nelem; for (; idx < set->nelem; idx++) set->elems[idx] = set->elems[idx + 1]; } /* Add the token TOKEN to dfa->nodes, and return the index of the token. Or return -1, if an error has occurred. */ static int internal_function re_dfa_add_node (re_dfa_t *dfa, re_token_t token) { if (BE (dfa->nodes_len >= dfa->nodes_alloc, 0)) { size_t new_nodes_alloc = dfa->nodes_alloc * 2; int *new_nexts, *new_indices; re_node_set *new_edests, *new_eclosures; re_token_t *new_nodes; /* Avoid overflows in realloc. */ const size_t max_object_size = MAX (sizeof (re_token_t), MAX (sizeof (re_node_set), sizeof (int))); if (BE (SIZE_MAX / max_object_size < new_nodes_alloc, 0)) return -1; new_nodes = re_realloc (dfa->nodes, re_token_t, new_nodes_alloc); if (BE (new_nodes == NULL, 0)) return -1; dfa->nodes = new_nodes; new_nexts = re_realloc (dfa->nexts, int, new_nodes_alloc); new_indices = re_realloc (dfa->org_indices, int, new_nodes_alloc); new_edests = re_realloc (dfa->edests, re_node_set, new_nodes_alloc); new_eclosures = re_realloc (dfa->eclosures, re_node_set, new_nodes_alloc); if (BE (new_nexts == NULL || new_indices == NULL || new_edests == NULL || new_eclosures == NULL, 0)) return -1; dfa->nexts = new_nexts; dfa->org_indices = new_indices; dfa->edests = new_edests; dfa->eclosures = new_eclosures; dfa->nodes_alloc = new_nodes_alloc; } dfa->nodes[dfa->nodes_len] = token; dfa->nodes[dfa->nodes_len].constraint = 0; #ifdef RE_ENABLE_I18N dfa->nodes[dfa->nodes_len].accept_mb = (token.type == OP_PERIOD && dfa->mb_cur_max > 1) || token.type == COMPLEX_BRACKET; #endif dfa->nexts[dfa->nodes_len] = -1; re_node_set_init_empty (dfa->edests + dfa->nodes_len); re_node_set_init_empty (dfa->eclosures + dfa->nodes_len); return dfa->nodes_len++; } static inline unsigned int internal_function calc_state_hash (const re_node_set *nodes, unsigned int context) { 
unsigned int hash = nodes->nelem + context; int i; for (i = 0 ; i < nodes->nelem ; i++) hash += nodes->elems[i]; return hash; } /* Search for the state whose node_set is equivalent to NODES. Return the pointer to the state, if we found it in the DFA. Otherwise create the new one and return it. In case of an error return NULL and set the error code in ERR. Note: - We assume NULL as the invalid state, then it is possible that return value is NULL and ERR is REG_NOERROR. - We never return non-NULL value in case of any errors, it is for optimization. */ static re_dfastate_t * internal_function re_acquire_state (reg_errcode_t *err, const re_dfa_t *dfa, const re_node_set *nodes) { unsigned int hash; re_dfastate_t *new_state; struct re_state_table_entry *spot; int i; if (BE (nodes->nelem == 0, 0)) { *err = REG_NOERROR; return NULL; } hash = calc_state_hash (nodes, 0); spot = dfa->state_table + (hash & dfa->state_hash_mask); for (i = 0 ; i < spot->num ; i++) { re_dfastate_t *state = spot->array[i]; if (hash != state->hash) continue; if (re_node_set_compare (&state->nodes, nodes)) return state; } /* There are no appropriate state in the dfa, create the new one. */ new_state = create_ci_newstate (dfa, nodes, hash); if (BE (new_state == NULL, 0)) *err = REG_ESPACE; return new_state; } /* Search for the state whose node_set is equivalent to NODES and whose context is equivalent to CONTEXT. Return the pointer to the state, if we found it in the DFA. Otherwise create the new one and return it. In case of an error return NULL and set the error code in ERR. Note: - We assume NULL as the invalid state, then it is possible that return value is NULL and ERR is REG_NOERROR. - We never return non-NULL value in case of any errors, it is for optimization. 
*/ static re_dfastate_t * internal_function re_acquire_state_context (reg_errcode_t *err, const re_dfa_t *dfa, const re_node_set *nodes, unsigned int context) { unsigned int hash; re_dfastate_t *new_state; struct re_state_table_entry *spot; int i; if (nodes->nelem == 0) { *err = REG_NOERROR; return NULL; } hash = calc_state_hash (nodes, context); spot = dfa->state_table + (hash & dfa->state_hash_mask); for (i = 0 ; i < spot->num ; i++) { re_dfastate_t *state = spot->array[i]; if (state->hash == hash && state->context == context && re_node_set_compare (state->entrance_nodes, nodes)) return state; } /* There are no appropriate state in `dfa', create the new one. */ new_state = create_cd_newstate (dfa, nodes, context, hash); if (BE (new_state == NULL, 0)) *err = REG_ESPACE; return new_state; } /* Finish initialization of the new state NEWSTATE, and using its hash value HASH put in the appropriate bucket of DFA's state table. Return value indicates the error code if failed. */ static reg_errcode_t register_state (const re_dfa_t *dfa, re_dfastate_t *newstate, unsigned int hash) { struct re_state_table_entry *spot; reg_errcode_t err; int i; newstate->hash = hash; err = re_node_set_alloc (&newstate->non_eps_nodes, newstate->nodes.nelem); if (BE (err != REG_NOERROR, 0)) return REG_ESPACE; for (i = 0; i < newstate->nodes.nelem; i++) { int elem = newstate->nodes.elems[i]; if (!IS_EPSILON_NODE (dfa->nodes[elem].type)) if (re_node_set_insert_last (&newstate->non_eps_nodes, elem) < 0) return REG_ESPACE; } spot = dfa->state_table + (hash & dfa->state_hash_mask); if (BE (spot->alloc <= spot->num, 0)) { int new_alloc = 2 * spot->num + 2; re_dfastate_t **new_array = re_realloc (spot->array, re_dfastate_t *, new_alloc); if (BE (new_array == NULL, 0)) return REG_ESPACE; spot->array = new_array; spot->alloc = new_alloc; } spot->array[spot->num++] = newstate; return REG_NOERROR; } static void free_state (re_dfastate_t *state) { re_node_set_free (&state->non_eps_nodes); re_node_set_free 
(&state->inveclosure); if (state->entrance_nodes != &state->nodes) { re_node_set_free (state->entrance_nodes); re_free (state->entrance_nodes); } re_node_set_free (&state->nodes); re_free (state->word_trtable); re_free (state->trtable); re_free (state); } /* Create the new state which is independ of contexts. Return the new state if succeeded, otherwise return NULL. */ static re_dfastate_t * internal_function create_ci_newstate (const re_dfa_t *dfa, const re_node_set *nodes, unsigned int hash) { int i; reg_errcode_t err; re_dfastate_t *newstate; newstate = (re_dfastate_t *) calloc (sizeof (re_dfastate_t), 1); if (BE (newstate == NULL, 0)) return NULL; err = re_node_set_init_copy (&newstate->nodes, nodes); if (BE (err != REG_NOERROR, 0)) { re_free (newstate); return NULL; } newstate->entrance_nodes = &newstate->nodes; for (i = 0 ; i < nodes->nelem ; i++) { re_token_t *node = dfa->nodes + nodes->elems[i]; re_token_type_t type = node->type; if (type == CHARACTER && !node->constraint) continue; #ifdef RE_ENABLE_I18N newstate->accept_mb |= node->accept_mb; #endif /* RE_ENABLE_I18N */ /* If the state has the halt node, the state is a halt state. */ if (type == END_OF_RE) newstate->halt = 1; else if (type == OP_BACK_REF) newstate->has_backref = 1; else if (type == ANCHOR || node->constraint) newstate->has_constraint = 1; } err = register_state (dfa, newstate, hash); if (BE (err != REG_NOERROR, 0)) { free_state (newstate); newstate = NULL; } return newstate; } /* Create the new state which is depend on the context CONTEXT. Return the new state if succeeded, otherwise return NULL. 
*/ static re_dfastate_t * internal_function create_cd_newstate (const re_dfa_t *dfa, const re_node_set *nodes, unsigned int context, unsigned int hash) { int i, nctx_nodes = 0; reg_errcode_t err; re_dfastate_t *newstate; newstate = (re_dfastate_t *) calloc (sizeof (re_dfastate_t), 1); if (BE (newstate == NULL, 0)) return NULL; err = re_node_set_init_copy (&newstate->nodes, nodes); if (BE (err != REG_NOERROR, 0)) { re_free (newstate); return NULL; } newstate->context = context; newstate->entrance_nodes = &newstate->nodes; for (i = 0 ; i < nodes->nelem ; i++) { re_token_t *node = dfa->nodes + nodes->elems[i]; re_token_type_t type = node->type; unsigned int constraint = node->constraint; if (type == CHARACTER && !constraint) continue; #ifdef RE_ENABLE_I18N newstate->accept_mb |= node->accept_mb; #endif /* RE_ENABLE_I18N */ /* If the state has the halt node, the state is a halt state. */ if (type == END_OF_RE) newstate->halt = 1; else if (type == OP_BACK_REF) newstate->has_backref = 1; if (constraint) { if (newstate->entrance_nodes == &newstate->nodes) { newstate->entrance_nodes = re_malloc (re_node_set, 1); if (BE (newstate->entrance_nodes == NULL, 0)) { free_state (newstate); return NULL; } if (re_node_set_init_copy (newstate->entrance_nodes, nodes) != REG_NOERROR) return NULL; nctx_nodes = 0; newstate->has_constraint = 1; } if (NOT_SATISFY_PREV_CONSTRAINT (constraint,context)) { re_node_set_remove_at (&newstate->nodes, i - nctx_nodes); ++nctx_nodes; } } } err = register_state (dfa, newstate, hash); if (BE (err != REG_NOERROR, 0)) { free_state (newstate); newstate = NULL; } return newstate; }
gpl-2.0
jblorenzo/mptcp-nexus5
arch/arm/mach-msm/qdsp5v2/audio_amrnb.c
343
43252
/* * amrnb audio decoder device * * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved. * * Based on the mp3 native driver in arch/arm/mach-msm/qdsp5/audio_mp3.c * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * * All source code in this file is licensed under the following license except * where indicated. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, you can find it at http://www.fsf.org */ #include <asm/atomic.h> #include <asm/ioctls.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/earlysuspend.h> #include <linux/memory_alloc.h> #include <linux/msm_audio.h> #include <linux/slab.h> #include <mach/msm_adsp.h> #include <mach/iommu.h> #include <mach/iommu_domains.h> #include <mach/qdsp5v2/audio_dev_ctl.h> #include <mach/qdsp5v2/qdsp5audppmsg.h> #include <mach/qdsp5v2/qdsp5audplaycmdi.h> #include <mach/qdsp5v2/qdsp5audplaymsg.h> #include <mach/qdsp5v2/audio_dev_ctl.h> #include <mach/qdsp5v2/audpp.h> #include <mach/debug_mm.h> #include <mach/msm_memtypes.h> #define BUFSZ 1024 /* Hold minimum 700ms voice data and 14 bytes of meta in*/ #define DMASZ (BUFSZ * 2) #define AUDPLAY_INVALID_READ_PTR_OFFSET 0xFFFF #define AUDDEC_DEC_AMRNB 10 #define PCM_BUFSZ_MIN 1624 /* 100ms worth of data and 24 bytes of meta out*/ #define 
AMRNB_DECODED_FRSZ 320 /* AMR-NB 20ms 8KHz mono PCM size */ #define PCM_BUF_MAX_COUNT 5 /* DSP only accepts 5 buffers at most but support 2 buffers currently */ #define ROUTING_MODE_FTRT 1 #define ROUTING_MODE_RT 2 /* Decoder status received from AUDPPTASK */ #define AUDPP_DEC_STATUS_SLEEP 0 #define AUDPP_DEC_STATUS_INIT 1 #define AUDPP_DEC_STATUS_CFG 2 #define AUDPP_DEC_STATUS_PLAY 3 #define AUDAMRNB_METAFIELD_MASK 0xFFFF0000 #define AUDAMRNB_EOS_FLG_OFFSET 0x0A /* Offset from beginning of buffer */ #define AUDAMRNB_EOS_FLG_MASK 0x01 #define AUDAMRNB_EOS_NONE 0x0 /* No EOS detected */ #define AUDAMRNB_EOS_SET 0x1 /* EOS set in meta field */ #define AUDAMRNB_EVENT_NUM 10 /* Default number of pre-allocated event pkts */ struct buffer { void *data; unsigned size; unsigned used; /* Input usage actual DSP produced PCM size */ unsigned addr; unsigned short mfield_sz; /*only useful for data has meta field */ }; #ifdef CONFIG_HAS_EARLYSUSPEND struct audamrnb_suspend_ctl { struct early_suspend node; struct audio *audio; }; #endif struct audamrnb_event{ struct list_head list; int event_type; union msm_audio_event_payload payload; }; struct audio { struct buffer out[2]; spinlock_t dsp_lock; uint8_t out_head; uint8_t out_tail; uint8_t out_needed; /* number of buffers the dsp is waiting for */ atomic_t out_bytes; struct mutex lock; struct mutex write_lock; wait_queue_head_t write_wait; /* Host PCM section */ struct buffer in[PCM_BUF_MAX_COUNT]; struct mutex read_lock; wait_queue_head_t read_wait; /* Wait queue for read */ char *read_data; /* pointer to reader buffer */ int32_t read_phys; /* physical address of reader buffer */ uint8_t read_next; /* index to input buffers to be read next */ uint8_t fill_next; /* index to buffer that DSP should be filling */ uint8_t pcm_buf_count; /* number of pcm buffer allocated */ /* ---- End of Host PCM section */ struct msm_adsp_module *audplay; /* data allocated for various buffers */ char *data; int32_t phys; /* physical address of write 
buffer */ void *map_v_read; void *map_v_write; int mfield; /* meta field embedded in data */ int rflush; /* Read flush */ int wflush; /* Write flush */ uint8_t opened:1; uint8_t enabled:1; uint8_t running:1; uint8_t stopped:1; /* set when stopped, cleared on flush */ uint8_t pcm_feedback:1; uint8_t buf_refresh:1; int teos; /* valid only if tunnel mode & no data left for decoder */ enum msm_aud_decoder_state dec_state; /* Represents decoder state */ const char *module_name; unsigned queue_id; uint16_t dec_id; uint32_t read_ptr_offset; int16_t source; #ifdef CONFIG_HAS_EARLYSUSPEND struct audamrnb_suspend_ctl suspend_ctl; #endif #ifdef CONFIG_DEBUG_FS struct dentry *dentry; #endif wait_queue_head_t wait; struct list_head free_event_queue; struct list_head event_queue; wait_queue_head_t event_wait; spinlock_t event_queue_lock; struct mutex get_event_lock; int event_abort; /* AV sync Info */ int avsync_flag; /* Flag to indicate feedback from DSP */ wait_queue_head_t avsync_wait;/* Wait queue for AV Sync Message */ /* flags, 48 bits sample/bytes counter per channel */ uint16_t avsync[AUDPP_AVSYNC_CH_COUNT * AUDPP_AVSYNC_NUM_WORDS + 1]; uint32_t device_events; int eq_enable; int eq_needs_commit; struct audpp_cmd_cfg_object_params_eqalizer eq; struct audpp_cmd_cfg_object_params_volume vol_pan; }; struct audpp_cmd_cfg_adec_params_amrnb { struct audpp_cmd_cfg_adec_params_common common; unsigned short stereo_cfg; } __attribute__((packed)) ; static int auddec_dsp_config(struct audio *audio, int enable); static void audpp_cmd_cfg_adec_params(struct audio *audio); static void audpp_cmd_cfg_routing_mode(struct audio *audio); static void audamrnb_send_data(struct audio *audio, unsigned needed); static void audamrnb_config_hostpcm(struct audio *audio); static void audamrnb_buffer_refresh(struct audio *audio); static void audamrnb_dsp_event(void *private, unsigned id, uint16_t *msg); #ifdef CONFIG_HAS_EARLYSUSPEND static void audamrnb_post_event(struct audio *audio, int type, union 
msm_audio_event_payload payload); #endif /* must be called with audio->lock held */ static int audamrnb_enable(struct audio *audio) { MM_DBG("\n"); /* Macro prints the file name and function */ if (audio->enabled) return 0; audio->dec_state = MSM_AUD_DECODER_STATE_NONE; audio->out_tail = 0; audio->out_needed = 0; if (msm_adsp_enable(audio->audplay)) { MM_ERR("msm_adsp_enable(audplay) failed\n"); return -ENODEV; } if (audpp_enable(audio->dec_id, audamrnb_dsp_event, audio)) { MM_ERR("audpp_enable() failed\n"); msm_adsp_disable(audio->audplay); return -ENODEV; } audio->enabled = 1; return 0; } static void amrnb_listner(u32 evt_id, union auddev_evt_data *evt_payload, void *private_data) { struct audio *audio = (struct audio *) private_data; switch (evt_id) { case AUDDEV_EVT_DEV_RDY: MM_DBG(":AUDDEV_EVT_DEV_RDY\n"); audio->source |= (0x1 << evt_payload->routing_id); if (audio->running == 1 && audio->enabled == 1) audpp_route_stream(audio->dec_id, audio->source); break; case AUDDEV_EVT_DEV_RLS: MM_DBG(":AUDDEV_EVT_DEV_RLS\n"); audio->source &= ~(0x1 << evt_payload->routing_id); if (audio->running == 1 && audio->enabled == 1) audpp_route_stream(audio->dec_id, audio->source); break; case AUDDEV_EVT_STREAM_VOL_CHG: audio->vol_pan.volume = evt_payload->session_vol; MM_DBG(":AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d\n", audio->vol_pan.volume); if (audio->running) audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan, POPP); break; default: MM_ERR(":ERROR:wrong event\n"); break; } } /* must be called with audio->lock held */ static int audamrnb_disable(struct audio *audio) { int rc = 0; MM_DBG("\n"); /* Macro prints the file name and function */ if (audio->enabled) { audio->enabled = 0; audio->dec_state = MSM_AUD_DECODER_STATE_NONE; auddec_dsp_config(audio, 0); rc = wait_event_interruptible_timeout(audio->wait, audio->dec_state != MSM_AUD_DECODER_STATE_NONE, msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS)); if (rc == 0) rc = -ETIMEDOUT; else if (audio->dec_state != 
MSM_AUD_DECODER_STATE_CLOSE) rc = -EFAULT; else rc = 0; wake_up(&audio->write_wait); wake_up(&audio->read_wait); msm_adsp_disable(audio->audplay); audpp_disable(audio->dec_id, audio); audio->out_needed = 0; } return rc; } /* ------------------- dsp --------------------- */ static void audamrnb_update_pcm_buf_entry(struct audio *audio, uint32_t *payload) { uint8_t index; unsigned long flags; if (audio->rflush) return; spin_lock_irqsave(&audio->dsp_lock, flags); for (index = 0; index < payload[1]; index++) { if (audio->in[audio->fill_next].addr == payload[2 + index * 2]) { MM_DBG("in[%d] ready\n", audio->fill_next); audio->in[audio->fill_next].used = payload[3 + index * 2]; if ((++audio->fill_next) == audio->pcm_buf_count) audio->fill_next = 0; } else { MM_ERR("expected=%x ret=%x\n", audio->in[audio->fill_next].addr, payload[1 + index * 2]); break; } } if (audio->in[audio->fill_next].used == 0) { audamrnb_buffer_refresh(audio); } else { MM_DBG("read cannot keep up\n"); audio->buf_refresh = 1; } wake_up(&audio->read_wait); spin_unlock_irqrestore(&audio->dsp_lock, flags); } static void audplay_dsp_event(void *data, unsigned id, size_t len, void (*getevent) (void *ptr, size_t len)) { struct audio *audio = data; uint32_t msg[28]; getevent(msg, sizeof(msg)); MM_DBG("msg_id=%x\n", id); switch (id) { case AUDPLAY_MSG_DEC_NEEDS_DATA: audamrnb_send_data(audio, 1); break; case AUDPLAY_MSG_BUFFER_UPDATE: audamrnb_update_pcm_buf_entry(audio, msg); break; case ADSP_MESSAGE_ID: MM_DBG("Received ADSP event: module enable(audplaytask)\n"); break; default: MM_ERR("unexpected message from decoder\n"); } } static void audamrnb_dsp_event(void *private, unsigned id, uint16_t *msg) { struct audio *audio = private; switch (id) { case AUDPP_MSG_STATUS_MSG:{ unsigned status = msg[1]; switch (status) { case AUDPP_DEC_STATUS_SLEEP: { uint16_t reason = msg[2]; MM_DBG("decoder status:sleep reason = \ 0x%04x\n", reason); if ((reason == AUDPP_MSG_REASON_MEM) || (reason == 
AUDPP_MSG_REASON_NODECODER)) { audio->dec_state = MSM_AUD_DECODER_STATE_FAILURE; wake_up(&audio->wait); } else if (reason == AUDPP_MSG_REASON_NONE) { /* decoder is in disable state */ audio->dec_state = MSM_AUD_DECODER_STATE_CLOSE; wake_up(&audio->wait); } break; } case AUDPP_DEC_STATUS_INIT: MM_DBG("decoder status: init \n"); if (audio->pcm_feedback) audpp_cmd_cfg_routing_mode(audio); else audpp_cmd_cfg_adec_params(audio); break; case AUDPP_DEC_STATUS_CFG: MM_DBG("decoder status: cfg \n"); break; case AUDPP_DEC_STATUS_PLAY: MM_DBG("decoder status: play \n"); /* send mixer command */ audpp_route_stream(audio->dec_id, audio->source); if (audio->pcm_feedback) { audamrnb_config_hostpcm(audio); audamrnb_buffer_refresh(audio); } audio->dec_state = MSM_AUD_DECODER_STATE_SUCCESS; wake_up(&audio->wait); break; default: MM_ERR("unknown decoder status \n"); break; } break; } case AUDPP_MSG_CFG_MSG: if (msg[0] == AUDPP_MSG_ENA_ENA) { MM_DBG("CFG_MSG ENABLE\n"); auddec_dsp_config(audio, 1); audio->out_needed = 0; audio->running = 1; audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan, POPP); audpp_dsp_set_eq(audio->dec_id, audio->eq_enable, &audio->eq, POPP); } else if (msg[0] == AUDPP_MSG_ENA_DIS) { MM_DBG("CFG_MSG DISABLE\n"); audio->running = 0; } else { MM_DBG("CFG_MSG %d?\n", msg[0]); } break; case AUDPP_MSG_ROUTING_ACK: MM_DBG("ROUTING_ACK mode=%d\n", msg[1]); audpp_cmd_cfg_adec_params(audio); break; case AUDPP_MSG_FLUSH_ACK: MM_DBG("FLUSH_ACK\n"); audio->wflush = 0; audio->rflush = 0; wake_up(&audio->write_wait); if (audio->pcm_feedback) audamrnb_buffer_refresh(audio); break; case AUDPP_MSG_PCMDMAMISSED: MM_DBG("PCMDMAMISSED\n"); audio->teos = 1; wake_up(&audio->write_wait); break; case AUDPP_MSG_AVSYNC_MSG: MM_DBG("AUDPP_MSG_AVSYNC_MSG\n"); memcpy(&audio->avsync[0], msg, sizeof(audio->avsync)); audio->avsync_flag = 1; wake_up(&audio->avsync_wait); break; default: MM_ERR("UNKNOWN (%d)\n", id); } } struct msm_adsp_ops audplay_adsp_ops_amrnb = { .event = 
audplay_dsp_event, }; #define audplay_send_queue0(audio, cmd, len) \ msm_adsp_write(audio->audplay, audio->queue_id, \ cmd, len) static int auddec_dsp_config(struct audio *audio, int enable) { struct audpp_cmd_cfg_dec_type cfg_dec_cmd; memset(&cfg_dec_cmd, 0, sizeof(cfg_dec_cmd)); cfg_dec_cmd.cmd_id = AUDPP_CMD_CFG_DEC_TYPE; if (enable) cfg_dec_cmd.dec_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_AMRNB; else cfg_dec_cmd.dec_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_DIS_DEC_V; cfg_dec_cmd.dm_mode = 0x0; cfg_dec_cmd.stream_id = audio->dec_id; return audpp_send_queue1(&cfg_dec_cmd, sizeof(cfg_dec_cmd)); } static void audpp_cmd_cfg_adec_params(struct audio *audio) { struct audpp_cmd_cfg_adec_params_amrnb cmd; memset(&cmd, 0, sizeof(cmd)); cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS; cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_V13K_LEN; cmd.common.dec_id = audio->dec_id; cmd.common.input_sampling_frequency = 8000; cmd.stereo_cfg = AUDPP_CMD_PCM_INTF_MONO_V; audpp_send_queue2(&cmd, sizeof(cmd)); } static void audpp_cmd_cfg_routing_mode(struct audio *audio) { struct audpp_cmd_routing_mode cmd; MM_DBG("\n"); /* Macro prints the file name and function */ memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDPP_CMD_ROUTING_MODE; cmd.object_number = audio->dec_id; if (audio->pcm_feedback) cmd.routing_mode = ROUTING_MODE_FTRT; else cmd.routing_mode = ROUTING_MODE_RT; audpp_send_queue1(&cmd, sizeof(cmd)); } static int audplay_dsp_send_data_avail(struct audio *audio, unsigned idx, unsigned len) { struct audplay_cmd_bitstream_data_avail_nt2 cmd; cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_NT2; if (audio->mfield) cmd.decoder_id = AUDAMRNB_METAFIELD_MASK | (audio->out[idx].mfield_sz >> 1); else cmd.decoder_id = audio->dec_id; cmd.buf_ptr = audio->out[idx].addr; cmd.buf_size = len / 2; cmd.partition_number = 0; return audplay_send_queue0(audio, &cmd, sizeof(cmd)); } static void audamrnb_buffer_refresh(struct audio *audio) { struct audplay_cmd_buffer_refresh 
refresh_cmd; refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH; refresh_cmd.num_buffers = 1; refresh_cmd.buf0_address = audio->in[audio->fill_next].addr; refresh_cmd.buf0_length = audio->in[audio->fill_next].size - (audio->in[audio->fill_next].size % AMRNB_DECODED_FRSZ) + (audio->mfield ? 24 : 0); refresh_cmd.buf_read_count = 0; MM_DBG("buf0_addr=%x buf0_len=%d\n", refresh_cmd.buf0_address, refresh_cmd.buf0_length); (void)audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd)); } static void audamrnb_config_hostpcm(struct audio *audio) { struct audplay_cmd_hpcm_buf_cfg cfg_cmd; MM_DBG("\n"); /* Macro prints the file name and function */ cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG; cfg_cmd.max_buffers = audio->pcm_buf_count; cfg_cmd.byte_swap = 0; cfg_cmd.hostpcm_config = (0x8000) | (0x4000); cfg_cmd.feedback_frequency = 1; cfg_cmd.partition_number = 0; (void)audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd)); } static void audamrnb_send_data(struct audio *audio, unsigned needed) { struct buffer *frame; unsigned long flags; spin_lock_irqsave(&audio->dsp_lock, flags); if (!audio->running) goto done; if (needed && !audio->wflush) { /* We were called from the callback because the DSP * requested more data. Note that the DSP does want * more data, and if a buffer was in-flight, mark it * as available (since the DSP must now be done with * it). */ audio->out_needed = 1; frame = audio->out + audio->out_tail; if (frame->used == 0xffffffff) { frame->used = 0; audio->out_tail ^= 1; wake_up(&audio->write_wait); } } if (audio->out_needed) { /* If the DSP currently wants data and we have a * buffer available, we will send it and reset * the needed flag. 
We'll mark the buffer as in-flight * so that it won't be recycled until the next buffer * is requested */ frame = audio->out + audio->out_tail; if (frame->used) { BUG_ON(frame->used == 0xffffffff); MM_DBG("frame %d busy\n", audio->out_tail); audplay_dsp_send_data_avail(audio, audio->out_tail, frame->used); frame->used = 0xffffffff; audio->out_needed = 0; } } done: spin_unlock_irqrestore(&audio->dsp_lock, flags); } /* ------------------- device --------------------- */ static void audamrnb_flush(struct audio *audio) { audio->out[0].used = 0; audio->out[1].used = 0; audio->out_head = 0; audio->out_tail = 0; audio->out_needed = 0; atomic_set(&audio->out_bytes, 0); } static void audamrnb_flush_pcm_buf(struct audio *audio) { uint8_t index; for (index = 0; index < PCM_BUF_MAX_COUNT; index++) audio->in[index].used = 0; audio->buf_refresh = 0; audio->read_next = 0; audio->fill_next = 0; } static void audamrnb_ioport_reset(struct audio *audio) { /* Make sure read/write thread are free from * sleep and knowing that system is not able * to process io request at the moment */ wake_up(&audio->write_wait); mutex_lock(&audio->write_lock); audamrnb_flush(audio); mutex_unlock(&audio->write_lock); wake_up(&audio->read_wait); mutex_lock(&audio->read_lock); audamrnb_flush_pcm_buf(audio); mutex_unlock(&audio->read_lock); audio->avsync_flag = 1; wake_up(&audio->avsync_wait); } static int audamrnb_events_pending(struct audio *audio) { unsigned long flags; int empty; spin_lock_irqsave(&audio->event_queue_lock, flags); empty = !list_empty(&audio->event_queue); spin_unlock_irqrestore(&audio->event_queue_lock, flags); return empty || audio->event_abort; } static void audamrnb_reset_event_queue(struct audio *audio) { unsigned long flags; struct audamrnb_event *drv_evt; struct list_head *ptr, *next; spin_lock_irqsave(&audio->event_queue_lock, flags); list_for_each_safe(ptr, next, &audio->event_queue) { drv_evt = list_first_entry(&audio->event_queue, struct audamrnb_event, list); 
list_del(&drv_evt->list); kfree(drv_evt); } list_for_each_safe(ptr, next, &audio->free_event_queue) { drv_evt = list_first_entry(&audio->free_event_queue, struct audamrnb_event, list); list_del(&drv_evt->list); kfree(drv_evt); } spin_unlock_irqrestore(&audio->event_queue_lock, flags); return; } static long audamrnb_process_event_req(struct audio *audio, void __user *arg) { long rc; struct msm_audio_event usr_evt; struct audamrnb_event *drv_evt = NULL; int timeout; unsigned long flags; if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event))) return -EFAULT; timeout = (int) usr_evt.timeout_ms; if (timeout > 0) { rc = wait_event_interruptible_timeout( audio->event_wait, audamrnb_events_pending(audio), msecs_to_jiffies(timeout)); if (rc == 0) return -ETIMEDOUT; } else { rc = wait_event_interruptible( audio->event_wait, audamrnb_events_pending(audio)); } if (rc < 0) return rc; if (audio->event_abort) { audio->event_abort = 0; return -ENODEV; } rc = 0; spin_lock_irqsave(&audio->event_queue_lock, flags); if (!list_empty(&audio->event_queue)) { drv_evt = list_first_entry(&audio->event_queue, struct audamrnb_event, list); list_del(&drv_evt->list); } if (drv_evt) { usr_evt.event_type = drv_evt->event_type; usr_evt.event_payload = drv_evt->payload; list_add_tail(&drv_evt->list, &audio->free_event_queue); } else rc = -1; spin_unlock_irqrestore(&audio->event_queue_lock, flags); if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt))) rc = -EFAULT; return rc; } static int audio_enable_eq(struct audio *audio, int enable) { if (audio->eq_enable == enable && !audio->eq_needs_commit) return 0; audio->eq_enable = enable; if (audio->running) { audpp_dsp_set_eq(audio->dec_id, enable, &audio->eq, POPP); audio->eq_needs_commit = 0; } return 0; } static int audio_get_avsync_data(struct audio *audio, struct msm_audio_stats *stats) { int rc = -EINVAL; unsigned long flags; local_irq_save(flags); if (audio->dec_id == audio->avsync[0] && audio->avsync_flag) { /* av_sync sample count 
*/ stats->sample_count = (audio->avsync[2] << 16) | (audio->avsync[3]); /* av_sync byte_count */ stats->byte_count = (audio->avsync[5] << 16) | (audio->avsync[6]); audio->avsync_flag = 0; rc = 0; } local_irq_restore(flags); return rc; } static long audamrnb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct audio *audio = file->private_data; int rc = -EINVAL; unsigned long flags = 0; uint16_t enable_mask; int enable; int prev_state; MM_DBG("cmd = %d\n", cmd); if (cmd == AUDIO_GET_STATS) { struct msm_audio_stats stats; audio->avsync_flag = 0; memset(&stats, 0, sizeof(stats)); if (audpp_query_avsync(audio->dec_id) < 0) return rc; rc = wait_event_interruptible_timeout(audio->avsync_wait, (audio->avsync_flag == 1), msecs_to_jiffies(AUDPP_AVSYNC_EVENT_TIMEOUT)); if (rc < 0) return rc; else if ((rc > 0) || ((rc == 0) && (audio->avsync_flag == 1))) { if (audio_get_avsync_data(audio, &stats) < 0) return rc; if (copy_to_user((void *)arg, &stats, sizeof(stats))) return -EFAULT; return 0; } else return -EAGAIN; } switch (cmd) { case AUDIO_ENABLE_AUDPP: if (copy_from_user(&enable_mask, (void *) arg, sizeof(enable_mask))) { rc = -EFAULT; break; } spin_lock_irqsave(&audio->dsp_lock, flags); enable = (enable_mask & EQ_ENABLE) ? 
1 : 0; audio_enable_eq(audio, enable); spin_unlock_irqrestore(&audio->dsp_lock, flags); rc = 0; break; case AUDIO_SET_VOLUME: spin_lock_irqsave(&audio->dsp_lock, flags); audio->vol_pan.volume = arg; if (audio->running) audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan, POPP); spin_unlock_irqrestore(&audio->dsp_lock, flags); rc = 0; break; case AUDIO_SET_PAN: spin_lock_irqsave(&audio->dsp_lock, flags); audio->vol_pan.pan = arg; if (audio->running) audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan, POPP); spin_unlock_irqrestore(&audio->dsp_lock, flags); rc = 0; break; case AUDIO_SET_EQ: prev_state = audio->eq_enable; audio->eq_enable = 0; if (copy_from_user(&audio->eq.num_bands, (void *) arg, sizeof(audio->eq) - (AUDPP_CMD_CFG_OBJECT_PARAMS_COMMON_LEN + 2))) { rc = -EFAULT; break; } audio->eq_enable = prev_state; audio->eq_needs_commit = 1; rc = 0; break; } if (-EINVAL != rc) return rc; if (cmd == AUDIO_GET_EVENT) { MM_DBG("AUDIO_GET_EVENT\n"); if (mutex_trylock(&audio->get_event_lock)) { rc = audamrnb_process_event_req(audio, (void __user *) arg); mutex_unlock(&audio->get_event_lock); } else rc = -EBUSY; return rc; } if (cmd == AUDIO_ABORT_GET_EVENT) { audio->event_abort = 1; wake_up(&audio->event_wait); return 0; } mutex_lock(&audio->lock); switch (cmd) { case AUDIO_START: MM_DBG("AUDIO_START\n"); rc = audamrnb_enable(audio); if (!rc) { rc = wait_event_interruptible_timeout(audio->wait, audio->dec_state != MSM_AUD_DECODER_STATE_NONE, msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS)); MM_INFO("dec_state %d rc = %d\n", audio->dec_state, rc); if (audio->dec_state != MSM_AUD_DECODER_STATE_SUCCESS) rc = -ENODEV; else rc = 0; } break; case AUDIO_STOP: MM_DBG("AUDIO_STOP\n"); rc = audamrnb_disable(audio); audio->stopped = 1; audamrnb_ioport_reset(audio); audio->stopped = 0; break; case AUDIO_FLUSH: MM_DBG("AUDIO_FLUSH\n"); audio->rflush = 1; audio->wflush = 1; audamrnb_ioport_reset(audio); if (audio->running) { audpp_flush(audio->dec_id); rc = 
wait_event_interruptible(audio->write_wait, !audio->wflush); if (rc < 0) { MM_ERR("AUDIO_FLUSH interrupted\n"); rc = -EINTR; } } else { audio->rflush = 0; audio->wflush = 0; } break; case AUDIO_SET_CONFIG:{ struct msm_audio_config config; if (copy_from_user (&config, (void *)arg, sizeof(config))) { rc = -EFAULT; break; } audio->mfield = config.meta_field; rc = 0; break; } case AUDIO_GET_CONFIG:{ struct msm_audio_config config; config.buffer_size = BUFSZ; config.buffer_count = 2; config.sample_rate = 8000; config.channel_count = 1; config.meta_field = 0; config.unused[0] = 0; config.unused[1] = 0; config.unused[2] = 0; if (copy_to_user((void *)arg, &config, sizeof(config))) rc = -EFAULT; else rc = 0; break; } case AUDIO_GET_PCM_CONFIG:{ struct msm_audio_pcm_config config; config.pcm_feedback = audio->pcm_feedback; config.buffer_count = PCM_BUF_MAX_COUNT; config.buffer_size = PCM_BUFSZ_MIN; if (copy_to_user((void *)arg, &config, sizeof(config))) rc = -EFAULT; else rc = 0; break; } case AUDIO_SET_PCM_CONFIG:{ struct msm_audio_pcm_config config; if (copy_from_user (&config, (void *)arg, sizeof(config))) { rc = -EFAULT; break; } if (config.pcm_feedback != audio->pcm_feedback) { MM_ERR("Not sufficient permission to" "change the playback mode\n"); rc = -EACCES; break; } if ((config.buffer_count > PCM_BUF_MAX_COUNT) || (config.buffer_count == 1)) config.buffer_count = PCM_BUF_MAX_COUNT; if (config.buffer_size < PCM_BUFSZ_MIN) config.buffer_size = PCM_BUFSZ_MIN; /* Check if pcm feedback is required */ if ((config.pcm_feedback) && (!audio->read_data)) { MM_DBG("allocate PCM buf %d\n", config.buffer_count * config.buffer_size); audio->read_phys = allocate_contiguous_ebi_nomap( config.buffer_size * config.buffer_count, SZ_4K); if (!audio->read_phys) { rc = -ENOMEM; break; } audio->map_v_read = ioremap( audio->read_phys, config.buffer_size * config.buffer_count); if (IS_ERR(audio->map_v_read)) { MM_ERR("failed to map read phys address\n"); rc = -ENOMEM; 
free_contiguous_memory_by_paddr( audio->read_phys); } else { uint8_t index; uint32_t offset = 0; audio->read_data = audio->map_v_read; audio->buf_refresh = 0; audio->pcm_buf_count = config.buffer_count; audio->read_next = 0; audio->fill_next = 0; for (index = 0; index < config.buffer_count; index++) { audio->in[index].data = audio->read_data + offset; audio->in[index].addr = audio->read_phys + offset; audio->in[index].size = config.buffer_size; audio->in[index].used = 0; offset += config.buffer_size; } MM_DBG("read buf: phy addr 0x%08x kernel \ addr 0x%08x\n", audio->read_phys, (int)audio->read_data); rc = 0; } } else { rc = 0; } break; } case AUDIO_GET_SESSION_ID: if (copy_to_user((void *) arg, &audio->dec_id, sizeof(unsigned short))) rc = -EFAULT; else rc = 0; break; default: rc = -EINVAL; } mutex_unlock(&audio->lock); return rc; } /* Only useful in tunnel-mode */ static int audamrnb_fsync(struct file *file, loff_t ppos1, loff_t ppos2, int datasync) { struct audio *audio = file->private_data; int rc = 0; MM_DBG("\n"); /* Macro prints the file name and function */ if (!audio->running || audio->pcm_feedback) { rc = -EINVAL; goto done_nolock; } mutex_lock(&audio->write_lock); rc = wait_event_interruptible(audio->write_wait, (!audio->out[0].used && !audio->out[1].used && audio->out_needed) || audio->wflush); if (rc < 0) goto done; else if (audio->wflush) { rc = -EBUSY; goto done; } /* pcm dmamiss message is sent continously * when decoder is starved so no race * condition concern */ audio->teos = 0; rc = wait_event_interruptible(audio->write_wait, audio->teos || audio->wflush); if (audio->wflush) rc = -EBUSY; done: mutex_unlock(&audio->write_lock); done_nolock: return rc; } static ssize_t audamrnb_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct audio *audio = file->private_data; const char __user *start = buf; int rc = 0; if (!audio->pcm_feedback) return 0; /* PCM feedback is not enabled. 
Nothing to read */ mutex_lock(&audio->read_lock); MM_DBG("%d \n", count); while (count > 0) { rc = wait_event_interruptible(audio->read_wait, (audio->in[audio->read_next].used > 0) || (audio->stopped) || (audio->rflush)); if (rc < 0) break; if (audio->stopped || audio->rflush) { rc = -EBUSY; break; } if (count < audio->in[audio->read_next].used) { /* Read must happen in frame boundary. Since driver does * not know frame size, read count must be greater or * equal to size of PCM samples */ MM_DBG("read stop - partial frame\n"); break; } else { MM_DBG("read from in[%d]\n", audio->read_next); if (copy_to_user (buf, audio->in[audio->read_next].data, audio->in[audio->read_next].used)) { MM_ERR("invalid addr %x \n", (unsigned int)buf); rc = -EFAULT; break; } count -= audio->in[audio->read_next].used; buf += audio->in[audio->read_next].used; audio->in[audio->read_next].used = 0; if ((++audio->read_next) == audio->pcm_buf_count) audio->read_next = 0; break; } } /* don't feed output buffer to HW decoder during flushing * buffer refresh command will be sent once flush completes * send buf refresh command here can confuse HW decoder */ if (audio->buf_refresh && !audio->rflush) { audio->buf_refresh = 0; MM_DBG("kick start pcm feedback again\n"); audamrnb_buffer_refresh(audio); } mutex_unlock(&audio->read_lock); if (buf > start) rc = buf - start; MM_DBG("read %d bytes\n", rc); return rc; } static int audamrnb_process_eos(struct audio *audio, const char __user *buf_start, unsigned short mfield_size) { int rc = 0; struct buffer *frame; frame = audio->out + audio->out_head; rc = wait_event_interruptible(audio->write_wait, (audio->out_needed && audio->out[0].used == 0 && audio->out[1].used == 0) || (audio->stopped) || (audio->wflush)); if (rc < 0) goto done; if (audio->stopped || audio->wflush) { rc = -EBUSY; goto done; } if (copy_from_user(frame->data, buf_start, mfield_size)) { rc = -EFAULT; goto done; } frame->mfield_sz = mfield_size; audio->out_head ^= 1; frame->used = 
mfield_size; audamrnb_send_data(audio, 0); done: return rc; } static ssize_t audamrnb_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct audio *audio = file->private_data; const char __user *start = buf; struct buffer *frame; size_t xfer; char *cpy_ptr; int rc = 0, eos_condition = AUDAMRNB_EOS_NONE; unsigned short mfield_size = 0; MM_DBG("cnt=%d\n", count); if (count & 1) return -EINVAL; mutex_lock(&audio->write_lock); while (count > 0) { frame = audio->out + audio->out_head; cpy_ptr = frame->data; rc = wait_event_interruptible(audio->write_wait, (frame->used == 0) || (audio->stopped) || (audio->wflush)); MM_DBG("buffer available\n"); if (rc < 0) break; if (audio->stopped || audio->wflush) { rc = -EBUSY; break; } if (audio->mfield) { if (buf == start) { /* Processing beginning of user buffer */ if (__get_user(mfield_size, (unsigned short __user *) buf)) { rc = -EFAULT; break; } else if (mfield_size > count) { rc = -EINVAL; break; } MM_DBG("mf offset_val %x\n", mfield_size); if (copy_from_user(cpy_ptr, buf, mfield_size)) { rc = -EFAULT; break; } /* Check if EOS flag is set and buffer * contains just meta field */ if (cpy_ptr[AUDAMRNB_EOS_FLG_OFFSET] & AUDAMRNB_EOS_FLG_MASK) { MM_DBG("eos set\n"); eos_condition = AUDAMRNB_EOS_SET; if (mfield_size == count) { buf += mfield_size; break; } else cpy_ptr[AUDAMRNB_EOS_FLG_OFFSET] &= ~AUDAMRNB_EOS_FLG_MASK; } cpy_ptr += mfield_size; count -= mfield_size; buf += mfield_size; } else { mfield_size = 0; MM_DBG("continuous buffer\n"); } frame->mfield_sz = mfield_size; } xfer = (count > (frame->size - mfield_size)) ? 
(frame->size - mfield_size) : count; if (copy_from_user(cpy_ptr, buf, xfer)) { rc = -EFAULT; break; } frame->used = (xfer + mfield_size); audio->out_head ^= 1; count -= xfer; buf += xfer; audamrnb_send_data(audio, 0); } if (eos_condition == AUDAMRNB_EOS_SET) rc = audamrnb_process_eos(audio, start, mfield_size); mutex_unlock(&audio->write_lock); if (!rc) { if (buf > start) return buf - start; } return rc; } static int audamrnb_release(struct inode *inode, struct file *file) { struct audio *audio = file->private_data; MM_INFO("audio instance 0x%08x freeing\n", (int)audio); mutex_lock(&audio->lock); auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->dec_id); audamrnb_disable(audio); audamrnb_flush(audio); audamrnb_flush_pcm_buf(audio); msm_adsp_put(audio->audplay); audpp_adec_free(audio->dec_id); #ifdef CONFIG_HAS_EARLYSUSPEND unregister_early_suspend(&audio->suspend_ctl.node); #endif audio->event_abort = 1; wake_up(&audio->event_wait); audamrnb_reset_event_queue(audio); iounmap(audio->map_v_write); free_contiguous_memory_by_paddr(audio->phys); if (audio->read_data) { iounmap(audio->map_v_read); free_contiguous_memory_by_paddr(audio->read_phys); } mutex_unlock(&audio->lock); #ifdef CONFIG_DEBUG_FS if (audio->dentry) debugfs_remove(audio->dentry); #endif kfree(audio); return 0; } #ifdef CONFIG_HAS_EARLYSUSPEND static void audamrnb_post_event(struct audio *audio, int type, union msm_audio_event_payload payload) { struct audamrnb_event *e_node = NULL; unsigned long flags; spin_lock_irqsave(&audio->event_queue_lock, flags); if (!list_empty(&audio->free_event_queue)) { e_node = list_first_entry(&audio->free_event_queue, struct audamrnb_event, list); list_del(&e_node->list); } else { e_node = kmalloc(sizeof(struct audamrnb_event), GFP_ATOMIC); if (!e_node) { MM_ERR("No mem to post event %d\n", type); return; } } e_node->event_type = type; e_node->payload = payload; list_add_tail(&e_node->list, &audio->event_queue); spin_unlock_irqrestore(&audio->event_queue_lock, flags); 
wake_up(&audio->event_wait); } static void audamrnb_suspend(struct early_suspend *h) { struct audamrnb_suspend_ctl *ctl = container_of(h, struct audamrnb_suspend_ctl, node); union msm_audio_event_payload payload; MM_DBG("\n"); /* Macro prints the file name and function */ audamrnb_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload); } static void audamrnb_resume(struct early_suspend *h) { struct audamrnb_suspend_ctl *ctl = container_of(h, struct audamrnb_suspend_ctl, node); union msm_audio_event_payload payload; MM_DBG("\n"); /* Macro prints the file name and function */ audamrnb_post_event(ctl->audio, AUDIO_EVENT_RESUME, payload); } #endif #ifdef CONFIG_DEBUG_FS static ssize_t audamrnb_debug_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t audamrnb_debug_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { const int debug_bufmax = 1024; static char buffer[1024]; int n = 0, i; struct audio *audio = file->private_data; mutex_lock(&audio->lock); n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened); n += scnprintf(buffer + n, debug_bufmax - n, "enabled %d\n", audio->enabled); n += scnprintf(buffer + n, debug_bufmax - n, "stopped %d\n", audio->stopped); n += scnprintf(buffer + n, debug_bufmax - n, "pcm_feedback %d\n", audio->pcm_feedback); n += scnprintf(buffer + n, debug_bufmax - n, "out_buf_sz %d\n", audio->out[0].size); n += scnprintf(buffer + n, debug_bufmax - n, "pcm_buf_count %d \n", audio->pcm_buf_count); n += scnprintf(buffer + n, debug_bufmax - n, "pcm_buf_sz %d \n", audio->in[0].size); n += scnprintf(buffer + n, debug_bufmax - n, "volume %x \n", audio->vol_pan.volume); mutex_unlock(&audio->lock); /* Following variables are only useful for debugging when * when playback halts unexpectedly. 
Thus, no mutual exclusion * enforced */ n += scnprintf(buffer + n, debug_bufmax - n, "wflush %d\n", audio->wflush); n += scnprintf(buffer + n, debug_bufmax - n, "rflush %d\n", audio->rflush); n += scnprintf(buffer + n, debug_bufmax - n, "running %d \n", audio->running); n += scnprintf(buffer + n, debug_bufmax - n, "dec state %d \n", audio->dec_state); n += scnprintf(buffer + n, debug_bufmax - n, "out_needed %d \n", audio->out_needed); n += scnprintf(buffer + n, debug_bufmax - n, "out_head %d \n", audio->out_head); n += scnprintf(buffer + n, debug_bufmax - n, "out_tail %d \n", audio->out_tail); n += scnprintf(buffer + n, debug_bufmax - n, "out[0].used %d \n", audio->out[0].used); n += scnprintf(buffer + n, debug_bufmax - n, "out[1].used %d \n", audio->out[1].used); n += scnprintf(buffer + n, debug_bufmax - n, "buffer_refresh %d \n", audio->buf_refresh); n += scnprintf(buffer + n, debug_bufmax - n, "read_next %d \n", audio->read_next); n += scnprintf(buffer + n, debug_bufmax - n, "fill_next %d \n", audio->fill_next); for (i = 0; i < audio->pcm_buf_count; i++) n += scnprintf(buffer + n, debug_bufmax - n, "in[%d].used %d \n", i, audio->in[i].used); buffer[n] = 0; return simple_read_from_buffer(buf, count, ppos, buffer, n); } static const struct file_operations audamrnb_debug_fops = { .read = audamrnb_debug_read, .open = audamrnb_debug_open, }; #endif static int audamrnb_open(struct inode *inode, struct file *file) { struct audio *audio = NULL; int rc, dec_attrb, decid, i; struct audamrnb_event *e_node = NULL; #ifdef CONFIG_DEBUG_FS /* 4 bytes represents decoder number, 1 byte for terminate string */ char name[sizeof "msm_amrnb_" + 5]; #endif /* Allocate Mem for audio instance */ audio = kzalloc(sizeof(struct audio), GFP_KERNEL); if (!audio) { MM_ERR("no memory to allocate audio instance \n"); rc = -ENOMEM; goto done; } MM_INFO("audio instance 0x%08x created\n", (int)audio); /* Allocate the decoder */ dec_attrb = AUDDEC_DEC_AMRNB; if ((file->f_mode & FMODE_WRITE) && 
(file->f_mode & FMODE_READ)) { dec_attrb |= MSM_AUD_MODE_NONTUNNEL; audio->pcm_feedback = NON_TUNNEL_MODE_PLAYBACK; } else if ((file->f_mode & FMODE_WRITE) && !(file->f_mode & FMODE_READ)) { dec_attrb |= MSM_AUD_MODE_TUNNEL; audio->pcm_feedback = TUNNEL_MODE_PLAYBACK; } else { kfree(audio); rc = -EACCES; goto done; } decid = audpp_adec_alloc(dec_attrb, &audio->module_name, &audio->queue_id); if (decid < 0) { MM_ERR("No free decoder available, freeing instance 0x%08x\n", (int)audio); rc = -ENODEV; kfree(audio); goto done; } audio->dec_id = decid & MSM_AUD_DECODER_MASK; audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K); if (!audio->phys) { MM_ERR("could not allocate write buffers, freeing instance \ 0x%08x\n", (int)audio); rc = -ENOMEM; audpp_adec_free(audio->dec_id); kfree(audio); goto done; } else { audio->map_v_write = ioremap(audio->phys, DMASZ); if (IS_ERR(audio->map_v_write)) { MM_ERR("could not map write phys address, freeing \ instance 0x%08x\n", (int)audio); rc = -ENOMEM; free_contiguous_memory_by_paddr(audio->phys); audpp_adec_free(audio->dec_id); free_contiguous_memory_by_paddr(audio->phys); kfree(audio); goto done; } audio->data = audio->map_v_write; MM_DBG("write buf: phy addr 0x%08x kernel addr \ 0x%08x\n", audio->phys, (int)audio->data); } rc = msm_adsp_get(audio->module_name, &audio->audplay, &audplay_adsp_ops_amrnb, audio); if (rc) { MM_ERR("failed to get %s module freeing instance 0x%08x\n", audio->module_name, (int)audio); goto err; } mutex_init(&audio->lock); mutex_init(&audio->write_lock); mutex_init(&audio->read_lock); mutex_init(&audio->get_event_lock); spin_lock_init(&audio->dsp_lock); spin_lock_init(&audio->event_queue_lock); INIT_LIST_HEAD(&audio->free_event_queue); INIT_LIST_HEAD(&audio->event_queue); init_waitqueue_head(&audio->write_wait); init_waitqueue_head(&audio->read_wait); init_waitqueue_head(&audio->wait); init_waitqueue_head(&audio->event_wait); init_waitqueue_head(&audio->avsync_wait); audio->out[0].data = audio->data + 
0; audio->out[0].addr = audio->phys + 0; audio->out[0].size = BUFSZ; audio->out[1].data = audio->data + BUFSZ; audio->out[1].addr = audio->phys + BUFSZ; audio->out[1].size = BUFSZ; audio->vol_pan.volume = 0x2000; audamrnb_flush(audio); file->private_data = audio; audio->opened = 1; audio->device_events = AUDDEV_EVT_DEV_RDY |AUDDEV_EVT_DEV_RLS| AUDDEV_EVT_STREAM_VOL_CHG; rc = auddev_register_evt_listner(audio->device_events, AUDDEV_CLNT_DEC, audio->dec_id, amrnb_listner, (void *)audio); if (rc) { MM_ERR("%s: failed to register listner\n", __func__); goto event_err; } #ifdef CONFIG_DEBUG_FS snprintf(name, sizeof name, "msm_amrnb_%04x", audio->dec_id); audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, NULL, (void *) audio, &audamrnb_debug_fops); if (IS_ERR(audio->dentry)) MM_DBG("debugfs_create_file failed\n"); #endif #ifdef CONFIG_HAS_EARLYSUSPEND audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; audio->suspend_ctl.node.resume = audamrnb_resume; audio->suspend_ctl.node.suspend = audamrnb_suspend; audio->suspend_ctl.audio = audio; register_early_suspend(&audio->suspend_ctl.node); #endif for (i = 0; i < AUDAMRNB_EVENT_NUM; i++) { e_node = kmalloc(sizeof(struct audamrnb_event), GFP_KERNEL); if (e_node) list_add_tail(&e_node->list, &audio->free_event_queue); else { MM_ERR("event pkt alloc failed\n"); break; } } done: return rc; event_err: msm_adsp_put(audio->audplay); err: iounmap(audio->map_v_write); free_contiguous_memory_by_paddr(audio->phys); audpp_adec_free(audio->dec_id); kfree(audio); return rc; } static const struct file_operations audio_amrnb_fops = { .owner = THIS_MODULE, .open = audamrnb_open, .release = audamrnb_release, .read = audamrnb_read, .write = audamrnb_write, .unlocked_ioctl = audamrnb_ioctl, .fsync = audamrnb_fsync, }; struct miscdevice audio_amrnb_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_amrnb", .fops = &audio_amrnb_fops, }; static int __init audamrnb_init(void) { return misc_register(&audio_amrnb_misc); } static 
void __exit audamrnb_exit(void) { misc_deregister(&audio_amrnb_misc); } module_init(audamrnb_init); module_exit(audamrnb_exit); MODULE_DESCRIPTION("MSM AMR-NB driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
makarandk/linux
drivers/media/platform/soc_camera/pxa_camera.c
599
51401
/* * V4L2 Driver for PXA camera host * * Copyright (C) 2006, Sascha Hauer, Pengutronix * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/moduleparam.h> #include <linux/time.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/sched.h> #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/videobuf-dma-sg.h> #include <media/soc_camera.h> #include <media/soc_mediabus.h> #include <linux/videodev2.h> #include <mach/dma.h> #include <linux/platform_data/camera-pxa.h> #define PXA_CAM_VERSION "0.0.6" #define PXA_CAM_DRV_NAME "pxa27x-camera" /* Camera Interface */ #define CICR0 0x0000 #define CICR1 0x0004 #define CICR2 0x0008 #define CICR3 0x000C #define CICR4 0x0010 #define CISR 0x0014 #define CIFR 0x0018 #define CITOR 0x001C #define CIBR0 0x0028 #define CIBR1 0x0030 #define CIBR2 0x0038 #define CICR0_DMAEN (1 << 31) /* DMA request enable */ #define CICR0_PAR_EN (1 << 30) /* Parity enable */ #define CICR0_SL_CAP_EN (1 << 29) /* Capture enable for slave mode */ #define CICR0_ENB (1 << 28) /* Camera interface enable */ #define CICR0_DIS (1 << 27) /* Camera interface disable */ #define CICR0_SIM (0x7 << 24) /* Sensor interface mode mask */ #define CICR0_TOM (1 << 9) /* Time-out mask */ #define CICR0_RDAVM (1 << 8) /* Receive-data-available mask */ #define CICR0_FEM (1 << 7) /* FIFO-empty mask */ #define CICR0_EOLM (1 << 6) /* 
End-of-line mask */ #define CICR0_PERRM (1 << 5) /* Parity-error mask */ #define CICR0_QDM (1 << 4) /* Quick-disable mask */ #define CICR0_CDM (1 << 3) /* Disable-done mask */ #define CICR0_SOFM (1 << 2) /* Start-of-frame mask */ #define CICR0_EOFM (1 << 1) /* End-of-frame mask */ #define CICR0_FOM (1 << 0) /* FIFO-overrun mask */ #define CICR1_TBIT (1 << 31) /* Transparency bit */ #define CICR1_RGBT_CONV (0x3 << 29) /* RGBT conversion mask */ #define CICR1_PPL (0x7ff << 15) /* Pixels per line mask */ #define CICR1_RGB_CONV (0x7 << 12) /* RGB conversion mask */ #define CICR1_RGB_F (1 << 11) /* RGB format */ #define CICR1_YCBCR_F (1 << 10) /* YCbCr format */ #define CICR1_RGB_BPP (0x7 << 7) /* RGB bis per pixel mask */ #define CICR1_RAW_BPP (0x3 << 5) /* Raw bis per pixel mask */ #define CICR1_COLOR_SP (0x3 << 3) /* Color space mask */ #define CICR1_DW (0x7 << 0) /* Data width mask */ #define CICR2_BLW (0xff << 24) /* Beginning-of-line pixel clock wait count mask */ #define CICR2_ELW (0xff << 16) /* End-of-line pixel clock wait count mask */ #define CICR2_HSW (0x3f << 10) /* Horizontal sync pulse width mask */ #define CICR2_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock wait count mask */ #define CICR2_FSW (0x7 << 0) /* Frame stabilization wait count mask */ #define CICR3_BFW (0xff << 24) /* Beginning-of-frame line clock wait count mask */ #define CICR3_EFW (0xff << 16) /* End-of-frame line clock wait count mask */ #define CICR3_VSW (0x3f << 10) /* Vertical sync pulse width mask */ #define CICR3_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock wait count mask */ #define CICR3_LPF (0x7ff << 0) /* Lines per frame mask */ #define CICR4_MCLK_DLY (0x3 << 24) /* MCLK Data Capture Delay mask */ #define CICR4_PCLK_EN (1 << 23) /* Pixel clock enable */ #define CICR4_PCP (1 << 22) /* Pixel clock polarity */ #define CICR4_HSP (1 << 21) /* Horizontal sync polarity */ #define CICR4_VSP (1 << 20) /* Vertical sync polarity */ #define CICR4_MCLK_EN (1 << 19) /* MCLK enable */ 
#define CICR4_FR_RATE (0x7 << 8) /* Frame rate mask */ #define CICR4_DIV (0xff << 0) /* Clock divisor mask */ #define CISR_FTO (1 << 15) /* FIFO time-out */ #define CISR_RDAV_2 (1 << 14) /* Channel 2 receive data available */ #define CISR_RDAV_1 (1 << 13) /* Channel 1 receive data available */ #define CISR_RDAV_0 (1 << 12) /* Channel 0 receive data available */ #define CISR_FEMPTY_2 (1 << 11) /* Channel 2 FIFO empty */ #define CISR_FEMPTY_1 (1 << 10) /* Channel 1 FIFO empty */ #define CISR_FEMPTY_0 (1 << 9) /* Channel 0 FIFO empty */ #define CISR_EOL (1 << 8) /* End of line */ #define CISR_PAR_ERR (1 << 7) /* Parity error */ #define CISR_CQD (1 << 6) /* Camera interface quick disable */ #define CISR_CDD (1 << 5) /* Camera interface disable done */ #define CISR_SOF (1 << 4) /* Start of frame */ #define CISR_EOF (1 << 3) /* End of frame */ #define CISR_IFO_2 (1 << 2) /* FIFO overrun for Channel 2 */ #define CISR_IFO_1 (1 << 1) /* FIFO overrun for Channel 1 */ #define CISR_IFO_0 (1 << 0) /* FIFO overrun for Channel 0 */ #define CIFR_FLVL2 (0x7f << 23) /* FIFO 2 level mask */ #define CIFR_FLVL1 (0x7f << 16) /* FIFO 1 level mask */ #define CIFR_FLVL0 (0xff << 8) /* FIFO 0 level mask */ #define CIFR_THL_0 (0x3 << 4) /* Threshold Level for Channel 0 FIFO */ #define CIFR_RESET_F (1 << 3) /* Reset input FIFOs */ #define CIFR_FEN2 (1 << 2) /* FIFO enable for channel 2 */ #define CIFR_FEN1 (1 << 1) /* FIFO enable for channel 1 */ #define CIFR_FEN0 (1 << 0) /* FIFO enable for channel 0 */ #define CICR0_SIM_MP (0 << 24) #define CICR0_SIM_SP (1 << 24) #define CICR0_SIM_MS (2 << 24) #define CICR0_SIM_EP (3 << 24) #define CICR0_SIM_ES (4 << 24) #define CICR1_DW_VAL(x) ((x) & CICR1_DW) /* Data bus width */ #define CICR1_PPL_VAL(x) (((x) << 15) & CICR1_PPL) /* Pixels per line */ #define CICR1_COLOR_SP_VAL(x) (((x) << 3) & CICR1_COLOR_SP) /* color space */ #define CICR1_RGB_BPP_VAL(x) (((x) << 7) & CICR1_RGB_BPP) /* bpp for rgb */ #define CICR1_RGBT_CONV_VAL(x) (((x) << 29) & 
CICR1_RGBT_CONV) /* rgbt conv */ #define CICR2_BLW_VAL(x) (((x) << 24) & CICR2_BLW) /* Beginning-of-line pixel clock wait count */ #define CICR2_ELW_VAL(x) (((x) << 16) & CICR2_ELW) /* End-of-line pixel clock wait count */ #define CICR2_HSW_VAL(x) (((x) << 10) & CICR2_HSW) /* Horizontal sync pulse width */ #define CICR2_BFPW_VAL(x) (((x) << 3) & CICR2_BFPW) /* Beginning-of-frame pixel clock wait count */ #define CICR2_FSW_VAL(x) (((x) << 0) & CICR2_FSW) /* Frame stabilization wait count */ #define CICR3_BFW_VAL(x) (((x) << 24) & CICR3_BFW) /* Beginning-of-frame line clock wait count */ #define CICR3_EFW_VAL(x) (((x) << 16) & CICR3_EFW) /* End-of-frame line clock wait count */ #define CICR3_VSW_VAL(x) (((x) << 11) & CICR3_VSW) /* Vertical sync pulse width */ #define CICR3_LPF_VAL(x) (((x) << 0) & CICR3_LPF) /* Lines per frame */ #define CICR0_IRQ_MASK (CICR0_TOM | CICR0_RDAVM | CICR0_FEM | CICR0_EOLM | \ CICR0_PERRM | CICR0_QDM | CICR0_CDM | CICR0_SOFM | \ CICR0_EOFM | CICR0_FOM) /* * Structures */ enum pxa_camera_active_dma { DMA_Y = 0x1, DMA_U = 0x2, DMA_V = 0x4, }; /* descriptor needed for the PXA DMA engine */ struct pxa_cam_dma { dma_addr_t sg_dma; struct pxa_dma_desc *sg_cpu; size_t sg_size; int sglen; }; /* buffer for one video frame */ struct pxa_buffer { /* common v4l buffer stuff -- must be first */ struct videobuf_buffer vb; enum v4l2_mbus_pixelcode code; /* our descriptor lists for Y, U and V channels */ struct pxa_cam_dma dmas[3]; int inwork; enum pxa_camera_active_dma active_dma; }; struct pxa_camera_dev { struct soc_camera_host soc_host; /* * PXA27x is only supposed to handle one camera on its Quick Capture * interface. 
If anyone ever builds hardware to enable more than * one camera, they will have to modify this driver too */ struct clk *clk; unsigned int irq; void __iomem *base; int channels; unsigned int dma_chans[3]; struct pxacamera_platform_data *pdata; struct resource *res; unsigned long platform_flags; unsigned long ciclk; unsigned long mclk; u32 mclk_divisor; u16 width_flags; /* max 10 bits */ struct list_head capture; spinlock_t lock; struct pxa_buffer *active; struct pxa_dma_desc *sg_tail[3]; u32 save_cicr[5]; }; struct pxa_cam { unsigned long flags; }; static const char *pxa_cam_driver_description = "PXA_Camera"; static unsigned int vid_limit = 16; /* Video memory limit, in Mb */ /* * Videobuf operations */ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { struct soc_camera_device *icd = vq->priv_data; dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size); *size = icd->sizeimage; if (0 == *count) *count = 32; if (*size * *count > vid_limit * 1024 * 1024) *count = (vid_limit * 1024 * 1024) / *size; return 0; } static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf) { struct soc_camera_device *icd = vq->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb); int i; BUG_ON(in_interrupt()); dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, &buf->vb, buf->vb.baddr, buf->vb.bsize); /* * This waits until this buffer is out of danger, i.e., until it is no * longer in STATE_QUEUED or STATE_ACTIVE */ videobuf_waiton(vq, &buf->vb, 0, 0); videobuf_dma_unmap(vq->dev, dma); videobuf_dma_free(dma); for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) { if (buf->dmas[i].sg_cpu) dma_free_coherent(ici->v4l2_dev.dev, buf->dmas[i].sg_size, buf->dmas[i].sg_cpu, buf->dmas[i].sg_dma); buf->dmas[i].sg_cpu = NULL; } buf->vb.state = VIDEOBUF_NEEDS_INIT; } static int calculate_dma_sglen(struct scatterlist *sglist, int sglen, int sg_first_ofs, int 
size) { int i, offset, dma_len, xfer_len; struct scatterlist *sg; offset = sg_first_ofs; for_each_sg(sglist, sg, sglen, i) { dma_len = sg_dma_len(sg); /* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */ xfer_len = roundup(min(dma_len - offset, size), 8); size = max(0, size - xfer_len); offset = 0; if (size == 0) break; } BUG_ON(size != 0); return i + 1; } /** * pxa_init_dma_channel - init dma descriptors * @pcdev: pxa camera device * @buf: pxa buffer to find pxa dma channel * @dma: dma video buffer * @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V') * @cibr: camera Receive Buffer Register * @size: bytes to transfer * @sg_first: first element of sg_list * @sg_first_ofs: offset in first element of sg_list * * Prepares the pxa dma descriptors to transfer one camera channel. * Beware sg_first and sg_first_ofs are both input and output parameters. * * Returns 0 or -ENOMEM if no coherent memory is available */ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev, struct pxa_buffer *buf, struct videobuf_dmabuf *dma, int channel, int cibr, int size, struct scatterlist **sg_first, int *sg_first_ofs) { struct pxa_cam_dma *pxa_dma = &buf->dmas[channel]; struct device *dev = pcdev->soc_host.v4l2_dev.dev; struct scatterlist *sg; int i, offset, sglen; int dma_len = 0, xfer_len = 0; if (pxa_dma->sg_cpu) dma_free_coherent(dev, pxa_dma->sg_size, pxa_dma->sg_cpu, pxa_dma->sg_dma); sglen = calculate_dma_sglen(*sg_first, dma->sglen, *sg_first_ofs, size); pxa_dma->sg_size = (sglen + 1) * sizeof(struct pxa_dma_desc); pxa_dma->sg_cpu = dma_alloc_coherent(dev, pxa_dma->sg_size, &pxa_dma->sg_dma, GFP_KERNEL); if (!pxa_dma->sg_cpu) return -ENOMEM; pxa_dma->sglen = sglen; offset = *sg_first_ofs; dev_dbg(dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n", *sg_first, sglen, *sg_first_ofs, pxa_dma->sg_dma); for_each_sg(*sg_first, sg, sglen, i) { dma_len = sg_dma_len(sg); /* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */ xfer_len = roundup(min(dma_len - 
offset, size), 8); size = max(0, size - xfer_len); pxa_dma->sg_cpu[i].dsadr = pcdev->res->start + cibr; pxa_dma->sg_cpu[i].dtadr = sg_dma_address(sg) + offset; pxa_dma->sg_cpu[i].dcmd = DCMD_FLOWSRC | DCMD_BURST8 | DCMD_INCTRGADDR | xfer_len; #ifdef DEBUG if (!i) pxa_dma->sg_cpu[i].dcmd |= DCMD_STARTIRQEN; #endif pxa_dma->sg_cpu[i].ddadr = pxa_dma->sg_dma + (i + 1) * sizeof(struct pxa_dma_desc); dev_vdbg(dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n", pxa_dma->sg_dma + i * sizeof(struct pxa_dma_desc), sg_dma_address(sg) + offset, xfer_len); offset = 0; if (size == 0) break; } pxa_dma->sg_cpu[sglen].ddadr = DDADR_STOP; pxa_dma->sg_cpu[sglen].dcmd = DCMD_FLOWSRC | DCMD_BURST8 | DCMD_ENDIRQEN; /* * Handle 1 special case : * - in 3 planes (YUV422P format), we might finish with xfer_len equal * to dma_len (end on PAGE boundary). In this case, the sg element * for next plane should be the next after the last used to store the * last scatter gather RAM page */ if (xfer_len >= dma_len) { *sg_first_ofs = xfer_len - dma_len; *sg_first = sg_next(sg); } else { *sg_first_ofs = xfer_len; *sg_first = sg; } return 0; } static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev, struct pxa_buffer *buf) { buf->active_dma = DMA_Y; if (pcdev->channels == 3) buf->active_dma |= DMA_U | DMA_V; } /* * Please check the DMA prepared buffer structure in : * Documentation/video4linux/pxa_camera.txt * Please check also in pxa_camera_check_link_miss() to understand why DMA chain * modification while DMA chain is running will work anyway. 
*/ static int pxa_videobuf_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, enum v4l2_field field) { struct soc_camera_device *icd = vq->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct pxa_camera_dev *pcdev = ici->priv; struct device *dev = pcdev->soc_host.v4l2_dev.dev; struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); int ret; int size_y, size_u = 0, size_v = 0; dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize); /* Added list head initialization on alloc */ WARN_ON(!list_empty(&vb->queue)); #ifdef DEBUG /* * This can be useful if you want to see if we actually fill * the buffer with something */ memset((void *)vb->baddr, 0xaa, vb->bsize); #endif BUG_ON(NULL == icd->current_fmt); /* * I think, in buf_prepare you only have to protect global data, * the actual buffer is yours */ buf->inwork = 1; if (buf->code != icd->current_fmt->code || vb->width != icd->user_width || vb->height != icd->user_height || vb->field != field) { buf->code = icd->current_fmt->code; vb->width = icd->user_width; vb->height = icd->user_height; vb->field = field; vb->state = VIDEOBUF_NEEDS_INIT; } vb->size = icd->sizeimage; if (0 != vb->baddr && vb->bsize < vb->size) { ret = -EINVAL; goto out; } if (vb->state == VIDEOBUF_NEEDS_INIT) { int size = vb->size; int next_ofs = 0; struct videobuf_dmabuf *dma = videobuf_to_dma(vb); struct scatterlist *sg; ret = videobuf_iolock(vq, vb, NULL); if (ret) goto fail; if (pcdev->channels == 3) { size_y = size / 2; size_u = size_v = size / 4; } else { size_y = size; } sg = dma->sglist; /* init DMA for Y channel */ ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0, size_y, &sg, &next_ofs); if (ret) { dev_err(dev, "DMA initialization for Y/RGB failed\n"); goto fail; } /* init DMA for U channel */ if (size_u) ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1, size_u, &sg, &next_ofs); if (ret) { dev_err(dev, "DMA initialization for U failed\n"); goto fail_u; } /* init 
DMA for V channel */ if (size_v) ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2, size_v, &sg, &next_ofs); if (ret) { dev_err(dev, "DMA initialization for V failed\n"); goto fail_v; } vb->state = VIDEOBUF_PREPARED; } buf->inwork = 0; pxa_videobuf_set_actdma(pcdev, buf); return 0; fail_v: dma_free_coherent(dev, buf->dmas[1].sg_size, buf->dmas[1].sg_cpu, buf->dmas[1].sg_dma); fail_u: dma_free_coherent(dev, buf->dmas[0].sg_size, buf->dmas[0].sg_cpu, buf->dmas[0].sg_dma); fail: free_buffer(vq, buf); out: buf->inwork = 0; return ret; } /** * pxa_dma_start_channels - start DMA channel for active buffer * @pcdev: pxa camera device * * Initialize DMA channels to the beginning of the active video buffer, and * start these channels. */ static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev) { int i; struct pxa_buffer *active; active = pcdev->active; for (i = 0; i < pcdev->channels; i++) { dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s (channel=%d) ddadr=%08x\n", __func__, i, active->dmas[i].sg_dma); DDADR(pcdev->dma_chans[i]) = active->dmas[i].sg_dma; DCSR(pcdev->dma_chans[i]) = DCSR_RUN; } } static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev) { int i; for (i = 0; i < pcdev->channels; i++) { dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s (channel=%d)\n", __func__, i); DCSR(pcdev->dma_chans[i]) = 0; } } static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev, struct pxa_buffer *buf) { int i; struct pxa_dma_desc *buf_last_desc; for (i = 0; i < pcdev->channels; i++) { buf_last_desc = buf->dmas[i].sg_cpu + buf->dmas[i].sglen; buf_last_desc->ddadr = DDADR_STOP; if (pcdev->sg_tail[i]) /* Link the new buffer to the old tail */ pcdev->sg_tail[i]->ddadr = buf->dmas[i].sg_dma; /* Update the channel tail */ pcdev->sg_tail[i] = buf_last_desc; } } /** * pxa_camera_start_capture - start video capturing * @pcdev: camera device * * Launch capturing. DMA channels should not be active yet. 
They should get * activated at the end of frame interrupt, to capture only whole frames, and * never begin the capture of a partial frame. */ static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev) { unsigned long cicr0; dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__); /* Enable End-Of-Frame Interrupt */ cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB; cicr0 &= ~CICR0_EOFM; __raw_writel(cicr0, pcdev->base + CICR0); } static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev) { unsigned long cicr0; pxa_dma_stop_channels(pcdev); cicr0 = __raw_readl(pcdev->base + CICR0) & ~CICR0_ENB; __raw_writel(cicr0, pcdev->base + CICR0); pcdev->active = NULL; dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__); } /* Called under spinlock_irqsave(&pcdev->lock, ...) */ static void pxa_videobuf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct soc_camera_device *icd = vq->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct pxa_camera_dev *pcdev = ici->priv; struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n", __func__, vb, vb->baddr, vb->bsize, pcdev->active); list_add_tail(&vb->queue, &pcdev->capture); vb->state = VIDEOBUF_ACTIVE; pxa_dma_add_tail_buf(pcdev, buf); if (!pcdev->active) pxa_camera_start_capture(pcdev); } static void pxa_videobuf_release(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); #ifdef DEBUG struct soc_camera_device *icd = vq->priv_data; struct device *dev = icd->parent; dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize); switch (vb->state) { case VIDEOBUF_ACTIVE: dev_dbg(dev, "%s (active)\n", __func__); break; case VIDEOBUF_QUEUED: dev_dbg(dev, "%s (queued)\n", __func__); break; case VIDEOBUF_PREPARED: dev_dbg(dev, "%s (prepared)\n", __func__); break; default: dev_dbg(dev, "%s (unknown)\n", __func__); break; 
} #endif free_buffer(vq, buf); } static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev, struct videobuf_buffer *vb, struct pxa_buffer *buf) { int i; /* _init is used to debug races, see comment in pxa_camera_reqbufs() */ list_del_init(&vb->queue); vb->state = VIDEOBUF_DONE; v4l2_get_timestamp(&vb->ts); vb->field_count++; wake_up(&vb->done); dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s dequeud buffer (vb=0x%p)\n", __func__, vb); if (list_empty(&pcdev->capture)) { pxa_camera_stop_capture(pcdev); for (i = 0; i < pcdev->channels; i++) pcdev->sg_tail[i] = NULL; return; } pcdev->active = list_entry(pcdev->capture.next, struct pxa_buffer, vb.queue); } /** * pxa_camera_check_link_miss - check missed DMA linking * @pcdev: camera device * * The DMA chaining is done with DMA running. This means a tiny temporal window * remains, where a buffer is queued on the chain, while the chain is already * stopped. This means the tailed buffer would never be transferred by DMA. * This function restarts the capture for this corner case, where : * - DADR() == DADDR_STOP * - a videobuffer is queued on the pcdev->capture list * * Please check the "DMA hot chaining timeslice issue" in * Documentation/video4linux/pxa_camera.txt * * Context: should only be called within the dma irq handler */ static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev) { int i, is_dma_stopped = 1; for (i = 0; i < pcdev->channels; i++) if (DDADR(pcdev->dma_chans[i]) != DDADR_STOP) is_dma_stopped = 0; dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s : top queued buffer=%p, dma_stopped=%d\n", __func__, pcdev->active, is_dma_stopped); if (pcdev->active && is_dma_stopped) pxa_camera_start_capture(pcdev); } static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev, enum pxa_camera_active_dma act_dma) { struct device *dev = pcdev->soc_host.v4l2_dev.dev; struct pxa_buffer *buf; unsigned long flags; u32 status, camera_status, overrun; struct videobuf_buffer *vb; spin_lock_irqsave(&pcdev->lock, flags); 
status = DCSR(channel); DCSR(channel) = status; camera_status = __raw_readl(pcdev->base + CISR); overrun = CISR_IFO_0; if (pcdev->channels == 3) overrun |= CISR_IFO_1 | CISR_IFO_2; if (status & DCSR_BUSERR) { dev_err(dev, "DMA Bus Error IRQ!\n"); goto out; } if (!(status & (DCSR_ENDINTR | DCSR_STARTINTR))) { dev_err(dev, "Unknown DMA IRQ source, status: 0x%08x\n", status); goto out; } /* * pcdev->active should not be NULL in DMA irq handler. * * But there is one corner case : if capture was stopped due to an * overrun of channel 1, and at that same channel 2 was completed. * * When handling the overrun in DMA irq for channel 1, we'll stop the * capture and restart it (and thus set pcdev->active to NULL). But the * DMA irq handler will already be pending for channel 2. So on entering * the DMA irq handler for channel 2 there will be no active buffer, yet * that is normal. */ if (!pcdev->active) goto out; vb = &pcdev->active->vb; buf = container_of(vb, struct pxa_buffer, vb); WARN_ON(buf->inwork || list_empty(&vb->queue)); dev_dbg(dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n", __func__, channel, status & DCSR_STARTINTR ? "SOF " : "", status & DCSR_ENDINTR ? "EOF " : "", vb, DDADR(channel)); if (status & DCSR_ENDINTR) { /* * It's normal if the last frame creates an overrun, as there * are no more DMA descriptors to fetch from QCI fifos */ if (camera_status & overrun && !list_is_last(pcdev->capture.next, &pcdev->capture)) { dev_dbg(dev, "FIFO overrun! 
CISR: %x\n", camera_status); pxa_camera_stop_capture(pcdev); pxa_camera_start_capture(pcdev); goto out; } buf->active_dma &= ~act_dma; if (!buf->active_dma) { pxa_camera_wakeup(pcdev, vb, buf); pxa_camera_check_link_miss(pcdev); } } out: spin_unlock_irqrestore(&pcdev->lock, flags); } static void pxa_camera_dma_irq_y(int channel, void *data) { struct pxa_camera_dev *pcdev = data; pxa_camera_dma_irq(channel, pcdev, DMA_Y); } static void pxa_camera_dma_irq_u(int channel, void *data) { struct pxa_camera_dev *pcdev = data; pxa_camera_dma_irq(channel, pcdev, DMA_U); } static void pxa_camera_dma_irq_v(int channel, void *data) { struct pxa_camera_dev *pcdev = data; pxa_camera_dma_irq(channel, pcdev, DMA_V); } static struct videobuf_queue_ops pxa_videobuf_ops = { .buf_setup = pxa_videobuf_setup, .buf_prepare = pxa_videobuf_prepare, .buf_queue = pxa_videobuf_queue, .buf_release = pxa_videobuf_release, }; static void pxa_camera_init_videobuf(struct videobuf_queue *q, struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct pxa_camera_dev *pcdev = ici->priv; /* * We must pass NULL as dev pointer, then all pci_* dma operations * transform to normal dma_* ones. 
*/ videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, sizeof(struct pxa_buffer), icd, &ici->host_lock); } static u32 mclk_get_divisor(struct platform_device *pdev, struct pxa_camera_dev *pcdev) { unsigned long mclk = pcdev->mclk; struct device *dev = &pdev->dev; u32 div; unsigned long lcdclk; lcdclk = clk_get_rate(pcdev->clk); pcdev->ciclk = lcdclk; /* mclk <= ciclk / 4 (27.4.2) */ if (mclk > lcdclk / 4) { mclk = lcdclk / 4; dev_warn(dev, "Limiting master clock to %lu\n", mclk); } /* We verify mclk != 0, so if anyone breaks it, here comes their Oops */ div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1; /* If we're not supplying MCLK, leave it at 0 */ if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) pcdev->mclk = lcdclk / (2 * (div + 1)); dev_dbg(dev, "LCD clock %luHz, target freq %luHz, divisor %u\n", lcdclk, mclk, div); return div; } static void recalculate_fifo_timeout(struct pxa_camera_dev *pcdev, unsigned long pclk) { /* We want a timeout > 1 pixel time, not ">=" */ u32 ciclk_per_pixel = pcdev->ciclk / pclk + 1; __raw_writel(ciclk_per_pixel, pcdev->base + CITOR); } static void pxa_camera_activate(struct pxa_camera_dev *pcdev) { u32 cicr4 = 0; /* disable all interrupts */ __raw_writel(0x3ff, pcdev->base + CICR0); if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN) cicr4 |= CICR4_PCLK_EN; if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) cicr4 |= CICR4_MCLK_EN; if (pcdev->platform_flags & PXA_CAMERA_PCP) cicr4 |= CICR4_PCP; if (pcdev->platform_flags & PXA_CAMERA_HSP) cicr4 |= CICR4_HSP; if (pcdev->platform_flags & PXA_CAMERA_VSP) cicr4 |= CICR4_VSP; __raw_writel(pcdev->mclk_divisor | cicr4, pcdev->base + CICR4); if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) /* Initialise the timeout under the assumption pclk = mclk */ recalculate_fifo_timeout(pcdev, pcdev->mclk); else /* "Safe default" - 13MHz */ recalculate_fifo_timeout(pcdev, 13000000); clk_prepare_enable(pcdev->clk); } static void pxa_camera_deactivate(struct 
pxa_camera_dev *pcdev) { clk_disable_unprepare(pcdev->clk); } static irqreturn_t pxa_camera_irq(int irq, void *data) { struct pxa_camera_dev *pcdev = data; unsigned long status, cifr, cicr0; struct pxa_buffer *buf; struct videobuf_buffer *vb; status = __raw_readl(pcdev->base + CISR); dev_dbg(pcdev->soc_host.v4l2_dev.dev, "Camera interrupt status 0x%lx\n", status); if (!status) return IRQ_NONE; __raw_writel(status, pcdev->base + CISR); if (status & CISR_EOF) { /* Reset the FIFOs */ cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F; __raw_writel(cifr, pcdev->base + CIFR); pcdev->active = list_first_entry(&pcdev->capture, struct pxa_buffer, vb.queue); vb = &pcdev->active->vb; buf = container_of(vb, struct pxa_buffer, vb); pxa_videobuf_set_actdma(pcdev, buf); pxa_dma_start_channels(pcdev); cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_EOFM; __raw_writel(cicr0, pcdev->base + CICR0); } return IRQ_HANDLED; } static int pxa_camera_add_device(struct soc_camera_device *icd) { dev_info(icd->parent, "PXA Camera driver attached to camera %d\n", icd->devnum); return 0; } static void pxa_camera_remove_device(struct soc_camera_device *icd) { dev_info(icd->parent, "PXA Camera driver detached from camera %d\n", icd->devnum); } /* * The following two functions absolutely depend on the fact, that * there can be only one camera on PXA quick capture interface * Called with .host_lock held */ static int pxa_camera_clock_start(struct soc_camera_host *ici) { struct pxa_camera_dev *pcdev = ici->priv; pxa_camera_activate(pcdev); return 0; } /* Called with .host_lock held */ static void pxa_camera_clock_stop(struct soc_camera_host *ici) { struct pxa_camera_dev *pcdev = ici->priv; /* disable capture, disable interrupts */ __raw_writel(0x3ff, pcdev->base + CICR0); /* Stop DMA engine */ DCSR(pcdev->dma_chans[0]) = 0; DCSR(pcdev->dma_chans[1]) = 0; DCSR(pcdev->dma_chans[2]) = 0; pxa_camera_deactivate(pcdev); } static int test_platform_param(struct pxa_camera_dev *pcdev, unsigned char 
buswidth, unsigned long *flags) { /* * Platform specified synchronization and pixel clock polarities are * only a recommendation and are only used during probing. The PXA270 * quick capture interface supports both. */ *flags = (pcdev->platform_flags & PXA_CAMERA_MASTER ? V4L2_MBUS_MASTER : V4L2_MBUS_SLAVE) | V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW | V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW | V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING; /* If requested data width is supported by the platform, use it */ if ((1 << (buswidth - 1)) & pcdev->width_flags) return 0; return -EINVAL; } static void pxa_camera_setup_cicr(struct soc_camera_device *icd, unsigned long flags, __u32 pixfmt) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct pxa_camera_dev *pcdev = ici->priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); unsigned long dw, bpp; u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0, y_skip_top; int ret = v4l2_subdev_call(sd, sensor, g_skip_top_lines, &y_skip_top); if (ret < 0) y_skip_top = 0; /* * Datawidth is now guaranteed to be equal to one of the three values. * We fix bit-per-pixel equal to data-width... 
*/ switch (icd->current_fmt->host_fmt->bits_per_sample) { case 10: dw = 4; bpp = 0x40; break; case 9: dw = 3; bpp = 0x20; break; default: /* * Actually it can only be 8 now, * default is just to silence compiler warnings */ case 8: dw = 2; bpp = 0; } if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN) cicr4 |= CICR4_PCLK_EN; if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) cicr4 |= CICR4_MCLK_EN; if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING) cicr4 |= CICR4_PCP; if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) cicr4 |= CICR4_HSP; if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW) cicr4 |= CICR4_VSP; cicr0 = __raw_readl(pcdev->base + CICR0); if (cicr0 & CICR0_ENB) __raw_writel(cicr0 & ~CICR0_ENB, pcdev->base + CICR0); cicr1 = CICR1_PPL_VAL(icd->user_width - 1) | bpp | dw; switch (pixfmt) { case V4L2_PIX_FMT_YUV422P: pcdev->channels = 3; cicr1 |= CICR1_YCBCR_F; /* * Normally, pxa bus wants as input UYVY format. We allow all * reorderings of the YUV422 format, as no processing is done, * and the YUV stream is just passed through without any * transformation. Note that UYVY is the only format that * should be used if pxa framebuffer Overlay2 is used. */ case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_VYUY: case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_YVYU: cicr1 |= CICR1_COLOR_SP_VAL(2); break; case V4L2_PIX_FMT_RGB555: cicr1 |= CICR1_RGB_BPP_VAL(1) | CICR1_RGBT_CONV_VAL(2) | CICR1_TBIT | CICR1_COLOR_SP_VAL(1); break; case V4L2_PIX_FMT_RGB565: cicr1 |= CICR1_COLOR_SP_VAL(1) | CICR1_RGB_BPP_VAL(2); break; } cicr2 = 0; cicr3 = CICR3_LPF_VAL(icd->user_height - 1) | CICR3_BFW_VAL(min((u32)255, y_skip_top)); cicr4 |= pcdev->mclk_divisor; __raw_writel(cicr1, pcdev->base + CICR1); __raw_writel(cicr2, pcdev->base + CICR2); __raw_writel(cicr3, pcdev->base + CICR3); __raw_writel(cicr4, pcdev->base + CICR4); /* CIF interrupts are not used, only DMA */ cicr0 = (cicr0 & CICR0_ENB) | (pcdev->platform_flags & PXA_CAMERA_MASTER ? 
CICR0_SIM_MP : (CICR0_SL_CAP_EN | CICR0_SIM_SP)); cicr0 |= CICR0_DMAEN | CICR0_IRQ_MASK; __raw_writel(cicr0, pcdev->base + CICR0); } static int pxa_camera_set_bus_param(struct soc_camera_device *icd) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct pxa_camera_dev *pcdev = ici->priv; struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; u32 pixfmt = icd->current_fmt->host_fmt->fourcc; unsigned long bus_flags, common_flags; int ret; struct pxa_cam *cam = icd->host_priv; ret = test_platform_param(pcdev, icd->current_fmt->host_fmt->bits_per_sample, &bus_flags); if (ret < 0) return ret; ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); if (!ret) { common_flags = soc_mbus_config_compatible(&cfg, bus_flags); if (!common_flags) { dev_warn(icd->parent, "Flags incompatible: camera 0x%x, host 0x%lx\n", cfg.flags, bus_flags); return -EINVAL; } } else if (ret != -ENOIOCTLCMD) { return ret; } else { common_flags = bus_flags; } pcdev->channels = 1; /* Make choises, based on platform preferences */ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) && (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) { if (pcdev->platform_flags & PXA_CAMERA_HSP) common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH; else common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW; } if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) && (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) { if (pcdev->platform_flags & PXA_CAMERA_VSP) common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH; else common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW; } if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) && (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) { if (pcdev->platform_flags & PXA_CAMERA_PCP) common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING; else common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING; } cfg.flags = common_flags; ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg); if (ret < 0 && ret != -ENOIOCTLCMD) { dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned 
%d\n", common_flags, ret); return ret; } cam->flags = common_flags; pxa_camera_setup_cicr(icd, common_flags, pixfmt); return 0; } static int pxa_camera_try_bus_param(struct soc_camera_device *icd, unsigned char buswidth) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct pxa_camera_dev *pcdev = ici->priv; struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; unsigned long bus_flags, common_flags; int ret = test_platform_param(pcdev, buswidth, &bus_flags); if (ret < 0) return ret; ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); if (!ret) { common_flags = soc_mbus_config_compatible(&cfg, bus_flags); if (!common_flags) { dev_warn(icd->parent, "Flags incompatible: camera 0x%x, host 0x%lx\n", cfg.flags, bus_flags); return -EINVAL; } } else if (ret == -ENOIOCTLCMD) { ret = 0; } return ret; } static const struct soc_mbus_pixelfmt pxa_camera_formats[] = { { .fourcc = V4L2_PIX_FMT_YUV422P, .name = "Planar YUV422 16 bit", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_U_V, }, }; /* This will be corrected as we get more formats */ static bool pxa_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt) { return fmt->packing == SOC_MBUS_PACKING_NONE || (fmt->bits_per_sample == 8 && fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || (fmt->bits_per_sample > 8 && fmt->packing == SOC_MBUS_PACKING_EXTEND16); } static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->parent; int formats = 0, ret; struct pxa_cam *cam; enum v4l2_mbus_pixelcode code; const struct soc_mbus_pixelfmt *fmt; ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); if (ret < 0) /* No more formats */ return 0; fmt = soc_mbus_get_fmtdesc(code); if (!fmt) { dev_err(dev, "Invalid format code #%u: 
%d\n", idx, code); return 0; } /* This also checks support for the requested bits-per-sample */ ret = pxa_camera_try_bus_param(icd, fmt->bits_per_sample); if (ret < 0) return 0; if (!icd->host_priv) { cam = kzalloc(sizeof(*cam), GFP_KERNEL); if (!cam) return -ENOMEM; icd->host_priv = cam; } else { cam = icd->host_priv; } switch (code) { case V4L2_MBUS_FMT_UYVY8_2X8: formats++; if (xlate) { xlate->host_fmt = &pxa_camera_formats[0]; xlate->code = code; xlate++; dev_dbg(dev, "Providing format %s using code %d\n", pxa_camera_formats[0].name, code); } case V4L2_MBUS_FMT_VYUY8_2X8: case V4L2_MBUS_FMT_YUYV8_2X8: case V4L2_MBUS_FMT_YVYU8_2X8: case V4L2_MBUS_FMT_RGB565_2X8_LE: case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: if (xlate) dev_dbg(dev, "Providing format %s packed\n", fmt->name); break; default: if (!pxa_camera_packing_supported(fmt)) return 0; if (xlate) dev_dbg(dev, "Providing format %s in pass-through mode\n", fmt->name); } /* Generic pass-through */ formats++; if (xlate) { xlate->host_fmt = fmt; xlate->code = code; xlate++; } return formats; } static void pxa_camera_put_formats(struct soc_camera_device *icd) { kfree(icd->host_priv); icd->host_priv = NULL; } static int pxa_camera_check_frame(u32 width, u32 height) { /* limit to pxa hardware capabilities */ return height < 32 || height > 2048 || width < 48 || width > 2048 || (width & 0x01); } static int pxa_camera_set_crop(struct soc_camera_device *icd, const struct v4l2_crop *a) { const struct v4l2_rect *rect = &a->c; struct device *dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct pxa_camera_dev *pcdev = ici->priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_sense sense = { .master_clock = pcdev->mclk, .pixel_clock_max = pcdev->ciclk / 4, }; struct v4l2_mbus_framefmt mf; struct pxa_cam *cam = icd->host_priv; u32 fourcc = icd->current_fmt->host_fmt->fourcc; int ret; /* If PCLK is used to latch data from the sensor, check sense */ if (pcdev->platform_flags & 
PXA_CAMERA_PCLK_EN) icd->sense = &sense; ret = v4l2_subdev_call(sd, video, s_crop, a); icd->sense = NULL; if (ret < 0) { dev_warn(dev, "Failed to crop to %ux%u@%u:%u\n", rect->width, rect->height, rect->left, rect->top); return ret; } ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; if (pxa_camera_check_frame(mf.width, mf.height)) { /* * Camera cropping produced a frame beyond our capabilities. * FIXME: just extract a subframe, that we can process. */ v4l_bound_align_image(&mf.width, 48, 2048, 1, &mf.height, 32, 2048, 0, fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0); ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf); if (ret < 0) return ret; if (pxa_camera_check_frame(mf.width, mf.height)) { dev_warn(icd->parent, "Inconsistent state. Use S_FMT to repair\n"); return -EINVAL; } } if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { if (sense.pixel_clock > sense.pixel_clock_max) { dev_err(dev, "pixel clock %lu set by the camera too high!", sense.pixel_clock); return -EIO; } recalculate_fifo_timeout(pcdev, sense.pixel_clock); } icd->user_width = mf.width; icd->user_height = mf.height; pxa_camera_setup_cicr(icd, cam->flags, fourcc); return ret; } static int pxa_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct device *dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct pxa_camera_dev *pcdev = ici->priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate = NULL; struct soc_camera_sense sense = { .master_clock = pcdev->mclk, .pixel_clock_max = pcdev->ciclk / 4, }; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); if (!xlate) { dev_warn(dev, "Format %x not found\n", pix->pixelformat); return -EINVAL; } /* If PCLK is used to latch data from the sensor, check sense */ if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN) /* The caller holds a mutex. 
*/ icd->sense = &sense; mf.width = pix->width; mf.height = pix->height; mf.field = pix->field; mf.colorspace = pix->colorspace; mf.code = xlate->code; ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf); if (mf.code != xlate->code) return -EINVAL; icd->sense = NULL; if (ret < 0) { dev_warn(dev, "Failed to configure for format %x\n", pix->pixelformat); } else if (pxa_camera_check_frame(mf.width, mf.height)) { dev_warn(dev, "Camera driver produced an unsupported frame %dx%d\n", mf.width, mf.height); ret = -EINVAL; } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { if (sense.pixel_clock > sense.pixel_clock_max) { dev_err(dev, "pixel clock %lu set by the camera too high!", sense.pixel_clock); return -EIO; } recalculate_fifo_timeout(pcdev, sense.pixel_clock); } if (ret < 0) return ret; pix->width = mf.width; pix->height = mf.height; pix->field = mf.field; pix->colorspace = mf.colorspace; icd->current_fmt = xlate; return ret; } static int pxa_camera_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; __u32 pixfmt = pix->pixelformat; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { dev_warn(icd->parent, "Format %x not found\n", pixfmt); return -EINVAL; } /* * Limit to pxa hardware capabilities. YUV422P planar format requires * images size to be a multiple of 16 bytes. If not, zeros will be * inserted between Y and U planes, and U and V planes, which violates * the YUV422P standard. */ v4l_bound_align_image(&pix->width, 48, 2048, 1, &pix->height, 32, 2048, 0, pixfmt == V4L2_PIX_FMT_YUV422P ? 
4 : 0); /* limit to sensor capabilities */ mf.width = pix->width; mf.height = pix->height; /* Only progressive video supported so far */ mf.field = V4L2_FIELD_NONE; mf.colorspace = pix->colorspace; mf.code = xlate->code; ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf); if (ret < 0) return ret; pix->width = mf.width; pix->height = mf.height; pix->colorspace = mf.colorspace; switch (mf.field) { case V4L2_FIELD_ANY: case V4L2_FIELD_NONE: pix->field = V4L2_FIELD_NONE; break; default: /* TODO: support interlaced at least in pass-through mode */ dev_err(icd->parent, "Field type %d unsupported.\n", mf.field); return -EINVAL; } return ret; } static int pxa_camera_reqbufs(struct soc_camera_device *icd, struct v4l2_requestbuffers *p) { int i; /* * This is for locking debugging only. I removed spinlocks and now I * check whether .prepare is ever called on a linked buffer, or whether * a dma IRQ can occur for an in-work or unlinked buffer. Until now * it hadn't triggered */ for (i = 0; i < p->count; i++) { struct pxa_buffer *buf = container_of(icd->vb_vidq.bufs[i], struct pxa_buffer, vb); buf->inwork = 0; INIT_LIST_HEAD(&buf->vb.queue); } return 0; } static unsigned int pxa_camera_poll(struct file *file, poll_table *pt) { struct soc_camera_device *icd = file->private_data; struct pxa_buffer *buf; buf = list_entry(icd->vb_vidq.stream.next, struct pxa_buffer, vb.stream); poll_wait(file, &buf->vb.done, pt); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) return POLLIN|POLLRDNORM; return 0; } static int pxa_camera_querycap(struct soc_camera_host *ici, struct v4l2_capability *cap) { /* cap->name is set by the firendly caller:-> */ strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card)); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; return 0; } static int pxa_camera_suspend(struct device *dev) { struct soc_camera_host *ici = to_soc_camera_host(dev); struct pxa_camera_dev *pcdev = ici->priv; int i = 0, ret = 0; 
pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR0); pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR1); pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR2); pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3); pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4); if (pcdev->soc_host.icd) { struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->soc_host.icd); ret = v4l2_subdev_call(sd, core, s_power, 0); if (ret == -ENOIOCTLCMD) ret = 0; } return ret; } static int pxa_camera_resume(struct device *dev) { struct soc_camera_host *ici = to_soc_camera_host(dev); struct pxa_camera_dev *pcdev = ici->priv; int i = 0, ret = 0; DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD; DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD; DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD; __raw_writel(pcdev->save_cicr[i++] & ~CICR0_ENB, pcdev->base + CICR0); __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR1); __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR2); __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3); __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4); if (pcdev->soc_host.icd) { struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->soc_host.icd); ret = v4l2_subdev_call(sd, core, s_power, 1); if (ret == -ENOIOCTLCMD) ret = 0; } /* Restart frame capture if active buffer exists */ if (!ret && pcdev->active) pxa_camera_start_capture(pcdev); return ret; } static struct soc_camera_host_ops pxa_soc_camera_host_ops = { .owner = THIS_MODULE, .add = pxa_camera_add_device, .remove = pxa_camera_remove_device, .clock_start = pxa_camera_clock_start, .clock_stop = pxa_camera_clock_stop, .set_crop = pxa_camera_set_crop, .get_formats = pxa_camera_get_formats, .put_formats = pxa_camera_put_formats, .set_fmt = pxa_camera_set_fmt, .try_fmt = pxa_camera_try_fmt, .init_videobuf = pxa_camera_init_videobuf, .reqbufs = pxa_camera_reqbufs, .poll = pxa_camera_poll, .querycap = pxa_camera_querycap, .set_bus_param = pxa_camera_set_bus_param, }; static int 
pxa_camera_probe(struct platform_device *pdev) { struct pxa_camera_dev *pcdev; struct resource *res; void __iomem *base; int irq; int err = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || irq < 0) return -ENODEV; pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL); if (!pcdev) { dev_err(&pdev->dev, "Could not allocate pcdev\n"); return -ENOMEM; } pcdev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pcdev->clk)) return PTR_ERR(pcdev->clk); pcdev->res = res; pcdev->pdata = pdev->dev.platform_data; pcdev->platform_flags = pcdev->pdata->flags; if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 | PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) { /* * Platform hasn't set available data widths. This is bad. * Warn and use a default. */ dev_warn(&pdev->dev, "WARNING! Platform hasn't set available " "data widths, using default 10 bit\n"); pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10; } if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8) pcdev->width_flags = 1 << 7; if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9) pcdev->width_flags |= 1 << 8; if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10) pcdev->width_flags |= 1 << 9; pcdev->mclk = pcdev->pdata->mclk_10khz * 10000; if (!pcdev->mclk) { dev_warn(&pdev->dev, "mclk == 0! Please, fix your platform data. " "Using default 20MHz\n"); pcdev->mclk = 20000000; } pcdev->mclk_divisor = mclk_get_divisor(pdev, pcdev); INIT_LIST_HEAD(&pcdev->capture); spin_lock_init(&pcdev->lock); /* * Request the regions. 
*/ base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); pcdev->irq = irq; pcdev->base = base; /* request dma */ err = pxa_request_dma("CI_Y", DMA_PRIO_HIGH, pxa_camera_dma_irq_y, pcdev); if (err < 0) { dev_err(&pdev->dev, "Can't request DMA for Y\n"); return err; } pcdev->dma_chans[0] = err; dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]); err = pxa_request_dma("CI_U", DMA_PRIO_HIGH, pxa_camera_dma_irq_u, pcdev); if (err < 0) { dev_err(&pdev->dev, "Can't request DMA for U\n"); goto exit_free_dma_y; } pcdev->dma_chans[1] = err; dev_dbg(&pdev->dev, "got DMA channel (U) %d\n", pcdev->dma_chans[1]); err = pxa_request_dma("CI_V", DMA_PRIO_HIGH, pxa_camera_dma_irq_v, pcdev); if (err < 0) { dev_err(&pdev->dev, "Can't request DMA for V\n"); goto exit_free_dma_u; } pcdev->dma_chans[2] = err; dev_dbg(&pdev->dev, "got DMA channel (V) %d\n", pcdev->dma_chans[2]); DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD; DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD; DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD; /* request irq */ err = devm_request_irq(&pdev->dev, pcdev->irq, pxa_camera_irq, 0, PXA_CAM_DRV_NAME, pcdev); if (err) { dev_err(&pdev->dev, "Camera interrupt register failed\n"); goto exit_free_dma; } pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME; pcdev->soc_host.ops = &pxa_soc_camera_host_ops; pcdev->soc_host.priv = pcdev; pcdev->soc_host.v4l2_dev.dev = &pdev->dev; pcdev->soc_host.nr = pdev->id; err = soc_camera_host_register(&pcdev->soc_host); if (err) goto exit_free_dma; return 0; exit_free_dma: pxa_free_dma(pcdev->dma_chans[2]); exit_free_dma_u: pxa_free_dma(pcdev->dma_chans[1]); exit_free_dma_y: pxa_free_dma(pcdev->dma_chans[0]); return err; } static int pxa_camera_remove(struct platform_device *pdev) { struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); struct pxa_camera_dev *pcdev = container_of(soc_host, struct pxa_camera_dev, soc_host); pxa_free_dma(pcdev->dma_chans[0]); 
pxa_free_dma(pcdev->dma_chans[1]); pxa_free_dma(pcdev->dma_chans[2]); soc_camera_host_unregister(soc_host); dev_info(&pdev->dev, "PXA Camera driver unloaded\n"); return 0; } static const struct dev_pm_ops pxa_camera_pm = { .suspend = pxa_camera_suspend, .resume = pxa_camera_resume, }; static struct platform_driver pxa_camera_driver = { .driver = { .name = PXA_CAM_DRV_NAME, .pm = &pxa_camera_pm, }, .probe = pxa_camera_probe, .remove = pxa_camera_remove, }; module_platform_driver(pxa_camera_driver); MODULE_DESCRIPTION("PXA27x SoC Camera Host driver"); MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>"); MODULE_LICENSE("GPL"); MODULE_VERSION(PXA_CAM_VERSION); MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME);
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHW-M130K
drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
855
90821
/* IEEE 802.11 SoftMAC layer * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it> * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. * * Few lines might be stolen from other part of the ieee80211 * stack. Copyright who own it's copyright * * WPA code stolen from the ipw2200 driver. * Copyright who own it's copyright. * * released under the GPL */ #include "ieee80211.h" #include <linux/random.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/version.h> #include <asm/uaccess.h> #ifdef ENABLE_DOT11D #include "dot11d.h" #endif u8 rsn_authen_cipher_suite[16][4] = { {0x00,0x0F,0xAC,0x00}, //Use group key, //Reserved {0x00,0x0F,0xAC,0x01}, //WEP-40 //RSNA default {0x00,0x0F,0xAC,0x02}, //TKIP //NONE //{used just as default} {0x00,0x0F,0xAC,0x03}, //WRAP-historical {0x00,0x0F,0xAC,0x04}, //CCMP {0x00,0x0F,0xAC,0x05}, //WEP-104 }; short ieee80211_is_54g(struct ieee80211_network net) { return ((net.rates_ex_len > 0) || (net.rates_len > 4)); } short ieee80211_is_shortslot(struct ieee80211_network net) { return (net.capability & WLAN_CAPABILITY_SHORT_SLOT); } /* returns the total length needed for pleacing the RATE MFIE * tag and the EXTENDED RATE MFIE tag if needed. * It encludes two bytes per tag for the tag itself and its len */ unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee) { unsigned int rate_len = 0; if (ieee->modulation & IEEE80211_CCK_MODULATION) rate_len = IEEE80211_CCK_RATE_LEN + 2; if (ieee->modulation & IEEE80211_OFDM_MODULATION) rate_len += IEEE80211_OFDM_RATE_LEN + 2; return rate_len; } /* pleace the MFIE rate, tag to the memory (double) poined. * Then it updates the pointer so that * it points after the new MFIE tag added. 
*/ void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p) { u8 *tag = *tag_p; if (ieee->modulation & IEEE80211_CCK_MODULATION){ *tag++ = MFIE_TYPE_RATES; *tag++ = 4; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB; } /* We may add an option for custom rates that specific HW might support */ *tag_p = tag; } void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p) { u8 *tag = *tag_p; if (ieee->modulation & IEEE80211_OFDM_MODULATION){ *tag++ = MFIE_TYPE_RATES_EX; *tag++ = 8; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB; } /* We may add an option for custom rates that specific HW might support */ *tag_p = tag; } void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p) { u8 *tag = *tag_p; *tag++ = MFIE_TYPE_GENERIC; //0 *tag++ = 7; *tag++ = 0x00; *tag++ = 0x50; *tag++ = 0xf2; *tag++ = 0x02;//5 *tag++ = 0x00; *tag++ = 0x01; #ifdef SUPPORT_USPD if(ieee->current_network.wmm_info & 0x80) { *tag++ = 0x0f|MAX_SP_Len; } else { *tag++ = MAX_SP_Len; } #else *tag++ = MAX_SP_Len; #endif *tag_p = tag; } #ifdef THOMAS_TURBO void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p) { u8 *tag = *tag_p; *tag++ = MFIE_TYPE_GENERIC; //0 *tag++ = 7; *tag++ = 0x00; *tag++ = 0xe0; *tag++ = 0x4c; *tag++ = 0x01;//5 *tag++ = 0x02; *tag++ = 0x11; *tag++ = 0x00; *tag_p = tag; printk(KERN_ALERT "This is enable 
turbo mode IE process\n"); } #endif void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb) { int nh; nh = (ieee->mgmt_queue_head +1) % MGMT_QUEUE_NUM; /* * if the queue is full but we have newer frames then * just overwrites the oldest. * * if (nh == ieee->mgmt_queue_tail) * return -1; */ ieee->mgmt_queue_head = nh; ieee->mgmt_queue_ring[nh] = skb; //return 0; } struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee) { struct sk_buff *ret; if(ieee->mgmt_queue_tail == ieee->mgmt_queue_head) return NULL; ret = ieee->mgmt_queue_ring[ieee->mgmt_queue_tail]; ieee->mgmt_queue_tail = (ieee->mgmt_queue_tail+1) % MGMT_QUEUE_NUM; return ret; } void init_mgmt_queue(struct ieee80211_device *ieee) { ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0; } u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; u8 rate; // 2008/01/25 MH For broadcom, MGNT frame set as OFDM 6M. if(pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M) rate = 0x0c; else rate = ieee->basic_rate & 0x7f; if(rate == 0){ // 2005.01.26, by rcnjko. if(ieee->mode == IEEE_A|| ieee->mode== IEEE_N_5G|| (ieee->mode== IEEE_N_24G&&!pHTInfo->bCurSuppCCK)) rate = 0x0c; else rate = 0x02; } /* // Data rate of ProbeReq is already decided. 
Annie, 2005-03-31 if( pMgntInfo->bScanInProgress || (pMgntInfo->bDualModeScanStep!=0) ) { if(pMgntInfo->dot11CurrentWirelessMode==WIRELESS_MODE_A) rate = 0x0c; else rate = 0x02; } */ return rate; } void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl); inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee) { unsigned long flags; short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE; struct ieee80211_hdr_3addr *header= (struct ieee80211_hdr_3addr *) skb->data; cb_desc *tcb_desc = (cb_desc *)(skb->cb + 8); spin_lock_irqsave(&ieee->lock, flags); /* called with 2nd param 0, no mgmt lock required */ ieee80211_sta_wakeup(ieee,0); tcb_desc->queue_index = MGNT_QUEUE; tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee); tcb_desc->RATRIndex = 7; tcb_desc->bTxDisableRateFallBack = 1; tcb_desc->bTxUseDriverAssingedRate = 1; if(single){ if(ieee->queue_stop){ enqueue_mgmt(ieee,skb); }else{ header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4); if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; /* avoid watchdog triggers */ ieee->dev->trans_start = jiffies; ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate); //dev_kfree_skb_any(skb);//edit by thomas } spin_unlock_irqrestore(&ieee->lock, flags); }else{ spin_unlock_irqrestore(&ieee->lock, flags); spin_lock_irqsave(&ieee->mgmt_tx_lock, flags); header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; /* check wether the managed packet queued greater than 5 */ if(!ieee->check_nic_enough_desc(ieee->dev,tcb_desc->queue_index)||\ (skb_queue_len(&ieee->skb_waitQ[tcb_desc->queue_index]) != 0)||\ (ieee->queue_stop) ) { /* insert the skb packet to the management queue */ /* as for the completion function, it does not need * to check it any more. 
* */ printk("%s():insert to waitqueue!\n",__FUNCTION__); skb_queue_tail(&ieee->skb_waitQ[tcb_desc->queue_index], skb); } else { //printk("TX packet!\n"); ieee->softmac_hard_start_xmit(skb,ieee->dev); //dev_kfree_skb_any(skb);//edit by thomas } spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags); } } inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee) { short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE; struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data; if(single){ header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; /* avoid watchdog triggers */ ieee->dev->trans_start = jiffies; ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate); }else{ header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; ieee->softmac_hard_start_xmit(skb,ieee->dev); } //dev_kfree_skb_any(skb);//edit by thomas } inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee) { unsigned int len,rate_len; u8 *tag; struct sk_buff *skb; struct ieee80211_probe_request *req; len = ieee->current_network.ssid_len; rate_len = ieee80211_MFIE_rate_len(ieee); skb = dev_alloc_skb(sizeof(struct ieee80211_probe_request) + 2 + len + rate_len + ieee->tx_headroom); if (!skb) return NULL; skb_reserve(skb, ieee->tx_headroom); req = (struct ieee80211_probe_request *) skb_put(skb,sizeof(struct ieee80211_probe_request)); req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); req->header.duration_id = 0; //FIXME: is this OK ? 
memset(req->header.addr1, 0xff, ETH_ALEN); memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN); memset(req->header.addr3, 0xff, ETH_ALEN); tag = (u8 *) skb_put(skb,len+2+rate_len); *tag++ = MFIE_TYPE_SSID; *tag++ = len; memcpy(tag, ieee->current_network.ssid, len); tag += len; ieee80211_MFIE_Brate(ieee,&tag); ieee80211_MFIE_Grate(ieee,&tag); return skb; } struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee); void ieee80211_send_beacon(struct ieee80211_device *ieee) { struct sk_buff *skb; if(!ieee->ieee_up) return; //unsigned long flags; skb = ieee80211_get_beacon_(ieee); if (skb){ softmac_mgmt_xmit(skb, ieee); ieee->softmac_stats.tx_beacons++; //dev_kfree_skb_any(skb);//edit by thomas } // ieee->beacon_timer.expires = jiffies + // (MSECS( ieee->current_network.beacon_interval -5)); //spin_lock_irqsave(&ieee->beacon_lock,flags); if(ieee->beacon_txing && ieee->ieee_up){ // if(!timer_pending(&ieee->beacon_timer)) // add_timer(&ieee->beacon_timer); mod_timer(&ieee->beacon_timer,jiffies+(MSECS(ieee->current_network.beacon_interval-5))); } //spin_unlock_irqrestore(&ieee->beacon_lock,flags); } void ieee80211_send_beacon_cb(unsigned long _ieee) { struct ieee80211_device *ieee = (struct ieee80211_device *) _ieee; unsigned long flags; spin_lock_irqsave(&ieee->beacon_lock, flags); ieee80211_send_beacon(ieee); spin_unlock_irqrestore(&ieee->beacon_lock, flags); } void ieee80211_send_probe(struct ieee80211_device *ieee) { struct sk_buff *skb; skb = ieee80211_probe_req(ieee); if (skb){ softmac_mgmt_xmit(skb, ieee); ieee->softmac_stats.tx_probe_rq++; //dev_kfree_skb_any(skb);//edit by thomas } } void ieee80211_send_probe_requests(struct ieee80211_device *ieee) { if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)){ ieee80211_send_probe(ieee); ieee80211_send_probe(ieee); } } /* this performs syncro scan blocking the caller until all channels * in the allowed channel map has been checked. 
*/ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee) { short ch = 0; #ifdef ENABLE_DOT11D u8 channel_map[MAX_CHANNEL_NUMBER+1]; memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1); #endif down(&ieee->scan_sem); while(1) { do{ ch++; if (ch > MAX_CHANNEL_NUMBER) goto out; /* scan completed */ #ifdef ENABLE_DOT11D }while(!channel_map[ch]); #else }while(!ieee->channel_map[ch]); #endif /* this fuction can be called in two situations * 1- We have switched to ad-hoc mode and we are * performing a complete syncro scan before conclude * there are no interesting cell and to create a * new one. In this case the link state is * IEEE80211_NOLINK until we found an interesting cell. * If so the ieee8021_new_net, called by the RX path * will set the state to IEEE80211_LINKED, so we stop * scanning * 2- We are linked and the root uses run iwlist scan. * So we switch to IEEE80211_LINKED_SCANNING to remember * that we are still logically linked (not interested in * new network events, despite for updating the net list, * but we are temporarly 'unlinked' as the driver shall * not filter RX frames and the channel is changing. * So the only situation in witch are interested is to check * if the state become LINKED because of the #1 situation */ if (ieee->state == IEEE80211_LINKED) goto out; ieee->set_chan(ieee->dev, ch); #ifdef ENABLE_DOT11D if(channel_map[ch] == 1) #endif ieee80211_send_probe_requests(ieee); /* this prevent excessive time wait when we * need to wait for a syncro scan to end.. 
*/ if(ieee->state < IEEE80211_LINKED) ; else if (ieee->sync_scan_hurryup) goto out; msleep_interruptible_rsl(IEEE80211_SOFTMAC_SCAN_TIME); } out: if(ieee->state < IEEE80211_LINKED){ ieee->actscanning = false; up(&ieee->scan_sem); } else{ ieee->sync_scan_hurryup = 0; #ifdef ENABLE_DOT11D if(IS_DOT11D_ENABLE(ieee)) DOT11D_ScanComplete(ieee); #endif up(&ieee->scan_sem); } } void ieee80211_softmac_scan_wq(struct work_struct *work) { struct delayed_work *dwork = container_of(work, struct delayed_work, work); struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq); static short watchdog = 0; #ifdef ENABLE_DOT11D u8 channel_map[MAX_CHANNEL_NUMBER+1]; memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1); #endif if(!ieee->ieee_up) return; down(&ieee->scan_sem); do{ ieee->current_network.channel = (ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER; if (watchdog++ > MAX_CHANNEL_NUMBER) { //if current channel is not in channel map, set to default channel. 
#ifdef ENABLE_DOT11D if (!channel_map[ieee->current_network.channel]); #else if (!ieee->channel_map[ieee->current_network.channel]); #endif ieee->current_network.channel = 6; goto out; /* no good chans */ } #ifdef ENABLE_DOT11D }while(!channel_map[ieee->current_network.channel]); #else }while(!ieee->channel_map[ieee->current_network.channel]); #endif if (ieee->scanning == 0 ) goto out; ieee->set_chan(ieee->dev, ieee->current_network.channel); #ifdef ENABLE_DOT11D if(channel_map[ieee->current_network.channel] == 1) #endif ieee80211_send_probe_requests(ieee); queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME); up(&ieee->scan_sem); return; out: #ifdef ENABLE_DOT11D if(IS_DOT11D_ENABLE(ieee)) DOT11D_ScanComplete(ieee); #endif ieee->actscanning = false; watchdog = 0; ieee->scanning = 0; up(&ieee->scan_sem); } void ieee80211_beacons_start(struct ieee80211_device *ieee) { unsigned long flags; spin_lock_irqsave(&ieee->beacon_lock,flags); ieee->beacon_txing = 1; ieee80211_send_beacon(ieee); spin_unlock_irqrestore(&ieee->beacon_lock,flags); } void ieee80211_beacons_stop(struct ieee80211_device *ieee) { unsigned long flags; spin_lock_irqsave(&ieee->beacon_lock,flags); ieee->beacon_txing = 0; del_timer_sync(&ieee->beacon_timer); spin_unlock_irqrestore(&ieee->beacon_lock,flags); } void ieee80211_stop_send_beacons(struct ieee80211_device *ieee) { if(ieee->stop_send_beacons) ieee->stop_send_beacons(ieee->dev); if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS) ieee80211_beacons_stop(ieee); } void ieee80211_start_send_beacons(struct ieee80211_device *ieee) { if(ieee->start_send_beacons) ieee->start_send_beacons(ieee->dev,ieee->basic_rate); if(ieee->softmac_features & IEEE_SOFTMAC_BEACONS) ieee80211_beacons_start(ieee); } void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee) { // unsigned long flags; //ieee->sync_scan_hurryup = 1; down(&ieee->scan_sem); // spin_lock_irqsave(&ieee->lock, flags); if (ieee->scanning == 1){ ieee->scanning = 0; 
cancel_delayed_work(&ieee->softmac_scan_wq); } // spin_unlock_irqrestore(&ieee->lock, flags); up(&ieee->scan_sem); } void ieee80211_stop_scan(struct ieee80211_device *ieee) { if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) ieee80211_softmac_stop_scan(ieee); else ieee->stop_scan(ieee->dev); } /* called with ieee->lock held */ void ieee80211_start_scan(struct ieee80211_device *ieee) { #ifdef ENABLE_DOT11D if(IS_DOT11D_ENABLE(ieee) ) { if(IS_COUNTRY_IE_VALID(ieee)) { RESET_CIE_WATCHDOG(ieee); } } #endif if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){ if (ieee->scanning == 0){ ieee->scanning = 1; queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, 0); } }else ieee->start_scan(ieee->dev); } /* called with wx_sem held */ void ieee80211_start_scan_syncro(struct ieee80211_device *ieee) { #ifdef ENABLE_DOT11D if(IS_DOT11D_ENABLE(ieee) ) { if(IS_COUNTRY_IE_VALID(ieee)) { RESET_CIE_WATCHDOG(ieee); } } #endif ieee->sync_scan_hurryup = 0; if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) ieee80211_softmac_scan_syncro(ieee); else ieee->scan_syncro(ieee->dev); } inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *beacon, struct ieee80211_device *ieee, int challengelen) { struct sk_buff *skb; struct ieee80211_authentication *auth; int len = sizeof(struct ieee80211_authentication) + challengelen + ieee->tx_headroom; skb = dev_alloc_skb(len); if (!skb) return NULL; skb_reserve(skb, ieee->tx_headroom); auth = (struct ieee80211_authentication *) skb_put(skb, sizeof(struct ieee80211_authentication)); auth->header.frame_ctl = IEEE80211_STYPE_AUTH; if (challengelen) auth->header.frame_ctl |= IEEE80211_FCTL_WEP; auth->header.duration_id = 0x013a; //FIXME memcpy(auth->header.addr1, beacon->bssid, ETH_ALEN); memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN); memcpy(auth->header.addr3, beacon->bssid, ETH_ALEN); //auth->algorithm = ieee->open_wep ? 
WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY; if(ieee->auth_mode == 0) auth->algorithm = WLAN_AUTH_OPEN; else if(ieee->auth_mode == 1) auth->algorithm = WLAN_AUTH_SHARED_KEY; else if(ieee->auth_mode == 2) auth->algorithm = WLAN_AUTH_OPEN;//0x80; printk("=================>%s():auth->algorithm is %d\n",__FUNCTION__,auth->algorithm); auth->transaction = cpu_to_le16(ieee->associate_seq); ieee->associate_seq++; auth->status = cpu_to_le16(WLAN_STATUS_SUCCESS); return skb; } static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *dest) { u8 *tag; int beacon_size; struct ieee80211_probe_response *beacon_buf; struct sk_buff *skb = NULL; int encrypt; int atim_len,erp_len; struct ieee80211_crypt_data* crypt; char *ssid = ieee->current_network.ssid; int ssid_len = ieee->current_network.ssid_len; int rate_len = ieee->current_network.rates_len+2; int rate_ex_len = ieee->current_network.rates_ex_len; int wpa_ie_len = ieee->wpa_ie_len; u8 erpinfo_content = 0; u8* tmp_ht_cap_buf; u8 tmp_ht_cap_len=0; u8* tmp_ht_info_buf; u8 tmp_ht_info_len=0; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; u8* tmp_generic_ie_buf=NULL; u8 tmp_generic_ie_len=0; if(rate_ex_len > 0) rate_ex_len+=2; if(ieee->current_network.capability & WLAN_CAPABILITY_IBSS) atim_len = 4; else atim_len = 0; if(ieee80211_is_54g(ieee->current_network)) erp_len = 3; else erp_len = 0; crypt = ieee->crypt[ieee->tx_keyidx]; encrypt = ieee->host_encrypt && crypt && crypt->ops && ((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len)); //HT ralated element tmp_ht_cap_buf =(u8*) &(ieee->pHTInfo->SelfHTCap); tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap); tmp_ht_info_buf =(u8*) &(ieee->pHTInfo->SelfHTInfo); tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo); HTConstructCapabilityElement(ieee, tmp_ht_cap_buf, &tmp_ht_cap_len,encrypt); HTConstructInfoElement(ieee,tmp_ht_info_buf,&tmp_ht_info_len, encrypt); if(pHTInfo->bRegRT2RTAggregation) { tmp_generic_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer; tmp_generic_ie_len = 
sizeof(ieee->pHTInfo->szRT2RTAggBuffer); HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf, &tmp_generic_ie_len); } // printk("===============>tmp_ht_cap_len is %d,tmp_ht_info_len is %d, tmp_generic_ie_len is %d\n",tmp_ht_cap_len,tmp_ht_info_len,tmp_generic_ie_len); beacon_size = sizeof(struct ieee80211_probe_response)+2+ ssid_len +3 //channel +rate_len +rate_ex_len +atim_len +erp_len +wpa_ie_len // +tmp_ht_cap_len // +tmp_ht_info_len // +tmp_generic_ie_len // +wmm_len+2 +ieee->tx_headroom; skb = dev_alloc_skb(beacon_size); if (!skb) return NULL; skb_reserve(skb, ieee->tx_headroom); beacon_buf = (struct ieee80211_probe_response*) skb_put(skb, (beacon_size - ieee->tx_headroom)); memcpy (beacon_buf->header.addr1, dest,ETH_ALEN); memcpy (beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN); memcpy (beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN); beacon_buf->header.duration_id = 0; //FIXME beacon_buf->beacon_interval = cpu_to_le16(ieee->current_network.beacon_interval); beacon_buf->capability = cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_IBSS); beacon_buf->capability |= cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE); //add short preamble here if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT)) cpu_to_le16((beacon_buf->capability |= WLAN_CAPABILITY_SHORT_SLOT)); crypt = ieee->crypt[ieee->tx_keyidx]; if (encrypt) beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY); beacon_buf->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_RESP); beacon_buf->info_element[0].id = MFIE_TYPE_SSID; beacon_buf->info_element[0].len = ssid_len; tag = (u8*) beacon_buf->info_element[0].data; memcpy(tag, ssid, ssid_len); tag += ssid_len; *(tag++) = MFIE_TYPE_RATES; *(tag++) = rate_len-2; memcpy(tag,ieee->current_network.rates,rate_len-2); tag+=rate_len-2; *(tag++) = MFIE_TYPE_DS_SET; *(tag++) = 1; *(tag++) = ieee->current_network.channel; if(atim_len){ u16 val16; *(tag++) 
= MFIE_TYPE_IBSS_SET; *(tag++) = 2; //*((u16*)(tag)) = cpu_to_le16(ieee->current_network.atim_window); val16 = cpu_to_le16(ieee->current_network.atim_window); memcpy((u8 *)tag, (u8 *)&val16, 2); tag+=2; } if(erp_len){ *(tag++) = MFIE_TYPE_ERP; *(tag++) = 1; *(tag++) = erpinfo_content; } if(rate_ex_len){ *(tag++) = MFIE_TYPE_RATES_EX; *(tag++) = rate_ex_len-2; memcpy(tag,ieee->current_network.rates_ex,rate_ex_len-2); tag+=rate_ex_len-2; } if (wpa_ie_len) { if (ieee->iw_mode == IW_MODE_ADHOC) {//as Windows will set pairwise key same as the group key which is not allowed in Linux, so set this for IOT issue. WB 2008.07.07 memcpy(&ieee->wpa_ie[14], &ieee->wpa_ie[8], 4); } memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len); tag += wpa_ie_len; } //skb->dev = ieee->dev; return skb; } struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest) { struct sk_buff *skb; u8* tag; struct ieee80211_crypt_data* crypt; struct ieee80211_assoc_response_frame *assoc; short encrypt; unsigned int rate_len = ieee80211_MFIE_rate_len(ieee); int len = sizeof(struct ieee80211_assoc_response_frame) + rate_len + ieee->tx_headroom; skb = dev_alloc_skb(len); if (!skb) return NULL; skb_reserve(skb, ieee->tx_headroom); assoc = (struct ieee80211_assoc_response_frame *) skb_put(skb,sizeof(struct ieee80211_assoc_response_frame)); assoc->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP); memcpy(assoc->header.addr1, dest,ETH_ALEN); memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN); memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN); assoc->capability = cpu_to_le16(ieee->iw_mode == IW_MODE_MASTER ? 
WLAN_CAPABILITY_BSS : WLAN_CAPABILITY_IBSS); if(ieee->short_slot) assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT); if (ieee->host_encrypt) crypt = ieee->crypt[ieee->tx_keyidx]; else crypt = NULL; encrypt = ( crypt && crypt->ops); if (encrypt) assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY); assoc->status = 0; assoc->aid = cpu_to_le16(ieee->assoc_id); if (ieee->assoc_id == 0x2007) ieee->assoc_id=0; else ieee->assoc_id++; tag = (u8*) skb_put(skb, rate_len); ieee80211_MFIE_Brate(ieee, &tag); ieee80211_MFIE_Grate(ieee, &tag); return skb; } struct sk_buff* ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8 *dest) { struct sk_buff *skb; struct ieee80211_authentication *auth; int len = ieee->tx_headroom + sizeof(struct ieee80211_authentication)+1; skb = dev_alloc_skb(len); if (!skb) return NULL; skb->len = sizeof(struct ieee80211_authentication); auth = (struct ieee80211_authentication *)skb->data; auth->status = cpu_to_le16(status); auth->transaction = cpu_to_le16(2); auth->algorithm = cpu_to_le16(WLAN_AUTH_OPEN); memcpy(auth->header.addr3, ieee->dev->dev_addr, ETH_ALEN); memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN); memcpy(auth->header.addr1, dest, ETH_ALEN); auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH); return skb; } struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr) { struct sk_buff *skb; struct ieee80211_hdr_3addr* hdr; skb = dev_alloc_skb(sizeof(struct ieee80211_hdr_3addr)); if (!skb) return NULL; hdr = (struct ieee80211_hdr_3addr*)skb_put(skb,sizeof(struct ieee80211_hdr_3addr)); memcpy(hdr->addr1, ieee->current_network.bssid, ETH_ALEN); memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN); memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN); hdr->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS | (pwr ? 
IEEE80211_FCTL_PM:0)); return skb; } void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8* dest) { struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest); if (buf) softmac_mgmt_xmit(buf, ieee); } void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8* dest) { struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest); if (buf) softmac_mgmt_xmit(buf, ieee); } void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest) { struct sk_buff *buf = ieee80211_probe_resp(ieee, dest); if (buf) softmac_mgmt_xmit(buf, ieee); } inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beacon,struct ieee80211_device *ieee) { struct sk_buff *skb; //unsigned long flags; struct ieee80211_assoc_request_frame *hdr; u8 *tag;//,*rsn_ie; //short info_addr = 0; //int i; //u16 suite_count = 0; //u8 suit_select = 0; //unsigned int wpa_len = beacon->wpa_ie_len; //for HT u8* ht_cap_buf = NULL; u8 ht_cap_len=0; u8* realtek_ie_buf=NULL; u8 realtek_ie_len=0; int wpa_ie_len= ieee->wpa_ie_len; unsigned int ckip_ie_len=0; unsigned int ccxrm_ie_len=0; unsigned int cxvernum_ie_len=0; struct ieee80211_crypt_data* crypt; int encrypt; unsigned int rate_len = ieee80211_MFIE_rate_len(ieee); unsigned int wmm_info_len = beacon->qos_data.supported?9:0; #ifdef THOMAS_TURBO unsigned int turbo_info_len = beacon->Turbo_Enable?9:0; #endif int len = 0; crypt = ieee->crypt[ieee->tx_keyidx]; encrypt = ieee->host_encrypt && crypt && crypt->ops && ((0 == strcmp(crypt->ops->name,"WEP") || wpa_ie_len)); //Include High Throuput capability && Realtek proprietary if(ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT) { ht_cap_buf = (u8*)&(ieee->pHTInfo->SelfHTCap); ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap); HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len, encrypt); if(ieee->pHTInfo->bCurrentRT2RTAggregation) { realtek_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer; realtek_ie_len = sizeof( ieee->pHTInfo->szRT2RTAggBuffer); HTConstructRT2RTAggElement(ieee, 
realtek_ie_buf, &realtek_ie_len); } } if(ieee->qos_support){ wmm_info_len = beacon->qos_data.supported?9:0; } if(beacon->bCkipSupported) { ckip_ie_len = 30+2; } if(beacon->bCcxRmEnable) { ccxrm_ie_len = 6+2; } if( beacon->BssCcxVerNumber >= 2 ) { cxvernum_ie_len = 5+2; } #ifdef THOMAS_TURBO len = sizeof(struct ieee80211_assoc_request_frame)+ 2 + beacon->ssid_len//essid tagged val + rate_len//rates tagged val + wpa_ie_len + wmm_info_len + turbo_info_len + ht_cap_len + realtek_ie_len + ckip_ie_len + ccxrm_ie_len + cxvernum_ie_len + ieee->tx_headroom; #else len = sizeof(struct ieee80211_assoc_request_frame)+ 2 + beacon->ssid_len//essid tagged val + rate_len//rates tagged val + wpa_ie_len + wmm_info_len + ht_cap_len + realtek_ie_len + ckip_ie_len + ccxrm_ie_len + cxvernum_ie_len + ieee->tx_headroom; #endif skb = dev_alloc_skb(len); if (!skb) return NULL; skb_reserve(skb, ieee->tx_headroom); hdr = (struct ieee80211_assoc_request_frame *) skb_put(skb, sizeof(struct ieee80211_assoc_request_frame)+2); hdr->header.frame_ctl = IEEE80211_STYPE_ASSOC_REQ; hdr->header.duration_id= 37; //FIXME memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN); memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN); memcpy(hdr->header.addr3, beacon->bssid, ETH_ALEN); memcpy(ieee->ap_mac_addr, beacon->bssid, ETH_ALEN);//for HW security, John hdr->capability = cpu_to_le16(WLAN_CAPABILITY_BSS); if (beacon->capability & WLAN_CAPABILITY_PRIVACY ) hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY); if (beacon->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE); //add short_preamble here if(ieee->short_slot) hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT); if (wmm_info_len) //QOS hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_QOS); hdr->listen_interval = 0xa; //FIXME hdr->info_element[0].id = MFIE_TYPE_SSID; hdr->info_element[0].len = beacon->ssid_len; tag = skb_put(skb, beacon->ssid_len); memcpy(tag, beacon->ssid, 
beacon->ssid_len); tag = skb_put(skb, rate_len); ieee80211_MFIE_Brate(ieee, &tag); ieee80211_MFIE_Grate(ieee, &tag); // For CCX 1 S13, CKIP. Added by Annie, 2006-08-14. if( beacon->bCkipSupported ) { static u8 AironetIeOui[] = {0x00, 0x01, 0x66}; // "4500-client" u8 CcxAironetBuf[30]; OCTET_STRING osCcxAironetIE; memset(CcxAironetBuf, 0,30); osCcxAironetIE.Octet = CcxAironetBuf; osCcxAironetIE.Length = sizeof(CcxAironetBuf); // // Ref. CCX test plan v3.61, 3.2.3.1 step 13. // We want to make the device type as "4500-client". 060926, by CCW. // memcpy(osCcxAironetIE.Octet, AironetIeOui, sizeof(AironetIeOui)); // CCX1 spec V1.13, A01.1 CKIP Negotiation (page23): // "The CKIP negotiation is started with the associate request from the client to the access point, // containing an Aironet element with both the MIC and KP bits set." osCcxAironetIE.Octet[IE_CISCO_FLAG_POSITION] |= (SUPPORT_CKIP_PK|SUPPORT_CKIP_MIC) ; tag = skb_put(skb, ckip_ie_len); *tag++ = MFIE_TYPE_AIRONET; *tag++ = osCcxAironetIE.Length; memcpy(tag,osCcxAironetIE.Octet,osCcxAironetIE.Length); tag += osCcxAironetIE.Length; } if(beacon->bCcxRmEnable) { static u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01, 0x00}; OCTET_STRING osCcxRmCap; osCcxRmCap.Octet = CcxRmCapBuf; osCcxRmCap.Length = sizeof(CcxRmCapBuf); tag = skb_put(skb,ccxrm_ie_len); *tag++ = MFIE_TYPE_GENERIC; *tag++ = osCcxRmCap.Length; memcpy(tag,osCcxRmCap.Octet,osCcxRmCap.Length); tag += osCcxRmCap.Length; } if( beacon->BssCcxVerNumber >= 2 ) { u8 CcxVerNumBuf[] = {0x00, 0x40, 0x96, 0x03, 0x00}; OCTET_STRING osCcxVerNum; CcxVerNumBuf[4] = beacon->BssCcxVerNumber; osCcxVerNum.Octet = CcxVerNumBuf; osCcxVerNum.Length = sizeof(CcxVerNumBuf); tag = skb_put(skb,cxvernum_ie_len); *tag++ = MFIE_TYPE_GENERIC; *tag++ = osCcxVerNum.Length; memcpy(tag,osCcxVerNum.Octet,osCcxVerNum.Length); tag += osCcxVerNum.Length; } //HT cap element if(ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT){ if(ieee->pHTInfo->ePeerHTSpecVer != HT_SPEC_VER_EWC) 
{ tag = skb_put(skb, ht_cap_len); *tag++ = MFIE_TYPE_HT_CAP; *tag++ = ht_cap_len - 2; memcpy(tag, ht_cap_buf,ht_cap_len -2); tag += ht_cap_len -2; } } //choose what wpa_supplicant gives to associate. tag = skb_put(skb, wpa_ie_len); if (wpa_ie_len){ memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len); } tag = skb_put(skb,wmm_info_len); if(wmm_info_len) { ieee80211_WMM_Info(ieee, &tag); } #ifdef THOMAS_TURBO tag = skb_put(skb,turbo_info_len); if(turbo_info_len) { ieee80211_TURBO_Info(ieee, &tag); } #endif if(ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT){ if(ieee->pHTInfo->ePeerHTSpecVer == HT_SPEC_VER_EWC) { tag = skb_put(skb, ht_cap_len); *tag++ = MFIE_TYPE_GENERIC; *tag++ = ht_cap_len - 2; memcpy(tag, ht_cap_buf,ht_cap_len - 2); tag += ht_cap_len -2; } if(ieee->pHTInfo->bCurrentRT2RTAggregation){ tag = skb_put(skb, realtek_ie_len); *tag++ = MFIE_TYPE_GENERIC; *tag++ = realtek_ie_len - 2; memcpy(tag, realtek_ie_buf,realtek_ie_len -2 ); } } // printk("<=====%s(), %p, %p\n", __FUNCTION__, ieee->dev, ieee->dev->dev_addr); // IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len); return skb; } void ieee80211_associate_abort(struct ieee80211_device *ieee) { unsigned long flags; spin_lock_irqsave(&ieee->lock, flags); ieee->associate_seq++; /* don't scan, and avoid to have the RX path possibily * try again to associate. Even do not react to AUTH or * ASSOC response. Just wait for the retry wq to be scheduled. 
* Here we will check if there are good nets to associate * with, so we retry or just get back to NO_LINK and scanning */ if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING){ IEEE80211_DEBUG_MGMT("Authentication failed\n"); ieee->softmac_stats.no_auth_rs++; }else{ IEEE80211_DEBUG_MGMT("Association failed\n"); ieee->softmac_stats.no_ass_rs++; } ieee->state = IEEE80211_ASSOCIATING_RETRY; queue_delayed_work(ieee->wq, &ieee->associate_retry_wq, \ IEEE80211_SOFTMAC_ASSOC_RETRY_TIME); spin_unlock_irqrestore(&ieee->lock, flags); } void ieee80211_associate_abort_cb(unsigned long dev) { ieee80211_associate_abort((struct ieee80211_device *) dev); } void ieee80211_associate_step1(struct ieee80211_device *ieee) { struct ieee80211_network *beacon = &ieee->current_network; struct sk_buff *skb; IEEE80211_DEBUG_MGMT("Stopping scan\n"); ieee->softmac_stats.tx_auth_rq++; skb=ieee80211_authentication_req(beacon, ieee, 0); if (!skb) ieee80211_associate_abort(ieee); else{ ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING ; IEEE80211_DEBUG_MGMT("Sending authentication request\n"); //printk(KERN_WARNING "Sending authentication request\n"); softmac_mgmt_xmit(skb, ieee); //BUGON when you try to add_timer twice, using mod_timer may be better, john0709 if(!timer_pending(&ieee->associate_timer)){ ieee->associate_timer.expires = jiffies + (HZ / 2); add_timer(&ieee->associate_timer); } //dev_kfree_skb_any(skb);//edit by thomas } } void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen) { u8 *c; struct sk_buff *skb; struct ieee80211_network *beacon = &ieee->current_network; // int hlen = sizeof(struct ieee80211_authentication); ieee->associate_seq++; ieee->softmac_stats.tx_auth_rq++; skb = ieee80211_authentication_req(beacon, ieee, chlen+2); if (!skb) ieee80211_associate_abort(ieee); else{ c = skb_put(skb, chlen+2); *(c++) = MFIE_TYPE_CHALLENGE; *(c++) = chlen; memcpy(c, challenge, chlen); IEEE80211_DEBUG_MGMT("Sending authentication challenge response\n"); 
ieee80211_encrypt_fragment(ieee, skb, sizeof(struct ieee80211_hdr_3addr )); softmac_mgmt_xmit(skb, ieee); mod_timer(&ieee->associate_timer, jiffies + (HZ/2)); //dev_kfree_skb_any(skb);//edit by thomas } kfree(challenge); } void ieee80211_associate_step2(struct ieee80211_device *ieee) { struct sk_buff* skb; struct ieee80211_network *beacon = &ieee->current_network; del_timer_sync(&ieee->associate_timer); IEEE80211_DEBUG_MGMT("Sending association request\n"); ieee->softmac_stats.tx_ass_rq++; skb=ieee80211_association_req(beacon, ieee); if (!skb) ieee80211_associate_abort(ieee); else{ softmac_mgmt_xmit(skb, ieee); mod_timer(&ieee->associate_timer, jiffies + (HZ/2)); //dev_kfree_skb_any(skb);//edit by thomas } } void ieee80211_associate_complete_wq(struct work_struct *work) { struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq); printk(KERN_INFO "Associated successfully\n"); if(ieee80211_is_54g(ieee->current_network) && (ieee->modulation & IEEE80211_OFDM_MODULATION)){ ieee->rate = 108; printk(KERN_INFO"Using G rates:%d\n", ieee->rate); }else{ ieee->rate = 22; printk(KERN_INFO"Using B rates:%d\n", ieee->rate); } if (ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT) { printk("Successfully associated, ht enabled\n"); HTOnAssocRsp(ieee); } else { printk("Successfully associated, ht not enabled(%d, %d)\n", ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bEnableHT); memset(ieee->dot11HTOperationalRateSet, 0, 16); //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); } ieee->LinkDetectInfo.SlotNum = 2 * (1 + ieee->current_network.beacon_interval/500); // To prevent the immediately calling watch_dog after association. 
if(ieee->LinkDetectInfo.NumRecvBcnInPeriod==0||ieee->LinkDetectInfo.NumRecvDataInPeriod==0 ) { ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1; ieee->LinkDetectInfo.NumRecvDataInPeriod= 1; } ieee->link_change(ieee->dev); if(ieee->is_silent_reset == 0){ printk("============>normal associate\n"); notify_wx_assoc_event(ieee); } else if(ieee->is_silent_reset == 1) { printk("==================>silent reset associate\n"); ieee->is_silent_reset = 0; } if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); netif_carrier_on(ieee->dev); } void ieee80211_associate_complete(struct ieee80211_device *ieee) { // int i; // struct net_device* dev = ieee->dev; del_timer_sync(&ieee->associate_timer); ieee->state = IEEE80211_LINKED; //ieee->UpdateHalRATRTableHandler(dev, ieee->dot11HTOperationalRateSet); queue_work(ieee->wq, &ieee->associate_complete_wq); } void ieee80211_associate_procedure_wq(struct work_struct *work) { struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq); ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); if (ieee->data_hard_stop) ieee->data_hard_stop(ieee->dev); ieee80211_stop_scan(ieee); printk("===>%s(), chan:%d\n", __FUNCTION__, ieee->current_network.channel); //ieee->set_chan(ieee->dev, ieee->current_network.channel); HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); ieee->associate_seq = 1; ieee80211_associate_step1(ieee); up(&ieee->wx_sem); } inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net) { u8 tmp_ssid[IW_ESSID_MAX_SIZE+1]; int tmp_ssid_len = 0; short apset,ssidset,ssidbroad,apmatch,ssidmatch; /* we are interested in new new only if we are not associated * and we are not associating / authenticating */ if (ieee->state != IEEE80211_NOLINK) return; if ((ieee->iw_mode == IW_MODE_INFRA) && !(net->capability & WLAN_CAPABILITY_BSS)) return; if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability & WLAN_CAPABILITY_IBSS)) return; if 
(ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC){ /* if the user specified the AP MAC, we need also the essid * This could be obtained by beacons or, if the network does not * broadcast it, it can be put manually. */ apset = ieee->wap_set;//(memcmp(ieee->current_network.bssid, zero,ETH_ALEN)!=0 ); ssidset = ieee->ssid_set;//ieee->current_network.ssid[0] != '\0'; ssidbroad = !(net->ssid_len == 0 || net->ssid[0]== '\0'); apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN)==0); ssidmatch = (ieee->current_network.ssid_len == net->ssid_len)&&\ (!strncmp(ieee->current_network.ssid, net->ssid, net->ssid_len)); if ( /* if the user set the AP check if match. * if the network does not broadcast essid we check the user supplyed ANY essid * if the network does broadcast and the user does not set essid it is OK * if the network does broadcast and the user did set essid chech if essid match */ ( apset && apmatch && ((ssidset && ssidbroad && ssidmatch) || (ssidbroad && !ssidset) || (!ssidbroad && ssidset)) ) || /* if the ap is not set, check that the user set the bssid * and the network does bradcast and that those two bssid matches */ (!apset && ssidset && ssidbroad && ssidmatch) ){ /* if the essid is hidden replace it with the * essid provided by the user. 
*/ if (!ssidbroad){ strncpy(tmp_ssid, ieee->current_network.ssid, IW_ESSID_MAX_SIZE); tmp_ssid_len = ieee->current_network.ssid_len; } memcpy(&ieee->current_network, net, sizeof(struct ieee80211_network)); if (!ssidbroad){ strncpy(ieee->current_network.ssid, tmp_ssid, IW_ESSID_MAX_SIZE); ieee->current_network.ssid_len = tmp_ssid_len; } printk(KERN_INFO"Linking with %s,channel:%d, qos:%d, myHT:%d, networkHT:%d\n",ieee->current_network.ssid,ieee->current_network.channel, ieee->current_network.qos_data.supported, ieee->pHTInfo->bEnableHT, ieee->current_network.bssht.bdSupportHT); //ieee->pHTInfo->IOTAction = 0; HTResetIOTSetting(ieee->pHTInfo); if (ieee->iw_mode == IW_MODE_INFRA){ /* Join the network for the first time */ ieee->AsocRetryCount = 0; //for HT by amy 080514 if((ieee->current_network.qos_data.supported == 1) && // (ieee->pHTInfo->bEnableHT && ieee->current_network.bssht.bdSupportHT)) ieee->current_network.bssht.bdSupportHT) /*WB, 2008.09.09:bCurrentHTSupport and bEnableHT two flags are going to put together to check whether we are in HT now, so needn't to check bEnableHT flags here. That's is to say we will set to HT support whenever joined AP has the ability to support HT. 
And whether we are in HT or not, please check bCurrentHTSupport&&bEnableHT now please.*/ { // ieee->pHTInfo->bCurrentHTSupport = true; HTResetSelfAndSavePeerSetting(ieee, &(ieee->current_network)); } else { ieee->pHTInfo->bCurrentHTSupport = false; } ieee->state = IEEE80211_ASSOCIATING; queue_work(ieee->wq, &ieee->associate_procedure_wq); }else{ if(ieee80211_is_54g(ieee->current_network) && (ieee->modulation & IEEE80211_OFDM_MODULATION)){ ieee->rate = 108; ieee->SetWirelessMode(ieee->dev, IEEE_G); printk(KERN_INFO"Using G rates\n"); }else{ ieee->rate = 22; ieee->SetWirelessMode(ieee->dev, IEEE_B); printk(KERN_INFO"Using B rates\n"); } memset(ieee->dot11HTOperationalRateSet, 0, 16); //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); ieee->state = IEEE80211_LINKED; } } } } void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee) { unsigned long flags; struct ieee80211_network *target; spin_lock_irqsave(&ieee->lock, flags); list_for_each_entry(target, &ieee->network_list, list) { /* if the state become different that NOLINK means * we had found what we are searching for */ if (ieee->state != IEEE80211_NOLINK) break; if (ieee->scan_age == 0 || time_after(target->last_scanned + ieee->scan_age, jiffies)) ieee80211_softmac_new_net(ieee, target); } spin_unlock_irqrestore(&ieee->lock, flags); } static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen) { struct ieee80211_authentication *a; u8 *t; if (skb->len < (sizeof(struct ieee80211_authentication)-sizeof(struct ieee80211_info_element))){ IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n",skb->len); return 0xcafe; } *challenge = NULL; a = (struct ieee80211_authentication*) skb->data; if(skb->len > (sizeof(struct ieee80211_authentication) +3)){ t = skb->data + sizeof(struct ieee80211_authentication); if(*(t++) == MFIE_TYPE_CHALLENGE){ *chlen = *(t++); *challenge = kmemdup(t, *chlen, GFP_ATOMIC); if (!*challenge) return -ENOMEM; } } return cpu_to_le16(a->status); } 
int auth_rq_parse(struct sk_buff *skb,u8* dest) { struct ieee80211_authentication *a; if (skb->len < (sizeof(struct ieee80211_authentication)-sizeof(struct ieee80211_info_element))){ IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n",skb->len); return -1; } a = (struct ieee80211_authentication*) skb->data; memcpy(dest,a->header.addr2, ETH_ALEN); if (le16_to_cpu(a->algorithm) != WLAN_AUTH_OPEN) return WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG; return WLAN_STATUS_SUCCESS; } static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *src) { u8 *tag; u8 *skbend; u8 *ssid=NULL; u8 ssidlen = 0; struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data; if (skb->len < sizeof (struct ieee80211_hdr_3addr )) return -1; /* corrupted */ memcpy(src,header->addr2, ETH_ALEN); skbend = (u8*)skb->data + skb->len; tag = skb->data + sizeof (struct ieee80211_hdr_3addr ); while (tag+1 < skbend){ if (*tag == 0){ ssid = tag+2; ssidlen = *(tag+1); break; } tag++; /* point to the len field */ tag = tag + *(tag); /* point to the last data byte of the tag */ tag++; /* point to the next tag */ } //IEEE80211DMESG("Card MAC address is "MACSTR, MAC2STR(src)); if (ssidlen == 0) return 1; if (!ssid) return 1; /* ssid not found in tagged param */ return (!strncmp(ssid, ieee->current_network.ssid, ssidlen)); } int assoc_rq_parse(struct sk_buff *skb,u8* dest) { struct ieee80211_assoc_request_frame *a; if (skb->len < (sizeof(struct ieee80211_assoc_request_frame) - sizeof(struct ieee80211_info_element))) { IEEE80211_DEBUG_MGMT("invalid len in auth request:%d \n", skb->len); return -1; } a = (struct ieee80211_assoc_request_frame*) skb->data; memcpy(dest,a->header.addr2,ETH_ALEN); return 0; } static inline u16 assoc_parse(struct ieee80211_device *ieee, struct sk_buff *skb, int *aid) { struct ieee80211_assoc_response_frame *response_head; u16 status_code; if (skb->len < sizeof(struct ieee80211_assoc_response_frame)){ IEEE80211_DEBUG_MGMT("invalid len in auth 
resp: %d\n", skb->len); return 0xcafe; } response_head = (struct ieee80211_assoc_response_frame*) skb->data; *aid = le16_to_cpu(response_head->aid) & 0x3fff; status_code = le16_to_cpu(response_head->status); if((status_code==WLAN_STATUS_ASSOC_DENIED_RATES || \ status_code==WLAN_STATUS_CAPS_UNSUPPORTED)&& ((ieee->mode == IEEE_G) && (ieee->current_network.mode == IEEE_N_24G) && (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT-1)))) { ieee->pHTInfo->IOTAction |= HT_IOT_ACT_PURE_N_MODE; }else { ieee->AsocRetryCount = 0; } return le16_to_cpu(response_head->status); } static inline void ieee80211_rx_probe_rq(struct ieee80211_device *ieee, struct sk_buff *skb) { u8 dest[ETH_ALEN]; //IEEE80211DMESG("Rx probe"); ieee->softmac_stats.rx_probe_rq++; //DMESG("Dest is "MACSTR, MAC2STR(dest)); if (probe_rq_parse(ieee, skb, dest)){ //IEEE80211DMESG("Was for me!"); ieee->softmac_stats.tx_probe_rs++; ieee80211_resp_to_probe(ieee, dest); } } static inline void ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb) { u8 dest[ETH_ALEN]; int status; //IEEE80211DMESG("Rx probe"); ieee->softmac_stats.rx_auth_rq++; status = auth_rq_parse(skb, dest); if (status != -1) { ieee80211_resp_to_auth(ieee, status, dest); } //DMESG("Dest is "MACSTR, MAC2STR(dest)); } static inline void ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb) { u8 dest[ETH_ALEN]; //unsigned long flags; ieee->softmac_stats.rx_ass_rq++; if (assoc_rq_parse(skb,dest) != -1){ ieee80211_resp_to_assoc_rq(ieee, dest); } printk(KERN_INFO"New client associated: %pM\n", dest); //FIXME } void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr) { struct sk_buff *buf = ieee80211_null_func(ieee, pwr); if (buf) softmac_ps_mgmt_xmit(buf, ieee); } short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *time_l) { int timeout = ieee->ps_timeout; u8 dtim; /*if(ieee->ps == IEEE80211_PS_DISABLED || ieee->iw_mode != IW_MODE_INFRA || ieee->state != 
IEEE80211_LINKED) return 0; */ dtim = ieee->current_network.dtim_data; //printk("DTIM\n"); if(!(dtim & IEEE80211_DTIM_VALID)) return 0; timeout = ieee->current_network.beacon_interval; //should we use ps_timeout value or beacon_interval //printk("VALID\n"); ieee->current_network.dtim_data = IEEE80211_DTIM_INVALID; if(dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST)& ieee->ps)) return 2; if(!time_after(jiffies, ieee->dev->trans_start + MSECS(timeout))) return 0; if(!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout))) return 0; if((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE ) && (ieee->mgmt_queue_tail != ieee->mgmt_queue_head)) return 0; if(time_l){ *time_l = ieee->current_network.last_dtim_sta_time[0] + (ieee->current_network.beacon_interval * ieee->current_network.dtim_period) * 1000; } if(time_h){ *time_h = ieee->current_network.last_dtim_sta_time[1]; if(time_l && *time_l < ieee->current_network.last_dtim_sta_time[0]) *time_h += 1; } return 1; } inline void ieee80211_sta_ps(struct ieee80211_device *ieee) { u32 th,tl; short sleep; unsigned long flags,flags2; spin_lock_irqsave(&ieee->lock, flags); if((ieee->ps == IEEE80211_PS_DISABLED || ieee->iw_mode != IW_MODE_INFRA || ieee->state != IEEE80211_LINKED)){ // #warning CHECK_LOCK_HERE spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2); ieee80211_sta_wakeup(ieee, 1); spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2); } sleep = ieee80211_sta_ps_sleep(ieee,&th, &tl); /* 2 wake, 1 sleep, 0 do nothing */ if(sleep == 0) goto out; if(sleep == 1){ if(ieee->sta_sleep == 1) ieee->enter_sleep_state(ieee->dev,th,tl); else if(ieee->sta_sleep == 0){ // printk("send null 1\n"); spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2); if(ieee->ps_is_queue_empty(ieee->dev)){ ieee->sta_sleep = 2; ieee->ps_request_tx_ack(ieee->dev); ieee80211_sta_ps_send_null_frame(ieee,1); ieee->ps_th = th; ieee->ps_tl = tl; } spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2); } }else if(sleep == 2){ //#warning CHECK_LOCK_HERE 
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2); ieee80211_sta_wakeup(ieee,1); spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2); } out: spin_unlock_irqrestore(&ieee->lock, flags); } void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl) { if(ieee->sta_sleep == 0){ if(nl){ printk("Warning: driver is probably failing to report TX ps error\n"); ieee->ps_request_tx_ack(ieee->dev); ieee80211_sta_ps_send_null_frame(ieee, 0); } return; } if(ieee->sta_sleep == 1) ieee->sta_wake_up(ieee->dev); ieee->sta_sleep = 0; if(nl){ ieee->ps_request_tx_ack(ieee->dev); ieee80211_sta_ps_send_null_frame(ieee, 0); } } void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success) { unsigned long flags,flags2; spin_lock_irqsave(&ieee->lock, flags); if(ieee->sta_sleep == 2){ /* Null frame with PS bit set */ if(success){ ieee->sta_sleep = 1; ieee->enter_sleep_state(ieee->dev,ieee->ps_th,ieee->ps_tl); } /* if the card report not success we can't be sure the AP * has not RXed so we can't assume the AP believe us awake */ } /* 21112005 - tx again null without PS bit if lost */ else { if((ieee->sta_sleep == 0) && !success){ spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2); ieee80211_sta_ps_send_null_frame(ieee, 0); spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2); } } spin_unlock_irqrestore(&ieee->lock, flags); } void ieee80211_process_action(struct ieee80211_device* ieee, struct sk_buff* skb) { struct ieee80211_hdr* header = (struct ieee80211_hdr*)skb->data; u8* act = ieee80211_get_payload(header); u8 tmp = 0; // IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len); if (act == NULL) { IEEE80211_DEBUG(IEEE80211_DL_ERR, "error to get payload of action frame\n"); return; } tmp = *act; act ++; switch (tmp) { case ACT_CAT_BA: if (*act == ACT_ADDBAREQ) ieee80211_rx_ADDBAReq(ieee, skb); else if (*act == ACT_ADDBARSP) ieee80211_rx_ADDBARsp(ieee, skb); else if (*act == ACT_DELBA) ieee80211_rx_DELBA(ieee, skb); break; default: // if (net_ratelimit()) // 
/*
 * Dispatch a received 802.11 management frame from the RX path.
 *
 * Handles association/authentication responses while we are an infra
 * station, association/authentication/probe requests while we are
 * master, deauth/disassoc (restarts the association procedure) and
 * action frames.  Any frame other than a beacon or probe response also
 * refreshes last_rx_ps_time for the power-save logic.
 *
 * Returns 0 when the frame was handled, 1 when an association response
 * carried IEs that could not be parsed, -1 for unknown subtypes.
 */
inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
			struct ieee80211_rx_stats *rx_stats, u16 type,
			u16 stype)
{
	struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data;
	u16 errcode;
	u8 *challenge;
	int chlen = 0;
	int aid;
	struct ieee80211_assoc_response_frame *assoc_resp;
	bool bSupportNmode = true, bHalfSupportNmode = false; //default support N mode, disable halfNmode

	if (!ieee->proto_started)
		return 0;

	/* kick the PS tasklet when we are (or should be) sleeping */
	if (ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
	    ieee->iw_mode == IW_MODE_INFRA &&
	    ieee->state == IEEE80211_LINKED))
		tasklet_schedule(&ieee->ps_task);

	if (WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP &&
	    WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON)
		ieee->last_rx_ps_time = jiffies;

	switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {

	case IEEE80211_STYPE_ASSOC_RESP:
	case IEEE80211_STYPE_REASSOC_RESP:

		IEEE80211_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
				WLAN_FC_GET_STYPE(header->frame_ctl));
		if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
		    ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
		    ieee->iw_mode == IW_MODE_INFRA) {
			struct ieee80211_network network_resp;
			struct ieee80211_network *network = &network_resp;

			if (0 == (errcode = assoc_parse(ieee, skb, &aid))) {
				ieee->state = IEEE80211_LINKED;
				ieee->assoc_id = aid;
				ieee->softmac_stats.rx_ass_ok++;
				/* station support qos */
				/* Let the register setting default with Legacy station */
				if (ieee->qos_support) {
					assoc_resp = (struct ieee80211_assoc_response_frame *)skb->data;
					memset(network, 0, sizeof(*network));
					if (ieee80211_parse_info_param(ieee, assoc_resp->info_element,
								rx_stats->len - sizeof(*assoc_resp),
								network, rx_stats)) {
						return 1;
					} else {
						/* fill the PeerHTCap buffers; maybe not
						 * necessary as we can get this info from
						 * current_network */
						memcpy(ieee->pHTInfo->PeerHTCapBuf, network->bssht.bdHTCapBuf, network->bssht.bdHTCapLen);
						memcpy(ieee->pHTInfo->PeerHTInfoBuf, network->bssht.bdHTInfoBuf, network->bssht.bdHTInfoLen);
					}
					if (ieee->handle_assoc_response != NULL)
						ieee->handle_assoc_response(ieee->dev, (struct ieee80211_assoc_response_frame *)header, network);
				}
				ieee80211_associate_complete(ieee);
			} else {
				/* aid could not been allocated */
				ieee->softmac_stats.rx_ass_err++;
				printk(
					"Association response status code 0x%x\n",
					errcode);
				IEEE80211_DEBUG_MGMT(
					"Association response status code 0x%x\n",
					errcode);
				/* retry until the retry budget is exhausted,
				 * then abort the association attempt */
				if (ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT) {
					queue_work(ieee->wq, &ieee->associate_procedure_wq);
				} else {
					ieee80211_associate_abort(ieee);
				}
			}
		}
		break;

	case IEEE80211_STYPE_ASSOC_REQ:
	case IEEE80211_STYPE_REASSOC_REQ:

		if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
		    ieee->iw_mode == IW_MODE_MASTER)
			ieee80211_rx_assoc_rq(ieee, skb);
		break;

	case IEEE80211_STYPE_AUTH:

		if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) {
			if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING &&
			    ieee->iw_mode == IW_MODE_INFRA) {

				IEEE80211_DEBUG_MGMT("Received authentication response");

				if (0 == (errcode = auth_parse(skb, &challenge, &chlen))) {
					if (ieee->open_wep || !challenge) {
						ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATED;
						ieee->softmac_stats.rx_auth_rs_ok++;
						/* decide N vs half-N vs legacy based on the
						 * configured cipher (WEP/TKIP forbid full N) */
						if (!(ieee->pHTInfo->IOTAction & HT_IOT_ACT_PURE_N_MODE)) {
							if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
								// WEP or TKIP encryption
								if (IsHTHalfNmodeAPs(ieee)) {
									bSupportNmode = true;
									bHalfSupportNmode = true;
								} else {
									bSupportNmode = false;
									bHalfSupportNmode = false;
								}
								printk("==========>to link with AP using SEC(%d, %d)", bSupportNmode, bHalfSupportNmode);
							}
						}
						/* Dummy wireless mode setting to avoid encryption issue */
						if (bSupportNmode) {
							//N mode setting
							ieee->SetWirelessMode(ieee->dev,
									ieee->current_network.mode);
						} else {
							//b/g mode setting
							/*TODO*/
							ieee->SetWirelessMode(ieee->dev, IEEE_G);
						}

						if (ieee->current_network.mode == IEEE_N_24G && bHalfSupportNmode == true) {
							printk("===============>entern half N mode\n");
							ieee->bHalfWirelessN24GMode = true;
						} else
							ieee->bHalfWirelessN24GMode = false;

						ieee80211_associate_step2(ieee);
					} else {
						/* shared-key auth: answer the challenge */
						ieee80211_auth_challenge(ieee, challenge, chlen);
					}
				} else {
					ieee->softmac_stats.rx_auth_rs_err++;
					IEEE80211_DEBUG_MGMT("Authentication respose status code 0x%x", errcode);
					ieee80211_associate_abort(ieee);
				}

			} else if (ieee->iw_mode == IW_MODE_MASTER) {
				ieee80211_rx_auth_rq(ieee, skb);
			}
		}
		break;

	case IEEE80211_STYPE_PROBE_REQ:

		if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
		    ((ieee->iw_mode == IW_MODE_ADHOC ||
		    ieee->iw_mode == IW_MODE_MASTER) &&
		    ieee->state == IEEE80211_LINKED)) {
			ieee80211_rx_probe_rq(ieee, skb);
		}
		break;

	case IEEE80211_STYPE_DISASSOC:
	case IEEE80211_STYPE_DEAUTH:
		/* FIXME for now repeat all the association procedure
		 * both for disassociation and deauthentication
		 */
		if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
		    ieee->state == IEEE80211_LINKED &&
		    ieee->iw_mode == IW_MODE_INFRA) {

			ieee->state = IEEE80211_ASSOCIATING;
			ieee->softmac_stats.reassoc++;

			notify_wx_assoc_event(ieee);
			//HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
			RemovePeerTS(ieee, header->addr2);
			queue_work(ieee->wq, &ieee->associate_procedure_wq);
		}
		break;
	case IEEE80211_STYPE_MANAGE_ACT:
		ieee80211_process_action(ieee, skb);
		break;
	default:
		return -1;
		break;
	}

	//dev_kfree_skb_any(skb);
	return 0;
}
/* The following implement a simpler TX queue management scheme.
 * Instead of using netif_[stop/wake]_queue directly, the driver uses
 * these functions (plus a reset one) that internally use the kernel
 * netif_* calls and take care of the 802.11 fragmentation: the driver
 * receives one fragment at a time and may call the stop function
 * whenever it wants, without having to guarantee room to TX an entire
 * packet.  This is useful when each fragment needs its own descriptor:
 * keeping total free memory above the fragmentation threshold would
 * not be enough, since for a TXB struct you would need
 * N = MAX_PACKET_SIZE / MIN_FRAG_TRESHOLD free descriptors.  This way
 * you need just one, and the 802.11 stack buffers the remaining
 * fragments and hands them to the driver later, when it wakes the
 * queue.
 */

/*
 * Transmit (or queue) every fragment of @txb on its TX queue.
 * Fragments are handed straight to the driver while descriptors are
 * available and the queue is not stopped; otherwise they go to the
 * per-queue wait queue and are flushed by ieee80211_wake_queue().
 * Always consumes @txb.
 */
void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee)
{
	unsigned int queue_index = txb->queue_index;
	unsigned long flags;
	int i;
	cb_desc *tcb_desc = NULL;

	spin_lock_irqsave(&ieee->lock, flags);

	/* called with 2nd parm 0, no tx mgmt lock required */
	ieee80211_sta_wakeup(ieee, 0);

	/* update the tx status */
	ieee->stats.tx_bytes += txb->payload_size;
	ieee->stats.tx_packets++;
	tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
	if (tcb_desc->bMulticast) {
		ieee->stats.multicast++;
	}
	/* if xmit available, just xmit it immediately, else just insert it to the wait queue */
	for (i = 0; i < txb->nr_frags; i++) {
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
		if ((skb_queue_len(&ieee->skb_drv_aggQ[queue_index]) != 0) ||
#else
		if ((skb_queue_len(&ieee->skb_waitQ[queue_index]) != 0) ||
#endif
		    (!ieee->check_nic_enough_desc(ieee->dev, queue_index)) ||
		    (ieee->queue_stop)) {
			/* insert the skb packet to the wait queue */
			/* as for the completion function, it does not need
			 * to check it any more.
			 * */
			//printk("error:no descriptor left@queue_index %d\n", queue_index);
			//ieee80211_stop_queue(ieee);
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
			skb_queue_tail(&ieee->skb_drv_aggQ[queue_index], txb->fragments[i]);
#else
			skb_queue_tail(&ieee->skb_waitQ[queue_index], txb->fragments[i]);
#endif
		} else {
			ieee->softmac_data_hard_start_xmit(
					txb->fragments[i],
					ieee->dev, ieee->rate);
			//ieee->stats.tx_packets++;
			//ieee->stats.tx_bytes += txb->fragments[i]->len;
			//ieee->dev->trans_start = jiffies;
		}
	}
	ieee80211_txb_free(txb);

//exit:
	spin_unlock_irqrestore(&ieee->lock, flags);
}

/* called with ieee->lock acquired */
/*
 * Resume transmission of the pending TXB from the fragment where we
 * stopped.  If the queue gets stopped again mid-way, remember the
 * fragment index and bail out; otherwise free the TXB when done.
 */
void ieee80211_resume_tx(struct ieee80211_device *ieee)
{
	int i;

	for (i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {

		if (ieee->queue_stop) {
			ieee->tx_pending.frag = i;
			return;
		} else {

			ieee->softmac_data_hard_start_xmit(
				ieee->tx_pending.txb->fragments[i],
				ieee->dev, ieee->rate);
				//(i+1)<ieee->tx_pending.txb->nr_frags);
			ieee->stats.tx_packets++;
			ieee->dev->trans_start = jiffies;
		}
	}

	ieee80211_txb_free(ieee->tx_pending.txb);
	ieee->tx_pending.txb = NULL;
}

/*
 * Drop the management queue and any pending TXB, and clear the
 * queue-stopped flag.
 */
void ieee80211_reset_queue(struct ieee80211_device *ieee)
{
	unsigned long flags;

	spin_lock_irqsave(&ieee->lock, flags);
	init_mgmt_queue(ieee);
	if (ieee->tx_pending.txb) {
		ieee80211_txb_free(ieee->tx_pending.txb);
		ieee->tx_pending.txb = NULL;
	}
	ieee->queue_stop = 0;
	spin_unlock_irqrestore(&ieee->lock, flags);
}

/*
 * Un-stop the TX path: flush queued management frames (stamping
 * sequence numbers), resume any pending data TXB, then wake the kernel
 * netif queue.  Each step re-checks queue_stop since the driver may
 * stop us again from within the hard-start-xmit callback.
 */
void ieee80211_wake_queue(struct ieee80211_device *ieee)
{
	unsigned long flags;
	struct sk_buff *skb;
	struct ieee80211_hdr_3addr *header;

	spin_lock_irqsave(&ieee->lock, flags);
	if (!ieee->queue_stop)
		goto exit;

	ieee->queue_stop = 0;

	if (ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) {
		while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))) {

			header = (struct ieee80211_hdr_3addr *)skb->data;

			header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);

			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;

			ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
			//dev_kfree_skb_any(skb);//edit by thomas
		}
	}
	if (!ieee->queue_stop && ieee->tx_pending.txb)
		ieee80211_resume_tx(ieee);

	if (!ieee->queue_stop && netif_queue_stopped(ieee->dev)) {
		ieee->softmac_stats.swtxawake++;
		netif_wake_queue(ieee->dev);
	}

exit:
	spin_unlock_irqrestore(&ieee->lock, flags);
}

/*
 * Stop the kernel netif queue and mark the softmac queue stopped.
 * NOTE(review): the locking here is commented out in the original;
 * presumably callers hold ieee->lock or tolerate the race — confirm.
 */
void ieee80211_stop_queue(struct ieee80211_device *ieee)
{
	//unsigned long flags;
	//spin_lock_irqsave(&ieee->lock,flags);

	if (!netif_queue_stopped(ieee->dev)) {
		netif_stop_queue(ieee->dev);
		ieee->softmac_stats.swtxstop++;
	}
	ieee->queue_stop = 1;
	//spin_unlock_irqrestore(&ieee->lock,flags);
}

/*
 * Generate a random IBSS BSSID: clear the multicast bit and set the
 * locally-administered bit of the first byte.
 */
inline void ieee80211_randomize_cell(struct ieee80211_device *ieee)
{
	get_random_bytes(ieee->current_network.bssid, ETH_ALEN);

	/* an IBSS cell address must have the two less significant
	 * bits of the first byte = 2 */
	ieee->current_network.bssid[0] &= ~0x01;
	ieee->current_network.bssid[0] |= 0x02;
}

/* called in user context only */
/*
 * Bring up master (AP) mode: pick a default ESSID if none is set, use
 * our own MAC as BSSID, tune to the configured channel and go LINKED.
 */
void ieee80211_start_master_bss(struct ieee80211_device *ieee)
{
	ieee->assoc_id = 1;

	if (ieee->current_network.ssid_len == 0) {
		strncpy(ieee->current_network.ssid,
			IEEE80211_DEFAULT_TX_ESSID,
			IW_ESSID_MAX_SIZE);

		ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID);
		ieee->ssid_set = 1;
	}

	memcpy(ieee->current_network.bssid, ieee->dev->dev_addr, ETH_ALEN);

	ieee->set_chan(ieee->dev, ieee->current_network.channel);
	ieee->state = IEEE80211_LINKED;
	ieee->link_change(ieee->dev);
	notify_wx_assoc_event(ieee);

	if (ieee->data_hard_resume)
		ieee->data_hard_resume(ieee->dev);

	netif_carrier_on(ieee->dev);
}
/*
 * Deferred work that brings up (or joins) an IBSS cell.
 *
 * First looks for the cell among already-scanned networks, then runs a
 * synchronous scan; if the cell is still not found, creates a new one
 * with default rates and capabilities, then goes LINKED and starts
 * beaconing.
 */
void ieee80211_start_ibss_wq(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
	struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
	/* iwconfig mode ad-hoc will schedule this and return
	 * on the other hand this will block further iwconfig SET
	 * operations because of the wx_sem hold.
	 * Anyway some most set operations set a flag to speed-up
	 * (abort) this wq (when syncro scanning) before sleeping
	 * on the semaphore
	*/
	if (!ieee->proto_started) {
		printk("==========oh driver down return\n");
		return;
	}
	down(&ieee->wx_sem);

	if (ieee->current_network.ssid_len == 0) {
		strcpy(ieee->current_network.ssid, IEEE80211_DEFAULT_TX_ESSID);
		ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID);
		ieee->ssid_set = 1;
	}

	/* check if we have this cell in our network list */
	ieee80211_softmac_check_all_nets(ieee);

#ifdef ENABLE_DOT11D //if creating an ad-hoc, set its channel to 10 temporarily--this is the requirement for ASUS, not 11D, so disable 11d.
//	if((IS_DOT11D_ENABLE(ieee)) && (ieee->state == IEEE80211_NOLINK))
	if (ieee->state == IEEE80211_NOLINK)
		ieee->current_network.channel = 6;
#endif
	/* If we get here the state is not linked: maybe the user switched
	 * to ad-hoc mode just after being in monitor mode, or spent very
	 * little time in managed mode (so the card has not scanned all the
	 * channels), or we have just brought the interface up after
	 * setting ad-hoc mode — so give it another try.
	 * Here, in ibss mode, it is safe to do this without extra care
	 * (in bss mode we had to make sure no one tried to associate
	 * between checking ieee->state and starting the scan) because in
	 * ibss mode ieee80211_new_net, when it finds a good net, just
	 * sets ieee->state to IEEE80211_LINKED.  At worst we waste a bit
	 * of time starting an unneeded synchronous scan that stops at the
	 * first round once it sees the state is associated. */
	if (ieee->state == IEEE80211_NOLINK)
		ieee80211_start_scan_syncro(ieee);

	/* the network definitively is not here.. create a new cell */
	if (ieee->state == IEEE80211_NOLINK) {
		printk("creating new IBSS cell\n");
		if (!ieee->wap_set)
			ieee80211_randomize_cell(ieee);

		/* default 11b rates, marked as basic rates */
		if (ieee->modulation & IEEE80211_CCK_MODULATION) {

			ieee->current_network.rates_len = 4;

			ieee->current_network.rates[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
			ieee->current_network.rates[1] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
			ieee->current_network.rates[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
			ieee->current_network.rates[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;

		} else
			ieee->current_network.rates_len = 0;

		/* default 11g extended rates; ieee->rate is in units of
		 * 500kbps (108 == 54Mb, 22 == 11Mb) */
		if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
			ieee->current_network.rates_ex_len = 8;

			ieee->current_network.rates_ex[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
			ieee->current_network.rates_ex[1] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
			ieee->current_network.rates_ex[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
			ieee->current_network.rates_ex[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
			ieee->current_network.rates_ex[4] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
			ieee->current_network.rates_ex[5] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
			ieee->current_network.rates_ex[6] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
			ieee->current_network.rates_ex[7] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;

			ieee->rate = 108;
		} else {
			ieee->current_network.rates_ex_len = 0;
			ieee->rate = 22;
		}

		// By default, WMM function will be disabled in IBSS mode
		ieee->current_network.QoS_Enable = 0;
		ieee->SetWirelessMode(ieee->dev, IEEE_G);
		ieee->current_network.atim_window = 0;
		ieee->current_network.capability = WLAN_CAPABILITY_IBSS;
		if (ieee->short_slot)
			ieee->current_network.capability |= WLAN_CAPABILITY_SHORT_SLOT;
	}

	ieee->state = IEEE80211_LINKED;

	ieee->set_chan(ieee->dev, ieee->current_network.channel);
	ieee->link_change(ieee->dev);
	notify_wx_assoc_event(ieee);

	ieee80211_start_send_beacons(ieee);

	if (ieee->data_hard_resume)
		ieee->data_hard_resume(ieee->dev);
	netif_carrier_on(ieee->dev);

	up(&ieee->wx_sem);
}

/*
 * Schedule the IBSS bring-up work.
 * NOTE(review): the delay "150" is in raw jiffies, so the real time
 * depends on HZ — looks like ~150ms was intended; confirm whether
 * msecs_to_jiffies() should be used here.
 */
inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
{
	queue_delayed_work(ieee->wq, &ieee->start_ibss_wq, 150);
}
/* called only in userspace context */
/*
 * Tear down the current association: drop the carrier, reset the TX
 * queues if the softmac owns them, stop the data path, clear 802.11d
 * state, fall back to IEEE80211_NOLINK and notify wireless extensions.
 */
void ieee80211_disassociate(struct ieee80211_device *ieee)
{
	netif_carrier_off(ieee->dev);
	if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)
			ieee80211_reset_queue(ieee);

	if (ieee->data_hard_stop)
			ieee->data_hard_stop(ieee->dev);
#ifdef ENABLE_DOT11D
	if (IS_DOT11D_ENABLE(ieee))
		Dot11d_Reset(ieee);
#endif
	ieee->state = IEEE80211_NOLINK;
	ieee->is_set_key = false;
	ieee->link_change(ieee->dev);
	//HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
	notify_wx_assoc_event(ieee);
}
*/ ieee->state = IEEE80211_NOLINK; ieee80211_softmac_check_all_nets(ieee); spin_lock_irqsave(&ieee->lock, flags); if(ieee->state == IEEE80211_NOLINK) ieee80211_start_scan(ieee); spin_unlock_irqrestore(&ieee->lock, flags); exit: up(&ieee->wx_sem); } struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee) { u8 broadcast_addr[] = {0xff,0xff,0xff,0xff,0xff,0xff}; struct sk_buff *skb; struct ieee80211_probe_response *b; skb = ieee80211_probe_resp(ieee, broadcast_addr); if (!skb) return NULL; b = (struct ieee80211_probe_response *) skb->data; b->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_BEACON); return skb; } struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee) { struct sk_buff *skb; struct ieee80211_probe_response *b; skb = ieee80211_get_beacon_(ieee); if(!skb) return NULL; b = (struct ieee80211_probe_response *) skb->data; b->header.seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; return skb; } void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee) { ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); ieee80211_stop_protocol(ieee); up(&ieee->wx_sem); } void ieee80211_stop_protocol(struct ieee80211_device *ieee) { if (!ieee->proto_started) return; ieee->proto_started = 0; ieee80211_stop_send_beacons(ieee); del_timer_sync(&ieee->associate_timer); cancel_delayed_work(&ieee->associate_retry_wq); cancel_delayed_work(&ieee->start_ibss_wq); ieee80211_stop_scan(ieee); ieee80211_disassociate(ieee); RemoveAllTS(ieee); //added as we disconnect from the previous BSS, Remove all TS } void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee) { ieee->sync_scan_hurryup = 0; down(&ieee->wx_sem); ieee80211_start_protocol(ieee); up(&ieee->wx_sem); } void ieee80211_start_protocol(struct ieee80211_device *ieee) { short ch = 0; int i = 0; if (ieee->proto_started) return; ieee->proto_started = 1; if (ieee->current_network.channel == 0){ do{ ch++; if (ch > 
MAX_CHANNEL_NUMBER) return; /* no channel found */ #ifdef ENABLE_DOT11D }while(!GET_DOT11D_INFO(ieee)->channel_map[ch]); #else }while(!ieee->channel_map[ch]); #endif ieee->current_network.channel = ch; } if (ieee->current_network.beacon_interval == 0) ieee->current_network.beacon_interval = 100; // printk("===>%s(), chan:%d\n", __FUNCTION__, ieee->current_network.channel); // ieee->set_chan(ieee->dev,ieee->current_network.channel); for(i = 0; i < 17; i++) { ieee->last_rxseq_num[i] = -1; ieee->last_rxfrag_num[i] = -1; ieee->last_packet_time[i] = 0; } ieee->init_wmmparam_flag = 0;//reinitialize AC_xx_PARAM registers. /* if the user set the MAC of the ad-hoc cell and then * switch to managed mode, shall we make sure that association * attempts does not fail just because the user provide the essid * and the nic is still checking for the AP MAC ?? */ if (ieee->iw_mode == IW_MODE_INFRA) ieee80211_start_bss(ieee); else if (ieee->iw_mode == IW_MODE_ADHOC) ieee80211_start_ibss(ieee); else if (ieee->iw_mode == IW_MODE_MASTER) ieee80211_start_master_bss(ieee); else if(ieee->iw_mode == IW_MODE_MONITOR) ieee80211_start_monitor_mode(ieee); } #define DRV_NAME "Ieee80211" void ieee80211_softmac_init(struct ieee80211_device *ieee) { int i; memset(&ieee->current_network, 0, sizeof(struct ieee80211_network)); ieee->state = IEEE80211_NOLINK; ieee->sync_scan_hurryup = 0; for(i = 0; i < 5; i++) { ieee->seq_ctrl[i] = 0; } #ifdef ENABLE_DOT11D ieee->pDot11dInfo = kzalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC); if (!ieee->pDot11dInfo) IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for DOT11D\n"); #endif //added for AP roaming ieee->LinkDetectInfo.SlotNum = 2; ieee->LinkDetectInfo.NumRecvBcnInPeriod=0; ieee->LinkDetectInfo.NumRecvDataInPeriod=0; ieee->assoc_id = 0; ieee->queue_stop = 0; ieee->scanning = 0; ieee->softmac_features = 0; //so IEEE2100-like driver are happy ieee->wap_set = 0; ieee->ssid_set = 0; ieee->proto_started = 0; ieee->basic_rate = IEEE80211_DEFAULT_BASIC_RATE; 
ieee->rate = 22; ieee->ps = IEEE80211_PS_DISABLED; ieee->sta_sleep = 0; ieee->Regdot11HTOperationalRateSet[0]= 0xff;//support MCS 0~7 ieee->Regdot11HTOperationalRateSet[1]= 0xff;//support MCS 8~15 ieee->Regdot11HTOperationalRateSet[4]= 0x01; //added by amy ieee->actscanning = false; ieee->beinretry = false; ieee->is_set_key = false; init_mgmt_queue(ieee); ieee->sta_edca_param[0] = 0x0000A403; ieee->sta_edca_param[1] = 0x0000A427; ieee->sta_edca_param[2] = 0x005E4342; ieee->sta_edca_param[3] = 0x002F3262; ieee->aggregation = true; ieee->enable_rx_imm_BA = 1; ieee->tx_pending.txb = NULL; init_timer(&ieee->associate_timer); ieee->associate_timer.data = (unsigned long)ieee; ieee->associate_timer.function = ieee80211_associate_abort_cb; init_timer(&ieee->beacon_timer); ieee->beacon_timer.data = (unsigned long) ieee; ieee->beacon_timer.function = ieee80211_send_beacon_cb; #ifdef PF_SYNCTHREAD ieee->wq = create_workqueue(DRV_NAME,0); #else ieee->wq = create_workqueue(DRV_NAME); #endif INIT_DELAYED_WORK(&ieee->start_ibss_wq,ieee80211_start_ibss_wq); INIT_WORK(&ieee->associate_complete_wq, ieee80211_associate_complete_wq); INIT_WORK(&ieee->associate_procedure_wq, ieee80211_associate_procedure_wq); INIT_DELAYED_WORK(&ieee->softmac_scan_wq,ieee80211_softmac_scan_wq); INIT_DELAYED_WORK(&ieee->associate_retry_wq, ieee80211_associate_retry_wq); INIT_WORK(&ieee->wx_sync_scan_wq,ieee80211_wx_sync_scan_wq); sema_init(&ieee->wx_sem, 1); sema_init(&ieee->scan_sem, 1); spin_lock_init(&ieee->mgmt_tx_lock); spin_lock_init(&ieee->beacon_lock); tasklet_init(&ieee->ps_task, (void(*)(unsigned long)) ieee80211_sta_ps, (unsigned long)ieee); } void ieee80211_softmac_free(struct ieee80211_device *ieee) { down(&ieee->wx_sem); #ifdef ENABLE_DOT11D if(NULL != ieee->pDot11dInfo) { kfree(ieee->pDot11dInfo); ieee->pDot11dInfo = NULL; } #endif del_timer_sync(&ieee->associate_timer); cancel_delayed_work(&ieee->associate_retry_wq); destroy_workqueue(ieee->wq); up(&ieee->wx_sem); } 
/******************************************************** * Start of WPA code. * * this is stolen from the ipw2200 driver * ********************************************************/ static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value) { /* This is called when wpa_supplicant loads and closes the driver * interface. */ printk("%s WPA\n",value ? "enabling" : "disabling"); ieee->wpa_enabled = value; return 0; } void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie, int wpa_ie_len) { /* make sure WPA is enabled */ ieee80211_wpa_enable(ieee, 1); ieee80211_disassociate(ieee); } static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command, int reason) { int ret = 0; switch (command) { case IEEE_MLME_STA_DEAUTH: // silently ignore break; case IEEE_MLME_STA_DISASSOC: ieee80211_disassociate(ieee); break; default: printk("Unknown MLME request: %d\n", command); ret = -EOPNOTSUPP; } return ret; } static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee, struct ieee_param *param, int plen) { u8 *buf; if (param->u.wpa_ie.len > MAX_WPA_IE_LEN || (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL)) return -EINVAL; if (param->u.wpa_ie.len) { buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len, GFP_KERNEL); if (buf == NULL) return -ENOMEM; kfree(ieee->wpa_ie); ieee->wpa_ie = buf; ieee->wpa_ie_len = param->u.wpa_ie.len; } else { kfree(ieee->wpa_ie); ieee->wpa_ie = NULL; ieee->wpa_ie_len = 0; } ieee80211_wpa_assoc_frame(ieee, ieee->wpa_ie, ieee->wpa_ie_len); return 0; } #define AUTH_ALG_OPEN_SYSTEM 0x1 #define AUTH_ALG_SHARED_KEY 0x2 static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value) { struct ieee80211_security sec = { .flags = SEC_AUTH_MODE, }; int ret = 0; if (value & AUTH_ALG_SHARED_KEY) { sec.auth_mode = WLAN_AUTH_SHARED_KEY; ieee->open_wep = 0; ieee->auth_mode = 1; } else if (value & AUTH_ALG_OPEN_SYSTEM){ sec.auth_mode = WLAN_AUTH_OPEN; ieee->open_wep = 1; ieee->auth_mode = 0; } else 
if (value & IW_AUTH_ALG_LEAP){ sec.auth_mode = WLAN_AUTH_LEAP; ieee->open_wep = 1; ieee->auth_mode = 2; } if (ieee->set_security) ieee->set_security(ieee->dev, &sec); //else // ret = -EOPNOTSUPP; return ret; } static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name, u32 value) { int ret=0; unsigned long flags; switch (name) { case IEEE_PARAM_WPA_ENABLED: ret = ieee80211_wpa_enable(ieee, value); break; case IEEE_PARAM_TKIP_COUNTERMEASURES: ieee->tkip_countermeasures=value; break; case IEEE_PARAM_DROP_UNENCRYPTED: { /* HACK: * * wpa_supplicant calls set_wpa_enabled when the driver * is loaded and unloaded, regardless of if WPA is being * used. No other calls are made which can be used to * determine if encryption will be used or not prior to * association being expected. If encryption is not being * used, drop_unencrypted is set to false, else true -- we * can use this to determine if the CAP_PRIVACY_ON bit should * be set. */ struct ieee80211_security sec = { .flags = SEC_ENABLED, .enabled = value, }; ieee->drop_unencrypted = value; /* We only change SEC_LEVEL for open mode. Others * are set by ipw_wpa_set_encryption. 
*/ if (!value) { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_0; } else { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_1; } if (ieee->set_security) ieee->set_security(ieee->dev, &sec); break; } case IEEE_PARAM_PRIVACY_INVOKED: ieee->privacy_invoked=value; break; case IEEE_PARAM_AUTH_ALGS: ret = ieee80211_wpa_set_auth_algs(ieee, value); break; case IEEE_PARAM_IEEE_802_1X: ieee->ieee802_1x=value; break; case IEEE_PARAM_WPAX_SELECT: // added for WPA2 mixed mode spin_lock_irqsave(&ieee->wpax_suitlist_lock,flags); ieee->wpax_type_set = 1; ieee->wpax_type_notify = value; spin_unlock_irqrestore(&ieee->wpax_suitlist_lock,flags); break; default: printk("Unknown WPA param: %d\n",name); ret = -EOPNOTSUPP; } return ret; } /* implementation borrowed from hostap driver */ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee, struct ieee_param *param, int param_len) { int ret = 0; struct ieee80211_crypto_ops *ops; struct ieee80211_crypt_data **crypt; struct ieee80211_security sec = { .flags = 0, }; param->u.crypt.err = 0; param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0'; if (param_len != (int) ((char *) param->u.crypt.key - (char *) param) + param->u.crypt.key_len) { printk("Len mismatch %d, %d\n", param_len, param->u.crypt.key_len); return -EINVAL; } if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff && param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) { if (param->u.crypt.idx >= WEP_KEYS) return -EINVAL; crypt = &ieee->crypt[param->u.crypt.idx]; } else { return -EINVAL; } if (strcmp(param->u.crypt.alg, "none") == 0) { if (crypt) { sec.enabled = 0; // FIXME FIXME //sec.encrypt = 0; sec.level = SEC_LEVEL_0; sec.flags |= SEC_ENABLED | SEC_LEVEL; ieee80211_crypt_delayed_deinit(ieee, crypt); } goto done; } sec.enabled = 1; // FIXME FIXME // sec.encrypt = 1; sec.flags |= SEC_ENABLED; /* IPW HW cannot build TKIP MIC, host decryption still needed. 
*/ if (!(ieee->host_encrypt || ieee->host_decrypt) && strcmp(param->u.crypt.alg, "TKIP")) goto skip_host_crypt; ops = ieee80211_get_crypto_ops(param->u.crypt.alg); if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) { request_module("ieee80211_crypt_wep"); ops = ieee80211_get_crypto_ops(param->u.crypt.alg); //set WEP40 first, it will be modified according to WEP104 or WEP40 at other place } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) { request_module("ieee80211_crypt_tkip"); ops = ieee80211_get_crypto_ops(param->u.crypt.alg); } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) { request_module("ieee80211_crypt_ccmp"); ops = ieee80211_get_crypto_ops(param->u.crypt.alg); } if (ops == NULL) { printk("unknown crypto alg '%s'\n", param->u.crypt.alg); param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG; ret = -EINVAL; goto done; } if (*crypt == NULL || (*crypt)->ops != ops) { struct ieee80211_crypt_data *new_crypt; ieee80211_crypt_delayed_deinit(ieee, crypt); new_crypt = kmalloc(sizeof(*new_crypt), GFP_KERNEL); if (new_crypt == NULL) { ret = -ENOMEM; goto done; } memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); new_crypt->ops = ops; if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx); if (new_crypt->priv == NULL) { kfree(new_crypt); param->u.crypt.err = IEEE_CRYPT_ERR_CRYPT_INIT_FAILED; ret = -EINVAL; goto done; } *crypt = new_crypt; } if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key && (*crypt)->ops->set_key(param->u.crypt.key, param->u.crypt.key_len, param->u.crypt.seq, (*crypt)->priv) < 0) { printk("key setting failed\n"); param->u.crypt.err = IEEE_CRYPT_ERR_KEY_SET_FAILED; ret = -EINVAL; goto done; } skip_host_crypt: if (param->u.crypt.set_tx) { ieee->tx_keyidx = param->u.crypt.idx; sec.active_key = param->u.crypt.idx; sec.flags |= SEC_ACTIVE_KEY; } else sec.flags &= ~SEC_ACTIVE_KEY; if (param->u.crypt.alg != NULL) { 
memcpy(sec.keys[param->u.crypt.idx], param->u.crypt.key, param->u.crypt.key_len); sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len; sec.flags |= (1 << param->u.crypt.idx); if (strcmp(param->u.crypt.alg, "WEP") == 0) { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_1; } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_2; } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_3; } } done: if (ieee->set_security) ieee->set_security(ieee->dev, &sec); /* Do not reset port if card is in Managed mode since resetting will * generate new IEEE 802.11 authentication which may end up in looping * with IEEE 802.1X. If your hardware requires a reset after WEP * configuration (for example... Prism2), implement the reset_port in * the callbacks structures used to initialize the 802.11 stack. */ if (ieee->reset_on_keychange && ieee->iw_mode != IW_MODE_INFRA && ieee->reset_port && ieee->reset_port(ieee->dev)) { printk("reset_port failed\n"); param->u.crypt.err = IEEE_CRYPT_ERR_CARD_CONF_FAILED; return -EINVAL; } return ret; } inline struct sk_buff *ieee80211_disassociate_skb( struct ieee80211_network *beacon, struct ieee80211_device *ieee, u8 asRsn) { struct sk_buff *skb; struct ieee80211_disassoc *disass; skb = dev_alloc_skb(sizeof(struct ieee80211_disassoc)); if (!skb) return NULL; disass = (struct ieee80211_disassoc *) skb_put(skb,sizeof(struct ieee80211_disassoc)); disass->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_DISASSOC); disass->header.duration_id = 0; memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN); memcpy(disass->header.addr2, ieee->dev->dev_addr, ETH_ALEN); memcpy(disass->header.addr3, beacon->bssid, ETH_ALEN); disass->reason = asRsn; return skb; } void SendDisassociation( struct ieee80211_device *ieee, u8* asSta, u8 asRsn ) { struct ieee80211_network *beacon = &ieee->current_network; struct sk_buff *skb; skb = ieee80211_disassociate_skb(beacon,ieee,asRsn); if 
(skb){ softmac_mgmt_xmit(skb, ieee); //dev_kfree_skb_any(skb);//edit by thomas } } int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p) { struct ieee_param *param; int ret=0; down(&ieee->wx_sem); //IEEE_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length); if (p->length < sizeof(struct ieee_param) || !p->pointer){ ret = -EINVAL; goto out; } param = kmalloc(p->length, GFP_KERNEL); if (param == NULL){ ret = -ENOMEM; goto out; } if (copy_from_user(param, p->pointer, p->length)) { kfree(param); ret = -EFAULT; goto out; } switch (param->cmd) { case IEEE_CMD_SET_WPA_PARAM: ret = ieee80211_wpa_set_param(ieee, param->u.wpa_param.name, param->u.wpa_param.value); break; case IEEE_CMD_SET_WPA_IE: ret = ieee80211_wpa_set_wpa_ie(ieee, param, p->length); break; case IEEE_CMD_SET_ENCRYPTION: ret = ieee80211_wpa_set_encryption(ieee, param, p->length); break; case IEEE_CMD_MLME: ret = ieee80211_wpa_mlme(ieee, param->u.mlme.command, param->u.mlme.reason_code); break; default: printk("Unknown WPA supplicant request: %d\n",param->cmd); ret = -EOPNOTSUPP; break; } if (ret == 0 && copy_to_user(p->pointer, param, p->length)) ret = -EFAULT; kfree(param); out: up(&ieee->wx_sem); return ret; } void notify_wx_assoc_event(struct ieee80211_device *ieee) { union iwreq_data wrqu; wrqu.ap_addr.sa_family = ARPHRD_ETHER; if (ieee->state == IEEE80211_LINKED) memcpy(wrqu.ap_addr.sa_data, ieee->current_network.bssid, ETH_ALEN); else memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); wireless_send_event(ieee->dev, SIOCGIWAP, &wrqu, NULL); } EXPORT_SYMBOL(ieee80211_get_beacon); EXPORT_SYMBOL(ieee80211_wake_queue); EXPORT_SYMBOL(ieee80211_stop_queue); EXPORT_SYMBOL(ieee80211_reset_queue); EXPORT_SYMBOL(ieee80211_softmac_stop_protocol); EXPORT_SYMBOL(ieee80211_softmac_start_protocol); EXPORT_SYMBOL(ieee80211_is_shortslot); EXPORT_SYMBOL(ieee80211_is_54g); EXPORT_SYMBOL(ieee80211_wpa_supplicant_ioctl); EXPORT_SYMBOL(ieee80211_ps_tx_ack); EXPORT_SYMBOL(ieee80211_softmac_xmit); 
EXPORT_SYMBOL(ieee80211_stop_send_beacons); EXPORT_SYMBOL(notify_wx_assoc_event); EXPORT_SYMBOL(SendDisassociation); EXPORT_SYMBOL(ieee80211_disassociate); EXPORT_SYMBOL(ieee80211_start_send_beacons); EXPORT_SYMBOL(ieee80211_stop_scan); EXPORT_SYMBOL(ieee80211_send_probe_requests); EXPORT_SYMBOL(ieee80211_softmac_scan_syncro); EXPORT_SYMBOL(ieee80211_start_scan_syncro); //EXPORT_SYMBOL(ieee80211_sta_ps_send_null_frame);
gpl-2.0
jetonbacaj/SomeKernel_920P_OL1
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
2135
22751
/******************************************************************************* Intel 10 Gigabit PCI Express Linux driver Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include "ixgbe.h" #include <linux/dcbnl.h> #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" /* Callbacks for DCB netlink in the kernel */ #define BIT_DCB_MODE 0x01 #define BIT_PFC 0x02 #define BIT_PG_RX 0x04 #define BIT_PG_TX 0x08 #define BIT_APP_UPCHG 0x10 #define BIT_LINKSPEED 0x80 /* Responses for the DCB_C_SET_ALL command */ #define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ #define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ #define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) { struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg; struct tc_configuration *src = NULL; struct tc_configuration *dst = NULL; int i, j; int tx = DCB_TX_CONFIG; int rx = DCB_RX_CONFIG; int changes = 0; #ifdef IXGBE_FCOE struct dcb_app app = { .selector = DCB_APP_IDTYPE_ETHTYPE, .protocol = ETH_P_FCOE, }; u8 up = dcb_getapp(adapter->netdev, &app); if (up && !(up & (1 << adapter->fcoe.up))) changes |= BIT_APP_UPCHG; #endif for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0]; if (dst->path[tx].prio_type != src->path[tx].prio_type) { dst->path[tx].prio_type = src->path[tx].prio_type; changes |= BIT_PG_TX; } if (dst->path[tx].bwg_id != src->path[tx].bwg_id) { dst->path[tx].bwg_id = src->path[tx].bwg_id; changes |= BIT_PG_TX; } if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) { dst->path[tx].bwg_percent = src->path[tx].bwg_percent; changes |= BIT_PG_TX; } if (dst->path[tx].up_to_tc_bitmap != src->path[tx].up_to_tc_bitmap) { dst->path[tx].up_to_tc_bitmap = src->path[tx].up_to_tc_bitmap; changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG); } if 
(dst->path[rx].prio_type != src->path[rx].prio_type) { dst->path[rx].prio_type = src->path[rx].prio_type; changes |= BIT_PG_RX; } if (dst->path[rx].bwg_id != src->path[rx].bwg_id) { dst->path[rx].bwg_id = src->path[rx].bwg_id; changes |= BIT_PG_RX; } if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) { dst->path[rx].bwg_percent = src->path[rx].bwg_percent; changes |= BIT_PG_RX; } if (dst->path[rx].up_to_tc_bitmap != src->path[rx].up_to_tc_bitmap) { dst->path[rx].up_to_tc_bitmap = src->path[rx].up_to_tc_bitmap; changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG); } } for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { j = i - DCB_PG_ATTR_BW_ID_0; if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) { dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; changes |= BIT_PG_TX; } if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) { dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j]; changes |= BIT_PG_RX; } } for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { j = i - DCB_PFC_UP_ATTR_0; if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) { dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc; changes |= BIT_PFC; } } if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) { dcfg->pfc_mode_enable = scfg->pfc_mode_enable; changes |= BIT_PFC; } return changes; } static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); } static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int err = 0; /* Fail command if not in CEE mode */ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return 1; /* verify there is something to do, if not then exit */ if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) goto out; err = ixgbe_setup_tc(netdev, state ? 
adapter->dcb_cfg.num_tcs.pg_tcs : 0); out: return !!err; } static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i, j; memset(perm_addr, 0xff, MAX_ADDR_LEN); for (i = 0; i < netdev->addr_len; i++) perm_addr[i] = adapter->hw.mac.perm_addr[i]; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: for (j = 0; j < netdev->addr_len; j++, i++) perm_addr[i] = adapter->hw.mac.san_addr[j]; break; default: break; } } static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 prio, u8 bwg_id, u8 bw_pct, u8 up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = bw_pct; if (up_map != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = up_map; } static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, u8 bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; } static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, u8 prio, u8 bwg_id, u8 bw_pct, u8 up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = bw_pct; if (up_map != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = up_map; } static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, u8 
bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; } static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, u8 *bwg_id, u8 *bw_pct, u8 *up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; } static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, u8 *bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; } static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, u8 *prio, u8 *bwg_id, u8 *bw_pct, u8 *up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; } static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, u8 *bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; } static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, u8 setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != adapter->dcb_cfg.tc_config[priority].dcb_pfc) adapter->temp_dcb_cfg.pfc_mode_enable = true; } static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, u8 *setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; } static void ixgbe_dcbnl_devreset(struct net_device *dev) { struct 
ixgbe_adapter *adapter = netdev_priv(dev); while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); if (netif_running(dev)) dev->netdev_ops->ndo_stop(dev); ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); if (netif_running(dev)) dev->netdev_ops->ndo_open(dev); clear_bit(__IXGBE_RESETTING, &adapter->state); } static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; struct ixgbe_hw *hw = &adapter->hw; int ret = DCB_NO_HW_CHG; int i; /* Fail command if not in CEE mode */ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return ret; adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, MAX_TRAFFIC_CLASS); if (!adapter->dcb_set_bitmap) return ret; if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; /* Priority to TC mapping in CEE case default to 1:1 */ u8 prio_tc[MAX_USER_PRIORITY]; int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; #ifdef IXGBE_FCOE if (adapter->netdev->features & NETIF_F_FCOE_MTU) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, DCB_TX_CONFIG); ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, DCB_RX_CONFIG); ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill); ixgbe_dcb_unpack_max(dcb_cfg, max); ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id); ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type); ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id, prio_type, prio_tc); for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) netdev_set_prio_tc_map(netdev, i, prio_tc[i]); ret = DCB_HW_CHG_RST; } if (adapter->dcb_set_bitmap & BIT_PFC) { if (dcb_cfg->pfc_mode_enable) { u8 pfc_en; u8 prio_tc[MAX_USER_PRIORITY]; 
ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en); ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc); } else { hw->mac.ops.fc_enable(hw); } ixgbe_set_rx_drop_en(adapter); ret = DCB_HW_CHG; } #ifdef IXGBE_FCOE /* Reprogam FCoE hardware offloads when the traffic class * FCoE is using changes. This happens if the APP info * changes or the up2tc mapping is updated. */ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { struct dcb_app app = { .selector = DCB_APP_IDTYPE_ETHTYPE, .protocol = ETH_P_FCOE, }; u8 up = dcb_getapp(netdev, &app); adapter->fcoe.up = ffs(up) - 1; ixgbe_dcbnl_devreset(netdev); ret = DCB_HW_CHG_RST; } #endif adapter->dcb_set_bitmap = 0x00; return ret; } static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) { struct ixgbe_adapter *adapter = netdev_priv(netdev); switch (capid) { case DCB_CAP_ATTR_PG: *cap = true; break; case DCB_CAP_ATTR_PFC: *cap = true; break; case DCB_CAP_ATTR_UP2TC: *cap = false; break; case DCB_CAP_ATTR_PG_TCS: *cap = 0x80; break; case DCB_CAP_ATTR_PFC_TCS: *cap = 0x80; break; case DCB_CAP_ATTR_GSP: *cap = true; break; case DCB_CAP_ATTR_BCN: *cap = false; break; case DCB_CAP_ATTR_DCBX: *cap = adapter->dcbx_cap; break; default: *cap = false; break; } return 0; } static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { switch (tcid) { case DCB_NUMTCS_ATTR_PG: *num = adapter->dcb_cfg.num_tcs.pg_tcs; break; case DCB_NUMTCS_ATTR_PFC: *num = adapter->dcb_cfg.num_tcs.pfc_tcs; break; default: return -EINVAL; break; } } else { return -EINVAL; } return 0; } static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) { return -EINVAL; } static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); return adapter->dcb_cfg.pfc_mode_enable; } static void ixgbe_dcbnl_setpfcstate(struct net_device 
*netdev, u8 state) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.pfc_mode_enable = state; } /** * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority * @netdev : the corresponding netdev * @idtype : identifies the id as ether type or TCP/UDP port number * @id: id is either ether type or TCP/UDP port number * * Returns : on success, returns a non-zero 802.1p user priority bitmap * otherwise returns 0 as the invalid user priority bitmap to indicate an * error. */ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct dcb_app app = { .selector = idtype, .protocol = id, }; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return 0; return dcb_getapp(netdev, &app); } static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; /* No IEEE PFC settings available */ if (!my_ets) return 0; ets->cbs = my_ets->cbs; memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); return 0; } static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) { struct ixgbe_adapter *adapter = netdev_priv(dev); int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, err = 0; __u8 max_tc = 0; __u8 map_chg = 0; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; if (!adapter->ixgbe_ieee_ets) { adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), GFP_KERNEL); if (!adapter->ixgbe_ieee_ets) return -ENOMEM; /* initialize UP2TC mappings to invalid value */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) adapter->ixgbe_ieee_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS; } for (i = 0; i < 
IEEE_8021QAZ_MAX_TCS; i++) { if (ets->prio_tc[i] > max_tc) max_tc = ets->prio_tc[i]; if (ets->prio_tc[i] != adapter->ixgbe_ieee_ets->prio_tc[i]) map_chg = 1; } memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); if (max_tc) max_tc++; if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs) return -EINVAL; if (max_tc != netdev_get_num_tc(dev)) err = ixgbe_setup_tc(dev, max_tc); else if (map_chg) ixgbe_dcbnl_devreset(dev); if (err) goto err_out; err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); err_out: return err; } static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; int i; pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; /* No IEEE PFC settings available */ if (!my_pfc) return 0; pfc->pfc_en = my_pfc->pfc_en; pfc->mbc = my_pfc->mbc; pfc->delay = my_pfc->delay; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { pfc->requests[i] = adapter->stats.pxoffrxc[i]; pfc->indications[i] = adapter->stats.pxofftxc[i]; } return 0; } static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; u8 *prio_tc; int err; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; if (!adapter->ixgbe_ieee_pfc) { adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), GFP_KERNEL); if (!adapter->ixgbe_ieee_pfc) return -ENOMEM; } prio_tc = adapter->ixgbe_ieee_ets->prio_tc; memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); /* Enable link flow control parameters if PFC is disabled */ if (pfc->pfc_en) err = ixgbe_dcb_hw_pfc_config(hw, pfc->pfc_en, prio_tc); else err = hw->mac.ops.fc_enable(hw); ixgbe_set_rx_drop_en(adapter); return err; } static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) { struct ixgbe_adapter *adapter = netdev_priv(dev); int err = -EINVAL; if (!(adapter->dcbx_cap & 
DCB_CAP_DCBX_VER_IEEE)) return err; err = dcb_ieee_setapp(dev, app); if (err) return err; #ifdef IXGBE_FCOE if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); if (app_mask & (1 << adapter->fcoe.up)) return err; adapter->fcoe.up = app->priority; ixgbe_dcbnl_devreset(dev); } #endif /* VF devices should use default UP when available */ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == 0) { int vf; adapter->default_up = app->priority; for (vf = 0; vf < adapter->num_vfs; vf++) { struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; if (!vfinfo->pf_qos) ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, app->priority, vf); } } return 0; } static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) { struct ixgbe_adapter *adapter = netdev_priv(dev); int err; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; err = dcb_ieee_delapp(dev, app); #ifdef IXGBE_FCOE if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); if (app_mask & (1 << adapter->fcoe.up)) return err; adapter->fcoe.up = app_mask ? ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; ixgbe_dcbnl_devreset(dev); } #endif /* IF default priority is being removed clear VF default UP */ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == 0 && adapter->default_up == app->priority) { int vf; long unsigned int app_mask = dcb_ieee_getapp_mask(dev, app); int qos = app_mask ? 
find_first_bit(&app_mask, 8) : 0; adapter->default_up = qos; for (vf = 0; vf < adapter->num_vfs; vf++) { struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; if (!vfinfo->pf_qos) ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, qos, vf); } } return err; } static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) { struct ixgbe_adapter *adapter = netdev_priv(dev); return adapter->dcbx_cap; } static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_ets ets = {0}; struct ieee_pfc pfc = {0}; int err = 0; /* no support for LLD_MANAGED modes or CEE+IEEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || !(mode & DCB_CAP_DCBX_HOST)) return 1; if (mode == adapter->dcbx_cap) return 0; adapter->dcbx_cap = mode; /* ETS and PFC defaults */ ets.ets_cap = 8; pfc.pfc_cap = 8; if (mode & DCB_CAP_DCBX_VER_IEEE) { ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); } else if (mode & DCB_CAP_DCBX_VER_CEE) { u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG; adapter->dcb_set_bitmap |= mask; ixgbe_dcbnl_set_all(dev); } else { /* Drop into single TC mode strict priority as this * indicates CEE and IEEE versions are disabled */ ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); err = ixgbe_setup_tc(dev, 0); } return err ? 
1 : 0; } const struct dcbnl_rtnl_ops dcbnl_ops = { .ieee_getets = ixgbe_dcbnl_ieee_getets, .ieee_setets = ixgbe_dcbnl_ieee_setets, .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, .ieee_setapp = ixgbe_dcbnl_ieee_setapp, .ieee_delapp = ixgbe_dcbnl_ieee_delapp, .getstate = ixgbe_dcbnl_get_state, .setstate = ixgbe_dcbnl_set_state, .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx, .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx, .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx, .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx, .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx, .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx, .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx, .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx, .setpfccfg = ixgbe_dcbnl_set_pfc_cfg, .getpfccfg = ixgbe_dcbnl_get_pfc_cfg, .setall = ixgbe_dcbnl_set_all, .getcap = ixgbe_dcbnl_getcap, .getnumtcs = ixgbe_dcbnl_getnumtcs, .setnumtcs = ixgbe_dcbnl_setnumtcs, .getpfcstate = ixgbe_dcbnl_getpfcstate, .setpfcstate = ixgbe_dcbnl_setpfcstate, .getapp = ixgbe_dcbnl_getapp, .getdcbx = ixgbe_dcbnl_getdcbx, .setdcbx = ixgbe_dcbnl_setdcbx, };
gpl-2.0
anilsingh1605/android_kernel_samsung_logan2g
net/ipv4/inet_lro.c
2391
15308
/*
 * linux/net/ipv4/inet_lro.c
 *
 * Large Receive Offload (ipv4 / tcp)
 *
 * (C) Copyright IBM Corp. 2007
 *
 * Authors:
 *      Jan-Bernd Themann <themann@de.ibm.com>
 *      Christoph Raisch <raisch@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jan-Bernd Themann <themann@de.ibm.com>");
MODULE_DESCRIPTION("Large Receive Offload (ipv4 / tcp)");

/* doff/ihl are header lengths in 32-bit words; shift converts to bytes. */
#define TCP_HDR_LEN(tcph) (tcph->doff << 2)
#define IP_HDR_LEN(iph) (iph->ihl << 2)
#define TCP_PAYLOAD_LENGTH(iph, tcph) \
	(ntohs(iph->tot_len) - IP_HDR_LEN(iph) - TCP_HDR_LEN(tcph))

/* Header lengths in 32-bit words: bare IPv4/TCP, and TCP with timestamps. */
#define IPH_LEN_WO_OPTIONS 5
#define TCPH_LEN_WO_OPTIONS 5
#define TCPH_LEN_W_TIMESTAMP 8

#define LRO_MAX_PG_HLEN 64

#define LRO_INC_STATS(lro_mgr, attr) { lro_mgr->stats.attr++; }

/*
 * Basic tcp checks whether packet is suitable for LRO
 *
 * Returns 0 if the packet may be aggregated, -1 otherwise.  When
 * @lro_desc is non-NULL the packet is additionally validated against
 * the in-progress session (timestamp ordering).
 */
static int lro_tcp_ip_check(const struct iphdr *iph,
			    const struct tcphdr *tcph,
			    int len, const struct net_lro_desc *lro_desc)
{
	/* check ip header: don't aggregate padded frames */
	if (ntohs(iph->tot_len) != len)
		return -1;

	/* pure ACKs carry no payload to merge */
	if (TCP_PAYLOAD_LENGTH(iph, tcph) == 0)
		return -1;

	/* no IP options allowed */
	if (iph->ihl != IPH_LEN_WO_OPTIONS)
		return -1;

	/* only plain in-order data segments: ACK set, no other flags */
	if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack ||
	    tcph->rst || tcph->syn || tcph->fin)
		return -1;

	/* ECN congestion-experienced frames must reach the stack as-is */
	if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
		return -1;

	if (tcph->doff != TCPH_LEN_WO_OPTIONS &&
	    tcph->doff != TCPH_LEN_W_TIMESTAMP)
		return -1;

	/* check tcp options (only timestamp allowed) */
	if (tcph->doff == TCPH_LEN_W_TIMESTAMP) {
		__be32 *topt = (__be32 *)(tcph + 1);

		/* exact NOP,NOP,TIMESTAMP option layout required */
		if (*topt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
				   | (TCPOPT_TIMESTAMP << 8)
				   | TCPOLEN_TIMESTAMP))
			return -1;

		/* timestamp should be in right order */
		topt++;
		if (lro_desc && after(ntohl(lro_desc->tcp_rcv_tsval),
				      ntohl(*topt)))
			return -1;

		/* timestamp reply should not be zero */
		topt++;
		if (*topt == 0)
			return -1;
	}

	return 0;
}

/*
 * Rewrite the aggregated super-packet's IP/TCP headers (total length,
 * ack/window, echoed timestamp) and recompute both checksums before
 * the packet is handed to the stack.
 */
static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc)
{
	struct iphdr *iph = lro_desc->iph;
	struct tcphdr *tcph = lro_desc->tcph;
	__be32 *p;
	__wsum tcp_hdr_csum;

	tcph->ack_seq = lro_desc->tcp_ack;
	tcph->window = lro_desc->tcp_window;

	if (lro_desc->tcp_saw_tstamp) {
		/* third option word is the timestamp echo reply */
		p = (__be32 *)(tcph + 1);
		*(p+2) = lro_desc->tcp_rcv_tsecr;
	}

	iph->tot_len = htons(lro_desc->ip_tot_len);

	iph->check = 0;
	iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl);

	tcph->check = 0;
	tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), 0);
	lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum);
	tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
					lro_desc->ip_tot_len -
					IP_HDR_LEN(iph), IPPROTO_TCP,
					lro_desc->data_csum);
}

/*
 * Extract the checksum of the TCP payload only, by subtracting the TCP
 * header and pseudo-header contributions from the segment's checksum.
 */
static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
{
	__wsum tcp_csum;
	__wsum tcp_hdr_csum;
	__wsum tcp_ps_hdr_csum;

	tcp_csum = ~csum_unfold(tcph->check);
	tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), tcp_csum);

	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					     len + TCP_HDR_LEN(tcph),
					     IPPROTO_TCP, 0);

	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum), tcp_ps_hdr_csum);
}

/*
 * Start a new aggregation session on @lro_desc using @skb as the parent
 * packet; records the connection state needed to validate follow-up
 * segments (next expected seq, timestamps, vlan info).
 */
static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
			  struct iphdr *iph, struct tcphdr *tcph,
			  u16 vlan_tag, struct vlan_group *vgrp)
{
	int nr_frags;
	__be32 *ptr;
	u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);

	nr_frags = skb_shinfo(skb)->nr_frags;
	lro_desc->parent = skb;
	lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]);
	lro_desc->iph = iph;
	lro_desc->tcph = tcph;
	lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
	lro_desc->tcp_ack = tcph->ack_seq;
	lro_desc->tcp_window = tcph->window;

	lro_desc->pkt_aggr_cnt = 1;
	lro_desc->ip_tot_len = ntohs(iph->tot_len);

	/* doff == 8 words means the timestamp option is present */
	if (tcph->doff == 8) {
		ptr = (__be32 *)(tcph+1);
		lro_desc->tcp_saw_tstamp = 1;
		lro_desc->tcp_rcv_tsval = *(ptr+1);
		lro_desc->tcp_rcv_tsecr = *(ptr+2);
	}

	lro_desc->mss = tcp_data_len;
	lro_desc->vgrp = vgrp;
	lro_desc->vlan_tag = vlan_tag;
	lro_desc->active = 1;

	lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
						tcp_data_len);
}

/* Reset a descriptor to the inactive (all-zero) state. */
static inline void lro_clear_desc(struct net_lro_desc *lro_desc)
{
	memset(lro_desc, 0, sizeof(struct net_lro_desc));
}

/*
 * Bookkeeping shared by the skb and page-fragment aggregation paths:
 * advance the session state and fold the new payload checksum into the
 * running data checksum of the parent packet.
 */
static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
			   struct tcphdr *tcph, int tcp_data_len)
{
	struct sk_buff *parent = lro_desc->parent;
	__be32 *topt;

	lro_desc->pkt_aggr_cnt++;
	lro_desc->ip_tot_len += tcp_data_len;
	lro_desc->tcp_next_seq += tcp_data_len;
	lro_desc->tcp_window = tcph->window;
	lro_desc->tcp_ack = tcph->ack_seq;

	/* don't update tcp_rcv_tsval, would not work with PAWS */
	if (lro_desc->tcp_saw_tstamp) {
		topt = (__be32 *) (tcph + 1);
		lro_desc->tcp_rcv_tsecr = *(topt + 2);
	}

	lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
					     lro_tcp_data_csum(iph, tcph,
							       tcp_data_len),
					     parent->len);

	parent->len += tcp_data_len;
	parent->data_len += tcp_data_len;
	if (tcp_data_len > lro_desc->mss)
		lro_desc->mss = tcp_data_len;
}

/*
 * Append a whole skb (headers stripped) to the session's parent skb
 * via its frag_list chain.
 */
static void lro_add_packet(struct net_lro_desc *lro_desc, struct sk_buff *skb,
			   struct iphdr *iph, struct tcphdr *tcph)
{
	struct sk_buff *parent = lro_desc->parent;
	int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);

	lro_add_common(lro_desc, iph, tcph, tcp_data_len);

	/* drop everything before the TCP payload */
	skb_pull(skb, (skb->len - tcp_data_len));
	parent->truesize += skb->truesize;

	if (lro_desc->last_skb)
		lro_desc->last_skb->next = skb;
	else
		skb_shinfo(parent)->frag_list = skb;

	lro_desc->last_skb = skb;
}

/*
 * Append received page fragments (minus the @hlen header bytes) to the
 * session's parent skb frag array.
 */
static void lro_add_frags(struct net_lro_desc *lro_desc,
			  int len, int hlen, int truesize,
			  struct skb_frag_struct *skb_frags,
			  struct iphdr *iph, struct tcphdr *tcph)
{
	struct sk_buff *skb = lro_desc->parent;
	int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);

	lro_add_common(lro_desc, iph, tcph, tcp_data_len);

	skb->truesize += truesize;

	skb_frags[0].page_offset += hlen;
	skb_frags[0].size -= hlen;

	while (tcp_data_len > 0) {
		*(lro_desc->next_frag) = *skb_frags;
		tcp_data_len -= skb_frags->size;
		lro_desc->next_frag++;
		skb_frags++;
		skb_shinfo(skb)->nr_frags++;
	}
}

/*
 * Returns 0 when the packet belongs to the same TCP connection
 * (src/dst address and port 4-tuple) as the descriptor, -1 otherwise.
 */
static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
			      struct iphdr *iph,
			      struct tcphdr *tcph)
{
	if ((lro_desc->iph->saddr != iph->saddr) ||
	    (lro_desc->iph->daddr != iph->daddr) ||
	    (lro_desc->tcph->source != tcph->source) ||
	    (lro_desc->tcph->dest != tcph->dest))
		return -1;
	return 0;
}

/*
 * Find the active descriptor matching this connection, or any free
 * (inactive) descriptor.  Returns NULL — and bumps the no_desc stat —
 * when all descriptors are busy with other connections.
 */
static struct net_lro_desc *lro_get_desc(struct net_lro_mgr *lro_mgr,
					 struct net_lro_desc *lro_arr,
					 struct iphdr *iph,
					 struct tcphdr *tcph)
{
	struct net_lro_desc *lro_desc = NULL;
	struct net_lro_desc *tmp;
	int max_desc = lro_mgr->max_desc;
	int i;

	for (i = 0; i < max_desc; i++) {
		tmp = &lro_arr[i];
		if (tmp->active)
			if (!lro_check_tcp_conn(tmp, iph, tcph)) {
				lro_desc = tmp;
				goto out;
			}
	}

	for (i = 0; i < max_desc; i++) {
		if (!lro_arr[i].active) {
			lro_desc = &lro_arr[i];
			goto out;
		}
	}

	LRO_INC_STATS(lro_mgr, no_desc);
out:
	return lro_desc;
}

/*
 * Finalize the session: fix up headers (only needed if more than one
 * packet was merged), hand the super-packet to the stack via the
 * NAPI/non-NAPI and vlan/non-vlan variants, and free the descriptor.
 */
static void lro_flush(struct net_lro_mgr *lro_mgr,
		      struct net_lro_desc *lro_desc)
{
	if (lro_desc->pkt_aggr_cnt > 1)
		lro_update_tcp_ip_header(lro_desc);

	skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;

	if (lro_desc->vgrp) {
		if (lro_mgr->features & LRO_F_NAPI)
			vlan_hwaccel_receive_skb(lro_desc->parent,
						 lro_desc->vgrp,
						 lro_desc->vlan_tag);
		else
			vlan_hwaccel_rx(lro_desc->parent,
					lro_desc->vgrp,
					lro_desc->vlan_tag);

	} else {
		if (lro_mgr->features & LRO_F_NAPI)
			netif_receive_skb(lro_desc->parent);
		else
			netif_rx(lro_desc->parent);
	}

	LRO_INC_STATS(lro_mgr, flushed);
	lro_clear_desc(lro_desc);
}

/*
 * Try to aggregate @skb.  Returns 0 when the skb was consumed by LRO,
 * 1 when the caller must deliver it to the stack itself.
 */
static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
			  struct vlan_group *vgrp, u16 vlan_tag, void *priv)
{
	struct net_lro_desc *lro_desc;
	struct iphdr *iph;
	struct tcphdr *tcph;
	u64 flags;
	int vlan_hdr_len = 0;

	/* driver-supplied callback locates the IP/TCP headers */
	if (!lro_mgr->get_skb_header ||
	    lro_mgr->get_skb_header(skb, (void *)&iph, (void *)&tcph,
				    &flags, priv))
		goto out;

	if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
		goto out;

	lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
	if (!lro_desc)
		goto out;

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
		vlan_hdr_len = VLAN_HLEN;

	if (!lro_desc->active) { /* start new lro session */
		if (lro_tcp_ip_check(iph, tcph, skb->len - vlan_hdr_len, NULL))
			goto out;

		skb->ip_summed = lro_mgr->ip_summed_aggr;
		lro_init_desc(lro_desc, skb, iph, tcph, vlan_tag, vgrp);
		LRO_INC_STATS(lro_mgr, aggregated);
		return 0;
	}

	/* out-of-order segment: flush the session and punt to the stack */
	if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
		goto out2;

	if (lro_tcp_ip_check(iph, tcph, skb->len, lro_desc))
		goto out2;

	lro_add_packet(lro_desc, skb, iph, tcph);
	LRO_INC_STATS(lro_mgr, aggregated);

	/* flush once enough packets merged or IP tot_len would overflow */
	if ((lro_desc->pkt_aggr_cnt >= lro_mgr->max_aggr) ||
	    lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
		lro_flush(lro_mgr, lro_desc);

	return 0;

out2: /* send aggregated SKBs to stack */
	lro_flush(lro_mgr, lro_desc);

out:
	return 1;
}

/*
 * Build a fresh skb around page fragments received by a frag-based
 * driver: copy the first @hlen bytes into the linear area, attach the
 * rest as page frags.  Returns NULL on allocation failure.
 */
static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
				   struct skb_frag_struct *frags,
				   int len, int true_size,
				   void *mac_hdr,
				   int hlen, __wsum sum,
				   u32 ip_summed)
{
	struct sk_buff *skb;
	struct skb_frag_struct *skb_frags;
	int data_len = len;
	int hdr_len = min(len, hlen);

	skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad);
	if (!skb)
		return NULL;

	skb_reserve(skb, lro_mgr->frag_align_pad);
	skb->len = len;
	skb->data_len = len - hdr_len;
	skb->truesize += true_size;
	skb->tail += hdr_len;

	memcpy(skb->data, mac_hdr, hdr_len);

	skb_frags = skb_shinfo(skb)->frags;
	while (data_len > 0) {
		*skb_frags = *frags;
		data_len -= frags->size;
		skb_frags++;
		frags++;
		skb_shinfo(skb)->nr_frags++;
	}

	/* header bytes were copied to the linear area; skip them in frag 0 */
	skb_shinfo(skb)->frags[0].page_offset += hdr_len;
	skb_shinfo(skb)->frags[0].size -= hdr_len;

	skb->ip_summed = ip_summed;
	skb->csum = sum;
	skb->protocol = eth_type_trans(skb, lro_mgr->dev);

	return skb;
}

/*
 * Frag-based counterpart of __lro_proc_skb().  Returns NULL when the
 * data was aggregated, otherwise an skb the caller must pass up.
 */
static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
					  struct skb_frag_struct *frags,
					  int len, int true_size,
					  struct vlan_group *vgrp,
					  u16 vlan_tag, void *priv, __wsum sum)
{
	struct net_lro_desc *lro_desc;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct sk_buff *skb;
	u64 flags;
	void *mac_hdr;
	int mac_hdr_len;
	int hdr_len = LRO_MAX_PG_HLEN;
	int vlan_hdr_len = 0;

	if (!lro_mgr->get_frag_header ||
	    lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
				     (void *)&tcph, &flags, priv)) {
		mac_hdr = page_address(frags->page) + frags->page_offset;
		goto out1;
	}

	if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
		goto out1;

	hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
	mac_hdr_len = (int)((void *)(iph) - mac_hdr);

	lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
	if (!lro_desc)
		goto out1;

	if (!lro_desc->active) { /* start new lro session */
		if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
			goto out1;

		skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
				  hdr_len, 0, lro_mgr->ip_summed_aggr);
		if (!skb)
			goto out;

		if ((skb->protocol == htons(ETH_P_8021Q)) &&
		    !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
			vlan_hdr_len = VLAN_HLEN;

		/* re-locate headers inside the newly built skb */
		iph = (void *)(skb->data + vlan_hdr_len);
		tcph = (void *)((u8 *)skb->data + vlan_hdr_len
				+ IP_HDR_LEN(iph));

		lro_init_desc(lro_desc, skb, iph, tcph, 0, NULL);
		LRO_INC_STATS(lro_mgr, aggregated);
		return NULL;
	}

	if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
		goto out2;

	if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
		goto out2;

	lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
	LRO_INC_STATS(lro_mgr, aggregated);

	if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
	    lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
		lro_flush(lro_mgr, lro_desc);

	return NULL;

out2: /* send aggregated packets to the stack */
	lro_flush(lro_mgr, lro_desc);

out1:  /* Original packet has to be posted to the stack */
	skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
			  hdr_len, sum, lro_mgr->ip_summed);
out:
	return skb;
}

/*
 * Driver entry point for skb-based receive: aggregate if possible,
 * otherwise deliver the skb directly.
 */
void lro_receive_skb(struct net_lro_mgr *lro_mgr,
		     struct sk_buff *skb,
		     void *priv)
{
	if (__lro_proc_skb(lro_mgr, skb, NULL, 0, priv)) {
		if (lro_mgr->features & LRO_F_NAPI)
			netif_receive_skb(skb);
		else
			netif_rx(skb);
	}
}
EXPORT_SYMBOL(lro_receive_skb);

/* As lro_receive_skb(), for skbs carrying a hardware-accelerated vlan tag. */
void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
				  struct sk_buff *skb,
				  struct vlan_group *vgrp,
				  u16 vlan_tag,
				  void *priv)
{
	if (__lro_proc_skb(lro_mgr, skb, vgrp, vlan_tag, priv)) {
		if (lro_mgr->features & LRO_F_NAPI)
			vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
		else
			vlan_hwaccel_rx(skb, vgrp, vlan_tag);
	}
}
EXPORT_SYMBOL(lro_vlan_hwaccel_receive_skb);

/* Driver entry point for page-fragment receive. */
void lro_receive_frags(struct net_lro_mgr *lro_mgr,
		       struct skb_frag_struct *frags,
		       int len, int true_size, void *priv, __wsum sum)
{
	struct sk_buff *skb;

	skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0,
				 priv, sum);
	if (!skb)
		return;

	if (lro_mgr->features & LRO_F_NAPI)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
}
EXPORT_SYMBOL(lro_receive_frags);

/* As lro_receive_frags(), for frames with a hardware vlan tag. */
void lro_vlan_hwaccel_receive_frags(struct net_lro_mgr *lro_mgr,
				    struct skb_frag_struct *frags,
				    int len, int true_size,
				    struct vlan_group *vgrp,
				    u16 vlan_tag, void *priv, __wsum sum)
{
	struct sk_buff *skb;

	skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp,
				 vlan_tag, priv, sum);
	if (!skb)
		return;

	if (lro_mgr->features & LRO_F_NAPI)
		vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
	else
		vlan_hwaccel_rx(skb, vgrp, vlan_tag);
}
EXPORT_SYMBOL(lro_vlan_hwaccel_receive_frags);

/* Flush every active aggregation session (e.g. at end of a NAPI poll). */
void lro_flush_all(struct net_lro_mgr *lro_mgr)
{
	int i;
	struct net_lro_desc *lro_desc = lro_mgr->lro_arr;

	for (i = 0; i < lro_mgr->max_desc; i++) {
		if (lro_desc[i].active)
			lro_flush(lro_mgr, &lro_desc[i]);
	}
}
EXPORT_SYMBOL(lro_flush_all);

/*
 * Flush the aggregation session (if any) matching the given IP/TCP
 * headers, forcing the pending super-packet up to the stack.
 *
 * Fix: lro_get_desc() returns NULL when no descriptor matches this
 * connection and every descriptor is busy (both scan loops fail); the
 * previous code dereferenced the result unconditionally, a NULL
 * pointer dereference.  Guard the ->active test with a NULL check.
 */
void lro_flush_pkt(struct net_lro_mgr *lro_mgr,
		   struct iphdr *iph, struct tcphdr *tcph)
{
	struct net_lro_desc *lro_desc;

	lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
	if (lro_desc && lro_desc->active)
		lro_flush(lro_mgr, lro_desc);
}
EXPORT_SYMBOL(lro_flush_pkt);
gpl-2.0
belucha/linux
drivers/watchdog/sbc7240_wdt.c
2391
7049
/*
 * NANO7240 SBC Watchdog device driver
 *
 * Based on w83877f.c by Scott Jennings,
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation;
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * rights and limitations under the License.
 *
 * (c) Copyright 2007 Gilles GIGAN <gilles.gigan@jcu.edu.au>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

/* Reading 0x443 arms/refreshes the watchdog, reading 0x043 disarms it;
 * writing the timeout value to 0x443 programs the period. */
#define SBC7240_ENABLE_PORT 0x443
#define SBC7240_DISABLE_PORT 0x043
#define SBC7240_SET_TIMEOUT_PORT SBC7240_ENABLE_PORT
#define SBC7240_MAGIC_CHAR 'V'

#define SBC7240_TIMEOUT 30
#define SBC7240_MAX_TIMEOUT 255
static int timeout = SBC7240_TIMEOUT;	/* in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<="
		 __MODULE_STRING(SBC7240_MAX_TIMEOUT) ", default="
		 __MODULE_STRING(SBC7240_TIMEOUT) ")");

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Disable watchdog when closing device file");

/* Bit positions inside wdt_status (manipulated with the atomic bit ops). */
#define SBC7240_OPEN_STATUS_BIT		0
#define SBC7240_ENABLED_STATUS_BIT	1
#define SBC7240_EXPECT_CLOSE_STATUS_BIT	2
static unsigned long wdt_status;

/*
 * Utility routines
 */

static void wdt_disable(void)
{
	/* disable the watchdog */
	if (test_and_clear_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status)) {
		inb_p(SBC7240_DISABLE_PORT);
		pr_info("Watchdog timer is now disabled\n");
	}
}

static void wdt_enable(void)
{
	/* enable the watchdog */
	if (!test_and_set_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status)) {
		inb_p(SBC7240_ENABLE_PORT);
		pr_info("Watchdog timer is now enabled\n");
	}
}

/* Program a new timeout (seconds); returns 0 on success, -1 if out of range. */
static int wdt_set_timeout(int t)
{
	if (t < 1 || t > SBC7240_MAX_TIMEOUT) {
		pr_err("timeout value must be 1<=x<=%d\n",
		       SBC7240_MAX_TIMEOUT);
		return -1;
	}
	/* set the timeout */
	outb_p((unsigned)t, SBC7240_SET_TIMEOUT_PORT);
	timeout = t;
	pr_info("timeout set to %d seconds\n", t);
	return 0;
}

/* Whack the dog */
static inline void wdt_keepalive(void)
{
	/* a read of the enable port restarts the countdown */
	if (test_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status))
		inb_p(SBC7240_ENABLE_PORT);
}

/*
 * /dev/watchdog handling
 */

/* Any write pets the dog; with nowayout off, scan for the magic 'V'
 * character which arms the "expect close" state. */
static ssize_t fop_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t i;
	char c;

	if (count) {
		if (!nowayout) {
			clear_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT,
				  &wdt_status);

			/* is there a magic char ? */
			for (i = 0; i != count; i++) {
				if (get_user(c, buf + i))
					return -EFAULT;
				if (c == SBC7240_MAGIC_CHAR) {
					set_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT,
						&wdt_status);
					break;
				}
			}
		}

		wdt_keepalive();
	}

	return count;
}

/* Single-open device: opening starts the watchdog. */
static int fop_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(SBC7240_OPEN_STATUS_BIT, &wdt_status))
		return -EBUSY;

	wdt_enable();

	return nonseekable_open(inode, file);
}

/* Close stops the watchdog only after a magic-char write (or if
 * nowayout is off); otherwise keep it running and warn. */
static int fop_close(struct inode *inode, struct file *file)
{
	if (test_and_clear_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT, &wdt_status)
	    || !nowayout) {
		wdt_disable();
	} else {
		pr_crit("Unexpected close, not stopping watchdog!\n");
		wdt_keepalive();
	}

	clear_bit(SBC7240_OPEN_STATUS_BIT, &wdt_status);
	return 0;
}

static const struct watchdog_info ident = {
	.options = WDIOF_KEEPALIVEPING|
		   WDIOF_SETTIMEOUT|
		   WDIOF_MAGICCLOSE,
	.firmware_version = 1,
	.identity = "SBC7240",
};

/* Standard watchdog ioctl interface (see the kernel watchdog API). */
static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user((void __user *)arg, &ident,
				    sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, (int __user *)arg);
	case WDIOC_SETOPTIONS:
	{
		int options;
		int retval = -EINVAL;

		if (get_user(options, (int __user *)arg))
			return -EFAULT;

		if (options & WDIOS_DISABLECARD) {
			wdt_disable();
			retval = 0;
		}

		if (options & WDIOS_ENABLECARD) {
			wdt_enable();
			retval = 0;
		}

		return retval;
	}
	case WDIOC_KEEPALIVE:
		wdt_keepalive();
		return 0;
	case WDIOC_SETTIMEOUT:
	{
		int new_timeout;

		if (get_user(new_timeout, (int __user *)arg))
			return -EFAULT;

		if (wdt_set_timeout(new_timeout))
			return -EINVAL;

		/* Fall through */
	}
	case WDIOC_GETTIMEOUT:
		return put_user(timeout, (int __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations wdt_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.write = fop_write,
	.open = fop_open,
	.release = fop_close,
	.unlocked_ioctl = fop_ioctl,
};

static struct miscdevice wdt_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &wdt_fops,
};

/*
 *	Notifier for system down
 */

static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
			  void *unused)
{
	/* stop the hardware timer before the machine goes down */
	if (code == SYS_DOWN || code == SYS_HALT)
		wdt_disable();
	return NOTIFY_DONE;
}

static struct notifier_block wdt_notifier = {
	.notifier_call = wdt_notify_sys,
};

static void __exit sbc7240_wdt_unload(void)
{
	pr_info("Removing watchdog\n");
	misc_deregister(&wdt_miscdev);

	unregister_reboot_notifier(&wdt_notifier);
	release_region(SBC7240_ENABLE_PORT, 1);
}

static int __init sbc7240_wdt_init(void)
{
	int rc = -EBUSY;

	if (!request_region(SBC7240_ENABLE_PORT, 1, "SBC7240 WDT")) {
		pr_err("I/O address 0x%04x already in use\n",
		       SBC7240_ENABLE_PORT);
		rc = -EIO;
		goto err_out;
	}

	/* The IO port 0x043 used to disable the watchdog
	 * is already claimed by the system timer, so we
	 * can't request_region() it ...*/

	if (timeout < 1 || timeout > SBC7240_MAX_TIMEOUT) {
		timeout = SBC7240_TIMEOUT;
		pr_info("timeout value must be 1<=x<=%d, using %d\n",
			SBC7240_MAX_TIMEOUT, timeout);
	}
	wdt_set_timeout(timeout);
	wdt_disable();

	rc = register_reboot_notifier(&wdt_notifier);
	if (rc) {
		pr_err("cannot register reboot notifier (err=%d)\n", rc);
		goto err_out_region;
	}

	rc = misc_register(&wdt_miscdev);
	if (rc) {
		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
		       wdt_miscdev.minor, rc);
		goto err_out_reboot_notifier;
	}

	pr_info("Watchdog driver for SBC7240 initialised (nowayout=%d)\n",
		nowayout);

	return 0;

err_out_reboot_notifier:
	unregister_reboot_notifier(&wdt_notifier);
err_out_region:
	release_region(SBC7240_ENABLE_PORT, 1);
err_out:
	return rc;
}

module_init(sbc7240_wdt_init);
module_exit(sbc7240_wdt_unload);

MODULE_AUTHOR("Gilles Gigan");
MODULE_DESCRIPTION("Watchdog device driver for single board"
		   " computers EPIC Nano 7240 from iEi");
MODULE_LICENSE("GPL");
gpl-2.0
OptiPop/kernel_asus_grouper
arch/arm/mach-exynos4/dev-pd.c
2647
2951
/* linux/arch/arm/mach-exynos4/dev-pd.c
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 - Power Domain support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>

#include <mach/regs-pmu.h>

#include <plat/pd.h>

/*
 * Write @ctrl (S5P_INT_LOCAL_PWR_EN to power up, 0 to power down) to the
 * domain's configuration register and poll the status register at
 * base + 0x4 until (status & S5P_INT_LOCAL_PWR_EN) equals @ctrl.
 * Polls at most 10 times with 100us delays (max 1ms).
 *
 * Factored out of exynos4_pd_enable()/exynos4_pd_disable(), which
 * previously duplicated this write-then-poll loop verbatim.
 *
 * @op names the operation ("enable"/"disable") for the error message.
 * Returns 0 on success, -ETIMEDOUT if the domain does not settle.
 */
static int exynos4_pd_ctrl(struct device *dev, u32 ctrl, const char *op)
{
	struct samsung_pd_info *pdata = dev->platform_data;
	u32 timeout = 10;	/* Wait max 1ms */

	__raw_writel(ctrl, pdata->base);

	while ((__raw_readl(pdata->base + 0x4) & S5P_INT_LOCAL_PWR_EN)
	       != ctrl) {
		if (timeout == 0) {
			printk(KERN_ERR "Power domain %s %s failed.\n",
				dev_name(dev), op);
			return -ETIMEDOUT;
		}
		timeout--;
		udelay(100);
	}

	return 0;
}

/* Power-up callback published through samsung_pd_info. */
static int exynos4_pd_enable(struct device *dev)
{
	return exynos4_pd_ctrl(dev, S5P_INT_LOCAL_PWR_EN, "enable");
}

/* Power-down callback published through samsung_pd_info. */
static int exynos4_pd_disable(struct device *dev)
{
	return exynos4_pd_ctrl(dev, 0, "disable");
}

/* One "samsung-pd" platform device per controllable power domain. */
struct platform_device exynos4_device_pd[] = {
	{
		.name		= "samsung-pd",
		.id		= 0,
		.dev = {
			.platform_data = &(struct samsung_pd_info) {
				.enable		= exynos4_pd_enable,
				.disable	= exynos4_pd_disable,
				.base		= S5P_PMU_MFC_CONF,
			},
		},
	}, {
		.name		= "samsung-pd",
		.id		= 1,
		.dev = {
			.platform_data = &(struct samsung_pd_info) {
				.enable		= exynos4_pd_enable,
				.disable	= exynos4_pd_disable,
				.base		= S5P_PMU_G3D_CONF,
			},
		},
	}, {
		.name		= "samsung-pd",
		.id		= 2,
		.dev = {
			.platform_data = &(struct samsung_pd_info) {
				.enable		= exynos4_pd_enable,
				.disable	= exynos4_pd_disable,
				.base		= S5P_PMU_LCD0_CONF,
			},
		},
	}, {
		.name		= "samsung-pd",
		.id		= 3,
		.dev = {
			.platform_data = &(struct samsung_pd_info) {
				.enable		= exynos4_pd_enable,
				.disable	= exynos4_pd_disable,
				.base		= S5P_PMU_LCD1_CONF,
			},
		},
	}, {
		.name		= "samsung-pd",
		.id		= 4,
		.dev = {
			.platform_data = &(struct samsung_pd_info) {
				.enable		= exynos4_pd_enable,
				.disable	= exynos4_pd_disable,
				.base		= S5P_PMU_TV_CONF,
			},
		},
	}, {
		.name		= "samsung-pd",
		.id		= 5,
		.dev = {
			.platform_data = &(struct samsung_pd_info) {
				.enable		= exynos4_pd_enable,
				.disable	= exynos4_pd_disable,
				.base		= S5P_PMU_CAM_CONF,
			},
		},
	}, {
		.name		= "samsung-pd",
		.id		= 6,
		.dev = {
			.platform_data = &(struct samsung_pd_info) {
				.enable		= exynos4_pd_enable,
				.disable	= exynos4_pd_disable,
				.base		= S5P_PMU_GPS_CONF,
			},
		},
	},
};
gpl-2.0
StanTRC/lge-kernel-e430
drivers/net/usb/kaweth.c
2903
37430
/**************************************************************** * * kaweth.c - driver for KL5KUSB101 based USB->Ethernet * * (c) 2000 Interlan Communications * (c) 2000 Stephane Alnet * (C) 2001 Brad Hards * (C) 2002 Oliver Neukum * * Original author: The Zapman <zapman@interlan.net> * Inspired by, and much credit goes to Michael Rothwell * <rothwell@interlan.net> for the test equipment, help, and patience * Based off of (and with thanks to) Petko Manolov's pegaus.c driver. * Also many thanks to Joel Silverman and Ed Surprenant at Kawasaki * for providing the firmware and driver resources. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ****************************************************************/ /* TODO: * Develop test procedures for USB net interfaces * Run test procedures * Fix bugs from previous two steps * Snoop other OSs for any tricks we're not doing * Reduce arbitrary timeouts * Smart multicast support * Temporary MAC change support * Tunable SOFs parameter - ioctl()? 
* Ethernet stats collection * Code formatting improvements */ #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/usb.h> #include <linux/types.h> #include <linux/ethtool.h> #include <linux/dma-mapping.h> #include <linux/wait.h> #include <linux/firmware.h> #include <asm/uaccess.h> #include <asm/byteorder.h> #undef DEBUG #define KAWETH_MTU 1514 #define KAWETH_BUF_SIZE 1664 #define KAWETH_TX_TIMEOUT (5 * HZ) #define KAWETH_SCRATCH_SIZE 32 #define KAWETH_FIRMWARE_BUF_SIZE 4096 #define KAWETH_CONTROL_TIMEOUT (30000) #define KAWETH_STATUS_BROKEN 0x0000001 #define KAWETH_STATUS_CLOSING 0x0000002 #define KAWETH_STATUS_SUSPENDING 0x0000004 #define KAWETH_STATUS_BLOCKED (KAWETH_STATUS_CLOSING | KAWETH_STATUS_SUSPENDING) #define KAWETH_PACKET_FILTER_PROMISCUOUS 0x01 #define KAWETH_PACKET_FILTER_ALL_MULTICAST 0x02 #define KAWETH_PACKET_FILTER_DIRECTED 0x04 #define KAWETH_PACKET_FILTER_BROADCAST 0x08 #define KAWETH_PACKET_FILTER_MULTICAST 0x10 /* Table 7 */ #define KAWETH_COMMAND_GET_ETHERNET_DESC 0x00 #define KAWETH_COMMAND_MULTICAST_FILTERS 0x01 #define KAWETH_COMMAND_SET_PACKET_FILTER 0x02 #define KAWETH_COMMAND_STATISTICS 0x03 #define KAWETH_COMMAND_SET_TEMP_MAC 0x06 #define KAWETH_COMMAND_GET_TEMP_MAC 0x07 #define KAWETH_COMMAND_SET_URB_SIZE 0x08 #define KAWETH_COMMAND_SET_SOFS_WAIT 0x09 #define KAWETH_COMMAND_SCAN 0xFF #define KAWETH_SOFS_TO_WAIT 0x05 #define INTBUFFERSIZE 4 #define STATE_OFFSET 0 #define STATE_MASK 0x40 #define STATE_SHIFT 5 #define IS_BLOCKED(s) (s & KAWETH_STATUS_BLOCKED) MODULE_AUTHOR("Michael Zappe <zapman@interlan.net>, Stephane Alnet <stephane@u-picardie.fr>, Brad Hards <bhards@bigpond.net.au> and Oliver Neukum <oliver@neukum.org>"); MODULE_DESCRIPTION("KL5USB101 USB Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("kaweth/new_code.bin"); MODULE_FIRMWARE("kaweth/new_code_fix.bin"); 
MODULE_FIRMWARE("kaweth/trigger_code.bin"); MODULE_FIRMWARE("kaweth/trigger_code_fix.bin"); static const char driver_name[] = "kaweth"; static int kaweth_probe( struct usb_interface *intf, const struct usb_device_id *id /* from id_table */ ); static void kaweth_disconnect(struct usb_interface *intf); static int kaweth_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe, struct usb_ctrlrequest *cmd, void *data, int len, int timeout); static int kaweth_suspend(struct usb_interface *intf, pm_message_t message); static int kaweth_resume(struct usb_interface *intf); /**************************************************************** * usb_device_id ****************************************************************/ static struct usb_device_id usb_klsi_table[] = { { USB_DEVICE(0x03e8, 0x0008) }, /* AOX Endpoints USB Ethernet */ { USB_DEVICE(0x04bb, 0x0901) }, /* I-O DATA USB-ET/T */ { USB_DEVICE(0x0506, 0x03e8) }, /* 3Com 3C19250 */ { USB_DEVICE(0x0506, 0x11f8) }, /* 3Com 3C460 */ { USB_DEVICE(0x0557, 0x2002) }, /* ATEN USB Ethernet */ { USB_DEVICE(0x0557, 0x4000) }, /* D-Link DSB-650C */ { USB_DEVICE(0x0565, 0x0002) }, /* Peracom Enet */ { USB_DEVICE(0x0565, 0x0003) }, /* Optus@Home UEP1045A */ { USB_DEVICE(0x0565, 0x0005) }, /* Peracom Enet2 */ { USB_DEVICE(0x05e9, 0x0008) }, /* KLSI KL5KUSB101B */ { USB_DEVICE(0x05e9, 0x0009) }, /* KLSI KL5KUSB101B (Board change) */ { USB_DEVICE(0x066b, 0x2202) }, /* Linksys USB10T */ { USB_DEVICE(0x06e1, 0x0008) }, /* ADS USB-10BT */ { USB_DEVICE(0x06e1, 0x0009) }, /* ADS USB-10BT */ { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. 
*/ { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */ { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x085a, 0x0009) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x087d, 0x5704) }, /* Jaton USB Ethernet Device Adapter */ { USB_DEVICE(0x0951, 0x0008) }, /* Kingston Technology USB Ethernet Adapter */ { USB_DEVICE(0x095a, 0x3003) }, /* Portsmith Express Ethernet Adapter */ { USB_DEVICE(0x10bd, 0x1427) }, /* ASANTE USB To Ethernet Adapter */ { USB_DEVICE(0x1342, 0x0204) }, /* Mobility USB-Ethernet Adapter */ { USB_DEVICE(0x13d2, 0x0400) }, /* Shark Pocket Adapter */ { USB_DEVICE(0x1485, 0x0001) }, /* Silicom U2E */ { USB_DEVICE(0x1485, 0x0002) }, /* Psion Dacom Gold Port Ethernet */ { USB_DEVICE(0x1645, 0x0005) }, /* Entrega E45 */ { USB_DEVICE(0x1645, 0x0008) }, /* Entrega USB Ethernet Adapter */ { USB_DEVICE(0x1645, 0x8005) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x1668, 0x0323) }, /* Actiontec USB Ethernet */ { USB_DEVICE(0x2001, 0x4000) }, /* D-link DSB-650C */ {} /* Null terminator */ }; MODULE_DEVICE_TABLE (usb, usb_klsi_table); /**************************************************************** * kaweth_driver ****************************************************************/ static struct usb_driver kaweth_driver = { .name = driver_name, .probe = kaweth_probe, .disconnect = kaweth_disconnect, .suspend = kaweth_suspend, .resume = kaweth_resume, .id_table = usb_klsi_table, .supports_autosuspend = 1, }; typedef __u8 eth_addr_t[6]; /**************************************************************** * usb_eth_dev ****************************************************************/ struct usb_eth_dev { char *name; __u16 vendor; __u16 device; void *pdata; }; /**************************************************************** * 
kaweth_ethernet_configuration * Refer Table 8 ****************************************************************/ struct kaweth_ethernet_configuration { __u8 size; __u8 reserved1; __u8 reserved2; eth_addr_t hw_addr; __u32 statistics_mask; __le16 segment_size; __u16 max_multicast_filters; __u8 reserved3; } __packed; /**************************************************************** * kaweth_device ****************************************************************/ struct kaweth_device { spinlock_t device_lock; __u32 status; int end; int suspend_lowmem_rx; int suspend_lowmem_ctrl; int linkstate; int opened; struct delayed_work lowmem_work; struct usb_device *dev; struct usb_interface *intf; struct net_device *net; wait_queue_head_t term_wait; struct urb *rx_urb; struct urb *tx_urb; struct urb *irq_urb; dma_addr_t intbufferhandle; __u8 *intbuffer; dma_addr_t rxbufferhandle; __u8 *rx_buf; struct sk_buff *tx_skb; __u8 *firmware_buf; __u8 scratch[KAWETH_SCRATCH_SIZE]; __u16 packet_filter_bitmap; struct kaweth_ethernet_configuration configuration; struct net_device_stats stats; }; /**************************************************************** * kaweth_control ****************************************************************/ static int kaweth_control(struct kaweth_device *kaweth, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { struct usb_ctrlrequest *dr; int retval; dbg("kaweth_control()"); if(in_interrupt()) { dbg("in_interrupt()"); return -EBUSY; } dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC); if (!dr) { dbg("kmalloc() failed"); return -ENOMEM; } dr->bRequestType = requesttype; dr->bRequest = request; dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(size); retval = kaweth_internal_control_msg(kaweth->dev, pipe, dr, data, size, timeout); kfree(dr); return retval; } /**************************************************************** * 
kaweth_read_configuration ****************************************************************/ static int kaweth_read_configuration(struct kaweth_device *kaweth) { int retval; dbg("Reading kaweth configuration"); retval = kaweth_control(kaweth, usb_rcvctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_GET_ETHERNET_DESC, USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, 0, 0, (void *)&kaweth->configuration, sizeof(kaweth->configuration), KAWETH_CONTROL_TIMEOUT); return retval; } /**************************************************************** * kaweth_set_urb_size ****************************************************************/ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size) { int retval; dbg("Setting URB size to %d", (unsigned)urb_size); retval = kaweth_control(kaweth, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_URB_SIZE, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, urb_size, 0, (void *)&kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); return retval; } /**************************************************************** * kaweth_set_sofs_wait ****************************************************************/ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait) { int retval; dbg("Set SOFS wait to %d", (unsigned)sofs_wait); retval = kaweth_control(kaweth, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_SOFS_WAIT, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, sofs_wait, 0, (void *)&kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); return retval; } /**************************************************************** * kaweth_set_receive_filter ****************************************************************/ static int kaweth_set_receive_filter(struct kaweth_device *kaweth, __u16 receive_filter) { int retval; dbg("Set receive filter to %d", (unsigned)receive_filter); retval = kaweth_control(kaweth, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_PACKET_FILTER, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, 
receive_filter, 0, (void *)&kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); return retval; } /**************************************************************** * kaweth_download_firmware ****************************************************************/ static int kaweth_download_firmware(struct kaweth_device *kaweth, const char *fwname, __u8 interrupt, __u8 type) { const struct firmware *fw; int data_len; int ret; ret = request_firmware(&fw, fwname, &kaweth->dev->dev); if (ret) { err("Firmware request failed\n"); return ret; } if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { err("Firmware too big: %zu", fw->size); release_firmware(fw); return -ENOSPC; } data_len = fw->size; memcpy(kaweth->firmware_buf, fw->data, fw->size); release_firmware(fw); kaweth->firmware_buf[2] = (data_len & 0xFF) - 7; kaweth->firmware_buf[3] = data_len >> 8; kaweth->firmware_buf[4] = type; kaweth->firmware_buf[5] = interrupt; dbg("High: %i, Low:%i", kaweth->firmware_buf[3], kaweth->firmware_buf[2]); dbg("Downloading firmware at %p to kaweth device at %p", fw->data, kaweth); dbg("Firmware length: %d", data_len); return kaweth_control(kaweth, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SCAN, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, 0, 0, (void *)kaweth->firmware_buf, data_len, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_trigger_firmware ****************************************************************/ static int kaweth_trigger_firmware(struct kaweth_device *kaweth, __u8 interrupt) { kaweth->firmware_buf[0] = 0xB6; kaweth->firmware_buf[1] = 0xC3; kaweth->firmware_buf[2] = 0x01; kaweth->firmware_buf[3] = 0x00; kaweth->firmware_buf[4] = 0x06; kaweth->firmware_buf[5] = interrupt; kaweth->firmware_buf[6] = 0x00; kaweth->firmware_buf[7] = 0x00; dbg("Triggering firmware"); return kaweth_control(kaweth, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SCAN, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, 0, 0, (void *)kaweth->firmware_buf, 
8, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_reset ****************************************************************/ static int kaweth_reset(struct kaweth_device *kaweth) { int result; dbg("kaweth_reset(%p)", kaweth); result = usb_reset_configuration(kaweth->dev); mdelay(10); dbg("kaweth_reset() returns %d.",result); return result; } static void kaweth_usb_receive(struct urb *); static int kaweth_resubmit_rx_urb(struct kaweth_device *, gfp_t); /**************************************************************** int_callback *****************************************************************/ static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, gfp_t mf) { int status; status = usb_submit_urb (kaweth->irq_urb, mf); if (unlikely(status == -ENOMEM)) { kaweth->suspend_lowmem_ctrl = 1; schedule_delayed_work(&kaweth->lowmem_work, HZ/4); } else { kaweth->suspend_lowmem_ctrl = 0; } if (status) err ("can't resubmit intr, %s-%s, status %d", kaweth->dev->bus->bus_name, kaweth->dev->devpath, status); } static void int_callback(struct urb *u) { struct kaweth_device *kaweth = u->context; int act_state; int status = u->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: /* error */ goto resubmit; } /* we check the link state to report changes */ if (kaweth->linkstate != (act_state = ( kaweth->intbuffer[STATE_OFFSET] | STATE_MASK) >> STATE_SHIFT)) { if (act_state) netif_carrier_on(kaweth->net); else netif_carrier_off(kaweth->net); kaweth->linkstate = act_state; } resubmit: kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); } static void kaweth_resubmit_tl(struct work_struct *work) { struct kaweth_device *kaweth = container_of(work, struct kaweth_device, lowmem_work.work); if (IS_BLOCKED(kaweth->status)) return; if (kaweth->suspend_lowmem_rx) kaweth_resubmit_rx_urb(kaweth, GFP_NOIO); if 
(kaweth->suspend_lowmem_ctrl) kaweth_resubmit_int_urb(kaweth, GFP_NOIO); } /**************************************************************** * kaweth_resubmit_rx_urb ****************************************************************/ static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth, gfp_t mem_flags) { int result; usb_fill_bulk_urb(kaweth->rx_urb, kaweth->dev, usb_rcvbulkpipe(kaweth->dev, 1), kaweth->rx_buf, KAWETH_BUF_SIZE, kaweth_usb_receive, kaweth); kaweth->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; kaweth->rx_urb->transfer_dma = kaweth->rxbufferhandle; if((result = usb_submit_urb(kaweth->rx_urb, mem_flags))) { if (result == -ENOMEM) { kaweth->suspend_lowmem_rx = 1; schedule_delayed_work(&kaweth->lowmem_work, HZ/4); } err("resubmitting rx_urb %d failed", result); } else { kaweth->suspend_lowmem_rx = 0; } return result; } static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth); /**************************************************************** * kaweth_usb_receive ****************************************************************/ static void kaweth_usb_receive(struct urb *urb) { struct kaweth_device *kaweth = urb->context; struct net_device *net = kaweth->net; int status = urb->status; int count = urb->actual_length; int count2 = urb->transfer_buffer_length; __u16 pkt_len = le16_to_cpup((__le16 *)kaweth->rx_buf); struct sk_buff *skb; if (unlikely(status == -EPIPE)) { kaweth->stats.rx_errors++; kaweth->end = 1; wake_up(&kaweth->term_wait); dbg("Status was -EPIPE."); return; } if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) { /* we are killed - set a flag and wake the disconnect handler */ kaweth->end = 1; wake_up(&kaweth->term_wait); dbg("Status was -ECONNRESET or -ESHUTDOWN."); return; } if (unlikely(status == -EPROTO || status == -ETIME || status == -EILSEQ)) { kaweth->stats.rx_errors++; dbg("Status was -EPROTO, -ETIME, or -EILSEQ."); return; } if (unlikely(status == -EOVERFLOW)) { kaweth->stats.rx_errors++; dbg("Status 
was -EOVERFLOW."); } spin_lock(&kaweth->device_lock); if (IS_BLOCKED(kaweth->status)) { spin_unlock(&kaweth->device_lock); return; } spin_unlock(&kaweth->device_lock); if(status && status != -EREMOTEIO && count != 1) { err("%s RX status: %d count: %d packet_len: %d", net->name, status, count, (int)pkt_len); kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } if(kaweth->net && (count > 2)) { if(pkt_len > (count - 2)) { err("Packet length too long for USB frame (pkt_len: %x, count: %x)",pkt_len, count); err("Packet len & 2047: %x", pkt_len & 2047); err("Count 2: %x", count2); kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } if(!(skb = dev_alloc_skb(pkt_len+2))) { kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, net); netif_rx(skb); kaweth->stats.rx_packets++; kaweth->stats.rx_bytes += pkt_len; } kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); } /**************************************************************** * kaweth_open ****************************************************************/ static int kaweth_open(struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); int res; dbg("Opening network device."); res = usb_autopm_get_interface(kaweth->intf); if (res) { err("Interface cannot be resumed."); return -EIO; } res = kaweth_resubmit_rx_urb(kaweth, GFP_KERNEL); if (res) goto err_out; usb_fill_int_urb( kaweth->irq_urb, kaweth->dev, usb_rcvintpipe(kaweth->dev, 3), kaweth->intbuffer, INTBUFFERSIZE, int_callback, kaweth, 250); /* overriding the descriptor */ kaweth->irq_urb->transfer_dma = kaweth->intbufferhandle; kaweth->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL); if (res) { usb_kill_urb(kaweth->rx_urb); goto err_out; } kaweth->opened = 1; netif_start_queue(net); kaweth_async_set_rx_mode(kaweth); return 0; 
err_out: usb_autopm_put_interface(kaweth->intf); return -EIO; } /**************************************************************** * kaweth_kill_urbs ****************************************************************/ static void kaweth_kill_urbs(struct kaweth_device *kaweth) { usb_kill_urb(kaweth->irq_urb); usb_kill_urb(kaweth->rx_urb); usb_kill_urb(kaweth->tx_urb); cancel_delayed_work_sync(&kaweth->lowmem_work); /* a scheduled work may have resubmitted, we hit them again */ usb_kill_urb(kaweth->irq_urb); usb_kill_urb(kaweth->rx_urb); } /**************************************************************** * kaweth_close ****************************************************************/ static int kaweth_close(struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); netif_stop_queue(net); kaweth->opened = 0; kaweth->status |= KAWETH_STATUS_CLOSING; kaweth_kill_urbs(kaweth); kaweth->status &= ~KAWETH_STATUS_CLOSING; usb_autopm_put_interface(kaweth->intf); return 0; } static u32 kaweth_get_link(struct net_device *dev) { struct kaweth_device *kaweth = netdev_priv(dev); return kaweth->linkstate; } static const struct ethtool_ops ops = { .get_link = kaweth_get_link }; /**************************************************************** * kaweth_usb_transmit_complete ****************************************************************/ static void kaweth_usb_transmit_complete(struct urb *urb) { struct kaweth_device *kaweth = urb->context; struct sk_buff *skb = kaweth->tx_skb; int status = urb->status; if (unlikely(status != 0)) if (status != -ENOENT) dbg("%s: TX status %d.", kaweth->net->name, status); netif_wake_queue(kaweth->net); dev_kfree_skb_irq(skb); } /**************************************************************** * kaweth_start_xmit ****************************************************************/ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb, struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); __le16 *private_header; int 
res; spin_lock_irq(&kaweth->device_lock); kaweth_async_set_rx_mode(kaweth); netif_stop_queue(net); if (IS_BLOCKED(kaweth->status)) { goto skip; } /* We now decide whether we can put our special header into the sk_buff */ if (skb_cloned(skb) || skb_headroom(skb) < 2) { /* no such luck - we make our own */ struct sk_buff *copied_skb; copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC); dev_kfree_skb_irq(skb); skb = copied_skb; if (!copied_skb) { kaweth->stats.tx_errors++; netif_start_queue(net); spin_unlock_irq(&kaweth->device_lock); return NETDEV_TX_OK; } } private_header = (__le16 *)__skb_push(skb, 2); *private_header = cpu_to_le16(skb->len-2); kaweth->tx_skb = skb; usb_fill_bulk_urb(kaweth->tx_urb, kaweth->dev, usb_sndbulkpipe(kaweth->dev, 2), private_header, skb->len, kaweth_usb_transmit_complete, kaweth); kaweth->end = 0; if((res = usb_submit_urb(kaweth->tx_urb, GFP_ATOMIC))) { dev_warn(&net->dev, "kaweth failed tx_urb %d\n", res); skip: kaweth->stats.tx_errors++; netif_start_queue(net); dev_kfree_skb_irq(skb); } else { kaweth->stats.tx_packets++; kaweth->stats.tx_bytes += skb->len; } spin_unlock_irq(&kaweth->device_lock); return NETDEV_TX_OK; } /**************************************************************** * kaweth_set_rx_mode ****************************************************************/ static void kaweth_set_rx_mode(struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); __u16 packet_filter_bitmap = KAWETH_PACKET_FILTER_DIRECTED | KAWETH_PACKET_FILTER_BROADCAST | KAWETH_PACKET_FILTER_MULTICAST; dbg("Setting Rx mode to %d", packet_filter_bitmap); netif_stop_queue(net); if (net->flags & IFF_PROMISC) { packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS; } else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) { packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST; } kaweth->packet_filter_bitmap = packet_filter_bitmap; netif_wake_queue(net); } /**************************************************************** * 
kaweth_async_set_rx_mode ****************************************************************/ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth) { int result; __u16 packet_filter_bitmap = kaweth->packet_filter_bitmap; kaweth->packet_filter_bitmap = 0; if (packet_filter_bitmap == 0) return; if (in_interrupt()) return; result = kaweth_control(kaweth, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_PACKET_FILTER, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, packet_filter_bitmap, 0, (void *)&kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); if(result < 0) { err("Failed to set Rx mode: %d", result); } else { dbg("Set Rx mode to %d", packet_filter_bitmap); } } /**************************************************************** * kaweth_netdev_stats ****************************************************************/ static struct net_device_stats *kaweth_netdev_stats(struct net_device *dev) { struct kaweth_device *kaweth = netdev_priv(dev); return &kaweth->stats; } /**************************************************************** * kaweth_tx_timeout ****************************************************************/ static void kaweth_tx_timeout(struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); dev_warn(&net->dev, "%s: Tx timed out. 
Resetting.\n", net->name); kaweth->stats.tx_errors++; net->trans_start = jiffies; usb_unlink_urb(kaweth->tx_urb); } /**************************************************************** * kaweth_suspend ****************************************************************/ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message) { struct kaweth_device *kaweth = usb_get_intfdata(intf); unsigned long flags; dbg("Suspending device"); spin_lock_irqsave(&kaweth->device_lock, flags); kaweth->status |= KAWETH_STATUS_SUSPENDING; spin_unlock_irqrestore(&kaweth->device_lock, flags); kaweth_kill_urbs(kaweth); return 0; } /**************************************************************** * kaweth_resume ****************************************************************/ static int kaweth_resume(struct usb_interface *intf) { struct kaweth_device *kaweth = usb_get_intfdata(intf); unsigned long flags; dbg("Resuming device"); spin_lock_irqsave(&kaweth->device_lock, flags); kaweth->status &= ~KAWETH_STATUS_SUSPENDING; spin_unlock_irqrestore(&kaweth->device_lock, flags); if (!kaweth->opened) return 0; kaweth_resubmit_rx_urb(kaweth, GFP_NOIO); kaweth_resubmit_int_urb(kaweth, GFP_NOIO); return 0; } /**************************************************************** * kaweth_probe ****************************************************************/ static const struct net_device_ops kaweth_netdev_ops = { .ndo_open = kaweth_open, .ndo_stop = kaweth_close, .ndo_start_xmit = kaweth_start_xmit, .ndo_tx_timeout = kaweth_tx_timeout, .ndo_set_rx_mode = kaweth_set_rx_mode, .ndo_get_stats = kaweth_netdev_stats, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int kaweth_probe( struct usb_interface *intf, const struct usb_device_id *id /* from id_table */ ) { struct usb_device *dev = interface_to_usbdev(intf); struct kaweth_device *kaweth; struct net_device *netdev; const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 
0xFF, 0xFF }; int result = 0; dbg("Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x", dev->devnum, le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct), le16_to_cpu(dev->descriptor.bcdDevice)); dbg("Device at %p", dev); dbg("Descriptor length: %x type: %x", (int)dev->descriptor.bLength, (int)dev->descriptor.bDescriptorType); netdev = alloc_etherdev(sizeof(*kaweth)); if (!netdev) return -ENOMEM; kaweth = netdev_priv(netdev); kaweth->dev = dev; kaweth->net = netdev; spin_lock_init(&kaweth->device_lock); init_waitqueue_head(&kaweth->term_wait); dbg("Resetting."); kaweth_reset(kaweth); /* * If high byte of bcdDevice is nonzero, firmware is already * downloaded. Don't try to do it again, or we'll hang the device. */ if (le16_to_cpu(dev->descriptor.bcdDevice) >> 8) { dev_info(&intf->dev, "Firmware present in device.\n"); } else { /* Download the firmware */ dev_info(&intf->dev, "Downloading firmware...\n"); kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); if ((result = kaweth_download_firmware(kaweth, "kaweth/new_code.bin", 100, 2)) < 0) { err("Error downloading firmware (%d)", result); goto err_fw; } if ((result = kaweth_download_firmware(kaweth, "kaweth/new_code_fix.bin", 100, 3)) < 0) { err("Error downloading firmware fix (%d)", result); goto err_fw; } if ((result = kaweth_download_firmware(kaweth, "kaweth/trigger_code.bin", 126, 2)) < 0) { err("Error downloading trigger code (%d)", result); goto err_fw; } if ((result = kaweth_download_firmware(kaweth, "kaweth/trigger_code_fix.bin", 126, 3)) < 0) { err("Error downloading trigger code fix (%d)", result); goto err_fw; } if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) { err("Error triggering firmware (%d)", result); goto err_fw; } /* Device will now disappear for a moment... */ dev_info(&intf->dev, "Firmware loaded. 
I'll be back...\n"); err_fw: free_page((unsigned long)kaweth->firmware_buf); free_netdev(netdev); return -EIO; } result = kaweth_read_configuration(kaweth); if(result < 0) { err("Error reading configuration (%d), no net device created", result); goto err_free_netdev; } dev_info(&intf->dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask); dev_info(&intf->dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1)); dev_info(&intf->dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size)); dev_info(&intf->dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr); if(!memcmp(&kaweth->configuration.hw_addr, &bcast_addr, sizeof(bcast_addr))) { err("Firmware not functioning properly, no net device created"); goto err_free_netdev; } if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) { dbg("Error setting URB size"); goto err_free_netdev; } if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) { err("Error setting SOFS wait"); goto err_free_netdev; } result = kaweth_set_receive_filter(kaweth, KAWETH_PACKET_FILTER_DIRECTED | KAWETH_PACKET_FILTER_BROADCAST | KAWETH_PACKET_FILTER_MULTICAST); if(result < 0) { err("Error setting receive filter"); goto err_free_netdev; } dbg("Initializing net device."); kaweth->intf = intf; kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kaweth->tx_urb) goto err_free_netdev; kaweth->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kaweth->rx_urb) goto err_only_tx; kaweth->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kaweth->irq_urb) goto err_tx_and_rx; kaweth->intbuffer = usb_alloc_coherent( kaweth->dev, INTBUFFERSIZE, GFP_KERNEL, &kaweth->intbufferhandle); if (!kaweth->intbuffer) goto err_tx_and_rx_and_irq; kaweth->rx_buf = usb_alloc_coherent( kaweth->dev, KAWETH_BUF_SIZE, GFP_KERNEL, &kaweth->rxbufferhandle); if (!kaweth->rx_buf) goto err_all_but_rxbuf; memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr)); memcpy(netdev->dev_addr, 
&kaweth->configuration.hw_addr, sizeof(kaweth->configuration.hw_addr)); netdev->netdev_ops = &kaweth_netdev_ops; netdev->watchdog_timeo = KAWETH_TX_TIMEOUT; netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size); SET_ETHTOOL_OPS(netdev, &ops); /* kaweth is zeroed as part of alloc_netdev */ INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); usb_set_intfdata(intf, kaweth); #if 0 // dma_supported() is deeply broken on almost all architectures if (dma_supported (&intf->dev, 0xffffffffffffffffULL)) kaweth->net->features |= NETIF_F_HIGHDMA; #endif SET_NETDEV_DEV(netdev, &intf->dev); if (register_netdev(netdev) != 0) { err("Error registering netdev."); goto err_intfdata; } dev_info(&intf->dev, "kaweth interface created at %s\n", kaweth->net->name); dbg("Kaweth probe returning."); return 0; err_intfdata: usb_set_intfdata(intf, NULL); usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle); err_all_but_rxbuf: usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle); err_tx_and_rx_and_irq: usb_free_urb(kaweth->irq_urb); err_tx_and_rx: usb_free_urb(kaweth->rx_urb); err_only_tx: usb_free_urb(kaweth->tx_urb); err_free_netdev: free_netdev(netdev); return -EIO; } /**************************************************************** * kaweth_disconnect ****************************************************************/ static void kaweth_disconnect(struct usb_interface *intf) { struct kaweth_device *kaweth = usb_get_intfdata(intf); struct net_device *netdev; dev_info(&intf->dev, "Unregistering\n"); usb_set_intfdata(intf, NULL); if (!kaweth) { dev_warn(&intf->dev, "unregistering non-existent device\n"); return; } netdev = kaweth->net; dbg("Unregistering net device"); unregister_netdev(netdev); usb_free_urb(kaweth->rx_urb); usb_free_urb(kaweth->tx_urb); usb_free_urb(kaweth->irq_urb); usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle); 
usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle); free_netdev(netdev); } // FIXME this completion stuff is a modified clone of // an OLD version of some stuff in usb.c ... struct usb_api_data { wait_queue_head_t wqh; int done; }; /*-------------------------------------------------------------------* * completion handler for compatibility wrappers (sync control/bulk) * *-------------------------------------------------------------------*/ static void usb_api_blocking_completion(struct urb *urb) { struct usb_api_data *awd = (struct usb_api_data *)urb->context; awd->done=1; wake_up(&awd->wqh); } /*-------------------------------------------------------------------* * COMPATIBILITY STUFF * *-------------------------------------------------------------------*/ // Starts urb and waits for completion or timeout static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length) { struct usb_api_data awd; int status; init_waitqueue_head(&awd.wqh); awd.done = 0; urb->context = &awd; status = usb_submit_urb(urb, GFP_NOIO); if (status) { // something went wrong usb_free_urb(urb); return status; } if (!wait_event_timeout(awd.wqh, awd.done, timeout)) { // timeout dev_warn(&urb->dev->dev, "usb_control/bulk_msg: timeout\n"); usb_kill_urb(urb); // remove urb safely status = -ETIMEDOUT; } else { status = urb->status; } if (actual_length) { *actual_length = urb->actual_length; } usb_free_urb(urb); return status; } /*-------------------------------------------------------------------*/ // returns status (negative) or length (positive) static int kaweth_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe, struct usb_ctrlrequest *cmd, void *data, int len, int timeout) { struct urb *urb; int retv; int length = 0; /* shut up GCC */ urb = usb_alloc_urb(0, GFP_NOIO); if (!urb) return -ENOMEM; usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char*)cmd, data, len, usb_api_blocking_completion, NULL); retv = 
usb_start_wait_urb(urb, timeout, &length); if (retv < 0) { return retv; } else { return length; } } module_usb_driver(kaweth_driver);
gpl-2.0
cybernet/rhel7-kernel
kernel/drivers/message/i2o/i2o_proc.c
3159
51904
/* * procfs handler for Linux I2O subsystem * * (c) Copyright 1999 Deepak Saxena * * Originally written by Deepak Saxena(deepak@plexity.net) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This is an initial test release. The code is based on the design of the * ide procfs system (drivers/block/ide-proc.c). Some code taken from * i2o-core module by Alan Cox. * * DISCLAIMER: This code is still under development/test and may cause * your system to behave unpredictably. Use at your own discretion. * * * Fixes/additions: * Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI), * Auvo Häkkinen (Auvo.Hakkinen@cs.Helsinki.FI) * University of Helsinki, Department of Computer Science * LAN entries * Markus Lidel <Markus.Lidel@shadowconnect.com> * Changes for new I2O API */ #define OSM_NAME "proc-osm" #define OSM_VERSION "1.316" #define OSM_DESCRIPTION "I2O ProcFS OSM" #define I2O_MAX_MODULES 4 // FIXME! 
#define FMT_U64_HEX "0x%08x%08x" #define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64)) #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/i2o.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/byteorder.h> /* Structure used to define /proc entries */ typedef struct _i2o_proc_entry_t { char *name; /* entry name */ umode_t mode; /* mode */ const struct file_operations *fops; /* open function */ } i2o_proc_entry; /* global I2O /proc/i2o entry */ static struct proc_dir_entry *i2o_proc_dir_root; /* proc OSM driver struct */ static struct i2o_driver i2o_proc_driver = { .name = OSM_NAME, }; static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len) { int i; /* 19990419 -sralston * The I2O v1.5 (and v2.0 so far) "official specification" * got serial numbers WRONG! * Apparently, and despite what Section 3.4.4 says and * Figure 3-35 shows (pg 3-39 in the pdf doc), * the convention / consensus seems to be: * + First byte is SNFormat * + Second byte is SNLen (but only if SNFormat==7 (?)) * + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format */ switch (serialno[0]) { case I2O_SNFORMAT_BINARY: /* Binary */ seq_printf(seq, "0x"); for (i = 0; i < serialno[1]; i++) { seq_printf(seq, "%02X", serialno[2 + i]); } break; case I2O_SNFORMAT_ASCII: /* ASCII */ if (serialno[1] < ' ') { /* printable or SNLen? */ /* sanity */ max_len = (max_len < serialno[1]) ? max_len : serialno[1]; serialno[1 + max_len] = '\0'; /* just print it */ seq_printf(seq, "%s", &serialno[2]); } else { /* print chars for specified length */ for (i = 0; i < serialno[1]; i++) { seq_printf(seq, "%c", serialno[2 + i]); } } break; case I2O_SNFORMAT_UNICODE: /* UNICODE */ seq_printf(seq, "UNICODE Format. 
Can't Display\n"); break; case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */ seq_printf(seq, "LAN-48 MAC address @ %pM", &serialno[2]); break; case I2O_SNFORMAT_WAN: /* WAN MAC Address */ /* FIXME: Figure out what a WAN access address looks like?? */ seq_printf(seq, "WAN Access Address"); break; /* plus new in v2.0 */ case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */ /* FIXME: Figure out what a LAN-64 address really looks like?? */ seq_printf(seq, "LAN-64 MAC address @ [?:%02X:%02X:?] %pM", serialno[8], serialno[9], &serialno[2]); break; case I2O_SNFORMAT_DDM: /* I2O DDM */ seq_printf(seq, "DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh", *(u16 *) & serialno[2], *(u16 *) & serialno[4], *(u16 *) & serialno[6]); break; case I2O_SNFORMAT_IEEE_REG64: /* IEEE Registered (64-bit) */ case I2O_SNFORMAT_IEEE_REG128: /* IEEE Registered (128-bit) */ /* FIXME: Figure if this is even close?? */ seq_printf(seq, "IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n", *(u32 *) & serialno[2], *(u32 *) & serialno[6], *(u32 *) & serialno[10], *(u32 *) & serialno[14]); break; case I2O_SNFORMAT_UNKNOWN: /* Unknown 0 */ case I2O_SNFORMAT_UNKNOWN2: /* Unknown 0xff */ default: seq_printf(seq, "Unknown data format (0x%02x)", serialno[0]); break; } return 0; } /** * i2o_get_class_name - do i2o class name lookup * @class: class number * * Return a descriptive string for an i2o class. 
*/ static const char *i2o_get_class_name(int class) { int idx = 16; static char *i2o_class_name[] = { "Executive", "Device Driver Module", "Block Device", "Tape Device", "LAN Interface", "WAN Interface", "Fibre Channel Port", "Fibre Channel Device", "SCSI Device", "ATE Port", "ATE Device", "Floppy Controller", "Floppy Device", "Secondary Bus Port", "Peer Transport Agent", "Peer Transport", "Unknown" }; switch (class & 0xfff) { case I2O_CLASS_EXECUTIVE: idx = 0; break; case I2O_CLASS_DDM: idx = 1; break; case I2O_CLASS_RANDOM_BLOCK_STORAGE: idx = 2; break; case I2O_CLASS_SEQUENTIAL_STORAGE: idx = 3; break; case I2O_CLASS_LAN: idx = 4; break; case I2O_CLASS_WAN: idx = 5; break; case I2O_CLASS_FIBRE_CHANNEL_PORT: idx = 6; break; case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL: idx = 7; break; case I2O_CLASS_SCSI_PERIPHERAL: idx = 8; break; case I2O_CLASS_ATE_PORT: idx = 9; break; case I2O_CLASS_ATE_PERIPHERAL: idx = 10; break; case I2O_CLASS_FLOPPY_CONTROLLER: idx = 11; break; case I2O_CLASS_FLOPPY_DEVICE: idx = 12; break; case I2O_CLASS_BUS_ADAPTER: idx = 13; break; case I2O_CLASS_PEER_TRANSPORT_AGENT: idx = 14; break; case I2O_CLASS_PEER_TRANSPORT: idx = 15; break; } return i2o_class_name[idx]; } #define SCSI_TABLE_SIZE 13 static char *scsi_devices[] = { "Direct-Access Read/Write", "Sequential-Access Storage", "Printer", "Processor", "WORM Device", "CD-ROM Device", "Scanner Device", "Optical Memory Device", "Medium Changer Device", "Communications Device", "Graphics Art Pre-Press Device", "Graphics Art Pre-Press Device", "Array Controller Device" }; static char *chtostr(char *tmp, u8 *chars, int n) { tmp[0] = 0; return strncat(tmp, (char *)chars, n); } static int i2o_report_query_status(struct seq_file *seq, int block_status, char *group) { switch (block_status) { case -ETIMEDOUT: return seq_printf(seq, "Timeout reading group %s.\n", group); case -ENOMEM: return seq_printf(seq, "No free memory to read the table.\n"); case -I2O_PARAMS_STATUS_INVALID_GROUP_ID: return 
seq_printf(seq, "Group %s not supported.\n", group); default: return seq_printf(seq, "Error reading group %s. BlockStatus 0x%02X\n", group, -block_status); } } static char *bus_strings[] = { "Local Bus", "ISA", "EISA", "PCI", "PCMCIA", "NUBUS", "CARDBUS" }; static int i2o_seq_show_hrt(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; i2o_hrt *hrt = (i2o_hrt *) c->hrt.virt; u32 bus; int i; if (hrt->hrt_version) { seq_printf(seq, "HRT table for controller is too new a version.\n"); return 0; } seq_printf(seq, "HRT has %d entries of %d bytes each.\n", hrt->num_entries, hrt->entry_len << 2); for (i = 0; i < hrt->num_entries; i++) { seq_printf(seq, "Entry %d:\n", i); seq_printf(seq, " Adapter ID: %0#10x\n", hrt->hrt_entry[i].adapter_id); seq_printf(seq, " Controlling tid: %0#6x\n", hrt->hrt_entry[i].parent_tid); if (hrt->hrt_entry[i].bus_type != 0x80) { bus = hrt->hrt_entry[i].bus_type; seq_printf(seq, " %s Information\n", bus_strings[bus]); switch (bus) { case I2O_BUS_LOCAL: seq_printf(seq, " IOBase: %0#6x,", hrt->hrt_entry[i].bus.local_bus. LbBaseIOPort); seq_printf(seq, " MemoryBase: %0#10x\n", hrt->hrt_entry[i].bus.local_bus. LbBaseMemoryAddress); break; case I2O_BUS_ISA: seq_printf(seq, " IOBase: %0#6x,", hrt->hrt_entry[i].bus.isa_bus. IsaBaseIOPort); seq_printf(seq, " MemoryBase: %0#10x,", hrt->hrt_entry[i].bus.isa_bus. IsaBaseMemoryAddress); seq_printf(seq, " CSN: %0#4x,", hrt->hrt_entry[i].bus.isa_bus.CSN); break; case I2O_BUS_EISA: seq_printf(seq, " IOBase: %0#6x,", hrt->hrt_entry[i].bus.eisa_bus. EisaBaseIOPort); seq_printf(seq, " MemoryBase: %0#10x,", hrt->hrt_entry[i].bus.eisa_bus. EisaBaseMemoryAddress); seq_printf(seq, " Slot: %0#4x,", hrt->hrt_entry[i].bus.eisa_bus. EisaSlotNumber); break; case I2O_BUS_PCI: seq_printf(seq, " Bus: %0#4x", hrt->hrt_entry[i].bus.pci_bus. PciBusNumber); seq_printf(seq, " Dev: %0#4x", hrt->hrt_entry[i].bus.pci_bus. 
PciDeviceNumber); seq_printf(seq, " Func: %0#4x", hrt->hrt_entry[i].bus.pci_bus. PciFunctionNumber); seq_printf(seq, " Vendor: %0#6x", hrt->hrt_entry[i].bus.pci_bus. PciVendorID); seq_printf(seq, " Device: %0#6x\n", hrt->hrt_entry[i].bus.pci_bus. PciDeviceID); break; default: seq_printf(seq, " Unsupported Bus Type\n"); } } else seq_printf(seq, " Unknown Bus Type\n"); } return 0; } static int i2o_seq_show_lct(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; i2o_lct *lct = (i2o_lct *) c->lct; int entries; int i; #define BUS_TABLE_SIZE 3 static char *bus_ports[] = { "Generic Bus", "SCSI Bus", "Fibre Channel Bus" }; entries = (lct->table_size - 3) / 9; seq_printf(seq, "LCT contains %d %s\n", entries, entries == 1 ? "entry" : "entries"); if (lct->boot_tid) seq_printf(seq, "Boot Device @ ID %d\n", lct->boot_tid); seq_printf(seq, "Current Change Indicator: %#10x\n", lct->change_ind); for (i = 0; i < entries; i++) { seq_printf(seq, "Entry %d\n", i); seq_printf(seq, " Class, SubClass : %s", i2o_get_class_name(lct->lct_entry[i].class_id)); /* * Classes which we'll print subclass info for */ switch (lct->lct_entry[i].class_id & 0xFFF) { case I2O_CLASS_RANDOM_BLOCK_STORAGE: switch (lct->lct_entry[i].sub_class) { case 0x00: seq_printf(seq, ", Direct-Access Read/Write"); break; case 0x04: seq_printf(seq, ", WORM Drive"); break; case 0x05: seq_printf(seq, ", CD-ROM Drive"); break; case 0x07: seq_printf(seq, ", Optical Memory Device"); break; default: seq_printf(seq, ", Unknown (0x%02x)", lct->lct_entry[i].sub_class); break; } break; case I2O_CLASS_LAN: switch (lct->lct_entry[i].sub_class & 0xFF) { case 0x30: seq_printf(seq, ", Ethernet"); break; case 0x40: seq_printf(seq, ", 100base VG"); break; case 0x50: seq_printf(seq, ", IEEE 802.5/Token-Ring"); break; case 0x60: seq_printf(seq, ", ANSI X3T9.5 FDDI"); break; case 0x70: seq_printf(seq, ", Fibre Channel"); break; default: seq_printf(seq, ", Unknown Sub-Class (0x%02x)", 
lct->lct_entry[i].sub_class & 0xFF); break; } break; case I2O_CLASS_SCSI_PERIPHERAL: if (lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE) seq_printf(seq, ", %s", scsi_devices[lct->lct_entry[i]. sub_class]); else seq_printf(seq, ", Unknown Device Type"); break; case I2O_CLASS_BUS_ADAPTER: if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE) seq_printf(seq, ", %s", bus_ports[lct->lct_entry[i]. sub_class]); else seq_printf(seq, ", Unknown Bus Type"); break; } seq_printf(seq, "\n"); seq_printf(seq, " Local TID : 0x%03x\n", lct->lct_entry[i].tid); seq_printf(seq, " User TID : 0x%03x\n", lct->lct_entry[i].user_tid); seq_printf(seq, " Parent TID : 0x%03x\n", lct->lct_entry[i].parent_tid); seq_printf(seq, " Identity Tag : 0x%x%x%x%x%x%x%x%x\n", lct->lct_entry[i].identity_tag[0], lct->lct_entry[i].identity_tag[1], lct->lct_entry[i].identity_tag[2], lct->lct_entry[i].identity_tag[3], lct->lct_entry[i].identity_tag[4], lct->lct_entry[i].identity_tag[5], lct->lct_entry[i].identity_tag[6], lct->lct_entry[i].identity_tag[7]); seq_printf(seq, " Change Indicator : %0#10x\n", lct->lct_entry[i].change_ind); seq_printf(seq, " Event Capab Mask : %0#10x\n", lct->lct_entry[i].device_flags); } return 0; } static int i2o_seq_show_status(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; char prodstr[25]; int version; i2o_status_block *sb = c->status_block.virt; i2o_status_get(c); // reread the status block seq_printf(seq, "Organization ID : %0#6x\n", sb->org_id); version = sb->i2o_version; /* FIXME for Spec 2.0 if (version == 0x02) { seq_printf(seq, "Lowest I2O version supported: "); switch(workspace[2]) { case 0x00: seq_printf(seq, "1.0\n"); break; case 0x01: seq_printf(seq, "1.5\n"); break; case 0x02: seq_printf(seq, "2.0\n"); break; } seq_printf(seq, "Highest I2O version supported: "); switch(workspace[3]) { case 0x00: seq_printf(seq, "1.0\n"); break; case 0x01: seq_printf(seq, "1.5\n"); break; case 0x02: seq_printf(seq, "2.0\n"); break; } } 
*/ seq_printf(seq, "IOP ID : %0#5x\n", sb->iop_id); seq_printf(seq, "Host Unit ID : %0#6x\n", sb->host_unit_id); seq_printf(seq, "Segment Number : %0#5x\n", sb->segment_number); seq_printf(seq, "I2O version : "); switch (version) { case 0x00: seq_printf(seq, "1.0\n"); break; case 0x01: seq_printf(seq, "1.5\n"); break; case 0x02: seq_printf(seq, "2.0\n"); break; default: seq_printf(seq, "Unknown version\n"); } seq_printf(seq, "IOP State : "); switch (sb->iop_state) { case 0x01: seq_printf(seq, "INIT\n"); break; case 0x02: seq_printf(seq, "RESET\n"); break; case 0x04: seq_printf(seq, "HOLD\n"); break; case 0x05: seq_printf(seq, "READY\n"); break; case 0x08: seq_printf(seq, "OPERATIONAL\n"); break; case 0x10: seq_printf(seq, "FAILED\n"); break; case 0x11: seq_printf(seq, "FAULTED\n"); break; default: seq_printf(seq, "Unknown\n"); break; } seq_printf(seq, "Messenger Type : "); switch (sb->msg_type) { case 0x00: seq_printf(seq, "Memory mapped\n"); break; case 0x01: seq_printf(seq, "Memory mapped only\n"); break; case 0x02: seq_printf(seq, "Remote only\n"); break; case 0x03: seq_printf(seq, "Memory mapped and remote\n"); break; default: seq_printf(seq, "Unknown\n"); } seq_printf(seq, "Inbound Frame Size : %d bytes\n", sb->inbound_frame_size << 2); seq_printf(seq, "Max Inbound Frames : %d\n", sb->max_inbound_frames); seq_printf(seq, "Current Inbound Frames : %d\n", sb->cur_inbound_frames); seq_printf(seq, "Max Outbound Frames : %d\n", sb->max_outbound_frames); /* Spec doesn't say if NULL terminated or not... 
*/ memcpy(prodstr, sb->product_id, 24); prodstr[24] = '\0'; seq_printf(seq, "Product ID : %s\n", prodstr); seq_printf(seq, "Expected LCT Size : %d bytes\n", sb->expected_lct_size); seq_printf(seq, "IOP Capabilities\n"); seq_printf(seq, " Context Field Size Support : "); switch (sb->iop_capabilities & 0x0000003) { case 0: seq_printf(seq, "Supports only 32-bit context fields\n"); break; case 1: seq_printf(seq, "Supports only 64-bit context fields\n"); break; case 2: seq_printf(seq, "Supports 32-bit and 64-bit context fields, " "but not concurrently\n"); break; case 3: seq_printf(seq, "Supports 32-bit and 64-bit context fields " "concurrently\n"); break; default: seq_printf(seq, "0x%08x\n", sb->iop_capabilities); } seq_printf(seq, " Current Context Field Size : "); switch (sb->iop_capabilities & 0x0000000C) { case 0: seq_printf(seq, "not configured\n"); break; case 4: seq_printf(seq, "Supports only 32-bit context fields\n"); break; case 8: seq_printf(seq, "Supports only 64-bit context fields\n"); break; case 12: seq_printf(seq, "Supports both 32-bit or 64-bit context fields " "concurrently\n"); break; default: seq_printf(seq, "\n"); } seq_printf(seq, " Inbound Peer Support : %s\n", (sb-> iop_capabilities & 0x00000010) ? "Supported" : "Not supported"); seq_printf(seq, " Outbound Peer Support : %s\n", (sb-> iop_capabilities & 0x00000020) ? "Supported" : "Not supported"); seq_printf(seq, " Peer to Peer Support : %s\n", (sb-> iop_capabilities & 0x00000040) ? 
"Supported" : "Not supported"); seq_printf(seq, "Desired private memory size : %d kB\n", sb->desired_mem_size >> 10); seq_printf(seq, "Allocated private memory size : %d kB\n", sb->current_mem_size >> 10); seq_printf(seq, "Private memory base address : %0#10x\n", sb->current_mem_base); seq_printf(seq, "Desired private I/O size : %d kB\n", sb->desired_io_size >> 10); seq_printf(seq, "Allocated private I/O size : %d kB\n", sb->current_io_size >> 10); seq_printf(seq, "Private I/O base address : %0#10x\n", sb->current_io_base); return 0; } static int i2o_seq_show_hw(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; static u32 work32[5]; static u8 *work8 = (u8 *) work32; static u16 *work16 = (u16 *) work32; int token; u32 hwcap; static char *cpu_table[] = { "Intel 80960 series", "AMD2900 series", "Motorola 68000 series", "ARM series", "MIPS series", "Sparc series", "PowerPC series", "Intel x86 series" }; token = i2o_parm_field_get(c->exec, 0x0000, -1, &work32, sizeof(work32)); if (token < 0) { i2o_report_query_status(seq, token, "0x0000 IOP Hardware"); return 0; } seq_printf(seq, "I2O Vendor ID : %0#6x\n", work16[0]); seq_printf(seq, "Product ID : %0#6x\n", work16[1]); seq_printf(seq, "CPU : "); if (work8[16] > 8) seq_printf(seq, "Unknown\n"); else seq_printf(seq, "%s\n", cpu_table[work8[16]]); /* Anyone using ProcessorVersion? */ seq_printf(seq, "RAM : %dkB\n", work32[1] >> 10); seq_printf(seq, "Non-Volatile Mem : %dkB\n", work32[2] >> 10); hwcap = work32[3]; seq_printf(seq, "Capabilities : 0x%08x\n", hwcap); seq_printf(seq, " [%s] Self booting\n", (hwcap & 0x00000001) ? "+" : "-"); seq_printf(seq, " [%s] Upgradable IRTOS\n", (hwcap & 0x00000002) ? "+" : "-"); seq_printf(seq, " [%s] Supports downloading DDMs\n", (hwcap & 0x00000004) ? "+" : "-"); seq_printf(seq, " [%s] Supports installing DDMs\n", (hwcap & 0x00000008) ? "+" : "-"); seq_printf(seq, " [%s] Battery-backed RAM\n", (hwcap & 0x00000010) ? 
"+" : "-"); return 0; } /* Executive group 0003h - Executing DDM List (table) */ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; int token; int i; typedef struct _i2o_exec_execute_ddm_table { u16 ddm_tid; u8 module_type; u8 reserved; u16 i2o_vendor_id; u16 module_id; u8 module_name_version[28]; u32 data_size; u32 code_size; } i2o_exec_execute_ddm_table; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; i2o_exec_execute_ddm_table ddm_table[I2O_MAX_MODULES]; } *result; i2o_exec_execute_ddm_table ddm_table; char tmp[28 + 1]; result = kmalloc(sizeof(*result), GFP_KERNEL); if (!result) return -ENOMEM; token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0003, -1, NULL, 0, result, sizeof(*result)); if (token < 0) { i2o_report_query_status(seq, token, "0x0003 Executing DDM List"); goto out; } seq_printf(seq, "Tid Module_type Vendor Mod_id Module_name Vrs Data_size Code_size\n"); ddm_table = result->ddm_table[0]; for (i = 0; i < result->row_count; ddm_table = result->ddm_table[++i]) { seq_printf(seq, "0x%03x ", ddm_table.ddm_tid & 0xFFF); switch (ddm_table.module_type) { case 0x01: seq_printf(seq, "Downloaded DDM "); break; case 0x22: seq_printf(seq, "Embedded DDM "); break; default: seq_printf(seq, " "); } seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id); seq_printf(seq, "%-#8x", ddm_table.module_id); seq_printf(seq, "%-29s", chtostr(tmp, ddm_table.module_name_version, 28)); seq_printf(seq, "%9d ", ddm_table.data_size); seq_printf(seq, "%8d", ddm_table.code_size); seq_printf(seq, "\n"); } out: kfree(result); return 0; } /* Executive group 0004h - Driver Store (scalar) */ static int i2o_seq_show_driver_store(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; u32 work32[8]; int token; token = i2o_parm_field_get(c->exec, 0x0004, -1, &work32, sizeof(work32)); if (token 
< 0) { i2o_report_query_status(seq, token, "0x0004 Driver Store"); return 0; } seq_printf(seq, "Module limit : %d\n" "Module count : %d\n" "Current space : %d kB\n" "Free space : %d kB\n", work32[0], work32[1], work32[2] >> 10, work32[3] >> 10); return 0; } /* Executive group 0005h - Driver Store Table (table) */ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v) { typedef struct _i2o_driver_store { u16 stored_ddm_index; u8 module_type; u8 reserved; u16 i2o_vendor_id; u16 module_id; u8 module_name_version[28]; u8 date[8]; u32 module_size; u32 mpb_size; u32 module_flags; } i2o_driver_store_table; struct i2o_controller *c = (struct i2o_controller *)seq->private; int token; int i; typedef struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; i2o_driver_store_table dst[I2O_MAX_MODULES]; } i2o_driver_result_table; i2o_driver_result_table *result; i2o_driver_store_table *dst; char tmp[28 + 1]; result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL); if (result == NULL) return -ENOMEM; token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0005, -1, NULL, 0, result, sizeof(*result)); if (token < 0) { i2o_report_query_status(seq, token, "0x0005 DRIVER STORE TABLE"); kfree(result); return 0; } seq_printf(seq, "# Module_type Vendor Mod_id Module_name Vrs" "Date Mod_size Par_size Flags\n"); for (i = 0, dst = &result->dst[0]; i < result->row_count; dst = &result->dst[++i]) { seq_printf(seq, "%-3d", dst->stored_ddm_index); switch (dst->module_type) { case 0x01: seq_printf(seq, "Downloaded DDM "); break; case 0x22: seq_printf(seq, "Embedded DDM "); break; default: seq_printf(seq, " "); } seq_printf(seq, "%-#7x", dst->i2o_vendor_id); seq_printf(seq, "%-#8x", dst->module_id); seq_printf(seq, "%-29s", chtostr(tmp, dst->module_name_version, 28)); seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8)); seq_printf(seq, "%8d ", dst->module_size); seq_printf(seq, "%8d ", dst->mpb_size); 
seq_printf(seq, "0x%04x", dst->module_flags); seq_printf(seq, "\n"); } kfree(result); return 0; } /* Generic group F000h - Params Descriptor (table) */ static int i2o_seq_show_groups(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; u8 properties; typedef struct _i2o_group_info { u16 group_number; u16 field_count; u16 row_count; u8 properties; u8 reserved; } i2o_group_info; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; i2o_group_info group[256]; } *result; result = kmalloc(sizeof(*result), GFP_KERNEL); if (!result) return -ENOMEM; token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0, result, sizeof(*result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF000 Params Descriptor"); goto out; } seq_printf(seq, "# Group FieldCount RowCount Type Add Del Clear\n"); for (i = 0; i < result->row_count; i++) { seq_printf(seq, "%-3d", i); seq_printf(seq, "0x%04X ", result->group[i].group_number); seq_printf(seq, "%10d ", result->group[i].field_count); seq_printf(seq, "%8d ", result->group[i].row_count); properties = result->group[i].properties; if (properties & 0x1) seq_printf(seq, "Table "); else seq_printf(seq, "Scalar "); if (properties & 0x2) seq_printf(seq, " + "); else seq_printf(seq, " - "); if (properties & 0x4) seq_printf(seq, " + "); else seq_printf(seq, " - "); if (properties & 0x8) seq_printf(seq, " + "); else seq_printf(seq, " - "); seq_printf(seq, "\n"); } if (result->more_flag) seq_printf(seq, "There is more...\n"); out: kfree(result); return 0; } /* Generic group F001h - Physical Device Table (table) */ static int i2o_seq_show_phys_device(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; u32 adapter_id[64]; } result; token = 
i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF001, -1, NULL, 0, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF001 Physical Device Table"); return 0; } if (result.row_count) seq_printf(seq, "# AdapterId\n"); for (i = 0; i < result.row_count; i++) { seq_printf(seq, "%-2d", i); seq_printf(seq, "%#7x\n", result.adapter_id[i]); } if (result.more_flag) seq_printf(seq, "There is more...\n"); return 0; } /* Generic group F002h - Claimed Table (table) */ static int i2o_seq_show_claimed(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; u16 claimed_tid[64]; } result; token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF002, -1, NULL, 0, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF002 Claimed Table"); return 0; } if (result.row_count) seq_printf(seq, "# ClaimedTid\n"); for (i = 0; i < result.row_count; i++) { seq_printf(seq, "%-2d", i); seq_printf(seq, "%#7x\n", result.claimed_tid[i]); } if (result.more_flag) seq_printf(seq, "There is more...\n"); return 0; } /* Generic group F003h - User Table (table) */ static int i2o_seq_show_users(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; typedef struct _i2o_user_table { u16 instance; u16 user_tid; u8 claim_type; u8 reserved1; u16 reserved2; } i2o_user_table; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; i2o_user_table user[64]; } *result; result = kmalloc(sizeof(*result), GFP_KERNEL); if (!result) return -ENOMEM; token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF003, -1, NULL, 0, result, sizeof(*result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF003 User Table"); goto out; } seq_printf(seq, "# Instance UserTid ClaimType\n"); for 
(i = 0; i < result->row_count; i++) { seq_printf(seq, "%-3d", i); seq_printf(seq, "%#8x ", result->user[i].instance); seq_printf(seq, "%#7x ", result->user[i].user_tid); seq_printf(seq, "%#9x\n", result->user[i].claim_type); } if (result->more_flag) seq_printf(seq, "There is more...\n"); out: kfree(result); return 0; } /* Generic group F005h - Private message extensions (table) (optional) */ static int i2o_seq_show_priv_msgs(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; typedef struct _i2o_private { u16 ext_instance; u16 organization_id; u16 x_function_code; } i2o_private; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; i2o_private extension[64]; } result; token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF005 Private Message Extensions (optional)"); return 0; } seq_printf(seq, "Instance# OrgId FunctionCode\n"); for (i = 0; i < result.row_count; i++) { seq_printf(seq, "%0#9x ", result.extension[i].ext_instance); seq_printf(seq, "%0#6x ", result.extension[i].organization_id); seq_printf(seq, "%0#6x", result.extension[i].x_function_code); seq_printf(seq, "\n"); } if (result.more_flag) seq_printf(seq, "There is more...\n"); return 0; } /* Generic group F006h - Authorized User Table (table) */ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; u32 alternate_tid[64]; } result; token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF006, -1, NULL, 0, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF006 Autohorized User Table"); return 0; } if (result.row_count) seq_printf(seq, "# 
AlternateTid\n"); for (i = 0; i < result.row_count; i++) { seq_printf(seq, "%-2d", i); seq_printf(seq, "%#7x ", result.alternate_tid[i]); } if (result.more_flag) seq_printf(seq, "There is more...\n"); return 0; } /* Generic group F100h - Device Identity (scalar) */ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number // == (allow) 512d bytes (max) static u16 *work16 = (u16 *) work32; int token; char tmp[16 + 1]; token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32)); if (token < 0) { i2o_report_query_status(seq, token, "0xF100 Device Identity"); return 0; } seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0])); seq_printf(seq, "Owner TID : %0#5x\n", work16[2]); seq_printf(seq, "Parent TID : %0#5x\n", work16[3]); seq_printf(seq, "Vendor info : %s\n", chtostr(tmp, (u8 *) (work32 + 2), 16)); seq_printf(seq, "Product info : %s\n", chtostr(tmp, (u8 *) (work32 + 6), 16)); seq_printf(seq, "Description : %s\n", chtostr(tmp, (u8 *) (work32 + 10), 16)); seq_printf(seq, "Product rev. 
: %s\n", chtostr(tmp, (u8 *) (work32 + 14), 8)); seq_printf(seq, "Serial number : "); print_serial_number(seq, (u8 *) (work32 + 16), /* allow for SNLen plus * possible trailing '\0' */ sizeof(work32) - (16 * sizeof(u32)) - 2); seq_printf(seq, "\n"); return 0; } static int i2o_seq_show_dev_name(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; seq_printf(seq, "%s\n", dev_name(&d->device)); return 0; } /* Generic group F101h - DDM Identity (scalar) */ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; struct { u16 ddm_tid; u8 module_name[24]; u8 module_rev[8]; u8 sn_format; u8 serial_number[12]; u8 pad[256]; // allow up to 256 byte (max) serial number } result; char tmp[24 + 1]; token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF101 DDM Identity"); return 0; } seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid); seq_printf(seq, "Module name : %s\n", chtostr(tmp, result.module_name, 24)); seq_printf(seq, "Module revision : %s\n", chtostr(tmp, result.module_rev, 8)); seq_printf(seq, "Serial number : "); print_serial_number(seq, result.serial_number, sizeof(result) - 36); /* allow for SNLen plus possible trailing '\0' */ seq_printf(seq, "\n"); return 0; } /* Generic group F102h - User Information (scalar) */ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; struct { u8 device_name[64]; u8 service_name[64]; u8 physical_location[64]; u8 instance_number[4]; } result; char tmp[64 + 1]; token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF102 User Information"); return 0; } seq_printf(seq, "Device name : %s\n", chtostr(tmp, result.device_name, 64)); seq_printf(seq, "Service name : %s\n", chtostr(tmp, 
result.service_name, 64)); seq_printf(seq, "Physical name : %s\n", chtostr(tmp, result.physical_location, 64)); seq_printf(seq, "Instance number : %s\n", chtostr(tmp, result.instance_number, 4)); return 0; } /* Generic group F103h - SGL Operating Limits (scalar) */ static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; static u32 work32[12]; static u16 *work16 = (u16 *) work32; static u8 *work8 = (u8 *) work32; int token; token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32)); if (token < 0) { i2o_report_query_status(seq, token, "0xF103 SGL Operating Limits"); return 0; } seq_printf(seq, "SGL chain size : %d\n", work32[0]); seq_printf(seq, "Max SGL chain size : %d\n", work32[1]); seq_printf(seq, "SGL chain size target : %d\n", work32[2]); seq_printf(seq, "SGL frag count : %d\n", work16[6]); seq_printf(seq, "Max SGL frag count : %d\n", work16[7]); seq_printf(seq, "SGL frag count target : %d\n", work16[8]); /* FIXME if (d->i2oversion == 0x02) { */ seq_printf(seq, "SGL data alignment : %d\n", work16[8]); seq_printf(seq, "SGL addr limit : %d\n", work8[20]); seq_printf(seq, "SGL addr sizes supported : "); if (work8[21] & 0x01) seq_printf(seq, "32 bit "); if (work8[21] & 0x02) seq_printf(seq, "64 bit "); if (work8[21] & 0x04) seq_printf(seq, "96 bit "); if (work8[21] & 0x08) seq_printf(seq, "128 bit "); seq_printf(seq, "\n"); /* } */ return 0; } /* Generic group F200h - Sensors (scalar) */ static int i2o_seq_show_sensors(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; struct { u16 sensor_instance; u8 component; u16 component_instance; u8 sensor_class; u8 sensor_type; u8 scaling_exponent; u32 actual_reading; u32 minimum_reading; u32 low2lowcat_treshold; u32 lowcat2low_treshold; u32 lowwarn2low_treshold; u32 low2lowwarn_treshold; u32 norm2lowwarn_treshold; u32 lowwarn2norm_treshold; u32 nominal_reading; u32 hiwarn2norm_treshold; u32 
norm2hiwarn_treshold; u32 high2hiwarn_treshold; u32 hiwarn2high_treshold; u32 hicat2high_treshold; u32 hi2hicat_treshold; u32 maximum_reading; u8 sensor_state; u16 event_enable; } result; token = i2o_parm_field_get(d, 0xF200, -1, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF200 Sensors (optional)"); return 0; } seq_printf(seq, "Sensor instance : %d\n", result.sensor_instance); seq_printf(seq, "Component : %d = ", result.component); switch (result.component) { case 0: seq_printf(seq, "Other"); break; case 1: seq_printf(seq, "Planar logic Board"); break; case 2: seq_printf(seq, "CPU"); break; case 3: seq_printf(seq, "Chassis"); break; case 4: seq_printf(seq, "Power Supply"); break; case 5: seq_printf(seq, "Storage"); break; case 6: seq_printf(seq, "External"); break; } seq_printf(seq, "\n"); seq_printf(seq, "Component instance : %d\n", result.component_instance); seq_printf(seq, "Sensor class : %s\n", result.sensor_class ? "Analog" : "Digital"); seq_printf(seq, "Sensor type : %d = ", result.sensor_type); switch (result.sensor_type) { case 0: seq_printf(seq, "Other\n"); break; case 1: seq_printf(seq, "Thermal\n"); break; case 2: seq_printf(seq, "DC voltage (DC volts)\n"); break; case 3: seq_printf(seq, "AC voltage (AC volts)\n"); break; case 4: seq_printf(seq, "DC current (DC amps)\n"); break; case 5: seq_printf(seq, "AC current (AC volts)\n"); break; case 6: seq_printf(seq, "Door open\n"); break; case 7: seq_printf(seq, "Fan operational\n"); break; } seq_printf(seq, "Scaling exponent : %d\n", result.scaling_exponent); seq_printf(seq, "Actual reading : %d\n", result.actual_reading); seq_printf(seq, "Minimum reading : %d\n", result.minimum_reading); seq_printf(seq, "Low2LowCat treshold : %d\n", result.low2lowcat_treshold); seq_printf(seq, "LowCat2Low treshold : %d\n", result.lowcat2low_treshold); seq_printf(seq, "LowWarn2Low treshold : %d\n", result.lowwarn2low_treshold); seq_printf(seq, "Low2LowWarn treshold : %d\n", 
result.low2lowwarn_treshold); seq_printf(seq, "Norm2LowWarn treshold : %d\n", result.norm2lowwarn_treshold); seq_printf(seq, "LowWarn2Norm treshold : %d\n", result.lowwarn2norm_treshold); seq_printf(seq, "Nominal reading : %d\n", result.nominal_reading); seq_printf(seq, "HiWarn2Norm treshold : %d\n", result.hiwarn2norm_treshold); seq_printf(seq, "Norm2HiWarn treshold : %d\n", result.norm2hiwarn_treshold); seq_printf(seq, "High2HiWarn treshold : %d\n", result.high2hiwarn_treshold); seq_printf(seq, "HiWarn2High treshold : %d\n", result.hiwarn2high_treshold); seq_printf(seq, "HiCat2High treshold : %d\n", result.hicat2high_treshold); seq_printf(seq, "High2HiCat treshold : %d\n", result.hi2hicat_treshold); seq_printf(seq, "Maximum reading : %d\n", result.maximum_reading); seq_printf(seq, "Sensor state : %d = ", result.sensor_state); switch (result.sensor_state) { case 0: seq_printf(seq, "Normal\n"); break; case 1: seq_printf(seq, "Abnormal\n"); break; case 2: seq_printf(seq, "Unknown\n"); break; case 3: seq_printf(seq, "Low Catastrophic (LoCat)\n"); break; case 4: seq_printf(seq, "Low (Low)\n"); break; case 5: seq_printf(seq, "Low Warning (LoWarn)\n"); break; case 6: seq_printf(seq, "High Warning (HiWarn)\n"); break; case 7: seq_printf(seq, "High (High)\n"); break; case 8: seq_printf(seq, "High Catastrophic (HiCat)\n"); break; } seq_printf(seq, "Event_enable : 0x%02X\n", result.event_enable); seq_printf(seq, " [%s] Operational state change. \n", (result.event_enable & 0x01) ? "+" : "-"); seq_printf(seq, " [%s] Low catastrophic. \n", (result.event_enable & 0x02) ? "+" : "-"); seq_printf(seq, " [%s] Low reading. \n", (result.event_enable & 0x04) ? "+" : "-"); seq_printf(seq, " [%s] Low warning. \n", (result.event_enable & 0x08) ? "+" : "-"); seq_printf(seq, " [%s] Change back to normal from out of range state. \n", (result.event_enable & 0x10) ? "+" : "-"); seq_printf(seq, " [%s] High warning. \n", (result.event_enable & 0x20) ? 
"+" : "-"); seq_printf(seq, " [%s] High reading. \n", (result.event_enable & 0x40) ? "+" : "-"); seq_printf(seq, " [%s] High catastrophic. \n", (result.event_enable & 0x80) ? "+" : "-"); return 0; } static int i2o_seq_open_hrt(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_hrt, PDE_DATA(inode)); }; static int i2o_seq_open_lct(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_lct, PDE_DATA(inode)); }; static int i2o_seq_open_status(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_status, PDE_DATA(inode)); }; static int i2o_seq_open_hw(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_hw, PDE_DATA(inode)); }; static int i2o_seq_open_ddm_table(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_ddm_table, PDE_DATA(inode)); }; static int i2o_seq_open_driver_store(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_driver_store, PDE_DATA(inode)); }; static int i2o_seq_open_drivers_stored(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_drivers_stored, PDE_DATA(inode)); }; static int i2o_seq_open_groups(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_groups, PDE_DATA(inode)); }; static int i2o_seq_open_phys_device(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_phys_device, PDE_DATA(inode)); }; static int i2o_seq_open_claimed(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_claimed, PDE_DATA(inode)); }; static int i2o_seq_open_users(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_users, PDE_DATA(inode)); }; static int i2o_seq_open_priv_msgs(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_priv_msgs, PDE_DATA(inode)); }; static int i2o_seq_open_authorized_users(struct inode *inode, struct file *file) { return 
single_open(file, i2o_seq_show_authorized_users, PDE_DATA(inode)); }; static int i2o_seq_open_dev_identity(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_dev_identity, PDE_DATA(inode)); }; static int i2o_seq_open_ddm_identity(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_ddm_identity, PDE_DATA(inode)); }; static int i2o_seq_open_uinfo(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_uinfo, PDE_DATA(inode)); }; static int i2o_seq_open_sgl_limits(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_sgl_limits, PDE_DATA(inode)); }; static int i2o_seq_open_sensors(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_sensors, PDE_DATA(inode)); }; static int i2o_seq_open_dev_name(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_dev_name, PDE_DATA(inode)); }; static const struct file_operations i2o_seq_fops_lct = { .open = i2o_seq_open_lct, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_hrt = { .open = i2o_seq_open_hrt, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_status = { .open = i2o_seq_open_status, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_hw = { .open = i2o_seq_open_hw, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_ddm_table = { .open = i2o_seq_open_ddm_table, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_driver_store = { .open = i2o_seq_open_driver_store, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_drivers_stored = { .open = i2o_seq_open_drivers_stored, 
.read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_groups = { .open = i2o_seq_open_groups, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_phys_device = { .open = i2o_seq_open_phys_device, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_claimed = { .open = i2o_seq_open_claimed, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_users = { .open = i2o_seq_open_users, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_priv_msgs = { .open = i2o_seq_open_priv_msgs, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_authorized_users = { .open = i2o_seq_open_authorized_users, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_dev_name = { .open = i2o_seq_open_dev_name, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_dev_identity = { .open = i2o_seq_open_dev_identity, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_ddm_identity = { .open = i2o_seq_open_ddm_identity, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_uinfo = { .open = i2o_seq_open_uinfo, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_sgl_limits = { .open = i2o_seq_open_sgl_limits, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_sensors = { .open = i2o_seq_open_sensors, .read = seq_read, .llseek = 
seq_lseek, .release = single_release, }; /* * IOP specific entries...write field just in case someone * ever wants one. */ static i2o_proc_entry i2o_proc_generic_iop_entries[] = { {"hrt", S_IFREG | S_IRUGO, &i2o_seq_fops_hrt}, {"lct", S_IFREG | S_IRUGO, &i2o_seq_fops_lct}, {"status", S_IFREG | S_IRUGO, &i2o_seq_fops_status}, {"hw", S_IFREG | S_IRUGO, &i2o_seq_fops_hw}, {"ddm_table", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_table}, {"driver_store", S_IFREG | S_IRUGO, &i2o_seq_fops_driver_store}, {"drivers_stored", S_IFREG | S_IRUGO, &i2o_seq_fops_drivers_stored}, {NULL, 0, NULL} }; /* * Device specific entries */ static i2o_proc_entry generic_dev_entries[] = { {"groups", S_IFREG | S_IRUGO, &i2o_seq_fops_groups}, {"phys_dev", S_IFREG | S_IRUGO, &i2o_seq_fops_phys_device}, {"claimed", S_IFREG | S_IRUGO, &i2o_seq_fops_claimed}, {"users", S_IFREG | S_IRUGO, &i2o_seq_fops_users}, {"priv_msgs", S_IFREG | S_IRUGO, &i2o_seq_fops_priv_msgs}, {"authorized_users", S_IFREG | S_IRUGO, &i2o_seq_fops_authorized_users}, {"dev_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_identity}, {"ddm_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_identity}, {"user_info", S_IFREG | S_IRUGO, &i2o_seq_fops_uinfo}, {"sgl_limits", S_IFREG | S_IRUGO, &i2o_seq_fops_sgl_limits}, {"sensors", S_IFREG | S_IRUGO, &i2o_seq_fops_sensors}, {NULL, 0, NULL} }; /* * Storage unit specific entries (SCSI Periph, BS) with device names */ static i2o_proc_entry rbs_dev_entries[] = { {"dev_name", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_name}, {NULL, 0, NULL} }; /** * i2o_proc_create_entries - Creates proc dir entries * @dir: proc dir entry under which the entries should be placed * @i2o_pe: pointer to the entries which should be added * @data: pointer to I2O controller or device * * Create proc dir entries for a I2O controller or I2O device. * * Returns 0 on success or negative error code on failure. 
*/ static int i2o_proc_create_entries(struct proc_dir_entry *dir, i2o_proc_entry * i2o_pe, void *data) { struct proc_dir_entry *tmp; while (i2o_pe->name) { tmp = proc_create_data(i2o_pe->name, i2o_pe->mode, dir, i2o_pe->fops, data); if (!tmp) return -1; i2o_pe++; } return 0; } /** * i2o_proc_device_add - Add an I2O device to the proc dir * @dir: proc dir entry to which the device should be added * @dev: I2O device which should be added * * Add an I2O device to the proc dir entry dir and create the entries for * the device depending on the class of the I2O device. */ static void i2o_proc_device_add(struct proc_dir_entry *dir, struct i2o_device *dev) { char buff[10]; struct proc_dir_entry *devdir; i2o_proc_entry *i2o_pe = NULL; sprintf(buff, "%03x", dev->lct_data.tid); osm_debug("adding device /proc/i2o/%s/%s\n", dev->iop->name, buff); devdir = proc_mkdir_data(buff, 0, dir, dev); if (!devdir) { osm_warn("Could not allocate procdir!\n"); return; } i2o_proc_create_entries(devdir, generic_dev_entries, dev); /* Inform core that we want updates about this device's status */ switch (dev->lct_data.class_id) { case I2O_CLASS_SCSI_PERIPHERAL: case I2O_CLASS_RANDOM_BLOCK_STORAGE: i2o_pe = rbs_dev_entries; break; default: break; } if (i2o_pe) i2o_proc_create_entries(devdir, i2o_pe, dev); } /** * i2o_proc_iop_add - Add an I2O controller to the i2o proc tree * @dir: parent proc dir entry * @c: I2O controller which should be added * * Add the entries to the parent proc dir entry. Also each device is added * to the controllers proc dir entry. * * Returns 0 on success or negative error code on failure. 
*/ static int i2o_proc_iop_add(struct proc_dir_entry *dir, struct i2o_controller *c) { struct proc_dir_entry *iopdir; struct i2o_device *dev; osm_debug("adding IOP /proc/i2o/%s\n", c->name); iopdir = proc_mkdir_data(c->name, 0, dir, c); if (!iopdir) return -1; i2o_proc_create_entries(iopdir, i2o_proc_generic_iop_entries, c); list_for_each_entry(dev, &c->devices, list) i2o_proc_device_add(iopdir, dev); return 0; } /** * i2o_proc_fs_create - Create the i2o proc fs. * * Iterate over each I2O controller and create the entries for it. * * Returns 0 on success or negative error code on failure. */ static int __init i2o_proc_fs_create(void) { struct i2o_controller *c; i2o_proc_dir_root = proc_mkdir("i2o", NULL); if (!i2o_proc_dir_root) return -1; list_for_each_entry(c, &i2o_controllers, list) i2o_proc_iop_add(i2o_proc_dir_root, c); return 0; }; /** * i2o_proc_fs_destroy - Cleanup the all i2o proc entries * * Iterate over each I2O controller and remove the entries for it. * * Returns 0 on success or negative error code on failure. */ static int __exit i2o_proc_fs_destroy(void) { remove_proc_subtree("i2o", NULL); return 0; }; /** * i2o_proc_init - Init function for procfs * * Registers Proc OSM and creates procfs entries. * * Returns 0 on success or negative error code on failure. */ static int __init i2o_proc_init(void) { int rc; printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); rc = i2o_driver_register(&i2o_proc_driver); if (rc) return rc; rc = i2o_proc_fs_create(); if (rc) { i2o_driver_unregister(&i2o_proc_driver); return rc; } return 0; }; /** * i2o_proc_exit - Exit function for procfs * * Unregisters Proc OSM and removes procfs entries. */ static void __exit i2o_proc_exit(void) { i2o_driver_unregister(&i2o_proc_driver); i2o_proc_fs_destroy(); }; MODULE_AUTHOR("Deepak Saxena"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(OSM_DESCRIPTION); MODULE_VERSION(OSM_VERSION); module_init(i2o_proc_init); module_exit(i2o_proc_exit);
gpl-2.0
Tommy-Geenexus/sony_sources
arch/arm/mach-msm/board-mahimahi.c
3671
2111
/* linux/arch/arm/mach-msm/board-mahimahi.c
 *
 * Board support for the HTC "mahimahi" (Nexus One) MSM device.
 *
 * Copyright (C) 2009 Google, Inc.
 * Copyright (C) 2009 HTC Corporation.
 * Author: Dima Zavin <dima@android.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/setup.h>

#include <mach/board.h>
#include <mach/hardware.h>
#include <mach/system.h>
#include <mach/proc_comm.h>

#include "board-mahimahi.h"
#include "devices.h"

/* Writable only at module load time; nothing in this file reads it yet. */
static uint debug_uart;

module_param_named(debug_uart, debug_uart, uint, 0);

/* Platform devices registered at init; uart1 is omitted when the serial
 * debugger owns that port. */
static struct platform_device *devices[] __initdata = {
#if !defined(CONFIG_MSM_SERIAL_DEBUGGER)
	&msm_device_uart1,
#endif
	&msm_device_uart_dm1,
	&msm_device_nand,
};

/* Machine init: register the static platform device list above. */
static void __init mahimahi_init(void)
{
	platform_add_devices(devices, ARRAY_SIZE(devices));
}

/* Early fixup: describe the two physical memory banks to the ARM core.
 * Bank 0 is 219 MB at PHYS_OFFSET; bank 1 is the high-memory region.
 * NOTE(review): the 219 MB figure is board-specific — confirm against the
 * mahimahi memory map before changing. */
static void __init mahimahi_fixup(struct tag *tags, char **cmdline,
				  struct meminfo *mi)
{
	mi->nr_banks = 2;
	mi->bank[0].start = PHYS_OFFSET;
	mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
	mi->bank[0].size = (219*1024*1024);
	mi->bank[1].start = MSM_HIGHMEM_BASE;
	mi->bank[1].node = PHYS_TO_NID(MSM_HIGHMEM_BASE);
	mi->bank[1].size = MSM_HIGHMEM_SIZE;
}

/* Map the common MSM static I/O regions and bring up the clock framework. */
static void __init mahimahi_map_io(void)
{
	msm_map_common_io();
	msm_clock_init();
}

extern struct sys_timer msm_timer;

MACHINE_START(MAHIMAHI, "mahimahi")
	.atag_offset = 0x100,
	.fixup = mahimahi_fixup,
	.map_io = mahimahi_map_io,
	.init_irq = msm_init_irq,
	.init_machine = mahimahi_init,
	.timer = &msm_timer,
MACHINE_END
gpl-2.0
aosp-samsung-msm7x30/android_kernel_samsung_msm7x30-common
net/ethernet/eth.c
4439
10997
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Ethernet-type device handling.
 *
 * Version:	@(#)eth.c	1.0.7	05/25/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Florian  La Roche, <rzsfl@rz.uni-sb.de>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Mr Linux	: Arp problems
 *		Alan Cox	: Generic queue tidyup (very tiny here)
 *		Alan Cox	: eth_header ntohs should be htons
 *		Alan Cox	: eth_rebuild_header missing an htons and
 *				  minor other things.
 *		Tegge		: Arp bug fixes.
 *		Florian		: Removed many unnecessary functions, code cleanup
 *				  and changes for new arp and skbuff.
 *		Alan Cox	: Redid header building to reflect new format.
 *		Alan Cox	: ARP only when compiled with CONFIG_INET
 *		Greg Page	: 802.2 and SNAP stuff.
 *		Alan Cox	: MAC layer pointers/new format.
 *		Paul Gortmaker	: eth_copy_and_sum shouldn't csum padding.
 *		Alan Cox	: Protect against forwarding explosions with
 *				  older network drivers and IFF_ALLMULTI.
 *	Christer Weinigel	: Better rebuild header message.
 *             Andrew Morton    : 26Feb01: kill ether_setup() - use netdev_boot_setup().
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <net/dst.h>
#include <net/arp.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/dsa.h>
#include <asm/uaccess.h>

/* "ether=" boot parameter is handled by the generic netdev boot setup. */
__setup("ether=", netdev_boot_setup);

/**
 * eth_header - create the Ethernet header
 * @skb:	buffer to alter
 * @dev:	source device
 * @type:	Ethernet type field
 * @daddr: destination address (NULL leave destination address)
 * @saddr: source address (NULL use device source address)
 * @len:   packet length (<= skb->len)
 *
 *
 * Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
 * length in here instead.
 *
 * Returns ETH_HLEN when the header is complete, or -ETH_HLEN when the
 * destination is still unresolved (caller must fill it in later).
 */
int eth_header(struct sk_buff *skb, struct net_device *dev,
	       unsigned short type,
	       const void *daddr, const void *saddr, unsigned len)
{
	struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);

	/* 802.3/802.2 frames carry the payload length in this field,
	 * everything else carries the EtherType. */
	if (type != ETH_P_802_3 && type != ETH_P_802_2)
		eth->h_proto = htons(type);
	else
		eth->h_proto = htons(len);

	/*
	 *      Set the source hardware address.
	 */

	if (!saddr)
		saddr = dev->dev_addr;
	memcpy(eth->h_source, saddr, ETH_ALEN);

	if (daddr) {
		memcpy(eth->h_dest, daddr, ETH_ALEN);
		return ETH_HLEN;
	}

	/*
	 *      Anyway, the loopback-device should never use this function...
	 */

	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
		/* No resolution needed; zero destination is acceptable. */
		memset(eth->h_dest, 0, ETH_ALEN);
		return ETH_HLEN;
	}

	/* Negative return: header pushed but destination unresolved. */
	return -ETH_HLEN;
}
EXPORT_SYMBOL(eth_header);

/**
 * eth_rebuild_header- rebuild the Ethernet MAC header.
 * @skb: socket buffer to update
 *
 * This is called after an ARP or IPV6 ndisc it's resolution on this
 * sk_buff. We now let protocol (ARP) fill in the other fields.
 *
 * This routine CANNOT use cached dst->neigh!
 * Really, it is used only when dst->neigh is wrong.
 */
int eth_rebuild_header(struct sk_buff *skb)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	struct net_device *dev = skb->dev;

	switch (eth->h_proto) {
#ifdef CONFIG_INET
	case htons(ETH_P_IP):
		/* Let ARP resolve and fill in h_dest. */
		return arp_find(eth->h_dest, skb);
#endif
	default:
		printk(KERN_DEBUG
		       "%s: unable to resolve type %X addresses.\n",
		       dev->name, ntohs(eth->h_proto));

		/* Best effort: at least make the source address correct. */
		memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
		break;
	}

	return 0;
}
EXPORT_SYMBOL(eth_rebuild_header);

/**
 * eth_type_trans - determine the packet's protocol ID.
 * @skb: received socket data
 * @dev: receiving network device
 *
 * The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 */
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;

	skb->dev = dev;
	skb_reset_mac_header(skb);
	/* Consume the 14-byte Ethernet header before classifying. */
	skb_pull_inline(skb, ETH_HLEN);
	eth = eth_hdr(skb);

	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}

	/*
	 *      This ALLMULTI check should be redundant by 1.4
	 *      so don't forget to remove it.
	 *
	 *      Seems, you forgot to remove it. All silly devices
	 *      seems to set IFF_PROMISC.
	 */

	else if (1 /*dev->flags&IFF_PROMISC */ ) {
		/* Unicast frame not addressed to us. */
		if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr)))
			skb->pkt_type = PACKET_OTHERHOST;
	}

	/*
	 * Some variants of DSA tagging don't have an ethertype field
	 * at all, so we check here whether one of those tagging
	 * variants has been configured on the receiving interface,
	 * and if so, set skb->protocol without looking at the packet.
	 */
	if (netdev_uses_dsa_tags(dev))
		return htons(ETH_P_DSA);
	if (netdev_uses_trailer_tags(dev))
		return htons(ETH_P_TRAILER);

	/* Values >= 1536 are EtherTypes; smaller values are 802.3 lengths. */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	/*
	 *      This is a magic hack to spot IPX packets. Older Novell breaks
	 *      the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *      won't work for fault tolerant netware but does for the rest.
	 */
	if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *      Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
EXPORT_SYMBOL(eth_type_trans);

/**
 * eth_header_parse - extract hardware address from packet
 * @skb: packet to extract header from
 * @haddr: destination buffer
 *
 * Copies the sender MAC into @haddr; returns the address length.
 */
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct ethhdr *eth = eth_hdr(skb);
	memcpy(haddr, eth->h_source, ETH_ALEN);
	return ETH_ALEN;
}
EXPORT_SYMBOL(eth_header_parse);

/**
 * eth_header_cache - fill cache entry from neighbour
 * @neigh: source neighbour
 * @hh: destination cache entry
 * @type: Ethernet type field
 *
 * Create an Ethernet header template from the neighbour.
 * Returns -1 for 802.3 frames, whose "type" is really a per-packet length
 * and therefore cannot be cached.
 */
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
{
	struct ethhdr *eth;
	const struct net_device *dev = neigh->dev;

	/* Header template lives at the tail of hh_data, suitably aligned. */
	eth = (struct ethhdr *)
	    (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth))));

	if (type == htons(ETH_P_802_3))
		return -1;

	eth->h_proto = type;
	memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
	hh->hh_len = ETH_HLEN;
	return 0;
}
EXPORT_SYMBOL(eth_header_cache);

/**
 * eth_header_cache_update - update cache entry
 * @hh: destination cache entry
 * @dev: network device
 * @haddr: new hardware address
 *
 * Called by Address Resolution module to notify changes in address.
 */
void eth_header_cache_update(struct hh_cache *hh,
			     const struct net_device *dev,
			     const unsigned char *haddr)
{
	/* Only the destination MAC of the cached template changes. */
	memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
	       haddr, ETH_ALEN);
}
EXPORT_SYMBOL(eth_header_cache_update);

/**
 * eth_mac_addr - set new Ethernet hardware address
 * @dev: network device
 * @p: socket address
 *
 * Change hardware address of device.
 *
 * This doesn't change hardware matching, so needs to be overridden
 * for most real devices.
 */
int eth_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Refuse while the interface is up. */
	if (netif_running(dev))
		return -EBUSY;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	/* if device marked as NET_ADDR_RANDOM, reset it */
	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	return 0;
}
EXPORT_SYMBOL(eth_mac_addr);

/**
 * eth_change_mtu - set new MTU size
 * @dev: network device
 * @new_mtu: new Maximum Transfer Unit
 *
 * Allow changing MTU size. Needs to be overridden for devices
 * supporting jumbo frames.
 */
int eth_change_mtu(struct net_device *dev, int new_mtu)
{
	/* 68 is the IPv4 minimum MTU; ETH_DATA_LEN the Ethernet maximum. */
	if (new_mtu < 68 || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(eth_change_mtu);

/* Validate that the device's current MAC is a usable unicast address. */
int eth_validate_addr(struct net_device *dev)
{
	if (!is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	return 0;
}
EXPORT_SYMBOL(eth_validate_addr);

/* Default header operations shared by all Ethernet-like devices. */
const struct header_ops eth_header_ops ____cacheline_aligned = {
	.create		= eth_header,
	.parse		= eth_header_parse,
	.rebuild	= eth_rebuild_header,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};

/**
 * ether_setup - setup Ethernet network device
 * @dev: network device
 *
 * Fill in the fields of the device structure with Ethernet-generic values.
 */
void ether_setup(struct net_device *dev)
{
	dev->header_ops		= &eth_header_ops;
	dev->type		= ARPHRD_ETHER;
	dev->hard_header_len 	= ETH_HLEN;
	dev->mtu		= ETH_DATA_LEN;
	dev->addr_len		= ETH_ALEN;
	dev->tx_queue_len	= 1000;	/* Ethernet wants good queues */
	dev->flags		= IFF_BROADCAST|IFF_MULTICAST;
	dev->priv_flags		|= IFF_TX_SKB_SHARING;

	memset(dev->broadcast, 0xFF, ETH_ALEN);
}
EXPORT_SYMBOL(ether_setup);

/**
 * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
 * @sizeof_priv: Size of additional driver-private structure to be allocated
 *	for this Ethernet device
 * @txqs: The number of TX queues this device has.
 * @rxqs: The number of RX queues this device has.
 *
 * Fill in the fields of the device structure with Ethernet-generic
 * values. Basically does everything except registering the device.
 *
 * Constructs a new net device, complete with a private data area of
 * size (sizeof_priv). A 32-byte (not bit) alignment is enforced for
 * this private data area.
 */

struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
				      unsigned int rxqs)
{
	/* "eth%d" picks the next free ethN name at registration time. */
	return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
}
EXPORT_SYMBOL(alloc_etherdev_mqs);

/* Render @len bytes of @addr as colon-separated lowercase hex into @buf.
 * Returns the number of characters written (no trailing NUL counted). */
static size_t _format_mac_addr(char *buf, int buflen,
			       const unsigned char *addr, int len)
{
	int i;
	char *cp = buf;

	for (i = 0; i < len; i++) {
		cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]);
		/* No separator after the final byte. */
		if (i == len - 1)
			break;
		cp += scnprintf(cp, buflen - (cp - buf), ":");
	}
	return cp - buf;
}

/* sysfs helper: format a MAC address followed by a newline into @buf.
 * @buf is a sysfs page, hence the PAGE_SIZE bound. Returns total length. */
ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
{
	size_t l;

	l = _format_mac_addr(buf, PAGE_SIZE, addr, len);
	l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
	return (ssize_t)l;
}
EXPORT_SYMBOL(sysfs_format_mac);
gpl-2.0
proxuser/kartal
arch/x86/kernel/apic/apic_noop.c
4695
4702
/*
 * NOOP APIC driver.
 *
 * Does almost nothing and should be substituted by a real apic driver via
 * probe routine.
 *
 * Though in case if apic is disabled (for some reason) we try
 * to not uglify the caller's code and allow to call (some) apic routines
 * like self-ipi, etc...
 */

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/setup.h>

#include <linux/smp.h>
#include <asm/ipi.h>

#include <linux/interrupt.h>
#include <asm/acpi.h>
#include <asm/e820.h>

/* All of the following are deliberate no-op / constant stubs: with the
 * APIC absent or disabled there is nothing to program and no other CPU
 * to signal. */

static void noop_init_apic_ldr(void) { }
static void noop_send_IPI_mask(const struct cpumask *cpumask, int vector) { }
static void noop_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { }
static void noop_send_IPI_allbutself(int vector) { }
static void noop_send_IPI_all(int vector) { }
static void noop_send_IPI_self(int vector) { }
static void noop_apic_wait_icr_idle(void) { }
static void noop_apic_icr_write(u32 low, u32 id) { }

/* Secondary CPUs cannot be brought up without an APIC. */
static int noop_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
{
	return -1;
}

static u32 noop_safe_apic_wait_icr_idle(void)
{
	return 0;
}

static u64 noop_apic_icr_read(void)
{
	return 0;
}

static int noop_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return 0;
}

static unsigned int noop_get_apic_id(unsigned long x)
{
	return 0;
}

static int noop_probe(void)
{
	/*
	 * NOOP apic should not ever be
	 * enabled via probe routine
	 */
	return 0;
}

static int noop_apic_id_registered(void)
{
	/*
	 * if we would be really "pedantic"
	 * we should pass read_apic_id() here
	 * but since NOOP suppose APIC ID = 0
	 * lets save a few cycles
	 */
	return physid_isset(0, phys_cpu_present_map);
}

static const struct cpumask *noop_target_cpus(void)
{
	/* only BSP here */
	return cpumask_of(0);
}

static unsigned long noop_check_apicid_used(physid_mask_t *map, int apicid)
{
	return physid_isset(apicid, *map);
}

static unsigned long noop_check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}

static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/* Only the boot CPU should ever get vectors in noop mode. */
	if (cpu != 0)
		pr_warning("APIC: Vector allocated for non-BSP cpu\n");
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

static u32 noop_apic_read(u32 reg)
{
	/* Reading while a real APIC is usable indicates a wiring bug. */
	WARN_ON_ONCE((cpu_has_apic && !disable_apic));
	return 0;
}

static void noop_apic_write(u32 reg, u32 v)
{
	WARN_ON_ONCE(cpu_has_apic && !disable_apic);
}

/* Placeholder apic driver: real drivers replace this via their probe(). */
struct apic apic_noop = {
	.name				= "noop",
	.probe				= noop_probe,
	.acpi_madt_oem_check		= NULL,

	.apic_id_valid			= default_apic_id_valid,
	.apic_id_registered		= noop_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	/* logical delivery broadcast to all CPUs: */
	.irq_dest_mode			= 1,

	.target_cpus			= noop_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= noop_check_apicid_used,
	.check_apicid_present		= noop_check_apicid_present,

	.vector_allocation_domain	= noop_vector_allocation_domain,
	.init_apic_ldr			= noop_init_apic_ldr,

	.ioapic_phys_id_map		= default_ioapic_phys_id_map,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= physid_set_mask_of_physid,

	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,

	.phys_pkg_id			= noop_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= noop_get_apic_id,
	.set_apic_id			= NULL,
	.apic_id_mask			= 0x0F << 24,

	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,

	.send_IPI_mask			= noop_send_IPI_mask,
	.send_IPI_mask_allbutself	= noop_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= noop_send_IPI_allbutself,
	.send_IPI_all			= noop_send_IPI_all,
	.send_IPI_self			= noop_send_IPI_self,

	.wakeup_secondary_cpu		= noop_wakeup_secondary_cpu,

	/* should be safe */
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,

	.wait_for_init_deassert		= NULL,

	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= noop_apic_read,
	.write				= noop_apic_write,
	.icr_read			= noop_apic_icr_read,
	.icr_write			= noop_apic_icr_write,
	.wait_icr_idle			= noop_apic_wait_icr_idle,
	.safe_wait_icr_idle		= noop_safe_apic_wait_icr_idle,

#ifdef CONFIG_X86_32
	.x86_32_early_logical_apicid	= noop_x86_32_early_logical_apicid,
#endif
};
gpl-2.0
morisbartyno/android_kernel_samsung_millet1
drivers/net/ethernet/qlogic/qla3xxx.c
4951
104022
/* * QLogic QLA3xxx NIC HBA Driver * Copyright (c) 2003-2006 QLogic Corporation * * See LICENSE.qla3xxx for copyright and licensing details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/module.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/dmapool.h> #include <linux/mempool.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/prefetch.h> #include "qla3xxx.h" #define DRV_NAME "qla3xxx" #define DRV_STRING "QLogic ISP3XXX Network Driver" #define DRV_VERSION "v2.03.00-k5" static const char ql3xxx_driver_name[] = DRV_NAME; static const char ql3xxx_driver_version[] = DRV_VERSION; #define TIMED_OUT_MSG \ "Timed out waiting for management port to get free before issuing command\n" MODULE_AUTHOR("QLogic Corporation"); MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; static int debug = -1; /* defaults above */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static int msi; module_param(msi, int, 0); MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, /* required last entry */ {0,} }; 
MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs which are used.
 */
enum PHY_DEVICE_TYPE {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
};

/* Identification record for one supported PHY model. */
struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE	phyDevice;	/* driver-internal PHY type */
	const u32		phyIdOUI;	/* OUI assembled from PHY ID regs */
	const u16		phyIdModel;	/* model number from PHY ID reg 1 */
	const char		*name;		/* human-readable name for logs */
};

/* Table scanned by getPhyType() to map an (OUI, model) pair to a PHY type. */
static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};

/*
 * ql_sem_spinlock() - acquire a hardware semaphore, retrying for ~3 seconds.
 * Caller must take hw_lock.
 *
 * Writes the request and reads the semaphore register back to see whether
 * the hardware granted the bits; sleeps 1s between attempts.
 * Returns 0 on success, -1 on timeout.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		/* read back: the grant only shows in the returned value */
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

/* Release a hardware semaphore; the readl flushes the posted write. */
static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

/*
 * Single-shot semaphore acquire (no retry).
 * Returns non-zero when the requested bits were granted.
 */
static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
*/ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) { int i = 0; while (i < 10) { if (i) ssleep(1); if (ql_sem_lock(qdev, QL_DRVR_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 1)) { netdev_printk(KERN_DEBUG, qdev->ndev, "driver lock acquired\n"); return 1; } } netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); return 0; } static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; writel(((ISP_CONTROL_NP_MASK << 16) | page), &port_regs->CommonRegs.ispControlStatus); readl(&port_regs->CommonRegs.ispControlStatus); qdev->current_page = page; } static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) { u32 value; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); value = readl(reg); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return value; } static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) { return readl(reg); } static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) { u32 value; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (qdev->current_page != 0) ql_set_register_page(qdev, 0); value = readl(reg); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return value; } static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) { if (qdev->current_page != 0) ql_set_register_page(qdev, 0); return readl(reg); } static void ql_write_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); writel(value, reg); readl(reg); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); } static void ql_write_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { writel(value, reg); readl(reg); } static void ql_write_nvram_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { writel(value, reg); readl(reg); udelay(1); } static void 
ql_write_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 0) ql_set_register_page(qdev, 0); writel(value, reg); readl(reg); } /* * Caller holds hw_lock. Only called during init. */ static void ql_write_page1_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 1) ql_set_register_page(qdev, 1); writel(value, reg); readl(reg); } /* * Caller holds hw_lock. Only called during init. */ static void ql_write_page2_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 2) ql_set_register_page(qdev, 2); writel(value, reg); readl(reg); } static void ql_disable_interrupts(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, (ISP_IMR_ENABLE_INT << 16)); } static void ql_enable_interrupts(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, ((0xff << 16) | ISP_IMR_ENABLE_INT)); } static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, struct ql_rcv_buf_cb *lrg_buf_cb) { dma_addr_t map; int err; lrg_buf_cb->next = NULL; if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; } else { qdev->lrg_buf_free_tail->next = lrg_buf_cb; qdev->lrg_buf_free_tail = lrg_buf_cb; } if (!lrg_buf_cb->skb) { lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!lrg_buf_cb->skb)) { netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n"); qdev->lrg_buf_skb_check++; } else { /* * We save some space to copy the ethhdr from first * buffer */ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); map = pci_map_single(qdev->pdev, lrg_buf_cb->skb->data, qdev->lrg_buffer_len - QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); err = pci_dma_mapping_error(qdev->pdev, 
map); if (err) { netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; qdev->lrg_buf_skb_check++; return; } lrg_buf_cb->buf_phy_addr_low = cpu_to_le32(LS_64BITS(map)); lrg_buf_cb->buf_phy_addr_high = cpu_to_le32(MS_64BITS(map)); dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - QL_HEADER_SPACE); } } qdev->lrg_buf_free_count++; } static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter *qdev) { struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; if (lrg_buf_cb != NULL) { qdev->lrg_buf_free_head = lrg_buf_cb->next; if (qdev->lrg_buf_free_head == NULL) qdev->lrg_buf_free_tail = NULL; qdev->lrg_buf_free_count--; } return lrg_buf_cb; } static u32 addrBits = EEPROM_NO_ADDR_BITS; static u32 dataBits = EEPROM_NO_DATA_BITS; static void fm93c56a_deselect(struct ql3_adapter *qdev); static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, unsigned short *value); /* * Caller holds hw_lock. */ static void fm93c56a_select(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); ql_write_nvram_reg(qdev, spir, ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); } /* * Caller holds hw_lock. 
 */
/*
 * fm93c56a_cmd() - clock a command opcode plus address out to the EEPROM.
 *
 * Bit-bangs the Microwire protocol through the serial-port interface
 * register: a start bit first, then @cmd (FM93C56A_CMD_BITS wide) and
 * @eepromAddr (addrBits wide), each bit MSB-first with an explicit
 * clock rise/fall per bit.  The DO line is only rewritten when the data
 * bit actually changes, which halves the register traffic.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);

	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		/* one full clock cycle latches the bit into the EEPROM */
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);

	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
/* De-assert EEPROM chip select. Caller holds hw_lock. */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * fm93c56a_datain() - clock dataBits bits in from the EEPROM, MSB first.
 * Caller holds hw_lock.  Result is stored through @value.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		/* one clock cycle, then sample DI after the falling edge */
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
/* Read one 16-bit word from the serial EEPROM. Caller holds hw_lock. */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

/* Copy three little-endian 16-bit words of MAC address into dev_addr. */
static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

/*
 * Read the whole EEPROM into qdev->nvram_data and verify its additive
 * checksum (the sum of all words must be zero).
 * Returns 0 (the checksum) on success, -1 on semaphore or checksum failure.
 */
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;

	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}

	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	/* checksum is necessarily 0 here, i.e. success */
	return checksum;
}

/* MII management addresses for the two ports. */
static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

/* Poll until the MII management interface is idle (~10ms max). 0 = ready. */
static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

/* Turn on hardware auto-scan of the PHY scan register. */
static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

/*
 * Turn off PHY auto-scan so single MII reads/writes can be issued.
 * Returns 1 if scan mode was previously enabled (so the caller can
 * re-enable it afterwards), 0 otherwise.
 */
static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

/* Write one MII register on an explicit PHY address. 0 on success. */
static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

/* Read one MII register on an explicit PHY address. 0 on success. */
static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	/* pulse the read-cycle bit: clear (mask only), then set */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

/*
 * Write one MII register on the default PHY (qdev->PHYAddr).
 * Unconditionally re-enables scan mode afterwards.  0 on success.
 */
static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

/*
 * Read one MII register on the default PHY (qdev->PHYAddr).
 * Unconditionally re-enables scan mode afterwards.  0 on success.
 */
static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

/* Soft-reset the PETBI (fiber) interface on the default PHY address. */
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

/* Kick off PETBI auto-negotiation (1000 Mb full duplex, pause). */
static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

/* As ql_petbi_reset() but on this port's explicit PHY address. */
static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

/* As ql_petbi_start_neg() but on this port's explicit PHY address. */
static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

/* Reset then start PETBI auto-negotiation (default PHY address). */
static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

/* Reset then start PETBI auto-negotiation (explicit PHY address). */
static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

/* Did the PETBI link partner negotiate pause? (0 on MII read failure) */
static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

/*
 * Apply the Agere ET1011C errata workarounds and move the PHY to the
 * driver's expected MII address.  Register numbers and values follow the
 * Agere errata; the final writes leave diagnostic mode and power the
 * device back up with LED link/activity behaviour configured.
 */
static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

/*
 * Map the two MII PHY ID registers to a known PHY type via PHY_DEVICES[].
 * 0xffff in either register means "no PHY" and yields PHY_TYPE_UNKNOWN.
 */
static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}

/*
 * Read the negotiated link speed from the PHY's status register.
 * Returns SPEED_1000/SPEED_100/SPEED_10, 0 on MII read failure,
 * or -1 for an unexpected encoding.
 */
static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

/* Non-zero when the PHY reports a full-duplex link. */
static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

/* Did the copper link partner advertise pause? (0 on MII read failure) */
static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

/*
 * Identify the PHY attached to this port and run any PHY-specific setup.
 * An all-ones ID read means the PHY answers on an alternate (Agere) MII
 * address; retry there and, if it is an ET1011C, reprogram its address.
 * Returns 0 on success, an MII error code or -EIO for an unknown PHY.
 */
static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16   reg1;
	u16   reg2;
	bool  agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/*  Determine the PHY we are using by reading the ID's */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/*  Check if we have a Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using
		   determined by the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/*  We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/*  Determine the particular PHY we have on board to apply
	    PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}

/*
 * Enable or disable this port's MAC.  Caller holds hw_lock.
 * High half-word carries the write-enable mask for the PE bit.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
/* Assert/deassert the MAC configuration soft-reset bit. Caller holds hw_lock. */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 * Enable/disable gigabit mode on this port's MAC.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 * Enable/disable full duplex on this port's MAC.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 * Enable/disable TX and RX flow-control (pause) frames on this port's MAC.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
/* Non-zero when this port's media is fiber (per the port status register). */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

/* Non-zero when the PHY control register has auto-negotiation enabled. */
static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 * Non-zero when the port status register reports auto-negotiation complete.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}
	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

/* Non-zero when the port status register flags an auto-negotiation error. */
static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

/* Link speed: fiber is always 1000 Mb; otherwise ask the PHY. */
static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

/* Duplex: fiber is always full; otherwise ask the PHY. */
static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
/* Non-zero when the ISP latched a link-down event for this port. */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 * Clear this port's latched link-down indication.  Returns 0 on success,
 * 1 for an unexpected mac_index.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 * Returns 1 when this function (of the two-port adapter) is the link
 * master for the shared PHY, 0 otherwise.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

/* Soft-reset the copper PHY on this port's MII address. */
static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

/*
 * Program the PHY's 1000/100/10 and pause advertisements from the NVRAM
 * port configuration, then restart auto-negotiation.
 */
static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
					/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/*  Some HBA's in the field are set to 0 and they need to
	    be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

/* Full copper-PHY bring-up: reset, identify/init, start auto-negotiation. */
static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
/* Current link state (LS_UP/LS_DOWN) from the port status register. */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

/*
 * Initialize this port's link layer under the PHY GIO semaphore:
 * PETBI for fiber, full PHY init for copper.  Returns 0 on success,
 * -1 if the semaphore could not be obtained.
 */
static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

/*
 * Complete a successful auto-negotiation: if we are link master, program
 * the MAC (speed/duplex/pause) under soft reset and enable it, then mark
 * the link up and wake the TX queue.  On a remote negotiation error the
 * link master restarts the port.  Returns 0 on success, -1 on failure.
 */
static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on it's own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

/*
 * Periodic (delayed-work) link state machine: samples the port's link
 * state under hw_lock, drives LS_DOWN/LS_UP transitions, and re-arms the
 * adapter timer.
 */
static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 1 second interval (HZ * 1). */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 1 second interval (HZ * 1). */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 * Record whether this function owns the shared PHY (link master).
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
*/ static void ql_init_scan_mode(struct ql3_adapter *qdev) { ql_mii_enable_scan_mode(qdev); if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { if (ql_this_adapter_controls_port(qdev)) ql_petbi_init_ex(qdev); } else { if (ql_this_adapter_controls_port(qdev)) ql_phy_init_ex(qdev); } } /* * MII_Setup needs to be called before taking the PHY out of reset * so that the management interface clock speed can be set properly. * It would be better if we had a way to disable MDC until after the * PHY is out of reset, but we don't have that capability. */ static int ql_mii_setup(struct ql3_adapter *qdev) { u32 reg; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) return -1; if (qdev->device_id == QL3032_DEVICE_ID) ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 0x0f00000); /* Divide 125MHz clock by 28 to meet PHY timing requirements */ reg = MAC_MII_CONTROL_CLK_SEL_DIV28; ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); return 0; } #define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ SUPPORTED_FIBRE | \ SUPPORTED_Autoneg) #define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ SUPPORTED_10baseT_Full | \ SUPPORTED_100baseT_Half | \ SUPPORTED_100baseT_Full | \ SUPPORTED_1000baseT_Half | \ SUPPORTED_1000baseT_Full | \ SUPPORTED_Autoneg | \ SUPPORTED_TP) \ static u32 ql_supported_modes(struct ql3_adapter *qdev) { if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) return SUPPORTED_OPTICAL_MODES; return SUPPORTED_TP_MODES; } static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) { int status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } status = 
ql_is_auto_cfg(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return status; } static u32 ql_get_speed(struct ql3_adapter *qdev) { u32 status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } status = ql_get_link_speed(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return status; } static int ql_get_full_dup(struct ql3_adapter *qdev) { int status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } status = ql_is_link_full_dup(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return status; } static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) { struct ql3_adapter *qdev = netdev_priv(ndev); ecmd->transceiver = XCVR_INTERNAL; ecmd->supported = ql_supported_modes(qdev); if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { ecmd->port = PORT_FIBRE; } else { ecmd->port = PORT_TP; ecmd->phy_address = qdev->PHYAddr; } ecmd->advertising = ql_supported_modes(qdev); ecmd->autoneg = ql_get_auto_cfg_status(qdev); ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); ecmd->duplex = ql_get_full_dup(qdev); return 0; } static void ql_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo) { struct ql3_adapter *qdev = netdev_priv(ndev); strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, ql3xxx_driver_version, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), sizeof(drvinfo->bus_info)); drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } static u32 
ql_get_msglevel(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); return qdev->msg_enable; } static void ql_set_msglevel(struct net_device *ndev, u32 value) { struct ql3_adapter *qdev = netdev_priv(ndev); qdev->msg_enable = value; } static void ql_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { struct ql3_adapter *qdev = netdev_priv(ndev); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 reg; if (qdev->mac_index == 0) reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); else reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); pause->autoneg = ql_get_auto_cfg_status(qdev); pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; } static const struct ethtool_ops ql3xxx_ethtool_ops = { .get_settings = ql_get_settings, .get_drvinfo = ql_get_drvinfo, .get_link = ethtool_op_get_link, .get_msglevel = ql_get_msglevel, .set_msglevel = ql_set_msglevel, .get_pauseparam = ql_get_pauseparam, }; static int ql_populate_free_queue(struct ql3_adapter *qdev) { struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; dma_addr_t map; int err; while (lrg_buf_cb) { if (!lrg_buf_cb->skb) { lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!lrg_buf_cb->skb)) { netdev_printk(KERN_DEBUG, qdev->ndev, "Failed netdev_alloc_skb()\n"); break; } else { /* * We save some space to copy the ethhdr from * first buffer */ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); map = pci_map_single(qdev->pdev, lrg_buf_cb->skb->data, qdev->lrg_buffer_len - QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if (err) { netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; break; } lrg_buf_cb->buf_phy_addr_low = cpu_to_le32(LS_64BITS(map)); lrg_buf_cb->buf_phy_addr_high = cpu_to_le32(MS_64BITS(map)); dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 
dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - QL_HEADER_SPACE); --qdev->lrg_buf_skb_check; if (!qdev->lrg_buf_skb_check) return 1; } } lrg_buf_cb = lrg_buf_cb->next; } return 0; } /* * Caller holds hw_lock. */ static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; if (qdev->small_buf_release_cnt >= 16) { while (qdev->small_buf_release_cnt >= 16) { qdev->small_buf_q_producer_index++; if (qdev->small_buf_q_producer_index == NUM_SBUFQ_ENTRIES) qdev->small_buf_q_producer_index = 0; qdev->small_buf_release_cnt -= 8; } wmb(); writel(qdev->small_buf_q_producer_index, &port_regs->CommonRegs.rxSmallQProducerIndex); } } /* * Caller holds hw_lock. */ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) { struct bufq_addr_element *lrg_buf_q_ele; int i; struct ql_rcv_buf_cb *lrg_buf_cb; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; if ((qdev->lrg_buf_free_count >= 8) && (qdev->lrg_buf_release_cnt >= 16)) { if (qdev->lrg_buf_skb_check) if (!ql_populate_free_queue(qdev)) return; lrg_buf_q_ele = qdev->lrg_buf_next_free; while ((qdev->lrg_buf_release_cnt >= 16) && (qdev->lrg_buf_free_count >= 8)) { for (i = 0; i < 8; i++) { lrg_buf_cb = ql_get_from_lrg_buf_free_list(qdev); lrg_buf_q_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; lrg_buf_q_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; lrg_buf_q_ele++; qdev->lrg_buf_release_cnt--; } qdev->lrg_buf_q_producer_index++; if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries) qdev->lrg_buf_q_producer_index = 0; if (qdev->lrg_buf_q_producer_index == (qdev->num_lbufq_entries - 1)) { lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; } } wmb(); qdev->lrg_buf_next_free = lrg_buf_q_ele; writel(qdev->lrg_buf_q_producer_index, &port_regs->CommonRegs.rxLargeQProducerIndex); } } static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, struct ob_mac_iocb_rsp *mac_rsp) { struct ql_tx_buf_cb 
*tx_cb;
	int i;
	int retval = 0;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	/* NOTE(review): OB_MAC_IOCB_RSP_S was already warned about above but
	 * is treated as fatal here, so the warn branch looks unreachable as
	 * a non-error case — confirm intended semantics against HW docs. */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");
		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);
		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto invalid_seg_count;
	}

	/* Segment 0 is the skb head (mapped with pci_map_single);
	 * the remaining segments are fragment pages. */
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	/* Return the request-queue slot to the free pool. */
	atomic_inc(&qdev->tx_count);
}

/* Advance the small-buffer consumer index (wraps) and count the release. */
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

/* Return the next large receive-buffer control block, advancing the
 * (wrapping) large-buffer index and release count. */
static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to the buffer two.
Buffer two is the sent up while * buffer one is returned to the hardware to be reused. * 3032 receives all of it's data and headers in one buffer for a * simpler process. 3032 also supports checksum verification as * can be seen in ql_process_macip_rx_intr(). */ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) { struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; struct sk_buff *skb; u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); /* * Get the inbound address list (small buffer). */ ql_get_sbuf(qdev); if (qdev->device_id == QL3022_DEVICE_ID) lrg_buf_cb1 = ql_get_lbuf(qdev); /* start of second buffer */ lrg_buf_cb2 = ql_get_lbuf(qdev); skb = lrg_buf_cb2->skb; qdev->ndev->stats.rx_packets++; qdev->ndev->stats.rx_bytes += length; skb_put(skb, length); pci_unmap_single(qdev->pdev, dma_unmap_addr(lrg_buf_cb2, mapaddr), dma_unmap_len(lrg_buf_cb2, maplen), PCI_DMA_FROMDEVICE); prefetch(skb->data); skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, qdev->ndev); netif_receive_skb(skb); lrg_buf_cb2->skb = NULL; if (qdev->device_id == QL3022_DEVICE_ID) ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); } static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) { struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; struct sk_buff *skb1 = NULL, *skb2; struct net_device *ndev = qdev->ndev; u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); u16 size = 0; /* * Get the inbound address list (small buffer). 
*/ ql_get_sbuf(qdev); if (qdev->device_id == QL3022_DEVICE_ID) { /* start of first buffer on 3022 */ lrg_buf_cb1 = ql_get_lbuf(qdev); skb1 = lrg_buf_cb1->skb; size = ETH_HLEN; if (*((u16 *) skb1->data) != 0xFFFF) size += VLAN_ETH_HLEN - ETH_HLEN; } /* start of second buffer */ lrg_buf_cb2 = ql_get_lbuf(qdev); skb2 = lrg_buf_cb2->skb; skb_put(skb2, length); /* Just the second buffer length here. */ pci_unmap_single(qdev->pdev, dma_unmap_addr(lrg_buf_cb2, mapaddr), dma_unmap_len(lrg_buf_cb2, maplen), PCI_DMA_FROMDEVICE); prefetch(skb2->data); skb_checksum_none_assert(skb2); if (qdev->device_id == QL3022_DEVICE_ID) { /* * Copy the ethhdr from first buffer to second. This * is necessary for 3022 IP completions. */ skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, skb_push(skb2, size), size); } else { u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); if (checksum & (IB_IP_IOCB_RSP_3032_ICE | IB_IP_IOCB_RSP_3032_CE)) { netdev_err(ndev, "%s: Bad checksum for this %s packet, checksum = %x\n", __func__, ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? "TCP" : "UDP"), checksum); } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || (checksum & IB_IP_IOCB_RSP_3032_UDP && !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { skb2->ip_summed = CHECKSUM_UNNECESSARY; } } skb2->protocol = eth_type_trans(skb2, qdev->ndev); netif_receive_skb(skb2); ndev->stats.rx_packets++; ndev->stats.rx_bytes += length; lrg_buf_cb2->skb = NULL; if (qdev->device_id == QL3022_DEVICE_ID) ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); } static int ql_tx_rx_clean(struct ql3_adapter *qdev, int *tx_cleaned, int *rx_cleaned, int work_to_do) { struct net_rsp_iocb *net_rsp; struct net_device *ndev = qdev->ndev; int work_done = 0; /* While there are entries in the completion queue. 
*/ while ((le32_to_cpu(*(qdev->prsp_producer_index)) != qdev->rsp_consumer_index) && (work_done < work_to_do)) { net_rsp = qdev->rsp_current; rmb(); /* * Fix 4032 chip's undocumented "feature" where bit-8 is set * if the inbound completion is for a VLAN. */ if (qdev->device_id == QL3032_DEVICE_ID) net_rsp->opcode &= 0x7f; switch (net_rsp->opcode) { case OPCODE_OB_MAC_IOCB_FN0: case OPCODE_OB_MAC_IOCB_FN2: ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) net_rsp); (*tx_cleaned)++; break; case OPCODE_IB_MAC_IOCB: case OPCODE_IB_3032_MAC_IOCB: ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) net_rsp); (*rx_cleaned)++; break; case OPCODE_IB_IP_IOCB: case OPCODE_IB_3032_IP_IOCB: ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) net_rsp); (*rx_cleaned)++; break; default: { u32 *tmp = (u32 *)net_rsp; netdev_err(ndev, "Hit default case, not handled!\n" " dropping the packet, opcode = %x\n" "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", net_rsp->opcode, (unsigned long int)tmp[0], (unsigned long int)tmp[1], (unsigned long int)tmp[2], (unsigned long int)tmp[3]); } } qdev->rsp_consumer_index++; if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { qdev->rsp_consumer_index = 0; qdev->rsp_current = qdev->rsp_q_virt_addr; } else { qdev->rsp_current++; } work_done = *tx_cleaned + *rx_cleaned; } return work_done; } static int ql_poll(struct napi_struct *napi, int budget) { struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); int rx_cleaned = 0, tx_cleaned = 0; unsigned long hw_flags; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); if (tx_cleaned + rx_cleaned != budget) { spin_lock_irqsave(&qdev->hw_lock, hw_flags); __napi_complete(napi); ql_update_small_bufq_prod_index(qdev); ql_update_lrg_bufq_prod_index(qdev); writel(qdev->rsp_consumer_index, &port_regs->CommonRegs.rspQConsumerIndex); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); ql_enable_interrupts(qdev); } 
return tx_cleaned + rx_cleaned;
}

/*
 * Interrupt handler.  Three cases:
 *  - fatal error / reset-intent bits set: quiesce the interface and
 *    queue reset_work;
 *  - completion interrupt: mask IRQs and schedule NAPI polling;
 *  - otherwise the interrupt was not ours (IRQ_NONE).
 */
static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{

	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value = ql_read_common_reg_l(qdev,
				     &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE, &qdev->flags) ;

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var =
			    ql_read_page0_reg_l(qdev,
					      &port_regs->PortFatalErrStatus);
			netdev_warn(ndev,
				    "Resetting chip. PortFatalErrStatus register = 0x%x\n",
				    var);
			set_bit(QL_RESET_START, &qdev->flags) ;
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI, &qdev->flags) ;
			netdev_err(ndev,
				   "Another function issued a reset to the chip. ISR value = %x\n",
				   value);
		}
		/* Reset handling runs from process context via reset_work. */
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		/* Completion interrupt: hand off to NAPI, re-enabled in ql_poll(). */
		ql_disable_interrupts(qdev);
		if (likely(napi_schedule_prep(&qdev->napi)))
			__napi_schedule(&qdev->napi);
	} else
		return IRQ_NONE;

	return IRQ_RETVAL(handled);
}

/*
 * Get the total number of segments needed for the given number of fragments.
 * This is necessary because outbound address lists (OAL) will be used when
 * more than two frags are given.  Each address list has 5 addr/len pairs.
 * The 5th pair in each OAL is used to point to the next OAL if more frags
 * are coming.  That is why the frags:segment count ratio is not linear.
*/ static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) { if (qdev->device_id == QL3022_DEVICE_ID) return 1; if (frags <= 2) return frags + 1; else if (frags <= 6) return frags + 2; else if (frags <= 10) return frags + 3; else if (frags <= 14) return frags + 4; else if (frags <= 18) return frags + 5; return -1; } static void ql_hw_csum_setup(const struct sk_buff *skb, struct ob_mac_iocb_req *mac_iocb_ptr) { const struct iphdr *ip = ip_hdr(skb); mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); mac_iocb_ptr->ip_hdr_len = ip->ihl; if (ip->protocol == IPPROTO_TCP) { mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | OB_3032MAC_IOCB_REQ_IC; } else { mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | OB_3032MAC_IOCB_REQ_IC; } } /* * Map the buffers for this transmit. * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. */ static int ql_send_map(struct ql3_adapter *qdev, struct ob_mac_iocb_req *mac_iocb_ptr, struct ql_tx_buf_cb *tx_cb, struct sk_buff *skb) { struct oal *oal; struct oal_entry *oal_entry; int len = skb_headlen(skb); dma_addr_t map; int err; int completed_segs, i; int seg_cnt, seg = 0; int frag_cnt = (int)skb_shinfo(skb)->nr_frags; seg_cnt = tx_cb->seg_count; /* * Map the skb buffer first. */ map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if (err) { netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", err); return NETDEV_TX_BUSY; } oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); oal_entry->len = cpu_to_le32(len); dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); dma_unmap_len_set(&tx_cb->map[seg], maplen, len); seg++; if (seg_cnt == 1) { /* Terminate the last segment. 
*/ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); return NETDEV_TX_OK; } oal = tx_cb->oal; for (completed_segs = 0; completed_segs < frag_cnt; completed_segs++, seg++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; oal_entry++; /* * Check for continuation requirements. * It's strange but necessary. * Continuation entry points to outbound address list. */ if ((seg == 2 && seg_cnt > 3) || (seg == 7 && seg_cnt > 8) || (seg == 12 && seg_cnt > 13) || (seg == 17 && seg_cnt > 18)) { map = pci_map_single(qdev->pdev, oal, sizeof(struct oal), PCI_DMA_TODEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if (err) { netdev_err(qdev->ndev, "PCI mapping outbound address list with error: %d\n", err); goto map_error; } oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); oal_entry->len = cpu_to_le32(sizeof(struct oal) | OAL_CONT_ENTRY); dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); dma_unmap_len_set(&tx_cb->map[seg], maplen, sizeof(struct oal)); oal_entry = (struct oal_entry *)oal; oal++; seg++; } map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); err = dma_mapping_error(&qdev->pdev->dev, map); if (err) { netdev_err(qdev->ndev, "PCI mapping frags failed with error: %d\n", err); goto map_error; } oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); oal_entry->len = cpu_to_le32(skb_frag_size(frag)); dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); } /* Terminate the last segment. 
*/ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); return NETDEV_TX_OK; map_error: /* A PCI mapping failed and now we will need to back out * We need to traverse through the oal's and associated pages which * have been mapped and now we must unmap them to clean up properly */ seg = 1; oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; oal = tx_cb->oal; for (i = 0; i < completed_segs; i++, seg++) { oal_entry++; /* * Check for continuation requirements. * It's strange but necessary. */ if ((seg == 2 && seg_cnt > 3) || (seg == 7 && seg_cnt > 8) || (seg == 12 && seg_cnt > 13) || (seg == 17 && seg_cnt > 18)) { pci_unmap_single(qdev->pdev, dma_unmap_addr(&tx_cb->map[seg], mapaddr), dma_unmap_len(&tx_cb->map[seg], maplen), PCI_DMA_TODEVICE); oal++; seg++; } pci_unmap_page(qdev->pdev, dma_unmap_addr(&tx_cb->map[seg], mapaddr), dma_unmap_len(&tx_cb->map[seg], maplen), PCI_DMA_TODEVICE); } pci_unmap_single(qdev->pdev, dma_unmap_addr(&tx_cb->map[0], mapaddr), dma_unmap_addr(&tx_cb->map[0], maplen), PCI_DMA_TODEVICE); return NETDEV_TX_BUSY; } /* * The difference between 3022 and 3032 sends: * 3022 only supports a simple single segment transmission. * 3032 supports checksumming and scatter/gather lists (fragments). * The 3032 supports sglists by using the 3 addr/len pairs (ALP) * in the IOCB plus a chain of outbound address lists (OAL) that * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) * will be used to point to an OAL when more ALP entries are required. * The IOCB is always the top of the chain followed by one or more * OALs (when necessary). 
*/ static netdev_tx_t ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; struct ql_tx_buf_cb *tx_cb; u32 tot_len = skb->len; struct ob_mac_iocb_req *mac_iocb_ptr; if (unlikely(atomic_read(&qdev->tx_count) < 2)) return NETDEV_TX_BUSY; tx_cb = &qdev->tx_buf[qdev->req_producer_index]; tx_cb->seg_count = ql_get_seg_count(qdev, skb_shinfo(skb)->nr_frags); if (tx_cb->seg_count == -1) { netdev_err(ndev, "%s: invalid segment count!\n", __func__); return NETDEV_TX_OK; } mac_iocb_ptr = tx_cb->queue_entry; memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); mac_iocb_ptr->opcode = qdev->mac_ob_opcode; mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; mac_iocb_ptr->flags |= qdev->mb_bit_mask; mac_iocb_ptr->transaction_id = qdev->req_producer_index; mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); tx_cb->skb = skb; if (qdev->device_id == QL3032_DEVICE_ID && skb->ip_summed == CHECKSUM_PARTIAL) ql_hw_csum_setup(skb, mac_iocb_ptr); if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { netdev_err(ndev, "%s: Could not map the segments!\n", __func__); return NETDEV_TX_BUSY; } wmb(); qdev->req_producer_index++; if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) qdev->req_producer_index = 0; wmb(); ql_write_common_reg_l(qdev, &port_regs->CommonRegs.reqQProducerIndex, qdev->req_producer_index); netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, "tx queued, slot %d, len %d\n", qdev->req_producer_index, skb->len); atomic_dec(&qdev->tx_count); return NETDEV_TX_OK; } static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) { qdev->req_q_size = (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); qdev->req_q_virt_addr = pci_alloc_consistent(qdev->pdev, (size_t) qdev->req_q_size, &qdev->req_q_phy_addr); if ((qdev->req_q_virt_addr == NULL) || LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { netdev_err(qdev->ndev, "reqQ 
failed\n"); return -ENOMEM; } qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); qdev->rsp_q_virt_addr = pci_alloc_consistent(qdev->pdev, (size_t) qdev->rsp_q_size, &qdev->rsp_q_phy_addr); if ((qdev->rsp_q_virt_addr == NULL) || LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { netdev_err(qdev->ndev, "rspQ allocation failed\n"); pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, qdev->req_q_virt_addr, qdev->req_q_phy_addr); return -ENOMEM; } set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); return 0; } static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) { if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { netdev_info(qdev->ndev, "Already done\n"); return; } pci_free_consistent(qdev->pdev, qdev->req_q_size, qdev->req_q_virt_addr, qdev->req_q_phy_addr); qdev->req_q_virt_addr = NULL; pci_free_consistent(qdev->pdev, qdev->rsp_q_size, qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); qdev->rsp_q_virt_addr = NULL; clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); } static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) { /* Create Large Buffer Queue */ qdev->lrg_buf_q_size = qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); if (qdev->lrg_buf_q_size < PAGE_SIZE) qdev->lrg_buf_q_alloc_size = PAGE_SIZE; else qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), GFP_KERNEL); if (qdev->lrg_buf == NULL) { netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n"); return -ENOMEM; } qdev->lrg_buf_q_alloc_virt_addr = pci_alloc_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, &qdev->lrg_buf_q_alloc_phy_addr); if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { netdev_err(qdev->ndev, "lBufQ failed\n"); return -ENOMEM; } qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; /* Create Small Buffer Queue */ qdev->small_buf_q_size = NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); if 
(qdev->small_buf_q_size < PAGE_SIZE) qdev->small_buf_q_alloc_size = PAGE_SIZE; else qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; qdev->small_buf_q_alloc_virt_addr = pci_alloc_consistent(qdev->pdev, qdev->small_buf_q_alloc_size, &qdev->small_buf_q_alloc_phy_addr); if (qdev->small_buf_q_alloc_virt_addr == NULL) { netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, qdev->lrg_buf_q_alloc_virt_addr, qdev->lrg_buf_q_alloc_phy_addr); return -ENOMEM; } qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); return 0; } static void ql_free_buffer_queues(struct ql3_adapter *qdev) { if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { netdev_info(qdev->ndev, "Already done\n"); return; } kfree(qdev->lrg_buf); pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, qdev->lrg_buf_q_alloc_virt_addr, qdev->lrg_buf_q_alloc_phy_addr); qdev->lrg_buf_q_virt_addr = NULL; pci_free_consistent(qdev->pdev, qdev->small_buf_q_alloc_size, qdev->small_buf_q_alloc_virt_addr, qdev->small_buf_q_alloc_phy_addr); qdev->small_buf_q_virt_addr = NULL; clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); } static int ql_alloc_small_buffers(struct ql3_adapter *qdev) { int i; struct bufq_addr_element *small_buf_q_entry; /* Currently we allocate on one of memory and use it for smallbuffers */ qdev->small_buf_total_size = (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * QL_SMALL_BUFFER_SIZE); qdev->small_buf_virt_addr = pci_alloc_consistent(qdev->pdev, qdev->small_buf_total_size, &qdev->small_buf_phy_addr); if (qdev->small_buf_virt_addr == NULL) { netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); return -ENOMEM; } qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); small_buf_q_entry = qdev->small_buf_q_virt_addr; 
/* Initialize the small buffer queue. */ for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { small_buf_q_entry->addr_high = cpu_to_le32(qdev->small_buf_phy_addr_high); small_buf_q_entry->addr_low = cpu_to_le32(qdev->small_buf_phy_addr_low + (i * QL_SMALL_BUFFER_SIZE)); small_buf_q_entry++; } qdev->small_buf_index = 0; set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); return 0; } static void ql_free_small_buffers(struct ql3_adapter *qdev) { if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { netdev_info(qdev->ndev, "Already done\n"); return; } if (qdev->small_buf_virt_addr != NULL) { pci_free_consistent(qdev->pdev, qdev->small_buf_total_size, qdev->small_buf_virt_addr, qdev->small_buf_phy_addr); qdev->small_buf_virt_addr = NULL; } } static void ql_free_large_buffers(struct ql3_adapter *qdev) { int i = 0; struct ql_rcv_buf_cb *lrg_buf_cb; for (i = 0; i < qdev->num_large_buffers; i++) { lrg_buf_cb = &qdev->lrg_buf[i]; if (lrg_buf_cb->skb) { dev_kfree_skb(lrg_buf_cb->skb); pci_unmap_single(qdev->pdev, dma_unmap_addr(lrg_buf_cb, mapaddr), dma_unmap_len(lrg_buf_cb, maplen), PCI_DMA_FROMDEVICE); memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); } else { break; } } } static void ql_init_large_buffers(struct ql3_adapter *qdev) { int i; struct ql_rcv_buf_cb *lrg_buf_cb; struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; for (i = 0; i < qdev->num_large_buffers; i++) { lrg_buf_cb = &qdev->lrg_buf[i]; buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; buf_addr_ele++; } qdev->lrg_buf_index = 0; qdev->lrg_buf_skb_check = 0; } static int ql_alloc_large_buffers(struct ql3_adapter *qdev) { int i; struct ql_rcv_buf_cb *lrg_buf_cb; struct sk_buff *skb; dma_addr_t map; int err; for (i = 0; i < qdev->num_large_buffers; i++) { skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!skb)) { /* Better luck next round */ netdev_err(qdev->ndev, "large buff alloc failed for 
%d bytes at index %d\n", qdev->lrg_buffer_len * 2, i); ql_free_large_buffers(qdev); return -ENOMEM; } else { lrg_buf_cb = &qdev->lrg_buf[i]; memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); lrg_buf_cb->index = i; lrg_buf_cb->skb = skb; /* * We save some space to copy the ethhdr from first * buffer */ skb_reserve(skb, QL_HEADER_SPACE); map = pci_map_single(qdev->pdev, skb->data, qdev->lrg_buffer_len - QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if (err) { netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", err); ql_free_large_buffers(qdev); return -ENOMEM; } dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - QL_HEADER_SPACE); lrg_buf_cb->buf_phy_addr_low = cpu_to_le32(LS_64BITS(map)); lrg_buf_cb->buf_phy_addr_high = cpu_to_le32(MS_64BITS(map)); } } return 0; } static void ql_free_send_free_list(struct ql3_adapter *qdev) { struct ql_tx_buf_cb *tx_cb; int i; tx_cb = &qdev->tx_buf[0]; for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { kfree(tx_cb->oal); tx_cb->oal = NULL; tx_cb++; } } static int ql_create_send_free_list(struct ql3_adapter *qdev) { struct ql_tx_buf_cb *tx_cb; int i; struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; /* Create free list of transmit buffers */ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { tx_cb = &qdev->tx_buf[i]; tx_cb->skb = NULL; tx_cb->queue_entry = req_q_curr; req_q_curr++; tx_cb->oal = kmalloc(512, GFP_KERNEL); if (tx_cb->oal == NULL) return -ENOMEM; } return 0; } static int ql_alloc_mem_resources(struct ql3_adapter *qdev) { if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; qdev->lrg_buffer_len = NORMAL_MTU_SIZE; } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { /* * Bigger buffers, so less of them. */ qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; qdev->lrg_buffer_len = JUMBO_MTU_SIZE; } else { netdev_err(qdev->ndev, "Invalid mtu size: %d. 
Only %d and %d are accepted.\n", qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); return -ENOMEM; } qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; qdev->max_frame_size = (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; /* * First allocate a page of shared memory and use it for shadow * locations of Network Request Queue Consumer Address Register and * Network Completion Queue Producer Index Register */ qdev->shadow_reg_virt_addr = pci_alloc_consistent(qdev->pdev, PAGE_SIZE, &qdev->shadow_reg_phy_addr); if (qdev->shadow_reg_virt_addr != NULL) { qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; qdev->req_consumer_index_phy_addr_high = MS_64BITS(qdev->shadow_reg_phy_addr); qdev->req_consumer_index_phy_addr_low = LS_64BITS(qdev->shadow_reg_phy_addr); qdev->prsp_producer_index = (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); qdev->rsp_producer_index_phy_addr_high = qdev->req_consumer_index_phy_addr_high; qdev->rsp_producer_index_phy_addr_low = qdev->req_consumer_index_phy_addr_low + 8; } else { netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); return -ENOMEM; } if (ql_alloc_net_req_rsp_queues(qdev) != 0) { netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); goto err_req_rsp; } if (ql_alloc_buffer_queues(qdev) != 0) { netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); goto err_buffer_queues; } if (ql_alloc_small_buffers(qdev) != 0) { netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); goto err_small_buffers; } if (ql_alloc_large_buffers(qdev) != 0) { netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); goto err_small_buffers; } /* Initialize the large buffer queue. 
*/ ql_init_large_buffers(qdev); if (ql_create_send_free_list(qdev)) goto err_free_list; qdev->rsp_current = qdev->rsp_q_virt_addr; return 0; err_free_list: ql_free_send_free_list(qdev); err_small_buffers: ql_free_buffer_queues(qdev); err_buffer_queues: ql_free_net_req_rsp_queues(qdev); err_req_rsp: pci_free_consistent(qdev->pdev, PAGE_SIZE, qdev->shadow_reg_virt_addr, qdev->shadow_reg_phy_addr); return -ENOMEM; } static void ql_free_mem_resources(struct ql3_adapter *qdev) { ql_free_send_free_list(qdev); ql_free_large_buffers(qdev); ql_free_small_buffers(qdev); ql_free_buffer_queues(qdev); ql_free_net_req_rsp_queues(qdev); if (qdev->shadow_reg_virt_addr != NULL) { pci_free_consistent(qdev->pdev, PAGE_SIZE, qdev->shadow_reg_virt_addr, qdev->shadow_reg_phy_addr); qdev->shadow_reg_virt_addr = NULL; } } static int ql_init_misc_registers(struct ql3_adapter *qdev) { struct ql3xxx_local_ram_registers __iomem *local_ram = (void __iomem *)qdev->mem_map_registers; if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 4)) return -1; ql_write_page2_reg(qdev, &local_ram->bufletSize, qdev->nvram_data.bufletSize); ql_write_page2_reg(qdev, &local_ram->maxBufletCount, qdev->nvram_data.bufletCount); ql_write_page2_reg(qdev, &local_ram->freeBufletThresholdLow, (qdev->nvram_data.tcpWindowThreshold25 << 16) | (qdev->nvram_data.tcpWindowThreshold0)); ql_write_page2_reg(qdev, &local_ram->freeBufletThresholdHigh, qdev->nvram_data.tcpWindowThreshold50); ql_write_page2_reg(qdev, &local_ram->ipHashTableBase, (qdev->nvram_data.ipHashTableBaseHi << 16) | qdev->nvram_data.ipHashTableBaseLo); ql_write_page2_reg(qdev, &local_ram->ipHashTableCount, qdev->nvram_data.ipHashTableSize); ql_write_page2_reg(qdev, &local_ram->tcpHashTableBase, (qdev->nvram_data.tcpHashTableBaseHi << 16) | qdev->nvram_data.tcpHashTableBaseLo); ql_write_page2_reg(qdev, &local_ram->tcpHashTableCount, qdev->nvram_data.tcpHashTableSize); ql_write_page2_reg(qdev, 
&local_ram->ncbBase, (qdev->nvram_data.ncbTableBaseHi << 16) | qdev->nvram_data.ncbTableBaseLo); ql_write_page2_reg(qdev, &local_ram->maxNcbCount, qdev->nvram_data.ncbTableSize); ql_write_page2_reg(qdev, &local_ram->drbBase, (qdev->nvram_data.drbTableBaseHi << 16) | qdev->nvram_data.drbTableBaseLo); ql_write_page2_reg(qdev, &local_ram->maxDrbCount, qdev->nvram_data.drbTableSize); ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); return 0; } static int ql_adapter_initialize(struct ql3_adapter *qdev) { u32 value; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; struct ql3xxx_host_memory_registers __iomem *hmem_regs = (void __iomem *)port_regs; u32 delay = 10; int status = 0; if (ql_mii_setup(qdev)) return -1; /* Bring out PHY out of reset */ ql_write_common_reg(qdev, spir, (ISP_SERIAL_PORT_IF_WE | (ISP_SERIAL_PORT_IF_WE << 16))); /* Give the PHY time to come out of reset. */ mdelay(100); qdev->port_link_state = LS_DOWN; netif_carrier_off(qdev->ndev); /* V2 chip fix for ARS-39168. 
*/ ql_write_common_reg(qdev, spir, (ISP_SERIAL_PORT_IF_SDE | (ISP_SERIAL_PORT_IF_SDE << 16))); /* Request Queue Registers */ *((u32 *)(qdev->preq_consumer_index)) = 0; atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); qdev->req_producer_index = 0; ql_write_page1_reg(qdev, &hmem_regs->reqConsumerIndexAddrHigh, qdev->req_consumer_index_phy_addr_high); ql_write_page1_reg(qdev, &hmem_regs->reqConsumerIndexAddrLow, qdev->req_consumer_index_phy_addr_low); ql_write_page1_reg(qdev, &hmem_regs->reqBaseAddrHigh, MS_64BITS(qdev->req_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->reqBaseAddrLow, LS_64BITS(qdev->req_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); /* Response Queue Registers */ *((__le16 *) (qdev->prsp_producer_index)) = 0; qdev->rsp_consumer_index = 0; qdev->rsp_current = qdev->rsp_q_virt_addr; ql_write_page1_reg(qdev, &hmem_regs->rspProducerIndexAddrHigh, qdev->rsp_producer_index_phy_addr_high); ql_write_page1_reg(qdev, &hmem_regs->rspProducerIndexAddrLow, qdev->rsp_producer_index_phy_addr_low); ql_write_page1_reg(qdev, &hmem_regs->rspBaseAddrHigh, MS_64BITS(qdev->rsp_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rspBaseAddrLow, LS_64BITS(qdev->rsp_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); /* Large Buffer Queue */ ql_write_page1_reg(qdev, &hmem_regs->rxLargeQBaseAddrHigh, MS_64BITS(qdev->lrg_buf_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rxLargeQBaseAddrLow, LS_64BITS(qdev->lrg_buf_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries); ql_write_page1_reg(qdev, &hmem_regs->rxLargeBufferLength, qdev->lrg_buffer_len); /* Small Buffer Queue */ ql_write_page1_reg(qdev, &hmem_regs->rxSmallQBaseAddrHigh, MS_64BITS(qdev->small_buf_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rxSmallQBaseAddrLow, LS_64BITS(qdev->small_buf_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); ql_write_page1_reg(qdev, 
&hmem_regs->rxSmallBufferLength, QL_SMALL_BUFFER_SIZE); qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; qdev->small_buf_release_cnt = 8; qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; qdev->lrg_buf_release_cnt = 8; qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; qdev->small_buf_index = 0; qdev->lrg_buf_index = 0; qdev->lrg_buf_free_count = 0; qdev->lrg_buf_free_head = NULL; qdev->lrg_buf_free_tail = NULL; ql_write_common_reg(qdev, &port_regs->CommonRegs. rxSmallQProducerIndex, qdev->small_buf_q_producer_index); ql_write_common_reg(qdev, &port_regs->CommonRegs. rxLargeQProducerIndex, qdev->lrg_buf_q_producer_index); /* * Find out if the chip has already been initialized. If it has, then * we skip some of the initialization. */ clear_bit(QL_LINK_MASTER, &qdev->flags); value = ql_read_page0_reg(qdev, &port_regs->portStatus); if ((value & PORT_STATUS_IC) == 0) { /* Chip has not been configured yet, so let it rip. */ if (ql_init_misc_registers(qdev)) { status = -1; goto out; } value = qdev->nvram_data.tcpMaxWindowSize; ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 13)) { status = -1; goto out; } ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << 16) | (INTERNAL_CHIP_SD | INTERNAL_CHIP_WE))); ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); } if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1MaxFrameLengthReg, qdev->max_frame_size); else ql_write_page0_reg(qdev, &port_regs->mac0MaxFrameLengthReg, qdev->max_frame_size); if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { status = -1; goto out; } PHY_Setup(qdev); ql_init_scan_mode(qdev); ql_get_phy_owner(qdev); /* Load the MAC Configuration */ /* Program 
lower 32 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, ((qdev->ndev->dev_addr[2] << 24) | (qdev->ndev->dev_addr[3] << 16) | (qdev->ndev->dev_addr[4] << 8) | qdev->ndev->dev_addr[5])); /* Program top 16 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, ((qdev->ndev->dev_addr[0] << 8) | qdev->ndev->dev_addr[1])); /* Enable Primary MAC */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | MAC_ADDR_INDIRECT_PTR_REG_PE)); /* Clear Primary and Secondary IP addresses */ ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, ((IP_ADDR_INDEX_REG_MASK << 16) | (qdev->mac_index << 2))); ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, ((IP_ADDR_INDEX_REG_MASK << 16) | ((qdev->mac_index << 2) + 1))); ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); /* Indicate Configuration Complete */ ql_write_page0_reg(qdev, &port_regs->portControl, ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); do { value = ql_read_page0_reg(qdev, &port_regs->portStatus); if (value & PORT_STATUS_IC) break; spin_unlock_irq(&qdev->hw_lock); msleep(500); spin_lock_irq(&qdev->hw_lock); } while (--delay); if (delay == 0) { netdev_err(qdev->ndev, "Hw Initialization timeout\n"); status = -1; goto out; } /* Enable Ethernet Function */ if (qdev->device_id == QL3032_DEVICE_ID) { value = (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 | QL3032_PORT_CONTROL_ET); ql_write_page0_reg(qdev, &port_regs->functionControl, ((value << 16) | value)); } else { value = (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | PORT_CONTROL_HH); 
ql_write_page0_reg(qdev, &port_regs->portControl, ((value << 16) | value)); } out: return status; } /* * Caller holds hw_lock. */ static int ql_adapter_reset(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; int status = 0; u16 value; int max_wait_time; set_bit(QL_RESET_ACTIVE, &qdev->flags); clear_bit(QL_RESET_DONE, &qdev->flags); /* * Issue soft reset to chip. */ netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); ql_write_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus, ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); /* Wait 3 seconds for reset to complete. */ netdev_printk(KERN_DEBUG, qdev->ndev, "Wait 10 milliseconds for reset to complete\n"); /* Wait until the firmware tells us the Soft Reset is done */ max_wait_time = 5; do { value = ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); if ((value & ISP_CONTROL_SR) == 0) break; ssleep(1); } while ((--max_wait_time)); /* * Also, make sure that the Network Reset Interrupt bit has been * cleared after the soft reset has taken place. */ value = ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); if (value & ISP_CONTROL_RI) { netdev_printk(KERN_DEBUG, qdev->ndev, "clearing RI after reset\n"); ql_write_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus, ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); } if (max_wait_time == 0) { /* Issue Force Soft Reset */ ql_write_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus, ((ISP_CONTROL_FSR << 16) | ISP_CONTROL_FSR)); /* * Wait until the firmware tells us the Force Soft Reset is * done */ max_wait_time = 5; do { value = ql_read_common_reg(qdev, &port_regs->CommonRegs. 
ispControlStatus); if ((value & ISP_CONTROL_FSR) == 0) break; ssleep(1); } while ((--max_wait_time)); } if (max_wait_time == 0) status = 1; clear_bit(QL_RESET_ACTIVE, &qdev->flags); set_bit(QL_RESET_DONE, &qdev->flags); return status; } static void ql_set_mac_info(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value, port_status; u8 func_number; /* Get the function number */ value = ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); switch (value & ISP_CONTROL_FN_MASK) { case ISP_CONTROL_FN0_NET: qdev->mac_index = 0; qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; qdev->mb_bit_mask = FN0_MA_BITS_MASK; qdev->PHYAddr = PORT0_PHY_ADDRESS; if (port_status & PORT_STATUS_SM0) set_bit(QL_LINK_OPTICAL, &qdev->flags); else clear_bit(QL_LINK_OPTICAL, &qdev->flags); break; case ISP_CONTROL_FN1_NET: qdev->mac_index = 1; qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; qdev->mb_bit_mask = FN1_MA_BITS_MASK; qdev->PHYAddr = PORT1_PHY_ADDRESS; if (port_status & PORT_STATUS_SM1) set_bit(QL_LINK_OPTICAL, &qdev->flags); else clear_bit(QL_LINK_OPTICAL, &qdev->flags); break; case ISP_CONTROL_FN0_SCSI: case ISP_CONTROL_FN1_SCSI: default: netdev_printk(KERN_DEBUG, qdev->ndev, "Invalid function number, ispControlStatus = 0x%x\n", value); break; } qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; } static void ql_display_dev_info(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); struct pci_dev *pdev = qdev->pdev; netdev_info(ndev, "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", qdev->pci_slot); netdev_info(ndev, "%s Interface\n", test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); /* * Print PCI bus width/type. 
*/ netdev_info(ndev, "Bus interface is %s %s\n", ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), ((qdev->pci_x) ? "PCI-X" : "PCI")); netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", qdev->mem_map_registers); netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); } static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) { struct net_device *ndev = qdev->ndev; int retval = 0; netif_stop_queue(ndev); netif_carrier_off(ndev); clear_bit(QL_ADAPTER_UP, &qdev->flags); clear_bit(QL_LINK_MASTER, &qdev->flags); ql_disable_interrupts(qdev); free_irq(qdev->pdev->irq, ndev); if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); clear_bit(QL_MSI_ENABLED, &qdev->flags); pci_disable_msi(qdev->pdev); } del_timer_sync(&qdev->adapter_timer); napi_disable(&qdev->napi); if (do_reset) { int soft_reset; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (ql_wait_for_drvr_lock(qdev)) { soft_reset = ql_adapter_reset(qdev); if (soft_reset) { netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", qdev->index); } netdev_err(ndev, "Releasing driver lock via chip reset\n"); } else { netdev_err(ndev, "Could not acquire driver lock to do reset!\n"); retval = -1; } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); } ql_free_mem_resources(qdev); return retval; } static int ql_adapter_up(struct ql3_adapter *qdev) { struct net_device *ndev = qdev->ndev; int err; unsigned long irq_flags = IRQF_SHARED; unsigned long hw_flags; if (ql_alloc_mem_resources(qdev)) { netdev_err(ndev, "Unable to allocate buffers\n"); return -ENOMEM; } if (qdev->msi) { if (pci_enable_msi(qdev->pdev)) { netdev_err(ndev, "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); qdev->msi = 0; } else { netdev_info(ndev, "MSI Enabled...\n"); set_bit(QL_MSI_ENABLED, &qdev->flags); irq_flags &= ~IRQF_SHARED; } } err = request_irq(qdev->pdev->irq, ql3xxx_isr, irq_flags, ndev->name, ndev); if (err) { netdev_err(ndev, "Failed to reserve interrupt %d - already in use\n", qdev->pdev->irq); goto err_irq; } spin_lock_irqsave(&qdev->hw_lock, hw_flags); err = ql_wait_for_drvr_lock(qdev); if (err) { err = ql_adapter_initialize(qdev); if (err) { netdev_err(ndev, "Unable to initialize adapter\n"); goto err_init; } netdev_err(ndev, "Releasing driver lock\n"); ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); } else { netdev_err(ndev, "Could not acquire driver lock\n"); goto err_lock; } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); set_bit(QL_ADAPTER_UP, &qdev->flags); mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); napi_enable(&qdev->napi); ql_enable_interrupts(qdev); return 0; err_init: ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); err_lock: spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); free_irq(qdev->pdev->irq, ndev); err_irq: if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { netdev_info(ndev, "calling pci_disable_msi()\n"); clear_bit(QL_MSI_ENABLED, &qdev->flags); pci_disable_msi(qdev->pdev); } return err; } static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) { if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { netdev_err(qdev->ndev, "Driver up/down cycle failed, closing device\n"); rtnl_lock(); dev_close(qdev->ndev); rtnl_unlock(); return -1; } return 0; } static int ql3xxx_close(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); /* * Wait for device to recover from a reset. * (Rarely happens, but possible.) 
*/ while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) msleep(50); ql_adapter_down(qdev, QL_DO_RESET); return 0; } static int ql3xxx_open(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); return ql_adapter_up(qdev); } static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) { struct ql3_adapter *qdev = netdev_priv(ndev); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; struct sockaddr *addr = p; unsigned long hw_flags; if (netif_running(ndev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); spin_lock_irqsave(&qdev->hw_lock, hw_flags); /* Program lower 32 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, ((ndev->dev_addr[2] << 24) | (ndev-> dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); /* Program top 16 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } static void ql3xxx_tx_timeout(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); netdev_err(ndev, "Resetting...\n"); /* * Stop the queues, we've got a problem. */ netif_stop_queue(ndev); /* * Wake up the worker to process this event. 
*/ queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); } static void ql_reset_work(struct work_struct *work) { struct ql3_adapter *qdev = container_of(work, struct ql3_adapter, reset_work.work); struct net_device *ndev = qdev->ndev; u32 value; struct ql_tx_buf_cb *tx_cb; int max_wait_time, i; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; unsigned long hw_flags; if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { clear_bit(QL_LINK_MASTER, &qdev->flags); /* * Loop through the active list and return the skb. */ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { int j; tx_cb = &qdev->tx_buf[i]; if (tx_cb->skb) { netdev_printk(KERN_DEBUG, ndev, "Freeing lost SKB\n"); pci_unmap_single(qdev->pdev, dma_unmap_addr(&tx_cb->map[0], mapaddr), dma_unmap_len(&tx_cb->map[0], maplen), PCI_DMA_TODEVICE); for (j = 1; j < tx_cb->seg_count; j++) { pci_unmap_page(qdev->pdev, dma_unmap_addr(&tx_cb->map[j], mapaddr), dma_unmap_len(&tx_cb->map[j], maplen), PCI_DMA_TODEVICE); } dev_kfree_skb(tx_cb->skb); tx_cb->skb = NULL; } } netdev_err(ndev, "Clearing NRI after reset\n"); spin_lock_irqsave(&qdev->hw_lock, hw_flags); ql_write_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus, ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); /* * Wait the for Soft Reset to Complete. */ max_wait_time = 10; do { value = ql_read_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus); if ((value & ISP_CONTROL_SR) == 0) { netdev_printk(KERN_DEBUG, ndev, "reset completed\n"); break; } if (value & ISP_CONTROL_RI) { netdev_printk(KERN_DEBUG, ndev, "clearing NRI after reset\n"); ql_write_common_reg(qdev, &port_regs-> CommonRegs. ispControlStatus, ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); ssleep(1); spin_lock_irqsave(&qdev->hw_lock, hw_flags); } while (--max_wait_time); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); if (value & ISP_CONTROL_SR) { /* * Set the reset flags and clear the board again. 
* Nothing else to do... */ netdev_err(ndev, "Timed out waiting for reset to complete\n"); netdev_err(ndev, "Do a reset\n"); clear_bit(QL_RESET_PER_SCSI, &qdev->flags); clear_bit(QL_RESET_START, &qdev->flags); ql_cycle_adapter(qdev, QL_DO_RESET); return; } clear_bit(QL_RESET_ACTIVE, &qdev->flags); clear_bit(QL_RESET_PER_SCSI, &qdev->flags); clear_bit(QL_RESET_START, &qdev->flags); ql_cycle_adapter(qdev, QL_NO_RESET); } } static void ql_tx_timeout_work(struct work_struct *work) { struct ql3_adapter *qdev = container_of(work, struct ql3_adapter, tx_timeout_work.work); ql_cycle_adapter(qdev, QL_DO_RESET); } static void ql_get_board_info(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); if (value & PORT_STATUS_64) qdev->pci_width = 64; else qdev->pci_width = 32; if (value & PORT_STATUS_X) qdev->pci_x = 1; else qdev->pci_x = 0; qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); } static void ql3xxx_timer(unsigned long ptr) { struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); } static const struct net_device_ops ql3xxx_netdev_ops = { .ndo_open = ql3xxx_open, .ndo_start_xmit = ql3xxx_send, .ndo_stop = ql3xxx_close, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ql3xxx_set_mac_address, .ndo_tx_timeout = ql3xxx_tx_timeout, }; static int __devinit ql3xxx_probe(struct pci_dev *pdev, const struct pci_device_id *pci_entry) { struct net_device *ndev = NULL; struct ql3_adapter *qdev = NULL; static int cards_found; int uninitialized_var(pci_using_dac), err; err = pci_enable_device(pdev); if (err) { pr_err("%s cannot enable PCI device\n", pci_name(pdev)); goto err_out; } err = pci_request_regions(pdev, DRV_NAME); if (err) { pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); 
goto err_out_disable_pdev; } pci_set_master(pdev); if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { pci_using_dac = 1; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { pci_using_dac = 0; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); } if (err) { pr_err("%s no usable DMA configuration\n", pci_name(pdev)); goto err_out_free_regions; } ndev = alloc_etherdev(sizeof(struct ql3_adapter)); if (!ndev) { err = -ENOMEM; goto err_out_free_regions; } SET_NETDEV_DEV(ndev, &pdev->dev); pci_set_drvdata(pdev, ndev); qdev = netdev_priv(ndev); qdev->index = cards_found; qdev->ndev = ndev; qdev->pdev = pdev; qdev->device_id = pci_entry->device; qdev->port_link_state = LS_DOWN; if (msi) qdev->msi = 1; qdev->msg_enable = netif_msg_init(debug, default_msg); if (pci_using_dac) ndev->features |= NETIF_F_HIGHDMA; if (qdev->device_id == QL3032_DEVICE_ID) ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); if (!qdev->mem_map_registers) { pr_err("%s: cannot map device registers\n", pci_name(pdev)); err = -EIO; goto err_out_free_ndev; } spin_lock_init(&qdev->adapter_lock); spin_lock_init(&qdev->hw_lock); /* Set driver entry points */ ndev->netdev_ops = &ql3xxx_netdev_ops; SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); ndev->watchdog_timeo = 5 * HZ; netif_napi_add(ndev, &qdev->napi, ql_poll, 64); ndev->irq = pdev->irq; /* make sure the EEPROM is good */ if (ql_get_nvram_params(qdev)) { pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", __func__, qdev->index); err = -EIO; goto err_out_iounmap; } ql_set_mac_info(qdev); /* Validate and set parameters */ if (qdev->mac_index) { ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); } else { ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); } memcpy(ndev->perm_addr, ndev->dev_addr, 
ndev->addr_len); ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; /* Record PCI bus information. */ ql_get_board_info(qdev); /* * Set the Maximum Memory Read Byte Count value. We do this to handle * jumbo frames. */ if (qdev->pci_x) pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); err = register_netdev(ndev); if (err) { pr_err("%s: cannot register net device\n", pci_name(pdev)); goto err_out_iounmap; } /* we're going to reset, so assume we have no link for now */ netif_carrier_off(ndev); netif_stop_queue(ndev); qdev->workqueue = create_singlethread_workqueue(ndev->name); INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); init_timer(&qdev->adapter_timer); qdev->adapter_timer.function = ql3xxx_timer; qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ qdev->adapter_timer.data = (unsigned long)qdev; if (!cards_found) { pr_alert("%s\n", DRV_STRING); pr_alert("Driver name: %s, Version: %s\n", DRV_NAME, DRV_VERSION); } ql_display_dev_info(ndev); cards_found++; return 0; err_out_iounmap: iounmap(qdev->mem_map_registers); err_out_free_ndev: free_netdev(ndev); err_out_free_regions: pci_release_regions(pdev); err_out_disable_pdev: pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); err_out: return err; } static void __devexit ql3xxx_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql3_adapter *qdev = netdev_priv(ndev); unregister_netdev(ndev); ql_disable_interrupts(qdev); if (qdev->workqueue) { cancel_delayed_work(&qdev->reset_work); cancel_delayed_work(&qdev->tx_timeout_work); destroy_workqueue(qdev->workqueue); qdev->workqueue = NULL; } iounmap(qdev->mem_map_registers); pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); free_netdev(ndev); } static struct pci_driver ql3xxx_driver = { .name = DRV_NAME, .id_table = ql3xxx_pci_tbl, .probe = ql3xxx_probe, .remove = 
__devexit_p(ql3xxx_remove), }; static int __init ql3xxx_init_module(void) { return pci_register_driver(&ql3xxx_driver); } static void __exit ql3xxx_exit(void) { pci_unregister_driver(&ql3xxx_driver); } module_init(ql3xxx_init_module); module_exit(ql3xxx_exit);
gpl-2.0
faux123/xperia_8974
drivers/net/ethernet/brocade/bna/bna_enet.c
4951
52883
/* * Linux network driver for Brocade Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com */ #include "bna.h" static inline int ethport_can_be_up(struct bna_ethport *ethport) { int ready = 0; if (ethport->bna->enet.type == BNA_ENET_T_REGULAR) ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) && (ethport->flags & BNA_ETHPORT_F_RX_STARTED) && (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED)); else ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) && (ethport->flags & BNA_ETHPORT_F_RX_STARTED) && !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED)); return ready; } #define ethport_is_up ethport_can_be_up enum bna_ethport_event { ETHPORT_E_START = 1, ETHPORT_E_STOP = 2, ETHPORT_E_FAIL = 3, ETHPORT_E_UP = 4, ETHPORT_E_DOWN = 5, ETHPORT_E_FWRESP_UP_OK = 6, ETHPORT_E_FWRESP_DOWN = 7, ETHPORT_E_FWRESP_UP_FAIL = 8, }; enum bna_enet_event { ENET_E_START = 1, ENET_E_STOP = 2, ENET_E_FAIL = 3, ENET_E_PAUSE_CFG = 4, ENET_E_MTU_CFG = 5, ENET_E_FWRESP_PAUSE = 6, ENET_E_CHLD_STOPPED = 7, }; enum bna_ioceth_event { IOCETH_E_ENABLE = 1, IOCETH_E_DISABLE = 2, IOCETH_E_IOC_RESET = 3, IOCETH_E_IOC_FAILED = 4, IOCETH_E_IOC_READY = 5, IOCETH_E_ENET_ATTR_RESP = 6, IOCETH_E_ENET_STOPPED = 7, IOCETH_E_IOC_DISABLED = 8, }; #define bna_stats_copy(_name, _type) \ do { \ count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64); \ stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats; \ stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats; \ for (i = 0; i < count; i++) \ 
		stats_dst[i] = be64_to_cpu(stats_src[i]); \
} while (0)						\

/*
 * FW response handlers
 */

/* FW AEN: physical port got enabled; raise ETHPORT_E_UP if the port is
 * otherwise ready (ethport_can_be_up() checks the remaining conditions).
 */
static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}

/* FW AEN: physical port got disabled; push the FSM down only if it was up. */
static void
bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_up)
		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
}

/* FW response to a PORT_ADMIN request. The outstanding request is kept in
 * ethport->bfi_enet_cmd.admin_req; its "enable" field tells us whether this
 * response completes an admin-up or an admin-down command.
 */
static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			/* Admin-up failed: drop the enabled flag so the FSM
			 * does not retry until FW re-enables the port.
			 */
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		/* Admin-down completed: report link down to the driver. */
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}

/* FW response to a DIAG_LOOPBACK request; mirrors the admin response handler
 * but keyed off the outstanding loopback request.
 */
static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			/* Note: loopback failure clears ADMIN_UP, whereas
			 * admin failure clears PORT_ENABLED (see above).
			 */
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}

/* FW acknowledged a SET_PAUSE request; the enet FSM drives any follow-up. */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}

/* FW response to GET_ATTR: record the HW limits reported by firmware. */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}

/* FW response to STATS_GET: byte-swap the HW stats DMA area into the SW
 * stats structure. Fixed blocks are copied with bna_stats_copy(); the
 * per-function Rxf/Txf blocks are packed in the DMA area only for functions
 * present in the request masks, so they are scattered out while copying.
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	/* NOTE(review): rlb destination is filled from the "rad" source
	 * block -- looks asymmetric; verify against the HW stats layout.
	 */
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;

	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS,
&bna->stats);
}

/* FW AEN: physical link came up; notify the driver via the link callback. */
static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}

/* FW AEN: physical link went down; notify the driver. */
static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}

/* Error interrupt path: clear a halt condition if present, then hand the
 * error to the IOC error ISR.
 */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}

/* Top-level mailbox interrupt dispatcher: route error interrupts to the
 * error handler, mailbox interrupts to the IOC mailbox ISR.
 */
void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(bna, intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(bna, intr_status))
		bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
}

/* MSGQ response dispatcher: demultiplex FW responses/AENs by msg_id to the
 * owning Rx, Tx, ethport, enet or ioceth object. For Rx/Tx-scoped messages
 * the object is looked up by enet_id; a NULL result (object already gone)
 * silently drops the message.
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}

/**
 * ETHPORT
 */

/* Invoke-once helper: fire and clear the pending ethport stop callback. */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)

/* Invoke-once helper: fire and clear the pending admin-up callback. */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)

/* Build and post a PORT_ADMIN_UP (enable) request to firmware. */
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

/* Build and post a PORT_ADMIN_UP request with enable = DISABLED, i.e. an
 * admin-down. Same wire message as admin-up; only "enable" differs.
 */
static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

/* Build and post a DIAG_LOOPBACK enable request; the opmode is chosen from
 * the enet loopback type.
 * NOTE(review): INTERNAL loopback maps to OPMODE_EXT here (and everything
 * else to OPMODE_CBL) -- confirm this mapping against the BFI spec.
 */
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

/* Build and post a DIAG_LOOPBACK disable request. */
static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

/* Bring the port up via admin or loopback request, depending on enet type. */
static void
bna_bfi_ethport_up(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_up(ethport);
	else
		bna_bfi_ethport_lpbk_up(ethport);
}

/* Bring the port down via admin or loopback request, depending on enet type. */
static void
bna_bfi_ethport_down(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_down(ethport);
	else
		bna_bfi_ethport_lpbk_down(ethport);
}

/* Ethport state machine states */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);

static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

/* Down: port is administratively down; ETHPORT_E_UP kicks off an up request
 * to firmware and moves to up_resp_wait.
 */
static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

/* up_resp_wait: an up request is outstanding at firmware; the admin-up
 * callback is completed here with a status matching what happened.
 */
static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}

/* down_resp_wait: waiting for firmware to acknowledge the previous request
 * before the queued down request can go out (see entry note above).
 */
static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

/* Up: port is up; STOP and DOWN both issue a down request to firmware. */
static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

/* last_resp_wait: stop was requested while a FW response was outstanding;
 * absorb the final response(s) before settling in stopped.
 */
static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}

static void
bna_ethport_uninit(struct bna_ethport *ethport)
{
	ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	ethport->bna = NULL;
}

static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}

/* Stop-completion callback installed by bna_ethport_stop(): counts the
 * ethport down on the enet's child-stop wait counter.
 */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}

/* IOC failure path: report link down (if it was up) and fail the FSM. */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}

/* Should be called only when ethport is disabled */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;
	if (ethport->rx_started_count == 1) {
		/* First Rx came up: the port may now be eligible to go up. */
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}

void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;
	if (ethport->rx_started_count == 0) {
		/* Last Rx went away: take the port down if it was up. */
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}

/**
 * ENET
 */

/* Start all enet children (ethport, Tx mod, Rx mod) with the Tx/Rx type
 * derived from the enet type (regular vs. loopback).
 */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/* Stop all children, using the wait counter to learn when all three
 * (ethport, Tx mod, Rx mod) have completed; one wc_up per child.
 */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

/* Propagate IOC failure to all children. */
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)

/* Restart only the Rx module (used after an MTU-triggered Rx stop). */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/* Stop only the Rx module and wait for its completion. */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

/* Invoke-once helper: fire and clear the pending enet stop callback. */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Invoke-once helper: fire and clear the pending pause-config callback. */
#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

/* Invoke-once helper: fire and clear the pending MTU-config callback. */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);

/* Enet state machine states */
bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);

static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* pause_init_wait: the initial pause config is sent to FW on entry; start
 * of the children is deferred until FW acknowledges it.
 */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}

static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		/* Remember that a newer pause config superseded the one in
		 * flight; it is re-sent when the FW response arrives.
		 */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		/* MTU change requires an Rx restart: stop Rx, restart after
		 * the children report stopped (see cfg_wait handling).
		 */
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}

/* cfg_wait: a pause and/or MTU reconfiguration is in flight; queued changes
 * are flagged and replayed one at a time as completions arrive.
 */
static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}

/* cfg_stop_wait: stop was requested during reconfiguration; wait for the
 * in-flight operation to finish before stopping the children.
 */
static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}

static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Build and post a SET_PAUSE request from the current pause_config. */
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}

/* Wait-counter completion: all children stopped -> notify the enet FSM. */
static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}

static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->pause_cbfn = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}

static void
bna_enet_uninit(struct bna_enet *enet)
{
	enet->flags = 0;

	enet->bna = NULL;
}

/* Start the enet once IOC is ready; only if it is administratively enabled. */
static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;
	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}

/* Stop-completion callback installed by bna_enet_stop(). */
static void
bna_ioceth_cb_enet_stopped(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
}

static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}

static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}

void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}

/* Enable the enet; a no-op unless the FSM is currently stopped. */
void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}

/* Disable the enet. For BNA_SOFT_CLEANUP the callback is invoked at once
 * without touching the FSM; otherwise it fires when the stop completes.
 */
void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}

/* Apply a new pause configuration; cbfn fires once FW has taken it. */
void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}

/* Set a new MTU; cbfn fires once the resulting Rx restart is complete. */
void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}

void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}

/**
 * IOCETH
 */

/* Enable mailbox interrupts; the intr_status read clears any stale status
 * before the interrupt is unmasked.
 */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

/* Invoke-once helper: fire and clear the pending ioceth stop callback. */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Stats module lifecycle helpers: start/stop gate stats requests on IOC
 * readiness; fail additionally clears any in-flight busy flags.
 */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)

static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

/* IOCEth state machine states */
bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);

static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}

static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* enet_attr_wait: IOC is ready; query FW for the enet attributes. */
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}

static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Ready: attributes are known; start the enet and stats module and tell
 * the driver the ioceth is usable.
 */
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}

/* last_resp_wait: disable arrived while the attr query was outstanding;
 * absorb the response (or IOC failure) and then disable the IOC.
 */
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* enet_stop_wait: stop the stats module and the enet; the IOC itself is
 * disabled once the enet reports stopped.
 */
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}

static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}

/* Failed: IOC reported failure; recover on IOC_RESET, or disable on request.
 * A repeated IOC_FAILED in this state is ignored.
 */
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Build and post a GET_ATTR request to firmware. */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
BFI_ENET_H2I_GET_ATTR_REQ, 0, 0); attr_req->mh.num_entries = htons( bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req))); bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_attr_req), &attr_req->mh); bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd); } /* IOC callback functions */ static void bna_cb_ioceth_enable(void *arg, enum bfa_status error) { struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; if (error) bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED); else bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY); } static void bna_cb_ioceth_disable(void *arg) { struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED); } static void bna_cb_ioceth_hbfail(void *arg) { struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED); } static void bna_cb_ioceth_reset(void *arg) { struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET); } static struct bfa_ioc_cbfn bna_ioceth_cbfn = { bna_cb_ioceth_enable, bna_cb_ioceth_disable, bna_cb_ioceth_hbfail, bna_cb_ioceth_reset }; static void bna_attr_init(struct bna_ioceth *ioceth) { ioceth->attr.num_txq = BFI_ENET_DEF_TXQ; ioceth->attr.num_rxp = BFI_ENET_DEF_RXP; ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM; ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM; ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ; ioceth->attr.fw_query_complete = false; } static void bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna, struct bna_res_info *res_info) { u64 dma; u8 *kva; ioceth->bna = bna; /** * Attach IOC and claim: * 1. DMA memory for IOC attributes * 2. 
Kernel memory for FW trace */ bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn); bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH); BNA_GET_DMA_ADDR( &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma); kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva; bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma); kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva; bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva); /** * Attach common modules (Diag, SFP, CEE, Port) and claim respective * DMA memory. */ BNA_GET_DMA_ADDR( &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma); kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva; bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna); bfa_nw_cee_mem_claim(&bna->cee, kva, dma); kva += bfa_nw_cee_meminfo(); dma += bfa_nw_cee_meminfo(); bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna); bfa_nw_flash_memclaim(&bna->flash, kva, dma); kva += bfa_nw_flash_meminfo(); dma += bfa_nw_flash_meminfo(); bfa_msgq_attach(&bna->msgq, &ioceth->ioc); bfa_msgq_memclaim(&bna->msgq, kva, dma); bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna); kva += bfa_msgq_meminfo(); dma += bfa_msgq_meminfo(); ioceth->stop_cbfn = NULL; ioceth->stop_cbarg = NULL; bna_attr_init(ioceth); bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped); } static void bna_ioceth_uninit(struct bna_ioceth *ioceth) { bfa_nw_ioc_detach(&ioceth->ioc); ioceth->bna = NULL; } void bna_ioceth_enable(struct bna_ioceth *ioceth) { if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) { bnad_cb_ioceth_ready(ioceth->bna->bnad); return; } if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped) bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE); } void bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type) { if (type == BNA_SOFT_CLEANUP) { bnad_cb_ioceth_disabled(ioceth->bna->bnad); return; } ioceth->stop_cbfn = bnad_cb_ioceth_disabled; ioceth->stop_cbarg = ioceth->bna->bnad; bfa_fsm_send_event(ioceth, 
IOCETH_E_DISABLE); } static void bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna, struct bna_res_info *res_info) { int i; ucam_mod->ucmac = (struct bna_mac *) res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva; INIT_LIST_HEAD(&ucam_mod->free_q); for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) { bfa_q_qe_init(&ucam_mod->ucmac[i].qe); list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q); } ucam_mod->bna = bna; } static void bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod) { struct list_head *qe; int i = 0; list_for_each(qe, &ucam_mod->free_q) i++; ucam_mod->bna = NULL; } static void bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna, struct bna_res_info *res_info) { int i; mcam_mod->mcmac = (struct bna_mac *) res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva; INIT_LIST_HEAD(&mcam_mod->free_q); for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) { bfa_q_qe_init(&mcam_mod->mcmac[i].qe); list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q); } mcam_mod->mchandle = (struct bna_mcam_handle *) res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva; INIT_LIST_HEAD(&mcam_mod->free_handle_q); for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) { bfa_q_qe_init(&mcam_mod->mchandle[i].qe); list_add_tail(&mcam_mod->mchandle[i].qe, &mcam_mod->free_handle_q); } mcam_mod->bna = bna; } static void bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod) { struct list_head *qe; int i; i = 0; list_for_each(qe, &mcam_mod->free_q) i++; i = 0; list_for_each(qe, &mcam_mod->free_handle_q) i++; mcam_mod->bna = NULL; } static void bna_bfi_stats_get(struct bna *bna) { struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get; bna->stats_mod.stats_get_busy = true; bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET, BFI_ENET_H2I_STATS_GET_REQ, 0, 0); stats_req->mh.num_entries = htons( bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req))); stats_req->stats_mask = htons(BFI_ENET_STATS_ALL); stats_req->tx_enet_mask = 
htonl(bna->tx_mod.rid_mask); stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask); stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb; stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb; bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL, sizeof(struct bfi_enet_stats_req), &stats_req->mh); bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd); } void bna_res_req(struct bna_res_info *res_info) { /* DMA memory for COMMON_MODULE */ res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM; res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA; res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1; res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN( (bfa_nw_cee_meminfo() + bfa_nw_flash_meminfo() + bfa_msgq_meminfo()), PAGE_SIZE); /* DMA memory for retrieving IOC attributes */ res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM; res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA; res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1; res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len = ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE); /* Virtual memory for retreiving fw_trc */ res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM; res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA; res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1; res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN; /* DMA memory for retreiving stats */ res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM; res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA; res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1; res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len = ALIGN(sizeof(struct bfi_enet_stats), PAGE_SIZE); } void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info) { struct bna_attr *attr = &bna->ioceth.attr; /* Virtual memory for Tx objects - stored by Tx module */ res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM; 
res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type = BNA_MEM_T_KVA; res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1; res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len = attr->num_txq * sizeof(struct bna_tx); /* Virtual memory for TxQ - stored by Tx module */ res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM; res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type = BNA_MEM_T_KVA; res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1; res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len = attr->num_txq * sizeof(struct bna_txq); /* Virtual memory for Rx objects - stored by Rx module */ res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM; res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type = BNA_MEM_T_KVA; res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1; res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len = attr->num_rxp * sizeof(struct bna_rx); /* Virtual memory for RxPath - stored by Rx module */ res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM; res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type = BNA_MEM_T_KVA; res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1; res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len = attr->num_rxp * sizeof(struct bna_rxp); /* Virtual memory for RxQ - stored by Rx module */ res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM; res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type = BNA_MEM_T_KVA; res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1; res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len = (attr->num_rxp * 2) * sizeof(struct bna_rxq); /* Virtual memory for Unicast MAC address - stored by ucam module */ res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM; res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type = BNA_MEM_T_KVA; res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1; 
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len = attr->num_ucmac * sizeof(struct bna_mac); /* Virtual memory for Multicast MAC address - stored by mcam module */ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM; res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type = BNA_MEM_T_KVA; res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1; res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len = attr->num_mcmac * sizeof(struct bna_mac); /* Virtual memory for Multicast handle - stored by mcam module */ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM; res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type = BNA_MEM_T_KVA; res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1; res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len = attr->num_mcmac * sizeof(struct bna_mcam_handle); } void bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev, struct bna_res_info *res_info) { bna->bnad = bnad; bna->pcidev = *pcidev; bna->stats.hw_stats_kva = (struct bfi_enet_stats *) res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva; bna->stats.hw_stats_dma.msb = res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb; bna->stats.hw_stats_dma.lsb = res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb; bna_reg_addr_init(bna, &bna->pcidev); /* Also initializes diag, cee, sfp, phy_port, msgq */ bna_ioceth_init(&bna->ioceth, bna, res_info); bna_enet_init(&bna->enet, bna); bna_ethport_init(&bna->ethport, bna); } void bna_mod_init(struct bna *bna, struct bna_res_info *res_info) { bna_tx_mod_init(&bna->tx_mod, bna, res_info); bna_rx_mod_init(&bna->rx_mod, bna, res_info); bna_ucam_mod_init(&bna->ucam_mod, bna, res_info); bna_mcam_mod_init(&bna->mcam_mod, bna, res_info); bna->default_mode_rid = BFI_INVALID_RID; bna->promisc_rid = BFI_INVALID_RID; bna->mod_flags |= BNA_MOD_F_INIT_DONE; } void bna_uninit(struct bna *bna) { if (bna->mod_flags & BNA_MOD_F_INIT_DONE) { 
bna_mcam_mod_uninit(&bna->mcam_mod); bna_ucam_mod_uninit(&bna->ucam_mod); bna_rx_mod_uninit(&bna->rx_mod); bna_tx_mod_uninit(&bna->tx_mod); bna->mod_flags &= ~BNA_MOD_F_INIT_DONE; } bna_stats_mod_uninit(&bna->stats_mod); bna_ethport_uninit(&bna->ethport); bna_enet_uninit(&bna->enet); bna_ioceth_uninit(&bna->ioceth); bna->bnad = NULL; } int bna_num_txq_set(struct bna *bna, int num_txq) { if (bna->ioceth.attr.fw_query_complete && (num_txq <= bna->ioceth.attr.num_txq)) { bna->ioceth.attr.num_txq = num_txq; return BNA_CB_SUCCESS; } return BNA_CB_FAIL; } int bna_num_rxp_set(struct bna *bna, int num_rxp) { if (bna->ioceth.attr.fw_query_complete && (num_rxp <= bna->ioceth.attr.num_rxp)) { bna->ioceth.attr.num_rxp = num_rxp; return BNA_CB_SUCCESS; } return BNA_CB_FAIL; } struct bna_mac * bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod) { struct list_head *qe; if (list_empty(&ucam_mod->free_q)) return NULL; bfa_q_deq(&ucam_mod->free_q, &qe); return (struct bna_mac *)qe; } void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac) { list_add_tail(&mac->qe, &ucam_mod->free_q); } struct bna_mac * bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod) { struct list_head *qe; if (list_empty(&mcam_mod->free_q)) return NULL; bfa_q_deq(&mcam_mod->free_q, &qe); return (struct bna_mac *)qe; } void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac) { list_add_tail(&mac->qe, &mcam_mod->free_q); } struct bna_mcam_handle * bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod) { struct list_head *qe; if (list_empty(&mcam_mod->free_handle_q)) return NULL; bfa_q_deq(&mcam_mod->free_handle_q, &qe); return (struct bna_mcam_handle *)qe; } void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, struct bna_mcam_handle *handle) { list_add_tail(&handle->qe, &mcam_mod->free_handle_q); } void bna_hw_stats_get(struct bna *bna) { if (!bna->stats_mod.ioc_ready) { bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats); return; } if 
(bna->stats_mod.stats_get_busy) { bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats); return; } bna_bfi_stats_get(bna); }
gpl-2.0
draekko/android_kernel_lg_hammerhead-neobuddy89
drivers/mfd/wm831x-auxadc.c
5463
6809
/* * wm831x-auxadc.c -- AUXADC for Wolfson WM831x PMICs * * Copyright 2009-2011 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/mfd/core.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/pdata.h> #include <linux/mfd/wm831x/irq.h> #include <linux/mfd/wm831x/auxadc.h> #include <linux/mfd/wm831x/otp.h> #include <linux/mfd/wm831x/regulator.h> struct wm831x_auxadc_req { struct list_head list; enum wm831x_auxadc input; int val; struct completion done; }; static int wm831x_auxadc_read_irq(struct wm831x *wm831x, enum wm831x_auxadc input) { struct wm831x_auxadc_req *req; int ret; bool ena = false; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; init_completion(&req->done); req->input = input; req->val = -ETIMEDOUT; mutex_lock(&wm831x->auxadc_lock); /* Enqueue the request */ list_add(&req->list, &wm831x->auxadc_pending); ena = !wm831x->auxadc_active; if (ena) { ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, WM831X_AUX_ENA); if (ret != 0) { dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret); goto out; } } /* Enable the conversion if not already running */ if (!(wm831x->auxadc_active & (1 << input))) { ret = wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE, 1 << input, 1 << input); if (ret != 0) { dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret); goto out; } wm831x->auxadc_active |= 1 << input; } /* We convert at the fastest rate possible */ if (ena) { ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_CVT_ENA | WM831X_AUX_RATE_MASK, WM831X_AUX_CVT_ENA | 
WM831X_AUX_RATE_MASK); if (ret != 0) { dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret); goto out; } } mutex_unlock(&wm831x->auxadc_lock); /* Wait for an interrupt */ wait_for_completion_timeout(&req->done, msecs_to_jiffies(500)); mutex_lock(&wm831x->auxadc_lock); list_del(&req->list); ret = req->val; out: mutex_unlock(&wm831x->auxadc_lock); kfree(req); return ret; } static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data) { struct wm831x *wm831x = irq_data; struct wm831x_auxadc_req *req; int ret, input, val; ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA); if (ret < 0) { dev_err(wm831x->dev, "Failed to read AUXADC data: %d\n", ret); return IRQ_NONE; } input = ((ret & WM831X_AUX_DATA_SRC_MASK) >> WM831X_AUX_DATA_SRC_SHIFT) - 1; if (input == 14) input = WM831X_AUX_CAL; val = ret & WM831X_AUX_DATA_MASK; mutex_lock(&wm831x->auxadc_lock); /* Disable this conversion, we're about to complete all users */ wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE, 1 << input, 0); wm831x->auxadc_active &= ~(1 << input); /* Turn off the entire convertor if idle */ if (!wm831x->auxadc_active) wm831x_reg_write(wm831x, WM831X_AUXADC_CONTROL, 0); /* Wake up any threads waiting for this request */ list_for_each_entry(req, &wm831x->auxadc_pending, list) { if (req->input == input) { req->val = val; complete(&req->done); } } mutex_unlock(&wm831x->auxadc_lock); return IRQ_HANDLED; } static int wm831x_auxadc_read_polled(struct wm831x *wm831x, enum wm831x_auxadc input) { int ret, src, timeout; mutex_lock(&wm831x->auxadc_lock); ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, WM831X_AUX_ENA); if (ret < 0) { dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret); goto out; } /* We force a single source at present */ src = input; ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE, 1 << src); if (ret < 0) { dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret); goto out; } ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_CVT_ENA, 
WM831X_AUX_CVT_ENA); if (ret < 0) { dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret); goto disable; } /* If we're not using interrupts then poll the * interrupt status register */ timeout = 5; while (timeout) { msleep(1); ret = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1); if (ret < 0) { dev_err(wm831x->dev, "ISR 1 read failed: %d\n", ret); goto disable; } /* Did it complete? */ if (ret & WM831X_AUXADC_DATA_EINT) { wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1, WM831X_AUXADC_DATA_EINT); break; } else { dev_err(wm831x->dev, "AUXADC conversion timeout\n"); ret = -EBUSY; goto disable; } } ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA); if (ret < 0) { dev_err(wm831x->dev, "Failed to read AUXADC data: %d\n", ret); goto disable; } src = ((ret & WM831X_AUX_DATA_SRC_MASK) >> WM831X_AUX_DATA_SRC_SHIFT) - 1; if (src == 14) src = WM831X_AUX_CAL; if (src != input) { dev_err(wm831x->dev, "Data from source %d not %d\n", src, input); ret = -EINVAL; } else { ret &= WM831X_AUX_DATA_MASK; } disable: wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0); out: mutex_unlock(&wm831x->auxadc_lock); return ret; } /** * wm831x_auxadc_read: Read a value from the WM831x AUXADC * * @wm831x: Device to read from. * @input: AUXADC input to read. */ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) { return wm831x->auxadc_read(wm831x, input); } EXPORT_SYMBOL_GPL(wm831x_auxadc_read); /** * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC * * @wm831x: Device to read from. * @input: AUXADC input to read. 
*/ int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input) { int ret; ret = wm831x_auxadc_read(wm831x, input); if (ret < 0) return ret; ret *= 1465; return ret; } EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv); void wm831x_auxadc_init(struct wm831x *wm831x) { int ret; mutex_init(&wm831x->auxadc_lock); INIT_LIST_HEAD(&wm831x->auxadc_pending); if (wm831x->irq && wm831x->irq_base) { wm831x->auxadc_read = wm831x_auxadc_read_irq; ret = request_threaded_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, NULL, wm831x_auxadc_irq, 0, "auxadc", wm831x); if (ret < 0) { dev_err(wm831x->dev, "AUXADC IRQ request failed: %d\n", ret); wm831x->auxadc_read = NULL; } } if (!wm831x->auxadc_read) wm831x->auxadc_read = wm831x_auxadc_read_polled; }
gpl-2.0
Hogman500/ouya_1_1-kernel
drivers/mfd/wm831x-auxadc.c
5463
6809
/* * wm831x-auxadc.c -- AUXADC for Wolfson WM831x PMICs * * Copyright 2009-2011 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/mfd/core.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/pdata.h> #include <linux/mfd/wm831x/irq.h> #include <linux/mfd/wm831x/auxadc.h> #include <linux/mfd/wm831x/otp.h> #include <linux/mfd/wm831x/regulator.h> struct wm831x_auxadc_req { struct list_head list; enum wm831x_auxadc input; int val; struct completion done; }; static int wm831x_auxadc_read_irq(struct wm831x *wm831x, enum wm831x_auxadc input) { struct wm831x_auxadc_req *req; int ret; bool ena = false; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; init_completion(&req->done); req->input = input; req->val = -ETIMEDOUT; mutex_lock(&wm831x->auxadc_lock); /* Enqueue the request */ list_add(&req->list, &wm831x->auxadc_pending); ena = !wm831x->auxadc_active; if (ena) { ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, WM831X_AUX_ENA); if (ret != 0) { dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret); goto out; } } /* Enable the conversion if not already running */ if (!(wm831x->auxadc_active & (1 << input))) { ret = wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE, 1 << input, 1 << input); if (ret != 0) { dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret); goto out; } wm831x->auxadc_active |= 1 << input; } /* We convert at the fastest rate possible */ if (ena) { ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_CVT_ENA | WM831X_AUX_RATE_MASK, WM831X_AUX_CVT_ENA | 
WM831X_AUX_RATE_MASK); if (ret != 0) { dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret); goto out; } } mutex_unlock(&wm831x->auxadc_lock); /* Wait for an interrupt */ wait_for_completion_timeout(&req->done, msecs_to_jiffies(500)); mutex_lock(&wm831x->auxadc_lock); list_del(&req->list); ret = req->val; out: mutex_unlock(&wm831x->auxadc_lock); kfree(req); return ret; } static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data) { struct wm831x *wm831x = irq_data; struct wm831x_auxadc_req *req; int ret, input, val; ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA); if (ret < 0) { dev_err(wm831x->dev, "Failed to read AUXADC data: %d\n", ret); return IRQ_NONE; } input = ((ret & WM831X_AUX_DATA_SRC_MASK) >> WM831X_AUX_DATA_SRC_SHIFT) - 1; if (input == 14) input = WM831X_AUX_CAL; val = ret & WM831X_AUX_DATA_MASK; mutex_lock(&wm831x->auxadc_lock); /* Disable this conversion, we're about to complete all users */ wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE, 1 << input, 0); wm831x->auxadc_active &= ~(1 << input); /* Turn off the entire convertor if idle */ if (!wm831x->auxadc_active) wm831x_reg_write(wm831x, WM831X_AUXADC_CONTROL, 0); /* Wake up any threads waiting for this request */ list_for_each_entry(req, &wm831x->auxadc_pending, list) { if (req->input == input) { req->val = val; complete(&req->done); } } mutex_unlock(&wm831x->auxadc_lock); return IRQ_HANDLED; } static int wm831x_auxadc_read_polled(struct wm831x *wm831x, enum wm831x_auxadc input) { int ret, src, timeout; mutex_lock(&wm831x->auxadc_lock); ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, WM831X_AUX_ENA); if (ret < 0) { dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret); goto out; } /* We force a single source at present */ src = input; ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE, 1 << src); if (ret < 0) { dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret); goto out; } ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_CVT_ENA, 
WM831X_AUX_CVT_ENA); if (ret < 0) { dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret); goto disable; } /* If we're not using interrupts then poll the * interrupt status register */ timeout = 5; while (timeout) { msleep(1); ret = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1); if (ret < 0) { dev_err(wm831x->dev, "ISR 1 read failed: %d\n", ret); goto disable; } /* Did it complete? */ if (ret & WM831X_AUXADC_DATA_EINT) { wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1, WM831X_AUXADC_DATA_EINT); break; } else { dev_err(wm831x->dev, "AUXADC conversion timeout\n"); ret = -EBUSY; goto disable; } } ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA); if (ret < 0) { dev_err(wm831x->dev, "Failed to read AUXADC data: %d\n", ret); goto disable; } src = ((ret & WM831X_AUX_DATA_SRC_MASK) >> WM831X_AUX_DATA_SRC_SHIFT) - 1; if (src == 14) src = WM831X_AUX_CAL; if (src != input) { dev_err(wm831x->dev, "Data from source %d not %d\n", src, input); ret = -EINVAL; } else { ret &= WM831X_AUX_DATA_MASK; } disable: wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0); out: mutex_unlock(&wm831x->auxadc_lock); return ret; } /** * wm831x_auxadc_read: Read a value from the WM831x AUXADC * * @wm831x: Device to read from. * @input: AUXADC input to read. */ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) { return wm831x->auxadc_read(wm831x, input); } EXPORT_SYMBOL_GPL(wm831x_auxadc_read); /** * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC * * @wm831x: Device to read from. * @input: AUXADC input to read. 
*/ int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input) { int ret; ret = wm831x_auxadc_read(wm831x, input); if (ret < 0) return ret; ret *= 1465; return ret; } EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv); void wm831x_auxadc_init(struct wm831x *wm831x) { int ret; mutex_init(&wm831x->auxadc_lock); INIT_LIST_HEAD(&wm831x->auxadc_pending); if (wm831x->irq && wm831x->irq_base) { wm831x->auxadc_read = wm831x_auxadc_read_irq; ret = request_threaded_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, NULL, wm831x_auxadc_irq, 0, "auxadc", wm831x); if (ret < 0) { dev_err(wm831x->dev, "AUXADC IRQ request failed: %d\n", ret); wm831x->auxadc_read = NULL; } } if (!wm831x->auxadc_read) wm831x->auxadc_read = wm831x_auxadc_read_polled; }
gpl-2.0
DirtyUnicorns/android_kernel_htc_msm8660-caf
arch/ia64/xen/xencomm.c
9303
2788
/*
 * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/mm.h>

/* Offset between the kernel's virtual and physical load addresses. */
static unsigned long kernel_virtual_offset;
static int is_xencomm_initialized;

/* for xen early printk. It uses console io hypercall which uses xencomm.
 * However early printk may use it before xencomm initialization.
 */
int
xencomm_is_initialized(void)
{
	return is_xencomm_initialized;
}

void
xencomm_initialize(void)
{
	kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START);
	is_xencomm_initialized = 1;
}

/*
 * Translate virtual address to physical address.
 *
 * Returns the physical address for @vaddr, 0UL for a NULL input, or
 * ~0UL when no translation exists.
 *
 * Fix vs. original: for region-7 addresses at or above
 * 1ULL << IA64_MAX_PHYS_BITS (the percpu-variable case) the original
 * called ia64_tpa(vaddr) and discarded the result -- a statement with no
 * effect -- then fell through to __pa(), which is not valid for such
 * addresses.  The translated address is now actually returned.
 */
unsigned long
xencomm_vtop(unsigned long vaddr)
{
	struct page *page;
	struct vm_area_struct *vma;

	if (vaddr == 0)
		return 0UL;

	if (REGION_NUMBER(vaddr) == 5) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *ptep;

		/* On ia64, TASK_SIZE refers to current.  It is not
		   initialized during boot.  Furthermore the kernel is
		   relocatable and __pa() doesn't work on addresses. */
		if (vaddr >= KERNEL_START
		    && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
			return vaddr - kernel_virtual_offset;

		/* In kernel area -- virtually mapped.  Walk the kernel
		 * page tables by hand. */
		pgd = pgd_offset_k(vaddr);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return ~0UL;

		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud) || pud_bad(*pud))
			return ~0UL;

		pmd = pmd_offset(pud, vaddr);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return ~0UL;

		ptep = pte_offset_kernel(pmd, vaddr);
		if (!ptep)
			return ~0UL;

		return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
	}

	if (vaddr > TASK_SIZE) {
		/* percpu variables: identity mapping does not apply, use
		 * the hardware translation. */
		if (REGION_NUMBER(vaddr) == 7 &&
		    REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
			return ia64_tpa(vaddr);

		/* kernel address */
		return __pa(vaddr);
	}

	/* XXX double-check (lack of) locking */
	vma = find_extend_vma(current->mm, vaddr);
	if (!vma)
		return ~0UL;

	/* We assume the page is modified. */
	page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
	if (!page)
		return ~0UL;

	return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
}
gpl-2.0
Inventor1938/android_kernel_samsung_klte
sound/soc/pxa/em-x270.c
9559
2254
/*
 * SoC audio driver for EM-X270, eXeda and CM-X300
 *
 * Copyright 2007, 2009 CompuLab, Ltd.
 *
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Copied from tosa.c:
 * Copyright 2005 Wolfson Microelectronics PLC.
 * Copyright 2005 Openedhand Ltd.
 *
 * Authors: Liam Girdwood <lrg@slimlogic.co.uk>
 *          Richard Purdie <richard@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>

#include <asm/mach-types.h>
#include <mach/audio.h>

#include "../codecs/wm9712.h"
#include "pxa2xx-ac97.h"

/* Two AC97 links to the WM9712 codec: the HiFi stream and the aux DAC. */
static struct snd_soc_dai_link em_x270_dai[] = {
	{
		.name = "AC97",
		.stream_name = "AC97 HiFi",
		.cpu_dai_name = "pxa2xx-ac97",
		.codec_dai_name = "wm9712-hifi",
		.platform_name = "pxa-pcm-audio",
		.codec_name = "wm9712-codec",
	},
	{
		.name = "AC97 Aux",
		.stream_name = "AC97 Aux",
		.cpu_dai_name = "pxa2xx-ac97-aux",
		.codec_dai_name = "wm9712-aux",
		.platform_name = "pxa-pcm-audio",
		.codec_name = "wm9712-codec",
	},
};

/* Machine driver card description. */
static struct snd_soc_card em_x270 = {
	.name = "EM-X270",
	.owner = THIS_MODULE,
	.dai_link = em_x270_dai,
	.num_links = ARRAY_SIZE(em_x270_dai),
};

static struct platform_device *em_x270_snd_device;

/*
 * Register the "soc-audio" platform device for the supported boards.
 * Returns 0 on success or a negative errno.
 */
static int __init em_x270_init(void)
{
	int ret;

	/* Bail out on any board this glue does not cover */
	if (!(machine_is_em_x270() || machine_is_exeda()
	      || machine_is_cm_x300()))
		return -ENODEV;

	em_x270_snd_device = platform_device_alloc("soc-audio", -1);
	if (!em_x270_snd_device)
		return -ENOMEM;

	platform_set_drvdata(em_x270_snd_device, &em_x270);
	ret = platform_device_add(em_x270_snd_device);

	/* platform_device_put() is the correct undo for a device that
	 * was allocated but failed to register */
	if (ret)
		platform_device_put(em_x270_snd_device);

	return ret;
}

static void __exit em_x270_exit(void)
{
	platform_device_unregister(em_x270_snd_device);
}

module_init(em_x270_init);
module_exit(em_x270_exit);

/* Module information */
MODULE_AUTHOR("Mike Rapoport");
MODULE_DESCRIPTION("ALSA SoC EM-X270, eXeda and CM-X300");
MODULE_LICENSE("GPL");
gpl-2.0
kirananto/RaZorLettuce
sound/core/seq/oss/seq_oss_ioctl.c
12887
5623
/*
 * OSS compatible sequencer driver
 *
 * OSS compatible i/o control
 *
 * Copyright (C) 1998,99 Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include "seq_oss_device.h"
#include "seq_oss_readq.h"
#include "seq_oss_writeq.h"
#include "seq_oss_timer.h"
#include "seq_oss_synth.h"
#include "seq_oss_midi.h"
#include "seq_oss_event.h"

/*
 * Copy-in/out wrapper for SNDCTL_SYNTH_INFO: the device number comes in
 * inside the struct and the filled struct is written back to userspace.
 * Returns 0, -EFAULT on a bad user pointer, or -EINVAL on a bad device.
 */
static int
snd_seq_oss_synth_info_user(struct seq_oss_devinfo *dp, void __user *arg)
{
	struct synth_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	if (snd_seq_oss_synth_make_info(dp, info.device, &info) < 0)
		return -EINVAL;
	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* Same copy-in/out pattern for SNDCTL_MIDI_INFO. */
static int
snd_seq_oss_midi_info_user(struct seq_oss_devinfo *dp, void __user *arg)
{
	struct midi_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	if (snd_seq_oss_midi_make_info(dp, info.device, &info) < 0)
		return -EINVAL;
	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * SNDCTL_SEQ_OUTOFBAND: copy an 8-byte OSS event record from userspace,
 * convert it to an ALSA sequencer event and dispatch it immediately
 * (tick 0, no queueing delay).  Conversion failures are silently dropped.
 */
static int
snd_seq_oss_oob_user(struct seq_oss_devinfo *dp, void __user *arg)
{
	unsigned char ev[8];
	struct snd_seq_event tmpev;

	if (copy_from_user(ev, arg, 8))
		return -EFAULT;
	memset(&tmpev, 0, sizeof(tmpev));
	snd_seq_oss_fill_addr(dp, &tmpev, dp->addr.port, dp->addr.client);
	tmpev.time.tick = 0;
	if (! snd_seq_oss_process_event(dp, (union evrec *)ev, &tmpev)) {
		snd_seq_oss_dispatch(dp, &tmpev, 0, 0);
	}
	return 0;
}

/*
 * Main ioctl dispatcher for the OSS sequencer emulation.
 * @carg is the raw userspace argument; most cases treat it as an
 * int pointer (p), the info/OOB cases pass it through as a blob.
 */
int
snd_seq_oss_ioctl(struct seq_oss_devinfo *dp, unsigned int cmd, unsigned long carg)
{
	int dev, val;
	void __user *arg = (void __user *)carg;
	int __user *p = arg;

	switch (cmd) {

	/* All timer-related commands are delegated wholesale */
	case SNDCTL_TMR_TIMEBASE:
	case SNDCTL_TMR_TEMPO:
	case SNDCTL_TMR_START:
	case SNDCTL_TMR_STOP:
	case SNDCTL_TMR_CONTINUE:
	case SNDCTL_TMR_METRONOME:
	case SNDCTL_TMR_SOURCE:
	case SNDCTL_TMR_SELECT:
	case SNDCTL_SEQ_CTRLRATE:
		return snd_seq_oss_timer_ioctl(dp->timer, cmd, arg);

	case SNDCTL_SEQ_PANIC:
		debug_printk(("panic\n"));
		/* NOTE(review): resets then reports -EINVAL; matches the
		 * existing OSS-emulation behavior, kept as-is */
		snd_seq_oss_reset(dp);
		return -EINVAL;

	case SNDCTL_SEQ_SYNC:
		debug_printk(("sync\n"));
		if (! is_write_mode(dp->file_mode) || dp->writeq == NULL)
			return 0;
		/* Drain the write queue; the sync call may sleep */
		while (snd_seq_oss_writeq_sync(dp->writeq))
			;
		if (signal_pending(current))
			return -ERESTARTSYS;
		return 0;

	case SNDCTL_SEQ_RESET:
		debug_printk(("reset\n"));
		snd_seq_oss_reset(dp);
		return 0;

	case SNDCTL_SEQ_TESTMIDI:
		debug_printk(("test midi\n"));
		if (get_user(dev, p))
			return -EFAULT;
		return snd_seq_oss_midi_open(dp, dev, dp->file_mode);

	case SNDCTL_SEQ_GETINCOUNT:
		debug_printk(("get in count\n"));
		if (dp->readq == NULL || ! is_read_mode(dp->file_mode))
			return 0;
		return put_user(dp->readq->qlen, p) ? -EFAULT : 0;

	case SNDCTL_SEQ_GETOUTCOUNT:
		debug_printk(("get out count\n"));
		if (! is_write_mode(dp->file_mode) || dp->writeq == NULL)
			return 0;
		return put_user(snd_seq_oss_writeq_get_free_size(dp->writeq), p) ? -EFAULT : 0;

	case SNDCTL_SEQ_GETTIME:
		debug_printk(("get time\n"));
		return put_user(snd_seq_oss_timer_cur_tick(dp->timer), p) ? -EFAULT : 0;

	case SNDCTL_SEQ_RESETSAMPLES:
		debug_printk(("reset samples\n"));
		if (get_user(dev, p))
			return -EFAULT;
		return snd_seq_oss_synth_ioctl(dp, dev, cmd, carg);

	case SNDCTL_SEQ_NRSYNTHS:
		debug_printk(("nr synths\n"));
		return put_user(dp->max_synthdev, p) ? -EFAULT : 0;

	case SNDCTL_SEQ_NRMIDIS:
		debug_printk(("nr midis\n"));
		return put_user(dp->max_mididev, p) ? -EFAULT : 0;

	case SNDCTL_SYNTH_MEMAVL:
		debug_printk(("mem avail\n"));
		if (get_user(dev, p))
			return -EFAULT;
		val = snd_seq_oss_synth_ioctl(dp, dev, cmd, carg);
		return put_user(val, p) ? -EFAULT : 0;

	case SNDCTL_FM_4OP_ENABLE:
		debug_printk(("4op\n"));
		if (get_user(dev, p))
			return -EFAULT;
		/* Result intentionally ignored; OSS returned 0 here too */
		snd_seq_oss_synth_ioctl(dp, dev, cmd, carg);
		return 0;

	case SNDCTL_SYNTH_INFO:
	case SNDCTL_SYNTH_ID:
		debug_printk(("synth info\n"));
		return snd_seq_oss_synth_info_user(dp, arg);

	case SNDCTL_SEQ_OUTOFBAND:
		debug_printk(("out of band\n"));
		return snd_seq_oss_oob_user(dp, arg);

	case SNDCTL_MIDI_INFO:
		debug_printk(("midi info\n"));
		return snd_seq_oss_midi_info_user(dp, arg);

	case SNDCTL_SEQ_THRESHOLD:
		debug_printk(("threshold\n"));
		if (! is_write_mode(dp->file_mode))
			return 0;
		if (get_user(val, p))
			return -EFAULT;
		/* Clamp to [1, maxlen-1] */
		if (val < 1)
			val = 1;
		if (val >= dp->writeq->maxlen)
			val = dp->writeq->maxlen - 1;
		snd_seq_oss_writeq_set_output(dp->writeq, val);
		return 0;

	case SNDCTL_MIDI_PRETIME:
		debug_printk(("pretime\n"));
		if (dp->readq == NULL || !is_read_mode(dp->file_mode))
			return 0;
		if (get_user(val, p))
			return -EFAULT;
		/* Value is in 1/10s units; <=0 means wait forever */
		if (val <= 0)
			val = -1;
		else
			val = (HZ * val) / 10;
		dp->readq->pre_event_timeout = val;
		return put_user(val, p) ? -EFAULT : 0;

	default:
		debug_printk(("others\n"));
		/* Unknown commands are forwarded to synth device 0 */
		if (! is_write_mode(dp->file_mode))
			return -EIO;
		return snd_seq_oss_synth_ioctl(dp, 0, cmd, carg);
	}
	return 0;
}
gpl-2.0
Sajid3/linux
drivers/video/fbdev/i810/i810_dvt.c
14679
11794
/*-*- linux-c -*- * linux/drivers/video/i810_dvt.c -- Intel 810 Discrete Video Timings (Intel) * * Copyright (C) 2001 Antonino Daplas<adaplas@pol.net> * All Rights Reserved * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/kernel.h> #include "i810_regs.h" #include "i810.h" struct mode_registers std_modes[] = { /* 640x480 @ 60Hz */ { 25000, 0x0013, 0x0003, 0x40, 0x5F, 0x4F, 0x50, 0x82, 0x51, 0x9D, 0x0B, 0x10, 0x40, 0xE9, 0x0B, 0xDF, 0x50, 0xE7, 0x04, 0x02, 0x01, 0x01, 0x01, 0x00, 0x01, 0x22002000, 0x22004000, 0x22006000, 0x22002000, 0x22004000, 0x22006000, 0xC0 }, /* 640x480 @ 70Hz */ { 28000, 0x0053, 0x0010, 0x40, 0x61, 0x4F, 0x4F, 0x85, 0x52, 0x9A, 0xF2, 0x10, 0x40, 0xE0, 0x03, 0xDF, 0x50, 0xDF, 0xF3, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x22002000, 0x22004000, 0x22005000, 0x22002000, 0x22004000, 0x22005000, 0xC0 }, /* 640x480 @ 72Hz */ { 31000, 0x0013, 0x0002, 0x40, 0x63, 0x4F, 0x4F, 0x87, 0x52, 0x97, 0x06, 0x0F, 0x40, 0xE8, 0x0B, 0xDF, 0x50, 0xDF, 0x07, 0x02, 0x01, 0x01, 0x01, 0x00, 0x01, 0x22003000, 0x22005000, 0x22007000, 0x22003000, 0x22005000, 0x22007000, 0xC0 }, /* 640x480 @ 75Hz */ { 31000, 0x0013, 0x0002, 0x40, 0x64, 0x4F, 0x4F, 0x88, 0x51, 0x99, 0xF2, 0x10, 0x40, 0xE0, 0x03, 0xDF, 0x50, 0xDF, 0xF3, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x22003000, 0x22005000, 0x22007000, 0x22003000, 0x22005000, 0x22007000, 0xC0 }, /* 640x480 @ 85Hz */ { 36000, 0x0010, 0x0001, 0x40, 0x63, 0x4F, 0x4F, 0x87, 0x56, 0x9D, 0xFB, 0x10, 0x40, 0xE0, 0x03, 0xDF, 0x50, 0xDF, 0xFC, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x22003000, 0x22005000, 0x22107000, 0x22003000, 0x22005000, 0x22107000, 0xC0 }, /* 800x600 @ 56Hz */ { 36000, 0x0010, 0x0001, 0x40, 0x7B, 0x63, 0x63, 0x9F, 0x66, 0x8F, 0x6F, 0x10, 0x40, 0x58, 0x0A, 0x57, 0xC8, 0x57, 0x70, 0x02, 0x02, 0x02, 0x02, 0x00, 0x01, 0x22003000, 0x22005000, 0x22107000, 0x22003000, 0x22005000, 0x22107000, 0x00 }, 
/* 800x600 @ 60Hz */ { 40000, 0x0008, 0x0001, 0x30, 0x7F, 0x63, 0x63, 0x83, 0x68, 0x18, 0x72, 0x10, 0x40, 0x58, 0x0C, 0x57, 0xC8, 0x57, 0x73, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00, 0x22003000, 0x22006000, 0x22108000, 0x22003000, 0x22006000, 0x22108000, 0x00 }, /* 800x600 @ 70Hz */ { 45000, 0x0054, 0x0015, 0x30, 0x7D, 0x63, 0x63, 0x81, 0x68, 0x12, 0x6f, 0x10, 0x40, 0x58, 0x0b, 0x57, 0x64, 0x57, 0x70, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22007000, 0x2210A000, 0x22004000, 0x22007000, 0x2210A000, 0x00 }, /* 800x600 @ 72Hz */ { 50000, 0x0017, 0x0004, 0x30, 0x7D, 0x63, 0x63, 0x81, 0x6A, 0x19, 0x98, 0x10, 0x40, 0x7C, 0x02, 0x57, 0xC8, 0x57, 0x99, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22007000, 0x2210A000, 0x22004000, 0x22007000, 0x2210A000, 0x00 }, /* 800x600 @ 75Hz */ { 49000, 0x001F, 0x0006, 0x30, 0x7F, 0x63, 0x63, 0x83, 0x65, 0x0F, 0x6F, 0x10, 0x40, 0x58, 0x0B, 0x57, 0xC8, 0x57, 0x70, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22007000, 0x2210B000, 0x22004000, 0x22007000, 0x2210B000, 0x00 }, /* 800x600 @ 85Hz */ { 56000, 0x0049, 0x000E, 0x30, 0x7E, 0x63, 0x63, 0x82, 0x67, 0x0F, 0x75, 0x10, 0x40, 0x58, 0x0B, 0x57, 0xC8, 0x57, 0x76, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22108000, 0x2210b000, 0x22004000, 0x22108000, 0x2210b000, 0x00 }, /* 1024x768 @ 60Hz */ { 65000, 0x003F, 0x000A, 0x30, 0xA3, 0x7F, 0x7F, 0x87, 0x83, 0x94, 0x24, 0x10, 0x40, 0x02, 0x08, 0xFF, 0x80, 0xFF, 0x25, 0x03, 0x02, 0x03, 0x02, 0x00, 0x00, 0x22005000, 0x22109000, 0x2220D000, 0x22005000, 0x22109000, 0x2220D000, 0xC0 }, /* 1024x768 @ 70Hz */ { 75000, 0x0017, 0x0002, 0x30, 0xA1, 0x7F, 0x7F, 0x85, 0x82, 0x93, 0x24, 0x10, 0x40, 0x02, 0x08, 0xFF, 0x80, 0xFF, 0x25, 0x03, 0x02, 0x03, 0x02, 0x00, 0x00, 0x22005000, 0x2210A000, 0x2220F000, 0x22005000, 0x2210A000, 0x2220F000, 0xC0 }, /* 1024x768 @ 75Hz */ { 78000, 0x0050, 0x0017, 0x20, 0x9F, 0x7F, 0x7F, 0x83, 0x81, 0x8D, 0x1E, 0x10, 0x40, 0x00, 0x03, 0xFF, 0x80, 0xFF, 0x1F, 0x03, 0x02, 0x03, 0x02, 0x00, 0x00, 0x22006000, 
0x2210B000, 0x22210000, 0x22006000, 0x2210B000, 0x22210000, 0x00 }, /* 1024x768 @ 85Hz */ { 94000, 0x003D, 0x000E, 0x20, 0xA7, 0x7F, 0x7F, 0x8B, 0x85, 0x91, 0x26, 0x10, 0x40, 0x00, 0x03, 0xFF, 0x80, 0xFF, 0x27, 0x03, 0x02, 0x03, 0x02, 0x00, 0x00, 0x22007000, 0x2220E000, 0x22212000, 0x22007000, 0x2220E000, 0x22212000, 0x00 }, /* 1152x864 @ 60Hz */ { 80000, 0x0008, 0x0001, 0x20, 0xB3, 0x8F, 0x8F, 0x97, 0x93, 0x9f, 0x87, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5f, 0x88, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00, 0x2220C000, 0x22210000, 0x22415000, 0x2220C000, 0x22210000, 0x22415000, 0x00 }, /* 1152x864 @ 70Hz */ { 96000, 0x000a, 0x0001, 0x20, 0xbb, 0x8F, 0x8F, 0x9f, 0x98, 0x87, 0x82, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x83, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00, 0x22107000, 0x22210000, 0x22415000, 0x22107000, 0x22210000, 0x22415000, 0x00 }, /* 1152x864 @ 72Hz */ { 99000, 0x001f, 0x0006, 0x20, 0xbb, 0x8F, 0x8F, 0x9f, 0x98, 0x87, 0x83, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x84, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00, 0x22107000, 0x22210000, 0x22415000, 0x22107000, 0x22210000, 0x22415000, 0x00 }, /* 1152x864 @ 75Hz */ { 108000, 0x0010, 0x0002, 0x20, 0xC3, 0x8F, 0x8F, 0x87, 0x97, 0x07, 0x82, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x83, 0x03, 0x03, 0x03, 0x03, 0x00, 0x01, 0x22107000, 0x22210000, 0x22415000, 0x22107000, 0x22210000, 0x22415000, 0x00 }, /* 1152x864 @ 85Hz */ { 121000, 0x006D, 0x0014, 0x20, 0xc0, 0x8F, 0x8F, 0x84, 0x97, 0x07, 0x93, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x94, 0x03, 0x03, 0x03, 0x03, 0x00, 0x01, 0x2220C000, 0x22210000, 0x22415000, 0x2220C000, 0x22210000, 0x22415000, 0x0 }, /* 1280x960 @ 60Hz */ { 108000, 0x0010, 0x0002, 0x20, 0xDC, 0x9F, 0x9F, 0x80, 0xAB, 0x99, 0xE6, 0x10, 0x40, 0xC0, 0x03, 0xBF, 0xA0, 0xBF, 0xE7, 0x03, 0x03, 0x03, 0x03, 0x00, 0x01, 0x2210A000, 0x22210000, 0x22415000, 0x2210A000, 0x22210000, 0x22415000, 0x00 }, /* 1280x960 @ 75Hz */ { 129000, 0x0029, 0x0006, 0x20, 0xD3, 0x9F, 0x9F, 0x97, 0xaa, 0x1b, 0xE8, 0x10, 0x40, 0xC0, 
0x03, 0xBF, 0xA0, 0xBF, 0xE9, 0x03, 0x03, 0x03, 0x03, 0x00, 0x01, 0x2210A000, 0x22210000, 0x2241B000, 0x2210A000, 0x22210000, 0x2241B000, 0x00 }, /* 1280x960 @ 85Hz */ { 148000, 0x0042, 0x0009, 0x20, 0xD3, 0x9F, 0x9F, 0x97, 0xA7, 0x1B, 0xF1, 0x10, 0x40, 0xC0, 0x03, 0xBF, 0xA0, 0xBF, 0xF2, 0x03, 0x03, 0x03, 0x03, 0x00, 0x01, 0x2210A000, 0x22220000, 0x2241D000, 0x2210A000, 0x22220000, 0x2241D000, 0x00 }, /* 1600x1200 @ 60Hz */ { 162000, 0x0019, 0x0006, 0x10, 0x09, 0xC7, 0xC7, 0x8D, 0xcf, 0x07, 0xE0, 0x10, 0x40, 0xB0, 0x03, 0xAF, 0xC8, 0xAF, 0xE1, 0x04, 0x04, 0x04, 0x04, 0x01, 0x00, 0x2210b000, 0x22416000, 0x44419000, 0x2210b000, 0x22416000, 0x44419000, 0x00 }, /* 1600x1200 @ 65 Hz */ { 175000, 0x005d, 0x0018, 0x10, 0x09, 0xC7, 0xC7, 0x8D, 0xcf, 0x07, 0xE0, 0x10, 0x40, 0xB0, 0x03, 0xAF, 0xC8, 0xAF, 0xE1, 0x04, 0x04, 0x04, 0x04, 0x01, 0x00, 0x2210c000, 0x22416000, 0x44419000, 0x2210c000, 0x22416000, 0x44419000, 0x00 }, /* 1600x1200 @ 70 Hz */ { 189000, 0x003D, 0x000e, 0x10, 0x09, 0xC7, 0xC7, 0x8d, 0xcf, 0x07, 0xE0, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xE1, 0x04, 0x04, 0x04, 0x04, 0x01, 0x00, 0x2220e000, 0x22416000, 0x44419000, 0x2220e000, 0x22416000, 0x44419000, 0x00 }, /* 1600x1200 @ 72 Hz */ { 195000, 0x003f, 0x000e, 0x10, 0x0b, 0xC7, 0xC7, 0x8f, 0xd5, 0x0b, 0xE1, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xe2, 0x04, 0x04, 0x04, 0x04, 0x01, 0x00, 0x2220e000, 0x22416000, 0x44419000, 0x2220e000, 0x22416000, 0x44419000, 0x00 }, /* 1600x1200 @ 75 Hz */ { 202000, 0x0024, 0x0007, 0x10, 0x09, 0xC7, 0xC7, 0x8d, 0xcf, 0x07, 0xE0, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xE1, 0x04, 0x04, 0x04, 0x04, 0x01, 0x00, 0x2220e000, 0x22416000, 0x44419000, 0x2220e000, 0x22416000, 0x44419000, 0x00 }, /* 1600x1200 @ 85 Hz */ { 229000, 0x0029, 0x0007, 0x10, 0x09, 0xC7, 0xC7, 0x8d, 0xcf, 0x07, 0xE0, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xE1, 0x04, 0x04, 0x04, 0x04, 0x01, 0x00, 0x22210000, 0x22416000, 0x0, 0x22210000, 0x22416000, 0x0, 0x00 }, }; void round_off_xres(u32 *xres) { 
if (*xres <= 640) *xres = 640; else if (*xres <= 800) *xres = 800; else if (*xres <= 1024) *xres = 1024; else if (*xres <= 1152) *xres = 1152; else if (*xres <= 1280) *xres = 1280; else *xres = 1600; } inline void round_off_yres(u32 *xres, u32 *yres) { *yres = (*xres * 3) >> 2; } static int i810fb_find_best_mode(u32 xres, u32 yres, u32 pixclock) { u32 diff = 0, diff_best = 0xFFFFFFFF, i = 0, i_best = 0; u8 hfl = (u8) ((xres >> 3) - 1); for (i = 0; i < ARRAY_SIZE(std_modes); i++) { if (std_modes[i].cr01 == hfl) { if (std_modes[i].pixclock <= pixclock) diff = pixclock - std_modes[i].pixclock; if (diff < diff_best) { i_best = i; diff_best = diff; } } } return i_best; } void i810fb_encode_registers(const struct fb_var_screeninfo *var, struct i810fb_par *par, u32 xres, u32 yres) { u32 i_best = i810fb_find_best_mode(xres, yres, par->regs.pixclock); par->regs = std_modes[i_best]; /* overlay */ par->ovract = ((xres + var->right_margin + var->hsync_len + var->left_margin - 32) | ((xres - 32) << 16)); } void i810fb_fill_var_timings(struct fb_var_screeninfo *var) { u32 total, xres, yres; u32 mode, pixclock; xres = var->xres; yres = var->yres; pixclock = 1000000000 / var->pixclock; mode = i810fb_find_best_mode(xres, yres, pixclock); total = (std_modes[mode].cr00 | (std_modes[mode].cr35 & 1) << 8) + 3; total <<= 3; var->pixclock = 1000000000 / std_modes[mode].pixclock; var->right_margin = (std_modes[mode].cr04 << 3) - xres; var->hsync_len = ((std_modes[mode].cr05 & 0x1F) - (std_modes[mode].cr04 & 0x1F)) << 3; var->left_margin = (total - (xres + var->right_margin + var->hsync_len)); var->sync = FB_SYNC_ON_GREEN; if (~(std_modes[mode].msr & (1 << 6))) var->sync |= FB_SYNC_HOR_HIGH_ACT; if (~(std_modes[mode].msr & (1 << 7))) var->sync |= FB_SYNC_VERT_HIGH_ACT; total = (std_modes[mode].cr06 | (std_modes[mode].cr30 & 0xF) << 8) + 2; var->lower_margin = (std_modes[mode].cr10 | (std_modes[mode].cr32 & 0x0F) << 8) - yres; var->vsync_len = (std_modes[mode].cr11 & 0x0F) - 
(var->lower_margin & 0x0F); var->upper_margin = total - (yres + var->lower_margin + var->vsync_len); } u32 i810_get_watermark(struct fb_var_screeninfo *var, struct i810fb_par *par) { struct mode_registers *params = &par->regs; u32 wmark = 0; if (par->mem_freq == 100) { switch (var->bits_per_pixel) { case 8: wmark = params->bpp8_100; break; case 16: wmark = params->bpp16_100; break; case 24: case 32: wmark = params->bpp24_100; } } else { switch (var->bits_per_pixel) { case 8: wmark = params->bpp8_133; break; case 16: wmark = params->bpp16_133; break; case 24: case 32: wmark = params->bpp24_133; } } return wmark; }
gpl-2.0
varigit/wl18xx
net/core/dev_addr_lists.c
88
17463
/* * net/core/dev_addr_lists.c - Functions for handling net device lists * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com> * * This file contains functions for working with unicast, multicast and device * addresses lists. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/export.h> #include <linux/list.h> #include <linux/proc_fs.h> /* * General list handling functions */ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, unsigned char *addr, int addr_len, unsigned char addr_type, bool global) { struct netdev_hw_addr *ha; int alloc_size; if (addr_len > MAX_ADDR_LEN) return -EINVAL; list_for_each_entry(ha, &list->list, list) { if (!memcmp(ha->addr, addr, addr_len) && ha->type == addr_type) { if (global) { /* check if addr is already used as global */ if (ha->global_use) return 0; else ha->global_use = true; } ha->refcount++; return 0; } } alloc_size = sizeof(*ha); if (alloc_size < L1_CACHE_BYTES) alloc_size = L1_CACHE_BYTES; ha = kmalloc(alloc_size, GFP_ATOMIC); if (!ha) return -ENOMEM; memcpy(ha->addr, addr, addr_len); ha->type = addr_type; ha->refcount = 1; ha->global_use = global; ha->synced = false; list_add_tail_rcu(&ha->list, &list->list); list->count++; return 0; } static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, int addr_len, unsigned char addr_type) { return __hw_addr_add_ex(list, addr, addr_len, addr_type, false); } static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, unsigned char *addr, int addr_len, unsigned char addr_type, bool global) { struct netdev_hw_addr *ha; list_for_each_entry(ha, &list->list, list) { if (!memcmp(ha->addr, addr, addr_len) && (ha->type == addr_type || !addr_type)) { if (global) { if (!ha->global_use) break; 
else ha->global_use = false; } if (--ha->refcount) return 0; list_del_rcu(&ha->list); kfree_rcu(ha, rcu_head); list->count--; return 0; } } return -ENOENT; } static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, int addr_len, unsigned char addr_type) { return __hw_addr_del_ex(list, addr, addr_len, addr_type, false); } int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len, unsigned char addr_type) { int err; struct netdev_hw_addr *ha, *ha2; unsigned char type; list_for_each_entry(ha, &from_list->list, list) { type = addr_type ? addr_type : ha->type; err = __hw_addr_add(to_list, ha->addr, addr_len, type); if (err) goto unroll; } return 0; unroll: list_for_each_entry(ha2, &from_list->list, list) { if (ha2 == ha) break; type = addr_type ? addr_type : ha2->type; __hw_addr_del(to_list, ha2->addr, addr_len, type); } return err; } EXPORT_SYMBOL(__hw_addr_add_multiple); void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len, unsigned char addr_type) { struct netdev_hw_addr *ha; unsigned char type; list_for_each_entry(ha, &from_list->list, list) { type = addr_type ? 
addr_type : ha->type; __hw_addr_del(to_list, ha->addr, addr_len, type); } } EXPORT_SYMBOL(__hw_addr_del_multiple); int __hw_addr_sync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len) { int err = 0; struct netdev_hw_addr *ha, *tmp; list_for_each_entry_safe(ha, tmp, &from_list->list, list) { if (!ha->synced) { err = __hw_addr_add(to_list, ha->addr, addr_len, ha->type); if (err) break; ha->synced = true; ha->refcount++; } else if (ha->refcount == 1) { __hw_addr_del(to_list, ha->addr, addr_len, ha->type); __hw_addr_del(from_list, ha->addr, addr_len, ha->type); } } return err; } EXPORT_SYMBOL(__hw_addr_sync); void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len) { struct netdev_hw_addr *ha, *tmp; list_for_each_entry_safe(ha, tmp, &from_list->list, list) { if (ha->synced) { __hw_addr_del(to_list, ha->addr, addr_len, ha->type); ha->synced = false; __hw_addr_del(from_list, ha->addr, addr_len, ha->type); } } } EXPORT_SYMBOL(__hw_addr_unsync); void __hw_addr_flush(struct netdev_hw_addr_list *list) { struct netdev_hw_addr *ha, *tmp; list_for_each_entry_safe(ha, tmp, &list->list, list) { list_del_rcu(&ha->list); kfree_rcu(ha, rcu_head); } list->count = 0; } EXPORT_SYMBOL(__hw_addr_flush); void __hw_addr_init(struct netdev_hw_addr_list *list) { INIT_LIST_HEAD(&list->list); list->count = 0; } EXPORT_SYMBOL(__hw_addr_init); /* * Device addresses handling functions */ /** * dev_addr_flush - Flush device address list * @dev: device * * Flush device address list and reset ->dev_addr. * * The caller must hold the rtnl_mutex. */ void dev_addr_flush(struct net_device *dev) { /* rtnl_mutex must be held here */ __hw_addr_flush(&dev->dev_addrs); dev->dev_addr = NULL; } EXPORT_SYMBOL(dev_addr_flush); /** * dev_addr_init - Init device address list * @dev: device * * Init device address list and create the first element, * used by ->dev_addr. * * The caller must hold the rtnl_mutex. 
*/ int dev_addr_init(struct net_device *dev) { unsigned char addr[MAX_ADDR_LEN]; struct netdev_hw_addr *ha; int err; /* rtnl_mutex must be held here */ __hw_addr_init(&dev->dev_addrs); memset(addr, 0, sizeof(addr)); err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr), NETDEV_HW_ADDR_T_LAN); if (!err) { /* * Get the first (previously created) address from the list * and set dev_addr pointer to this location. */ ha = list_first_entry(&dev->dev_addrs.list, struct netdev_hw_addr, list); dev->dev_addr = ha->addr; } return err; } EXPORT_SYMBOL(dev_addr_init); /** * dev_addr_add - Add a device address * @dev: device * @addr: address to add * @addr_type: address type * * Add a device address to the device or increase the reference count if * it already exists. * * The caller must hold the rtnl_mutex. */ int dev_addr_add(struct net_device *dev, unsigned char *addr, unsigned char addr_type) { int err; ASSERT_RTNL(); err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type); if (!err) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return err; } EXPORT_SYMBOL(dev_addr_add); /** * dev_addr_del - Release a device address. * @dev: device * @addr: address to delete * @addr_type: address type * * Release reference to a device address and remove it from the device * if the reference count drops to zero. * * The caller must hold the rtnl_mutex. */ int dev_addr_del(struct net_device *dev, unsigned char *addr, unsigned char addr_type) { int err; struct netdev_hw_addr *ha; ASSERT_RTNL(); /* * We can not remove the first address from the list because * dev->dev_addr points to that. 
*/ ha = list_first_entry(&dev->dev_addrs.list, struct netdev_hw_addr, list); if (ha->addr == dev->dev_addr && ha->refcount == 1) return -ENOENT; err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, addr_type); if (!err) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return err; } EXPORT_SYMBOL(dev_addr_del); /** * dev_addr_add_multiple - Add device addresses from another device * @to_dev: device to which addresses will be added * @from_dev: device from which addresses will be added * @addr_type: address type - 0 means type will be used from from_dev * * Add device addresses of the one device to another. ** * The caller must hold the rtnl_mutex. */ int dev_addr_add_multiple(struct net_device *to_dev, struct net_device *from_dev, unsigned char addr_type) { int err; ASSERT_RTNL(); if (from_dev->addr_len != to_dev->addr_len) return -EINVAL; err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, to_dev->addr_len, addr_type); if (!err) call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); return err; } EXPORT_SYMBOL(dev_addr_add_multiple); /** * dev_addr_del_multiple - Delete device addresses by another device * @to_dev: device where the addresses will be deleted * @from_dev: device supplying the addresses to be deleted * @addr_type: address type - 0 means type will be used from from_dev * * Deletes addresses in to device by the list of addresses in from device. * * The caller must hold the rtnl_mutex. 
*/ int dev_addr_del_multiple(struct net_device *to_dev, struct net_device *from_dev, unsigned char addr_type) { ASSERT_RTNL(); if (from_dev->addr_len != to_dev->addr_len) return -EINVAL; __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, to_dev->addr_len, addr_type); call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); return 0; } EXPORT_SYMBOL(dev_addr_del_multiple); /* * Unicast list handling functions */ /** * dev_uc_add - Add a secondary unicast address * @dev: device * @addr: address to add * * Add a secondary unicast address to the device or increase * the reference count if it already exists. */ int dev_uc_add(struct net_device *dev, unsigned char *addr) { int err; netif_addr_lock_bh(dev); err = __hw_addr_add(&dev->uc, addr, dev->addr_len, NETDEV_HW_ADDR_T_UNICAST); if (!err) __dev_set_rx_mode(dev); netif_addr_unlock_bh(dev); return err; } EXPORT_SYMBOL(dev_uc_add); /** * dev_uc_del - Release secondary unicast address. * @dev: device * @addr: address to delete * * Release reference to a secondary unicast address and remove it * from the device if the reference count drops to zero. */ int dev_uc_del(struct net_device *dev, unsigned char *addr) { int err; netif_addr_lock_bh(dev); err = __hw_addr_del(&dev->uc, addr, dev->addr_len, NETDEV_HW_ADDR_T_UNICAST); if (!err) __dev_set_rx_mode(dev); netif_addr_unlock_bh(dev); return err; } EXPORT_SYMBOL(dev_uc_del); /** * dev_uc_sync - Synchronize device's unicast list to another device * @to: destination device * @from: source device * * Add newly added addresses to the destination device and release * addresses that have no users left. The source device must be * locked by netif_tx_lock_bh. * * This function is intended to be called from the dev->set_rx_mode * function of layered software devices. 
*/ int dev_uc_sync(struct net_device *to, struct net_device *from) { int err = 0; if (to->addr_len != from->addr_len) return -EINVAL; netif_addr_lock_bh(to); err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); if (!err) __dev_set_rx_mode(to); netif_addr_unlock_bh(to); return err; } EXPORT_SYMBOL(dev_uc_sync); /** * dev_uc_unsync - Remove synchronized addresses from the destination device * @to: destination device * @from: source device * * Remove all addresses that were added to the destination device by * dev_uc_sync(). This function is intended to be called from the * dev->stop function of layered software devices. */ void dev_uc_unsync(struct net_device *to, struct net_device *from) { if (to->addr_len != from->addr_len) return; netif_addr_lock_bh(from); netif_addr_lock(to); __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); __dev_set_rx_mode(to); netif_addr_unlock(to); netif_addr_unlock_bh(from); } EXPORT_SYMBOL(dev_uc_unsync); /** * dev_uc_flush - Flush unicast addresses * @dev: device * * Flush unicast addresses. */ void dev_uc_flush(struct net_device *dev) { netif_addr_lock_bh(dev); __hw_addr_flush(&dev->uc); netif_addr_unlock_bh(dev); } EXPORT_SYMBOL(dev_uc_flush); /** * dev_uc_flush - Init unicast address list * @dev: device * * Init unicast address list. */ void dev_uc_init(struct net_device *dev) { __hw_addr_init(&dev->uc); } EXPORT_SYMBOL(dev_uc_init); /* * Multicast list handling functions */ static int __dev_mc_add(struct net_device *dev, unsigned char *addr, bool global) { int err; netif_addr_lock_bh(dev); err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST, global); if (!err) __dev_set_rx_mode(dev); netif_addr_unlock_bh(dev); return err; } /** * dev_mc_add - Add a multicast address * @dev: device * @addr: address to add * * Add a multicast address to the device or increase * the reference count if it already exists. 
*/ int dev_mc_add(struct net_device *dev, unsigned char *addr) { return __dev_mc_add(dev, addr, false); } EXPORT_SYMBOL(dev_mc_add); /** * dev_mc_add_global - Add a global multicast address * @dev: device * @addr: address to add * * Add a global multicast address to the device. */ int dev_mc_add_global(struct net_device *dev, unsigned char *addr) { return __dev_mc_add(dev, addr, true); } EXPORT_SYMBOL(dev_mc_add_global); static int __dev_mc_del(struct net_device *dev, unsigned char *addr, bool global) { int err; netif_addr_lock_bh(dev); err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST, global); if (!err) __dev_set_rx_mode(dev); netif_addr_unlock_bh(dev); return err; } /** * dev_mc_del - Delete a multicast address. * @dev: device * @addr: address to delete * * Release reference to a multicast address and remove it * from the device if the reference count drops to zero. */ int dev_mc_del(struct net_device *dev, unsigned char *addr) { return __dev_mc_del(dev, addr, false); } EXPORT_SYMBOL(dev_mc_del); /** * dev_mc_del_global - Delete a global multicast address. * @dev: device * @addr: address to delete * * Release reference to a multicast address and remove it * from the device if the reference count drops to zero. */ int dev_mc_del_global(struct net_device *dev, unsigned char *addr) { return __dev_mc_del(dev, addr, true); } EXPORT_SYMBOL(dev_mc_del_global); /** * dev_mc_sync - Synchronize device's unicast list to another device * @to: destination device * @from: source device * * Add newly added addresses to the destination device and release * addresses that have no users left. The source device must be * locked by netif_tx_lock_bh. * * This function is intended to be called from the ndo_set_rx_mode * function of layered software devices. 
*/ int dev_mc_sync(struct net_device *to, struct net_device *from) { int err = 0; if (to->addr_len != from->addr_len) return -EINVAL; netif_addr_lock_bh(to); err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); if (!err) __dev_set_rx_mode(to); netif_addr_unlock_bh(to); return err; } EXPORT_SYMBOL(dev_mc_sync); /** * dev_mc_unsync - Remove synchronized addresses from the destination device * @to: destination device * @from: source device * * Remove all addresses that were added to the destination device by * dev_mc_sync(). This function is intended to be called from the * dev->stop function of layered software devices. */ void dev_mc_unsync(struct net_device *to, struct net_device *from) { if (to->addr_len != from->addr_len) return; netif_addr_lock_bh(from); netif_addr_lock(to); __hw_addr_unsync(&to->mc, &from->mc, to->addr_len); __dev_set_rx_mode(to); netif_addr_unlock(to); netif_addr_unlock_bh(from); } EXPORT_SYMBOL(dev_mc_unsync); /** * dev_mc_flush - Flush multicast addresses * @dev: device * * Flush multicast addresses. */ void dev_mc_flush(struct net_device *dev) { netif_addr_lock_bh(dev); __hw_addr_flush(&dev->mc); netif_addr_unlock_bh(dev); } EXPORT_SYMBOL(dev_mc_flush); /** * dev_mc_flush - Init multicast address list * @dev: device * * Init multicast address list. 
*/ void dev_mc_init(struct net_device *dev) { __hw_addr_init(&dev->mc); } EXPORT_SYMBOL(dev_mc_init); #ifdef CONFIG_PROC_FS #include <linux/seq_file.h> static int dev_mc_seq_show(struct seq_file *seq, void *v) { struct netdev_hw_addr *ha; struct net_device *dev = v; if (v == SEQ_START_TOKEN) return 0; netif_addr_lock_bh(dev); netdev_for_each_mc_addr(ha, dev) { int i; seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex, dev->name, ha->refcount, ha->global_use); for (i = 0; i < dev->addr_len; i++) seq_printf(seq, "%02x", ha->addr[i]); seq_putc(seq, '\n'); } netif_addr_unlock_bh(dev); return 0; } static const struct seq_operations dev_mc_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = dev_mc_seq_show, }; static int dev_mc_seq_open(struct inode *inode, struct file *file) { return dev_seq_open_ops(inode, file, &dev_mc_seq_ops); } static const struct file_operations dev_mc_seq_fops = { .owner = THIS_MODULE, .open = dev_mc_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif static int __net_init dev_mc_net_init(struct net *net) { if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops)) return -ENOMEM; return 0; } static void __net_exit dev_mc_net_exit(struct net *net) { proc_net_remove(net, "dev_mcast"); } static struct pernet_operations __net_initdata dev_mc_net_ops = { .init = dev_mc_net_init, .exit = dev_mc_net_exit, }; void __init dev_mcast_init(void) { register_pernet_subsys(&dev_mc_net_ops); }
gpl-2.0
roqu3/Kernel-Pecan-2.6.32
arch/arm/mach-msm/board-mahimahi-panel.c
88
13728
/* linux/arch/arm/mach-msm/board-mahimahi-panel.c * * Copyright (c) 2009 Google Inc. * Author: Dima Zavin <dima@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #include <asm/io.h> #include <asm/mach-types.h> #include <mach/msm_fb.h> #include <mach/msm_iomap.h> #include "board-mahimahi.h" #include "devices.h" #define SPI_CONFIG (0x00000000) #define SPI_IO_CONTROL (0x00000004) #define SPI_OPERATIONAL (0x00000030) #define SPI_ERROR_FLAGS_EN (0x00000038) #define SPI_ERROR_FLAGS (0x00000038) #define SPI_OUTPUT_FIFO (0x00000100) static void __iomem *spi_base; static struct clk *spi_clk ; static int qspi_send(uint32_t id, uint8_t data) { uint32_t err; /* bit-5: OUTPUT_FIFO_NOT_EMPTY */ while (readl(spi_base + SPI_OPERATIONAL) & (1<<5)) { if ((err = readl(spi_base + SPI_ERROR_FLAGS))) { pr_err("%s: ERROR: SPI_ERROR_FLAGS=0x%08x\n", __func__, err); return -EIO; } } writel((0x7000 | (id << 9) | data) << 16, spi_base + SPI_OUTPUT_FIFO); udelay(100); return 0; } static int lcm_writeb(uint8_t reg, uint8_t val) { qspi_send(0x0, reg); qspi_send(0x1, val); return 0; } static int lcm_writew(uint8_t reg, uint16_t val) { qspi_send(0x0, reg); qspi_send(0x1, val >> 8); qspi_send(0x1, val & 0xff); return 0; } static struct resource resources_msm_fb[] = { { .start = MSM_FB_BASE, .end = MSM_FB_BASE + MSM_FB_SIZE - 1, .flags = IORESOURCE_MEM, }, }; 
struct lcm_tbl { uint8_t reg; uint8_t val; }; static struct lcm_tbl samsung_oled_rgb565_init_table[] = { { 0x31, 0x08 }, { 0x32, 0x14 }, { 0x30, 0x2 }, { 0x27, 0x1 }, { 0x12, 0x8 }, { 0x13, 0x8 }, { 0x15, 0x0 }, { 0x16, 0x02 }, { 0x39, 0x24 }, { 0x17, 0x22 }, { 0x18, 0x33 }, { 0x19, 0x3 }, { 0x1A, 0x1 }, { 0x22, 0xA4 }, { 0x23, 0x0 }, { 0x26, 0xA0 }, }; static struct lcm_tbl samsung_oled_rgb666_init_table[] = { { 0x31, 0x08 }, { 0x32, 0x14 }, { 0x30, 0x2 }, { 0x27, 0x1 }, { 0x12, 0x8 }, { 0x13, 0x8 }, { 0x15, 0x0 }, { 0x16, 0x01 }, { 0x39, 0x24 }, { 0x17, 0x22 }, { 0x18, 0x33 }, { 0x19, 0x3 }, { 0x1A, 0x1 }, { 0x22, 0xA4 }, { 0x23, 0x0 }, { 0x26, 0xA0 }, }; static struct lcm_tbl *init_tablep = samsung_oled_rgb565_init_table; static size_t init_table_sz = ARRAY_SIZE(samsung_oled_rgb565_init_table); #define OLED_GAMMA_TABLE_SIZE (7 * 3) static struct lcm_tbl samsung_oled_gamma_table[][OLED_GAMMA_TABLE_SIZE] = { /* level 10 */ { /* Gamma-R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x3f }, { 0x43, 0x35 }, { 0x44, 0x30 }, { 0x45, 0x2c }, { 0x46, 0x13 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x0 }, { 0x53, 0x0 }, { 0x54, 0x27 }, { 0x55, 0x2b }, { 0x56, 0x12 }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x3f }, { 0x63, 0x34 }, { 0x64, 0x2f }, { 0x65, 0x2b }, { 0x66, 0x1b }, }, /* level 40 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x3e }, { 0x43, 0x2e }, { 0x44, 0x2d }, { 0x45, 0x28 }, { 0x46, 0x21 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x0 }, { 0x53, 0x21 }, { 0x54, 0x2a }, { 0x55, 0x28 }, { 0x56, 0x20 }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x3e }, { 0x63, 0x2d }, { 0x64, 0x2b }, { 0x65, 0x26 }, { 0x66, 0x2d }, }, /* level 70 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x35 }, { 0x43, 0x2c }, { 0x44, 0x2b }, { 0x45, 0x26 }, { 0x46, 0x29 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x0 }, { 0x53, 0x25 }, { 0x54, 0x29 }, { 0x55, 0x26 }, { 0x56, 0x28 }, /* Gamma -B */ { 
0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x34 }, { 0x63, 0x2b }, { 0x64, 0x2a }, { 0x65, 0x23 }, { 0x66, 0x37 }, }, /* level 100 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x30 }, { 0x43, 0x2a }, { 0x44, 0x2b }, { 0x45, 0x24 }, { 0x46, 0x2f }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x0 }, { 0x53, 0x25 }, { 0x54, 0x29 }, { 0x55, 0x24 }, { 0x56, 0x2e }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x2f }, { 0x63, 0x29 }, { 0x64, 0x29 }, { 0x65, 0x21 }, { 0x66, 0x3f }, }, /* level 130 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x2e }, { 0x43, 0x29 }, { 0x44, 0x2a }, { 0x45, 0x23 }, { 0x46, 0x34 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0xa }, { 0x53, 0x25 }, { 0x54, 0x28 }, { 0x55, 0x23 }, { 0x56, 0x33 }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x2d }, { 0x63, 0x28 }, { 0x64, 0x27 }, { 0x65, 0x20 }, { 0x66, 0x46 }, }, /* level 160 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x2b }, { 0x43, 0x29 }, { 0x44, 0x28 }, { 0x45, 0x23 }, { 0x46, 0x38 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0xb }, { 0x53, 0x25 }, { 0x54, 0x27 }, { 0x55, 0x23 }, { 0x56, 0x37 }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x29 }, { 0x63, 0x28 }, { 0x64, 0x25 }, { 0x65, 0x20 }, { 0x66, 0x4b }, }, /* level 190 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x29 }, { 0x43, 0x29 }, { 0x44, 0x27 }, { 0x45, 0x22 }, { 0x46, 0x3c }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x10 }, { 0x53, 0x26 }, { 0x54, 0x26 }, { 0x55, 0x22 }, { 0x56, 0x3b }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x28 }, { 0x63, 0x28 }, { 0x64, 0x24 }, { 0x65, 0x1f }, { 0x66, 0x50 }, }, /* level 220 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x28 }, { 0x43, 0x28 }, { 0x44, 0x28 }, { 0x45, 0x20 }, { 0x46, 0x40 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x11 }, { 0x53, 0x25 }, { 0x54, 0x27 }, { 0x55, 0x20 }, { 0x56, 0x3f }, /* Gamma -B */ { 0x60, 0x0 }, 
{ 0x61, 0x3f }, { 0x62, 0x27 }, { 0x63, 0x26 }, { 0x64, 0x26 }, { 0x65, 0x1c }, { 0x66, 0x56 }, }, /* level 250 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x2a }, { 0x43, 0x27 }, { 0x44, 0x27 }, { 0x45, 0x1f }, { 0x46, 0x44 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x17 }, { 0x53, 0x24 }, { 0x54, 0x26 }, { 0x55, 0x1f }, { 0x56, 0x43 }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x2a }, { 0x63, 0x25 }, { 0x64, 0x24 }, { 0x65, 0x1b }, { 0x66, 0x5c }, }, }; #define SAMSUNG_OLED_NUM_LEVELS ARRAY_SIZE(samsung_oled_gamma_table) #define SAMSUNG_OLED_MIN_VAL 10 #define SAMSUNG_OLED_MAX_VAL 250 #define SAMSUNG_OLED_DEFAULT_VAL (SAMSUNG_OLED_MIN_VAL + \ (SAMSUNG_OLED_MAX_VAL - \ SAMSUNG_OLED_MIN_VAL) / 2) #define SAMSUNG_OLED_LEVEL_STEP ((SAMSUNG_OLED_MAX_VAL - \ SAMSUNG_OLED_MIN_VAL) / \ (SAMSUNG_OLED_NUM_LEVELS - 1)) static DEFINE_MUTEX(panel_lock); static struct work_struct brightness_delayed_work; static DEFINE_SPINLOCK(brightness_lock); static uint8_t new_val = SAMSUNG_OLED_DEFAULT_VAL; static uint8_t last_val = SAMSUNG_OLED_DEFAULT_VAL; static uint8_t table_sel_vals[] = { 0x43, 0x34 }; static int table_sel_idx = 0; static void gamma_table_bank_select(void) { lcm_writeb(0x39, table_sel_vals[table_sel_idx]); table_sel_idx ^= 1; } static void samsung_oled_set_gamma_val(int val) { int i; int level; int frac; val = clamp(val, SAMSUNG_OLED_MIN_VAL, SAMSUNG_OLED_MAX_VAL); val = (val / 2) * 2; level = (val - SAMSUNG_OLED_MIN_VAL) / SAMSUNG_OLED_LEVEL_STEP; frac = (val - SAMSUNG_OLED_MIN_VAL) % SAMSUNG_OLED_LEVEL_STEP; clk_enable(spi_clk); for (i = 0; i < OLED_GAMMA_TABLE_SIZE; ++i) { unsigned int v1; unsigned int v2 = 0; u8 v; if (frac == 0) { v = samsung_oled_gamma_table[level][i].val; } else { v1 = samsung_oled_gamma_table[level][i].val; v2 = samsung_oled_gamma_table[level+1][i].val; v = (v1 * (SAMSUNG_OLED_LEVEL_STEP - frac) + v2 * frac) / SAMSUNG_OLED_LEVEL_STEP; } lcm_writeb(samsung_oled_gamma_table[level][i].reg, v); } 
gamma_table_bank_select(); clk_disable(spi_clk); last_val = val; } static int samsung_oled_panel_init(struct msm_lcdc_panel_ops *ops) { pr_info("%s: +()\n", __func__); mutex_lock(&panel_lock); clk_enable(spi_clk); /* Set the gamma write target to 4, leave the current gamma set at 2 */ lcm_writeb(0x39, 0x24); clk_disable(spi_clk); mutex_unlock(&panel_lock); pr_info("%s: -()\n", __func__); return 0; } static int samsung_oled_panel_unblank(struct msm_lcdc_panel_ops *ops) { int i; pr_info("%s: +()\n", __func__); mutex_lock(&panel_lock); gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 1); udelay(50); gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 0); udelay(20); gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 1); msleep(20); clk_enable(spi_clk); for (i = 0; i < init_table_sz; i++) lcm_writeb(init_tablep[i].reg, init_tablep[i].val); lcm_writew(0xef, 0xd0e8); lcm_writeb(0x1d, 0xa0); table_sel_idx = 0; gamma_table_bank_select(); samsung_oled_set_gamma_val(last_val); msleep(250); lcm_writeb(0x14, 0x03); clk_disable(spi_clk); mutex_unlock(&panel_lock); pr_info("%s: -()\n", __func__); return 0; } static int samsung_oled_panel_blank(struct msm_lcdc_panel_ops *ops) { pr_info("%s: +()\n", __func__); mutex_lock(&panel_lock); clk_enable(spi_clk); lcm_writeb(0x14, 0x0); mdelay(1); lcm_writeb(0x1d, 0xa1); clk_disable(spi_clk); msleep(200); gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 0); mutex_unlock(&panel_lock); pr_info("%s: -()\n", __func__); return 0; } static struct msm_lcdc_panel_ops mahimahi_lcdc_panel_ops = { .init = samsung_oled_panel_init, .blank = samsung_oled_panel_blank, .unblank = samsung_oled_panel_unblank, }; static struct msm_lcdc_timing mahimahi_lcdc_timing = { .clk_rate = 24576000, .hsync_pulse_width = 4, .hsync_back_porch = 8, .hsync_front_porch = 8, .hsync_skew = 0, .vsync_pulse_width = 2, .vsync_back_porch = 8, .vsync_front_porch = 8, .vsync_act_low = 1, .hsync_act_low = 1, .den_act_low = 1, }; static struct msm_fb_data mahimahi_lcdc_fb_data = { .xres = 480, .yres = 800, .width = 48, 
.height = 80, .output_format = MSM_MDP_OUT_IF_FMT_RGB565, }; static struct msm_lcdc_platform_data mahimahi_lcdc_platform_data = { .panel_ops = &mahimahi_lcdc_panel_ops, .timing = &mahimahi_lcdc_timing, .fb_id = 0, .fb_data = &mahimahi_lcdc_fb_data, .fb_resource = &resources_msm_fb[0], }; static struct platform_device mahimahi_lcdc_device = { .name = "msm_mdp_lcdc", .id = -1, .dev = { .platform_data = &mahimahi_lcdc_platform_data, }, }; static int mahimahi_init_spi_hack(void) { int ret; spi_base = ioremap(MSM_SPI_PHYS, MSM_SPI_SIZE); if (!spi_base) return -1; spi_clk = clk_get(&msm_device_spi.dev, "spi_clk"); if (IS_ERR(spi_clk)) { pr_err("%s: unable to get spi_clk\n", __func__); ret = PTR_ERR(spi_clk); goto err_clk_get; } clk_enable(spi_clk); printk("spi: SPI_CONFIG=%x\n", readl(spi_base + SPI_CONFIG)); printk("spi: SPI_IO_CONTROL=%x\n", readl(spi_base + SPI_IO_CONTROL)); printk("spi: SPI_OPERATIONAL=%x\n", readl(spi_base + SPI_OPERATIONAL)); printk("spi: SPI_ERROR_FLAGS_EN=%x\n", readl(spi_base + SPI_ERROR_FLAGS_EN)); printk("spi: SPI_ERROR_FLAGS=%x\n", readl(spi_base + SPI_ERROR_FLAGS)); printk("-%s()\n", __FUNCTION__); clk_disable(spi_clk); return 0; err_clk_get: iounmap(spi_base); return ret; } static void mahimahi_brightness_set(struct led_classdev *led_cdev, enum led_brightness val) { unsigned long flags; led_cdev->brightness = val; spin_lock_irqsave(&brightness_lock, flags); new_val = val; spin_unlock_irqrestore(&brightness_lock, flags); schedule_work(&brightness_delayed_work); } static void mahimahi_brightness_set_work(struct work_struct *work_ptr) { unsigned long flags; uint8_t val; spin_lock_irqsave(&brightness_lock, flags); val = new_val; spin_unlock_irqrestore(&brightness_lock, flags); mutex_lock(&panel_lock); samsung_oled_set_gamma_val(val); mutex_unlock(&panel_lock); } static struct led_classdev mahimahi_brightness_led = { .name = "lcd-backlight", .brightness = LED_FULL, .brightness_set = mahimahi_brightness_set, }; int __init 
mahimahi_init_panel(void) { int ret; if (!machine_is_mahimahi()) return 0; if (system_rev > 0xC0) { /* CDMA version (except for EVT1) supports RGB666 */ init_tablep = samsung_oled_rgb666_init_table; init_table_sz = ARRAY_SIZE(samsung_oled_rgb666_init_table); mahimahi_lcdc_fb_data.output_format = MSM_MDP_OUT_IF_FMT_RGB666; } ret = platform_device_register(&msm_device_mdp); if (ret != 0) return ret; ret = mahimahi_init_spi_hack(); if (ret != 0) return ret; INIT_WORK(&brightness_delayed_work, mahimahi_brightness_set_work); ret = platform_device_register(&mahimahi_lcdc_device); if (ret != 0) return ret; ret = led_classdev_register(NULL, &mahimahi_brightness_led); if (ret != 0) { pr_err("%s: Cannot register brightness led\n", __func__); return ret; } return 0; } device_initcall(mahimahi_init_panel);
gpl-2.0
heechul/linux
net/sunrpc/auth_gss/gss_krb5_mech.c
344
20371
/* * linux/net/sunrpc/gss_krb5_mech.c * * Copyright (c) 2001-2008 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> * J. Bruce Fields <bfields@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/gss_krb5.h> #include <linux/sunrpc/xdr.h> #include <linux/crypto.h> #include <linux/sunrpc/gss_krb5_enctypes.h> #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH #endif static struct gss_api_mech gss_kerberos_mech; /* forward declaration */ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { /* * DES (All DES enctypes are mapped to the same gss functionality) */ { .etype = ENCTYPE_DES_CBC_RAW, .ctype = CKSUMTYPE_RSA_MD5, .name = "des-cbc-crc", .encrypt_name = "cbc(des)", .cksum_name = "md5", .encrypt = krb5_encrypt, .decrypt = krb5_decrypt, .mk_key = NULL, .signalg = SGN_ALG_DES_MAC_MD5, .sealalg = SEAL_ALG_DES, .keybytes = 7, .keylength = 8, .blocksize = 8, .conflen = 8, .cksumlength = 8, .keyed_cksum = 0, }, /* * RC4-HMAC */ { .etype = ENCTYPE_ARCFOUR_HMAC, .ctype = CKSUMTYPE_HMAC_MD5_ARCFOUR, .name = "rc4-hmac", .encrypt_name = "ecb(arc4)", .cksum_name = "hmac(md5)", .encrypt = krb5_encrypt, .decrypt = krb5_decrypt, .mk_key = NULL, .signalg = SGN_ALG_HMAC_MD5, .sealalg = SEAL_ALG_MICROSOFT_RC4, .keybytes = 16, .keylength = 16, .blocksize = 1, .conflen = 8, .cksumlength = 8, .keyed_cksum = 1, }, /* * 3DES */ { .etype = ENCTYPE_DES3_CBC_RAW, .ctype = CKSUMTYPE_HMAC_SHA1_DES3, .name = "des3-hmac-sha1", .encrypt_name = "cbc(des3_ede)", .cksum_name = "hmac(sha1)", .encrypt = krb5_encrypt, .decrypt = krb5_decrypt, .mk_key = gss_krb5_des3_make_key, .signalg = SGN_ALG_HMAC_SHA1_DES3_KD, .sealalg = SEAL_ALG_DES3KD, .keybytes = 21, .keylength = 24, .blocksize = 8, .conflen = 8, .cksumlength = 20, .keyed_cksum = 1, }, /* * AES128 */ { .etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96, .ctype = CKSUMTYPE_HMAC_SHA1_96_AES128, .name = "aes128-cts", .encrypt_name = "cts(cbc(aes))", .cksum_name = "hmac(sha1)", .encrypt = krb5_encrypt, .decrypt = krb5_decrypt, .mk_key = 
gss_krb5_aes_make_key, .encrypt_v2 = gss_krb5_aes_encrypt, .decrypt_v2 = gss_krb5_aes_decrypt, .signalg = -1, .sealalg = -1, .keybytes = 16, .keylength = 16, .blocksize = 16, .conflen = 16, .cksumlength = 12, .keyed_cksum = 1, }, /* * AES256 */ { .etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96, .ctype = CKSUMTYPE_HMAC_SHA1_96_AES256, .name = "aes256-cts", .encrypt_name = "cts(cbc(aes))", .cksum_name = "hmac(sha1)", .encrypt = krb5_encrypt, .decrypt = krb5_decrypt, .mk_key = gss_krb5_aes_make_key, .encrypt_v2 = gss_krb5_aes_encrypt, .decrypt_v2 = gss_krb5_aes_decrypt, .signalg = -1, .sealalg = -1, .keybytes = 32, .keylength = 32, .blocksize = 16, .conflen = 16, .cksumlength = 12, .keyed_cksum = 1, }, }; static const int num_supported_enctypes = ARRAY_SIZE(supported_gss_krb5_enctypes); static int supported_gss_krb5_enctype(int etype) { int i; for (i = 0; i < num_supported_enctypes; i++) if (supported_gss_krb5_enctypes[i].etype == etype) return 1; return 0; } static const struct gss_krb5_enctype * get_gss_krb5_enctype(int etype) { int i; for (i = 0; i < num_supported_enctypes; i++) if (supported_gss_krb5_enctypes[i].etype == etype) return &supported_gss_krb5_enctypes[i]; return NULL; } static const void * simple_get_bytes(const void *p, const void *end, void *res, int len) { const void *q = (const void *)((const char *)p + len); if (unlikely(q > end || q < p)) return ERR_PTR(-EFAULT); memcpy(res, p, len); return q; } static const void * simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) { const void *q; unsigned int len; p = simple_get_bytes(p, end, &len, sizeof(len)); if (IS_ERR(p)) return p; q = (const void *)((const char *)p + len); if (unlikely(q > end || q < p)) return ERR_PTR(-EFAULT); res->data = kmemdup(p, len, GFP_NOFS); if (unlikely(res->data == NULL)) return ERR_PTR(-ENOMEM); res->len = len; return q; } static inline const void * get_key(const void *p, const void *end, struct krb5_ctx *ctx, struct crypto_blkcipher **res) { struct xdr_netobj 
key; int alg; p = simple_get_bytes(p, end, &alg, sizeof(alg)); if (IS_ERR(p)) goto out_err; switch (alg) { case ENCTYPE_DES_CBC_CRC: case ENCTYPE_DES_CBC_MD4: case ENCTYPE_DES_CBC_MD5: /* Map all these key types to ENCTYPE_DES_CBC_RAW */ alg = ENCTYPE_DES_CBC_RAW; break; } if (!supported_gss_krb5_enctype(alg)) { printk(KERN_WARNING "gss_kerberos_mech: unsupported " "encryption key algorithm %d\n", alg); p = ERR_PTR(-EINVAL); goto out_err; } p = simple_get_netobj(p, end, &key); if (IS_ERR(p)) goto out_err; *res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(*res)) { printk(KERN_WARNING "gss_kerberos_mech: unable to initialize " "crypto algorithm %s\n", ctx->gk5e->encrypt_name); *res = NULL; goto out_err_free_key; } if (crypto_blkcipher_setkey(*res, key.data, key.len)) { printk(KERN_WARNING "gss_kerberos_mech: error setting key for " "crypto algorithm %s\n", ctx->gk5e->encrypt_name); goto out_err_free_tfm; } kfree(key.data); return p; out_err_free_tfm: crypto_free_blkcipher(*res); out_err_free_key: kfree(key.data); p = ERR_PTR(-EINVAL); out_err: return p; } static int gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) { int tmp; p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); if (IS_ERR(p)) goto out_err; /* Old format supports only DES! 
Any other enctype uses new format */ ctx->enctype = ENCTYPE_DES_CBC_RAW; ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); if (ctx->gk5e == NULL) { p = ERR_PTR(-EINVAL); goto out_err; } /* The downcall format was designed before we completely understood * the uses of the context fields; so it includes some stuff we * just give some minimal sanity-checking, and some we ignore * completely (like the next twenty bytes): */ if (unlikely(p + 20 > end || p + 20 < p)) { p = ERR_PTR(-EFAULT); goto out_err; } p += 20; p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); if (IS_ERR(p)) goto out_err; if (tmp != SGN_ALG_DES_MAC_MD5) { p = ERR_PTR(-ENOSYS); goto out_err; } p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); if (IS_ERR(p)) goto out_err; if (tmp != SEAL_ALG_DES) { p = ERR_PTR(-ENOSYS); goto out_err; } p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); if (IS_ERR(p)) goto out_err; p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send)); if (IS_ERR(p)) goto out_err; p = simple_get_netobj(p, end, &ctx->mech_used); if (IS_ERR(p)) goto out_err; p = get_key(p, end, ctx, &ctx->enc); if (IS_ERR(p)) goto out_err_free_mech; p = get_key(p, end, ctx, &ctx->seq); if (IS_ERR(p)) goto out_err_free_key1; if (p != end) { p = ERR_PTR(-EFAULT); goto out_err_free_key2; } return 0; out_err_free_key2: crypto_free_blkcipher(ctx->seq); out_err_free_key1: crypto_free_blkcipher(ctx->enc); out_err_free_mech: kfree(ctx->mech_used.data); out_err: return PTR_ERR(p); } static struct crypto_blkcipher * context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key) { struct crypto_blkcipher *cp; cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(cp)) { dprintk("gss_kerberos_mech: unable to initialize " "crypto algorithm %s\n", cname); return NULL; } if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) { dprintk("gss_kerberos_mech: error setting key for " "crypto algorithm %s\n", cname); crypto_free_blkcipher(cp); return NULL; } return cp; } 
static inline void set_cdata(u8 cdata[GSS_KRB5_K5CLENGTH], u32 usage, u8 seed) { cdata[0] = (usage>>24)&0xff; cdata[1] = (usage>>16)&0xff; cdata[2] = (usage>>8)&0xff; cdata[3] = usage&0xff; cdata[4] = seed; } static int context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask) { struct xdr_netobj c, keyin, keyout; u8 cdata[GSS_KRB5_K5CLENGTH]; u32 err; c.len = GSS_KRB5_K5CLENGTH; c.data = cdata; keyin.data = ctx->Ksess; keyin.len = ctx->gk5e->keylength; keyout.len = ctx->gk5e->keylength; /* seq uses the raw key */ ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, ctx->Ksess); if (ctx->seq == NULL) goto out_err; ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, ctx->Ksess); if (ctx->enc == NULL) goto out_free_seq; /* derive cksum */ set_cdata(cdata, KG_USAGE_SIGN, KEY_USAGE_SEED_CHECKSUM); keyout.data = ctx->cksum; err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving cksum key\n", __func__, err); goto out_free_enc; } return 0; out_free_enc: crypto_free_blkcipher(ctx->enc); out_free_seq: crypto_free_blkcipher(ctx->seq); out_err: return -EINVAL; } /* * Note that RC4 depends on deriving keys using the sequence * number or the checksum of a token. Therefore, the final keys * cannot be calculated until the token is being constructed! 
*/ static int context_derive_keys_rc4(struct krb5_ctx *ctx) { struct crypto_hash *hmac; char sigkeyconstant[] = "signaturekey"; int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ struct hash_desc desc; struct scatterlist sg[1]; int err; dprintk("RPC: %s: entered\n", __func__); /* * derive cksum (aka Ksign) key */ hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hmac)) { dprintk("%s: error %ld allocating hash '%s'\n", __func__, PTR_ERR(hmac), ctx->gk5e->cksum_name); err = PTR_ERR(hmac); goto out_err; } err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength); if (err) goto out_err_free_hmac; sg_init_table(sg, 1); sg_set_buf(sg, sigkeyconstant, slen); desc.tfm = hmac; desc.flags = 0; err = crypto_hash_init(&desc); if (err) goto out_err_free_hmac; err = crypto_hash_digest(&desc, sg, slen, ctx->cksum); if (err) goto out_err_free_hmac; /* * allocate hash, and blkciphers for data and seqnum encryption */ ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(ctx->enc)) { err = PTR_ERR(ctx->enc); goto out_err_free_hmac; } ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(ctx->seq)) { crypto_free_blkcipher(ctx->enc); err = PTR_ERR(ctx->seq); goto out_err_free_hmac; } dprintk("RPC: %s: returning success\n", __func__); err = 0; out_err_free_hmac: crypto_free_hash(hmac); out_err: dprintk("RPC: %s: returning %d\n", __func__, err); return err; } static int context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask) { struct xdr_netobj c, keyin, keyout; u8 cdata[GSS_KRB5_K5CLENGTH]; u32 err; c.len = GSS_KRB5_K5CLENGTH; c.data = cdata; keyin.data = ctx->Ksess; keyin.len = ctx->gk5e->keylength; keyout.len = ctx->gk5e->keylength; /* initiator seal encryption */ set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION); keyout.data = ctx->initiator_seal; err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { 
dprintk("%s: Error %d deriving initiator_seal key\n", __func__, err); goto out_err; } ctx->initiator_enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, ctx->initiator_seal); if (ctx->initiator_enc == NULL) goto out_err; /* acceptor seal encryption */ set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_ENCRYPTION); keyout.data = ctx->acceptor_seal; err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving acceptor_seal key\n", __func__, err); goto out_free_initiator_enc; } ctx->acceptor_enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, ctx->acceptor_seal); if (ctx->acceptor_enc == NULL) goto out_free_initiator_enc; /* initiator sign checksum */ set_cdata(cdata, KG_USAGE_INITIATOR_SIGN, KEY_USAGE_SEED_CHECKSUM); keyout.data = ctx->initiator_sign; err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving initiator_sign key\n", __func__, err); goto out_free_acceptor_enc; } /* acceptor sign checksum */ set_cdata(cdata, KG_USAGE_ACCEPTOR_SIGN, KEY_USAGE_SEED_CHECKSUM); keyout.data = ctx->acceptor_sign; err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving acceptor_sign key\n", __func__, err); goto out_free_acceptor_enc; } /* initiator seal integrity */ set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_INTEGRITY); keyout.data = ctx->initiator_integ; err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving initiator_integ key\n", __func__, err); goto out_free_acceptor_enc; } /* acceptor seal integrity */ set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_INTEGRITY); keyout.data = ctx->acceptor_integ; err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving acceptor_integ key\n", __func__, err); goto out_free_acceptor_enc; } switch (ctx->enctype) { case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case 
ENCTYPE_AES256_CTS_HMAC_SHA1_96: ctx->initiator_enc_aux = context_v2_alloc_cipher(ctx, "cbc(aes)", ctx->initiator_seal); if (ctx->initiator_enc_aux == NULL) goto out_free_acceptor_enc; ctx->acceptor_enc_aux = context_v2_alloc_cipher(ctx, "cbc(aes)", ctx->acceptor_seal); if (ctx->acceptor_enc_aux == NULL) { crypto_free_blkcipher(ctx->initiator_enc_aux); goto out_free_acceptor_enc; } } return 0; out_free_acceptor_enc: crypto_free_blkcipher(ctx->acceptor_enc); out_free_initiator_enc: crypto_free_blkcipher(ctx->initiator_enc); out_err: return -EINVAL; } static int gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, gfp_t gfp_mask) { int keylen; p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags)); if (IS_ERR(p)) goto out_err; ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR; p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); if (IS_ERR(p)) goto out_err; p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64)); if (IS_ERR(p)) goto out_err; /* set seq_send for use by "older" enctypes */ ctx->seq_send = ctx->seq_send64; if (ctx->seq_send64 != ctx->seq_send) { dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, (unsigned long)ctx->seq_send64, ctx->seq_send); p = ERR_PTR(-EINVAL); goto out_err; } p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); if (IS_ERR(p)) goto out_err; /* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */ if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1) ctx->enctype = ENCTYPE_DES3_CBC_RAW; ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); if (ctx->gk5e == NULL) { dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n", ctx->enctype); p = ERR_PTR(-EINVAL); goto out_err; } keylen = ctx->gk5e->keylength; p = simple_get_bytes(p, end, ctx->Ksess, keylen); if (IS_ERR(p)) goto out_err; if (p != end) { p = ERR_PTR(-EINVAL); goto out_err; } ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data, gss_kerberos_mech.gm_oid.len, gfp_mask); if 
(unlikely(ctx->mech_used.data == NULL)) { p = ERR_PTR(-ENOMEM); goto out_err; } ctx->mech_used.len = gss_kerberos_mech.gm_oid.len; switch (ctx->enctype) { case ENCTYPE_DES3_CBC_RAW: return context_derive_keys_des3(ctx, gfp_mask); case ENCTYPE_ARCFOUR_HMAC: return context_derive_keys_rc4(ctx); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: return context_derive_keys_new(ctx, gfp_mask); default: return -EINVAL; } out_err: return PTR_ERR(p); } static int gss_import_sec_context_kerberos(const void *p, size_t len, struct gss_ctx *ctx_id, gfp_t gfp_mask) { const void *end = (const void *)((const char *)p + len); struct krb5_ctx *ctx; int ret; ctx = kzalloc(sizeof(*ctx), gfp_mask); if (ctx == NULL) return -ENOMEM; if (len == 85) ret = gss_import_v1_context(p, end, ctx); else ret = gss_import_v2_context(p, end, ctx, gfp_mask); if (ret == 0) ctx_id->internal_ctx_id = ctx; else kfree(ctx); dprintk("RPC: %s: returning %d\n", __func__, ret); return ret; } static void gss_delete_sec_context_kerberos(void *internal_ctx) { struct krb5_ctx *kctx = internal_ctx; crypto_free_blkcipher(kctx->seq); crypto_free_blkcipher(kctx->enc); crypto_free_blkcipher(kctx->acceptor_enc); crypto_free_blkcipher(kctx->initiator_enc); crypto_free_blkcipher(kctx->acceptor_enc_aux); crypto_free_blkcipher(kctx->initiator_enc_aux); kfree(kctx->mech_used.data); kfree(kctx); } static const struct gss_api_ops gss_kerberos_ops = { .gss_import_sec_context = gss_import_sec_context_kerberos, .gss_get_mic = gss_get_mic_kerberos, .gss_verify_mic = gss_verify_mic_kerberos, .gss_wrap = gss_wrap_kerberos, .gss_unwrap = gss_unwrap_kerberos, .gss_delete_sec_context = gss_delete_sec_context_kerberos, }; static struct pf_desc gss_kerberos_pfs[] = { [0] = { .pseudoflavor = RPC_AUTH_GSS_KRB5, .service = RPC_GSS_SVC_NONE, .name = "krb5", }, [1] = { .pseudoflavor = RPC_AUTH_GSS_KRB5I, .service = RPC_GSS_SVC_INTEGRITY, .name = "krb5i", }, [2] = { .pseudoflavor = RPC_AUTH_GSS_KRB5P, .service = 
RPC_GSS_SVC_PRIVACY, .name = "krb5p", }, }; MODULE_ALIAS("rpc-auth-gss-krb5"); MODULE_ALIAS("rpc-auth-gss-krb5i"); MODULE_ALIAS("rpc-auth-gss-krb5p"); MODULE_ALIAS("rpc-auth-gss-390003"); MODULE_ALIAS("rpc-auth-gss-390004"); MODULE_ALIAS("rpc-auth-gss-390005"); static struct gss_api_mech gss_kerberos_mech = { .gm_name = "krb5", .gm_owner = THIS_MODULE, .gm_oid = {9, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x02"}, .gm_ops = &gss_kerberos_ops, .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), .gm_pfs = gss_kerberos_pfs, .gm_upcall_enctypes = KRB5_SUPPORTED_ENCTYPES, }; static int __init init_kerberos_module(void) { int status; status = gss_mech_register(&gss_kerberos_mech); if (status) printk("Failed to register kerberos gss mechanism!\n"); return status; } static void __exit cleanup_kerberos_module(void) { gss_mech_unregister(&gss_kerberos_mech); } MODULE_LICENSE("GPL"); module_init(init_kerberos_module); module_exit(cleanup_kerberos_module);
gpl-2.0
mostafa-z/GABRIEL_LP
drivers/net/phy/phy.c
344
23521
/* * drivers/net/phy/phy.c * * Framework for configuring and reading PHY devices * Based on code in sungem_phy.c and gianfar_phy.c * * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. * Copyright (c) 2006, 2007 Maciej W. Rozycki * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> /** * phy_print_status - Convenience function to print out the current phy status * @phydev: the phy_device struct */ void phy_print_status(struct phy_device *phydev) { pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev), phydev->link ? "Up" : "Down"); if (phydev->link) printk(KERN_CONT " - %d/%s", phydev->speed, DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); printk(KERN_CONT "\n"); } EXPORT_SYMBOL(phy_print_status); /** * phy_clear_interrupt - Ack the phy device's interrupt * @phydev: the phy_device struct * * If the @phydev driver has an ack_interrupt function, call it to * ack and clear the phy device's interrupt. * * Returns 0 on success on < 0 on error. 
*/ static int phy_clear_interrupt(struct phy_device *phydev) { int err = 0; if (phydev->drv->ack_interrupt) err = phydev->drv->ack_interrupt(phydev); return err; } /** * phy_config_interrupt - configure the PHY device for the requested interrupts * @phydev: the phy_device struct * @interrupts: interrupt flags to configure for this @phydev * * Returns 0 on success on < 0 on error. */ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts) { int err = 0; phydev->interrupts = interrupts; if (phydev->drv->config_intr) err = phydev->drv->config_intr(phydev); return err; } /** * phy_aneg_done - return auto-negotiation status * @phydev: target phy_device struct * * Description: Reads the status register and returns 0 either if * auto-negotiation is incomplete, or if there was an error. * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done. */ static inline int phy_aneg_done(struct phy_device *phydev) { int retval; retval = phy_read(phydev, MII_BMSR); return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE); } /* A structure for mapping a particular speed and duplex * combination to a particular SUPPORTED and ADVERTISED value */ struct phy_setting { int speed; int duplex; u32 setting; }; /* A mapping of all SUPPORTED settings to speed/duplex */ static const struct phy_setting settings[] = { { .speed = 10000, .duplex = DUPLEX_FULL, .setting = SUPPORTED_10000baseT_Full, }, { .speed = SPEED_1000, .duplex = DUPLEX_FULL, .setting = SUPPORTED_1000baseT_Full, }, { .speed = SPEED_1000, .duplex = DUPLEX_HALF, .setting = SUPPORTED_1000baseT_Half, }, { .speed = SPEED_100, .duplex = DUPLEX_FULL, .setting = SUPPORTED_100baseT_Full, }, { .speed = SPEED_100, .duplex = DUPLEX_HALF, .setting = SUPPORTED_100baseT_Half, }, { .speed = SPEED_10, .duplex = DUPLEX_FULL, .setting = SUPPORTED_10baseT_Full, }, { .speed = SPEED_10, .duplex = DUPLEX_HALF, .setting = SUPPORTED_10baseT_Half, }, }; #define MAX_NUM_SETTINGS ARRAY_SIZE(settings) /** * phy_find_setting - find a PHY 
settings array entry that matches speed & duplex * @speed: speed to match * @duplex: duplex to match * * Description: Searches the settings array for the setting which * matches the desired speed and duplex, and returns the index * of that setting. Returns the index of the last setting if * none of the others match. */ static inline int phy_find_setting(int speed, int duplex) { int idx = 0; while (idx < ARRAY_SIZE(settings) && (settings[idx].speed != speed || settings[idx].duplex != duplex)) idx++; return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1; } /** * phy_find_valid - find a PHY setting that matches the requested features mask * @idx: The first index in settings[] to search * @features: A mask of the valid settings * * Description: Returns the index of the first valid setting less * than or equal to the one pointed to by idx, as determined by * the mask in features. Returns the index of the last setting * if nothing else matches. */ static inline int phy_find_valid(int idx, u32 features) { while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features)) idx++; return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1; } /** * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex * @phydev: the target phy_device struct * * Description: Make sure the PHY is set to supported speeds and * duplexes. Drop down by one in this order: 1000/FULL, * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF. 
*/ static void phy_sanitize_settings(struct phy_device *phydev) { u32 features = phydev->supported; int idx; /* Sanitize settings based on PHY capabilities */ if ((features & SUPPORTED_Autoneg) == 0) phydev->autoneg = AUTONEG_DISABLE; idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex), features); phydev->speed = settings[idx].speed; phydev->duplex = settings[idx].duplex; } /** * phy_ethtool_sset - generic ethtool sset function, handles all the details * @phydev: target phy_device struct * @cmd: ethtool_cmd * * A few notes about parameter checking: * - We don't set port or transceiver, so we don't care what they * were set to. * - phy_start_aneg() will make sure forced settings are sane, and * choose the next best ones from the ones selected, so we don't * care if ethtool tries to give us bad values. */ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) { u32 speed = ethtool_cmd_speed(cmd); if (cmd->phy_address != phydev->addr) return -EINVAL; /* We make sure that we don't pass unsupported * values in to the PHY */ cmd->advertising &= phydev->supported; /* Verify the settings we care about. 
*/ if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) return -EINVAL; if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) return -EINVAL; if (cmd->autoneg == AUTONEG_DISABLE && ((speed != SPEED_1000 && speed != SPEED_100 && speed != SPEED_10) || (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL))) return -EINVAL; phydev->autoneg = cmd->autoneg; phydev->speed = speed; phydev->advertising = cmd->advertising; if (AUTONEG_ENABLE == cmd->autoneg) phydev->advertising |= ADVERTISED_Autoneg; else phydev->advertising &= ~ADVERTISED_Autoneg; phydev->duplex = cmd->duplex; /* Restart the PHY */ phy_start_aneg(phydev); return 0; } EXPORT_SYMBOL(phy_ethtool_sset); int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) { cmd->supported = phydev->supported; cmd->advertising = phydev->advertising; ethtool_cmd_speed_set(cmd, phydev->speed); cmd->duplex = phydev->duplex; cmd->port = PORT_MII; cmd->phy_address = phydev->addr; cmd->transceiver = XCVR_EXTERNAL; cmd->autoneg = phydev->autoneg; return 0; } EXPORT_SYMBOL(phy_ethtool_gset); /** * phy_mii_ioctl - generic PHY MII ioctl interface * @phydev: the phy_device struct * @ifr: &struct ifreq for socket ioctl's * @cmd: ioctl cmd to execute * * Note that this function is currently incompatible with the * PHYCONTROL layer. It changes registers without regard to * current state. Use at own risk. 
*/ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *mii_data = if_mii(ifr); u16 val = mii_data->val_in; switch (cmd) { case SIOCGMIIPHY: mii_data->phy_id = phydev->addr; /* fall through */ case SIOCGMIIREG: mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id, mii_data->reg_num); break; case SIOCSMIIREG: if (mii_data->phy_id == phydev->addr) { switch(mii_data->reg_num) { case MII_BMCR: if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0) phydev->autoneg = AUTONEG_DISABLE; else phydev->autoneg = AUTONEG_ENABLE; if ((!phydev->autoneg) && (val & BMCR_FULLDPLX)) phydev->duplex = DUPLEX_FULL; else phydev->duplex = DUPLEX_HALF; if ((!phydev->autoneg) && (val & BMCR_SPEED1000)) phydev->speed = SPEED_1000; else if ((!phydev->autoneg) && (val & BMCR_SPEED100)) phydev->speed = SPEED_100; break; case MII_ADVERTISE: phydev->advertising = val; break; default: /* do nothing */ break; } } mdiobus_write(phydev->bus, mii_data->phy_id, mii_data->reg_num, val); if (mii_data->reg_num == MII_BMCR && val & BMCR_RESET && phydev->drv->config_init) { phy_scan_fixups(phydev); phydev->drv->config_init(phydev); } break; case SIOCSHWTSTAMP: if (phydev->drv->hwtstamp) return phydev->drv->hwtstamp(phydev, ifr); /* fall through */ default: return -EOPNOTSUPP; } return 0; } EXPORT_SYMBOL(phy_mii_ioctl); /** * phy_start_aneg - start auto-negotiation for this PHY device * @phydev: the phy_device struct * * Description: Sanitizes the settings (if we're not autonegotiating * them), and then calls the driver's config_aneg function. * If the PHYCONTROL Layer is operating, we change the state to * reflect the beginning of Auto-negotiation or forcing. 
*/ int phy_start_aneg(struct phy_device *phydev) { int err; mutex_lock(&phydev->lock); if (AUTONEG_DISABLE == phydev->autoneg) phy_sanitize_settings(phydev); err = phydev->drv->config_aneg(phydev); if (err < 0) goto out_unlock; if (phydev->state != PHY_HALTED) { if (AUTONEG_ENABLE == phydev->autoneg) { phydev->state = PHY_AN; phydev->link_timeout = PHY_AN_TIMEOUT; } else { phydev->state = PHY_FORCING; phydev->link_timeout = PHY_FORCE_TIMEOUT; } } out_unlock: mutex_unlock(&phydev->lock); return err; } EXPORT_SYMBOL(phy_start_aneg); static void phy_change(struct work_struct *work); /** * phy_start_machine - start PHY state machine tracking * @phydev: the phy_device struct * @handler: callback function for state change notifications * * Description: The PHY infrastructure can run a state machine * which tracks whether the PHY is starting up, negotiating, * etc. This function starts the timer which tracks the state * of the PHY. If you want to be notified when the state changes, * pass in the callback @handler, otherwise, pass NULL. If you * want to maintain your own state machine, do not call this * function. */ void phy_start_machine(struct phy_device *phydev, void (*handler)(struct net_device *)) { phydev->adjust_state = handler; queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ); } /** * phy_stop_machine - stop the PHY state machine tracking * @phydev: target phy_device struct * * Description: Stops the state machine timer, sets the state to UP * (unless it wasn't up yet). This function must be called BEFORE * phy_detach. 
*/ void phy_stop_machine(struct phy_device *phydev) { cancel_delayed_work_sync(&phydev->state_queue); mutex_lock(&phydev->lock); if (phydev->state > PHY_UP) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); phydev->adjust_state = NULL; } /** * phy_force_reduction - reduce PHY speed/duplex settings by one step * @phydev: target phy_device struct * * Description: Reduces the speed/duplex settings by one notch, * in this order-- * 1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF. * The function bottoms out at 10/HALF. */ static void phy_force_reduction(struct phy_device *phydev) { int idx; idx = phy_find_setting(phydev->speed, phydev->duplex); idx++; idx = phy_find_valid(idx, phydev->supported); phydev->speed = settings[idx].speed; phydev->duplex = settings[idx].duplex; pr_info("Trying %d/%s\n", phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF"); } /** * phy_error - enter HALTED state for this PHY device * @phydev: target phy_device struct * * Moves the PHY to the HALTED state in response to a read * or write error, and tells the controller the link is down. * Must not be called from interrupt context, or while the * phydev->lock is held. */ static void phy_error(struct phy_device *phydev) { mutex_lock(&phydev->lock); phydev->state = PHY_HALTED; mutex_unlock(&phydev->lock); } /** * phy_interrupt - PHY interrupt handler * @irq: interrupt line * @phy_dat: phy_device pointer * * Description: When a PHY interrupt occurs, the handler disables * interrupts, and schedules a work task to clear the interrupt. */ static irqreturn_t phy_interrupt(int irq, void *phy_dat) { struct phy_device *phydev = phy_dat; if (PHY_HALTED == phydev->state) return IRQ_NONE; /* It can't be ours. */ /* The MDIO bus is not allowed to be written in interrupt * context, so we need to disable the irq here. A work * queue will write the PHY to disable and clear the * interrupt, and then reenable the irq line. 
*/ disable_irq_nosync(irq); atomic_inc(&phydev->irq_disable); queue_work(system_power_efficient_wq, &phydev->phy_queue); return IRQ_HANDLED; } /** * phy_enable_interrupts - Enable the interrupts from the PHY side * @phydev: target phy_device struct */ static int phy_enable_interrupts(struct phy_device *phydev) { int err; err = phy_clear_interrupt(phydev); if (err < 0) return err; err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); return err; } /** * phy_disable_interrupts - Disable the PHY interrupts from the PHY side * @phydev: target phy_device struct */ static int phy_disable_interrupts(struct phy_device *phydev) { int err; /* Disable PHY interrupts */ err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); if (err) goto phy_err; /* Clear the interrupt */ err = phy_clear_interrupt(phydev); if (err) goto phy_err; return 0; phy_err: phy_error(phydev); return err; } /** * phy_start_interrupts - request and enable interrupts for a PHY device * @phydev: target phy_device struct * * Description: Request the interrupt for the given PHY. * If this fails, then we set irq to PHY_POLL. * Otherwise, we enable the interrupts in the PHY. * This should only be called with a valid IRQ number. * Returns 0 on success or < 0 on error. 
*/ int phy_start_interrupts(struct phy_device *phydev) { int err = 0; INIT_WORK(&phydev->phy_queue, phy_change); atomic_set(&phydev->irq_disable, 0); if (request_irq(phydev->irq, phy_interrupt, IRQF_SHARED, "phy_interrupt", phydev) < 0) { printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n", phydev->bus->name, phydev->irq); phydev->irq = PHY_POLL; return 0; } err = phy_enable_interrupts(phydev); return err; } EXPORT_SYMBOL(phy_start_interrupts); /** * phy_stop_interrupts - disable interrupts from a PHY device * @phydev: target phy_device struct */ int phy_stop_interrupts(struct phy_device *phydev) { int err; err = phy_disable_interrupts(phydev); if (err) phy_error(phydev); free_irq(phydev->irq, phydev); /* * Cannot call flush_scheduled_work() here as desired because * of rtnl_lock(), but we do not really care about what would * be done, except from enable_irq(), so cancel any work * possibly pending and take care of the matter below. */ cancel_work_sync(&phydev->phy_queue); /* * If work indeed has been cancelled, disable_irq() will have * been left unbalanced from phy_interrupt() and enable_irq() * has to be called so that other devices on the line work. 
*/ while (atomic_dec_return(&phydev->irq_disable) >= 0) enable_irq(phydev->irq); return err; } EXPORT_SYMBOL(phy_stop_interrupts); /** * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes * @work: work_struct that describes the work to be done */ static void phy_change(struct work_struct *work) { int err; struct phy_device *phydev = container_of(work, struct phy_device, phy_queue); if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) goto ignore; err = phy_disable_interrupts(phydev); if (err) goto phy_err; mutex_lock(&phydev->lock); if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) phydev->state = PHY_CHANGELINK; mutex_unlock(&phydev->lock); atomic_dec(&phydev->irq_disable); enable_irq(phydev->irq); /* Reenable interrupts */ if (PHY_HALTED != phydev->state) err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); if (err) goto irq_enable_err; /* reschedule state queue work to run as soon as possible */ cancel_delayed_work_sync(&phydev->state_queue); queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0); return; ignore: atomic_dec(&phydev->irq_disable); enable_irq(phydev->irq); return; irq_enable_err: disable_irq(phydev->irq); atomic_inc(&phydev->irq_disable); phy_err: phy_error(phydev); } /** * phy_stop - Bring down the PHY link, and stop checking the status * @phydev: target phy_device struct */ void phy_stop(struct phy_device *phydev) { mutex_lock(&phydev->lock); if (PHY_HALTED == phydev->state) goto out_unlock; if (phydev->irq != PHY_POLL) { /* Disable PHY Interrupts */ phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); /* Clear any pending interrupts */ phy_clear_interrupt(phydev); } phydev->state = PHY_HALTED; out_unlock: mutex_unlock(&phydev->lock); /* * Cannot call flush_scheduled_work() here as desired because * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change() * will not reenable interrupts. 
*/ } /** * phy_start - start or restart a PHY device * @phydev: target phy_device struct * * Description: Indicates the attached device's readiness to * handle PHY-related work. Used during startup to start the * PHY, and after a call to phy_stop() to resume operation. * Also used to indicate the MDIO bus has cleared an error * condition. */ void phy_start(struct phy_device *phydev) { mutex_lock(&phydev->lock); switch (phydev->state) { case PHY_STARTING: phydev->state = PHY_PENDING; break; case PHY_READY: phydev->state = PHY_UP; break; case PHY_HALTED: phydev->state = PHY_RESUMING; default: break; } mutex_unlock(&phydev->lock); } EXPORT_SYMBOL(phy_stop); EXPORT_SYMBOL(phy_start); /** * phy_state_machine - Handle the state machine * @work: work_struct that describes the work to be done */ void phy_state_machine(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct phy_device *phydev = container_of(dwork, struct phy_device, state_queue); int needs_aneg = 0; int err = 0; mutex_lock(&phydev->lock); if (phydev->adjust_state) phydev->adjust_state(phydev->attached_dev); switch(phydev->state) { case PHY_DOWN: case PHY_STARTING: case PHY_READY: case PHY_PENDING: break; case PHY_UP: needs_aneg = 1; phydev->link_timeout = PHY_AN_TIMEOUT; break; case PHY_AN: err = phy_read_status(phydev); if (err < 0) break; /* If the link is down, give up on * negotiation for now */ if (!phydev->link) { phydev->state = PHY_NOLINK; netif_carrier_off(phydev->attached_dev); phydev->adjust_link(phydev->attached_dev); break; } /* Check if negotiation is done. 
Break * if there's an error */ err = phy_aneg_done(phydev); if (err < 0) break; /* If AN is done, we're running */ if (err > 0) { phydev->state = PHY_RUNNING; netif_carrier_on(phydev->attached_dev); phydev->adjust_link(phydev->attached_dev); } else if (0 == phydev->link_timeout--) { int idx; needs_aneg = 1; /* If we have the magic_aneg bit, * we try again */ if (phydev->drv->flags & PHY_HAS_MAGICANEG) break; /* The timer expired, and we still * don't have a setting, so we try * forcing it until we find one that * works, starting from the fastest speed, * and working our way down */ idx = phy_find_valid(0, phydev->supported); phydev->speed = settings[idx].speed; phydev->duplex = settings[idx].duplex; phydev->autoneg = AUTONEG_DISABLE; pr_info("Trying %d/%s\n", phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF"); } break; case PHY_NOLINK: err = phy_read_status(phydev); if (err) break; if (phydev->link) { phydev->state = PHY_RUNNING; netif_carrier_on(phydev->attached_dev); phydev->adjust_link(phydev->attached_dev); } break; case PHY_FORCING: err = genphy_update_link(phydev); if (err) break; if (phydev->link) { phydev->state = PHY_RUNNING; netif_carrier_on(phydev->attached_dev); } else { if (0 == phydev->link_timeout--) { phy_force_reduction(phydev); needs_aneg = 1; } } phydev->adjust_link(phydev->attached_dev); break; case PHY_RUNNING: /* Only register a CHANGE if we are * polling */ if (PHY_POLL == phydev->irq) phydev->state = PHY_CHANGELINK; break; case PHY_CHANGELINK: err = phy_read_status(phydev); if (err) break; if (phydev->link) { phydev->state = PHY_RUNNING; netif_carrier_on(phydev->attached_dev); } else { phydev->state = PHY_NOLINK; netif_carrier_off(phydev->attached_dev); } phydev->adjust_link(phydev->attached_dev); if (PHY_POLL != phydev->irq) err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); break; case PHY_HALTED: if (phydev->link) { phydev->link = 0; netif_carrier_off(phydev->attached_dev); phydev->adjust_link(phydev->attached_dev); } 
break; case PHY_RESUMING: err = phy_clear_interrupt(phydev); if (err) break; err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); if (err) break; if (AUTONEG_ENABLE == phydev->autoneg) { err = phy_aneg_done(phydev); if (err < 0) break; /* err > 0 if AN is done. * Otherwise, it's 0, and we're * still waiting for AN */ if (err > 0) { err = phy_read_status(phydev); if (err) break; if (phydev->link) { phydev->state = PHY_RUNNING; netif_carrier_on(phydev->attached_dev); } else phydev->state = PHY_NOLINK; phydev->adjust_link(phydev->attached_dev); } else { phydev->state = PHY_AN; phydev->link_timeout = PHY_AN_TIMEOUT; } } else { err = phy_read_status(phydev); if (err) break; if (phydev->link) { phydev->state = PHY_RUNNING; netif_carrier_on(phydev->attached_dev); } else phydev->state = PHY_NOLINK; phydev->adjust_link(phydev->attached_dev); } break; } mutex_unlock(&phydev->lock); if (needs_aneg) err = phy_start_aneg(phydev); if (err < 0) phy_error(phydev); queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, PHY_STATE_TIME * HZ); }
gpl-2.0
morely/linux-xlnx
drivers/acpi/acpica/pstree.c
600
8027
/****************************************************************************** * * Module Name: pstree - Parser op tree manipulation/traversal/search * *****************************************************************************/ /* * Copyright (C) 2000 - 2014, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acparser.h" #include "amlcode.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("pstree") /* Local prototypes */ #ifdef ACPI_OBSOLETE_FUNCTIONS union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op); #endif /******************************************************************************* * * FUNCTION: acpi_ps_get_arg * * PARAMETERS: op - Get an argument for this op * argn - Nth argument to get * * RETURN: The argument (as an Op object). NULL if argument does not exist * * DESCRIPTION: Get the specified op's argument. 
* ******************************************************************************/ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn) { union acpi_parse_object *arg = NULL; const struct acpi_opcode_info *op_info; ACPI_FUNCTION_ENTRY(); /* if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP) { return (Op->Common.Value.Arg); } */ /* Get the info structure for this opcode */ op_info = acpi_ps_get_opcode_info(op->common.aml_opcode); if (op_info->class == AML_CLASS_UNKNOWN) { /* Invalid opcode or ASCII character */ return (NULL); } /* Check if this opcode requires argument sub-objects */ if (!(op_info->flags & AML_HAS_ARGS)) { /* Has no linked argument objects */ return (NULL); } /* Get the requested argument object */ arg = op->common.value.arg; while (arg && argn) { argn--; arg = arg->common.next; } return (arg); } /******************************************************************************* * * FUNCTION: acpi_ps_append_arg * * PARAMETERS: op - Append an argument to this Op. * arg - Argument Op to append * * RETURN: None. 
* * DESCRIPTION: Append an argument to an op's argument list (a NULL arg is OK) * ******************************************************************************/ void acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg) { union acpi_parse_object *prev_arg; const struct acpi_opcode_info *op_info; ACPI_FUNCTION_ENTRY(); if (!op) { return; } /* Get the info structure for this opcode */ op_info = acpi_ps_get_opcode_info(op->common.aml_opcode); if (op_info->class == AML_CLASS_UNKNOWN) { /* Invalid opcode */ ACPI_ERROR((AE_INFO, "Invalid AML Opcode: 0x%2.2X", op->common.aml_opcode)); return; } /* Check if this opcode requires argument sub-objects */ if (!(op_info->flags & AML_HAS_ARGS)) { /* Has no linked argument objects */ return; } /* Append the argument to the linked argument list */ if (op->common.value.arg) { /* Append to existing argument list */ prev_arg = op->common.value.arg; while (prev_arg->common.next) { prev_arg = prev_arg->common.next; } prev_arg->common.next = arg; } else { /* No argument list, this will be the first argument */ op->common.value.arg = arg; } /* Set the parent in this arg and any args linked after it */ while (arg) { arg->common.parent = op; arg = arg->common.next; op->common.arg_list_length++; } } #ifdef ACPI_FUTURE_USAGE /******************************************************************************* * * FUNCTION: acpi_ps_get_depth_next * * PARAMETERS: origin - Root of subtree to search * op - Last (previous) Op that was found * * RETURN: Next Op found in the search. 
* * DESCRIPTION: Get next op in tree (walking the tree in depth-first order) * Return NULL when reaching "origin" or when walking up from root * ******************************************************************************/ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, union acpi_parse_object *op) { union acpi_parse_object *next = NULL; union acpi_parse_object *parent; union acpi_parse_object *arg; ACPI_FUNCTION_ENTRY(); if (!op) { return (NULL); } /* Look for an argument or child */ next = acpi_ps_get_arg(op, 0); if (next) { return (next); } /* Look for a sibling */ next = op->common.next; if (next) { return (next); } /* Look for a sibling of parent */ parent = op->common.parent; while (parent) { arg = acpi_ps_get_arg(parent, 0); while (arg && (arg != origin) && (arg != op)) { arg = arg->common.next; } if (arg == origin) { /* Reached parent of origin, end search */ return (NULL); } if (parent->common.next) { /* Found sibling of parent */ return (parent->common.next); } op = parent; parent = parent->common.parent; } return (next); } #ifdef ACPI_OBSOLETE_FUNCTIONS /******************************************************************************* * * FUNCTION: acpi_ps_get_child * * PARAMETERS: op - Get the child of this Op * * RETURN: Child Op, Null if none is found. 
* * DESCRIPTION: Get op's children or NULL if none * ******************************************************************************/ union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op) { union acpi_parse_object *child = NULL; ACPI_FUNCTION_ENTRY(); switch (op->common.aml_opcode) { case AML_SCOPE_OP: case AML_ELSE_OP: case AML_DEVICE_OP: case AML_THERMAL_ZONE_OP: case AML_INT_METHODCALL_OP: child = acpi_ps_get_arg(op, 0); break; case AML_BUFFER_OP: case AML_PACKAGE_OP: case AML_METHOD_OP: case AML_IF_OP: case AML_WHILE_OP: case AML_FIELD_OP: child = acpi_ps_get_arg(op, 1); break; case AML_POWER_RES_OP: case AML_INDEX_FIELD_OP: child = acpi_ps_get_arg(op, 2); break; case AML_PROCESSOR_OP: case AML_BANK_FIELD_OP: child = acpi_ps_get_arg(op, 3); break; default: /* All others have no children */ break; } return (child); } #endif #endif /* ACPI_FUTURE_USAGE */
gpl-2.0
TheWhisp/android_kernel_samsung_kyle
drivers/gpu/drm/drm_fops.c
1112
17080
/** * \file drm_fops.c * File operations for DRM * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Daryll Strauss <daryll@valinux.com> * \author Gareth Hughes <gareth@valinux.com> */ /* * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "drmP.h" #include <linux/poll.h> #include <linux/slab.h> /* from BKL pushdown: note that nothing else serializes idr_find() */ DEFINE_MUTEX(drm_global_mutex); EXPORT_SYMBOL(drm_global_mutex); static int drm_open_helper(struct inode *inode, struct file *filp, struct drm_device * dev); static int drm_setup(struct drm_device * dev) { int i; int ret; if (dev->driver->firstopen) { ret = dev->driver->firstopen(dev); if (ret != 0) return ret; } atomic_set(&dev->ioctl_count, 0); atomic_set(&dev->vma_count, 0); if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET)) { dev->buf_use = 0; atomic_set(&dev->buf_alloc, 0); i = drm_dma_setup(dev); if (i < 0) return i; } for (i = 0; i < ARRAY_SIZE(dev->counts); i++) atomic_set(&dev->counts[i], 0); dev->sigdata.lock = NULL; dev->queue_count = 0; dev->queue_reserved = 0; dev->queue_slots = 0; dev->queuelist = NULL; dev->context_flag = 0; dev->interrupt_flag = 0; dev->dma_flag = 0; dev->last_context = 0; dev->last_switch = 0; dev->last_checked = 0; init_waitqueue_head(&dev->context_wait); dev->if_version = 0; dev->ctx_start = 0; dev->lck_start = 0; dev->buf_async = NULL; init_waitqueue_head(&dev->buf_readers); init_waitqueue_head(&dev->buf_writers); DRM_DEBUG("\n"); /* * The kernel's context could be created here, but is now created * in drm_dma_enqueue. This is more resource-efficient for * hardware that does not do DMA, but may mean that * drm_select_queue fails between the time the interrupt is * initialized and the time the queues are initialized. */ return 0; } /** * Open file. * * \param inode device inode * \param filp file pointer. * \return zero on success or a negative number on failure. * * Searches the DRM device with the same minor number, calls open_helper(), and * increments the device open count. If the open count was previous at zero, * i.e., it's the first that the device is open, then calls setup(). 
*/ int drm_open(struct inode *inode, struct file *filp) { struct drm_device *dev = NULL; int minor_id = iminor(inode); struct drm_minor *minor; int retcode = 0; minor = idr_find(&drm_minors_idr, minor_id); if (!minor) return -ENODEV; if (!(dev = minor->dev)) return -ENODEV; retcode = drm_open_helper(inode, filp, dev); if (!retcode) { atomic_inc(&dev->counts[_DRM_STAT_OPENS]); if (!dev->open_count++) retcode = drm_setup(dev); } if (!retcode) { mutex_lock(&dev->struct_mutex); if (minor->type == DRM_MINOR_LEGACY) { if (dev->dev_mapping == NULL) dev->dev_mapping = inode->i_mapping; else if (dev->dev_mapping != inode->i_mapping) retcode = -ENODEV; } mutex_unlock(&dev->struct_mutex); } return retcode; } EXPORT_SYMBOL(drm_open); /** * File \c open operation. * * \param inode device inode. * \param filp file pointer. * * Puts the dev->fops corresponding to the device minor number into * \p filp, call the \c open method, and restore the file operations. */ int drm_stub_open(struct inode *inode, struct file *filp) { struct drm_device *dev = NULL; struct drm_minor *minor; int minor_id = iminor(inode); int err = -ENODEV; const struct file_operations *old_fops; DRM_DEBUG("\n"); mutex_lock(&drm_global_mutex); minor = idr_find(&drm_minors_idr, minor_id); if (!minor) goto out; if (!(dev = minor->dev)) goto out; old_fops = filp->f_op; filp->f_op = fops_get(&dev->driver->fops); if (filp->f_op == NULL) { filp->f_op = old_fops; goto out; } if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) { fops_put(filp->f_op); filp->f_op = fops_get(old_fops); } fops_put(old_fops); out: mutex_unlock(&drm_global_mutex); return err; } /** * Check whether DRI will run on this CPU. * * \return non-zero if the DRI will run on this CPU, or zero otherwise. */ static int drm_cpu_valid(void) { #if defined(__i386__) if (boot_cpu_data.x86 == 3) return 0; /* No cmpxchg on a 386 */ #endif #if defined(__sparc__) && !defined(__sparc_v9__) return 0; /* No cmpxchg before v9 sparc. 
*/ #endif return 1; } /** * Called whenever a process opens /dev/drm. * * \param inode device inode. * \param filp file pointer. * \param dev device. * \return zero on success or a negative number on failure. * * Creates and initializes a drm_file structure for the file private data in \p * filp and add it into the double linked list in \p dev. */ static int drm_open_helper(struct inode *inode, struct file *filp, struct drm_device * dev) { int minor_id = iminor(inode); struct drm_file *priv; int ret; if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */ if (!drm_cpu_valid()) return -EINVAL; if (dev->switch_power_state != DRM_SWITCH_POWER_ON) return -EINVAL; DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; filp->private_data = priv; priv->filp = filp; priv->uid = current_euid(); priv->pid = task_pid_nr(current); priv->minor = idr_find(&drm_minors_idr, minor_id); priv->ioctl_count = 0; /* for compatibility root is always authenticated */ priv->authenticated = capable(CAP_SYS_ADMIN); priv->lock_count = 0; INIT_LIST_HEAD(&priv->lhead); INIT_LIST_HEAD(&priv->fbs); INIT_LIST_HEAD(&priv->event_list); init_waitqueue_head(&priv->event_wait); priv->event_space = 4096; /* set aside 4k for event buffer */ if (dev->driver->driver_features & DRIVER_GEM) drm_gem_open(dev, priv); if (dev->driver->open) { ret = dev->driver->open(dev, priv); if (ret < 0) goto out_free; } /* if there is no current master make this fd it */ mutex_lock(&dev->struct_mutex); if (!priv->minor->master) { /* create a new master */ priv->minor->master = drm_master_create(priv->minor); if (!priv->minor->master) { mutex_unlock(&dev->struct_mutex); ret = -ENOMEM; goto out_free; } priv->is_master = 1; /* take another reference for the copy in the local file priv */ priv->master = drm_master_get(priv->minor->master); priv->authenticated = 1; mutex_unlock(&dev->struct_mutex); if (dev->driver->master_create) { 
ret = dev->driver->master_create(dev, priv->master); if (ret) { mutex_lock(&dev->struct_mutex); /* drop both references if this fails */ drm_master_put(&priv->minor->master); drm_master_put(&priv->master); mutex_unlock(&dev->struct_mutex); goto out_free; } } mutex_lock(&dev->struct_mutex); if (dev->driver->master_set) { ret = dev->driver->master_set(dev, priv, true); if (ret) { /* drop both references if this fails */ drm_master_put(&priv->minor->master); drm_master_put(&priv->master); mutex_unlock(&dev->struct_mutex); goto out_free; } } mutex_unlock(&dev->struct_mutex); } else { /* get a reference to the master */ priv->master = drm_master_get(priv->minor->master); mutex_unlock(&dev->struct_mutex); } mutex_lock(&dev->struct_mutex); list_add(&priv->lhead, &dev->filelist); mutex_unlock(&dev->struct_mutex); #ifdef __alpha__ /* * Default the hose */ if (!dev->hose) { struct pci_dev *pci_dev; pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); if (pci_dev) { dev->hose = pci_dev->sysdata; pci_dev_put(pci_dev); } if (!dev->hose) { struct pci_bus *b = pci_bus_b(pci_root_buses.next); if (b) dev->hose = b->sysdata; } } #endif return 0; out_free: kfree(priv); filp->private_data = NULL; return ret; } /** No-op. */ int drm_fasync(int fd, struct file *filp, int on) { struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->minor->dev; DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, (long)old_encode_dev(priv->minor->device)); return fasync_helper(fd, filp, on, &dev->buf_async); } EXPORT_SYMBOL(drm_fasync); /* * Reclaim locked buffers; note that this may be a bad idea if the current * context doesn't have the hw lock... 
*/ static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f) { struct drm_file *file_priv = f->private_data; if (drm_i_have_hw_lock(dev, file_priv)) { dev->driver->reclaim_buffers_locked(dev, file_priv); } else { unsigned long _end = jiffies + 3 * DRM_HZ; int locked = 0; drm_idlelock_take(&file_priv->master->lock); /* * Wait for a while. */ do { spin_lock_bh(&file_priv->master->lock.spinlock); locked = file_priv->master->lock.idle_has_lock; spin_unlock_bh(&file_priv->master->lock.spinlock); if (locked) break; schedule(); } while (!time_after_eq(jiffies, _end)); if (!locked) { DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n" "\tdriver to use reclaim_buffers_idlelocked() instead.\n" "\tI will go on reclaiming the buffers anyway.\n"); } dev->driver->reclaim_buffers_locked(dev, file_priv); drm_idlelock_release(&file_priv->master->lock); } } static void drm_master_release(struct drm_device *dev, struct file *filp) { struct drm_file *file_priv = filp->private_data; if (dev->driver->reclaim_buffers_locked && file_priv->master->lock.hw_lock) drm_reclaim_locked_buffers(dev, filp); if (dev->driver->reclaim_buffers_idlelocked && file_priv->master->lock.hw_lock) { drm_idlelock_take(&file_priv->master->lock); dev->driver->reclaim_buffers_idlelocked(dev, file_priv); drm_idlelock_release(&file_priv->master->lock); } if (drm_i_have_hw_lock(dev, file_priv)) { DRM_DEBUG("File %p released, freeing lock for context %d\n", filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); drm_lock_free(&file_priv->master->lock, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); } if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !dev->driver->reclaim_buffers_locked) { dev->driver->reclaim_buffers(dev, file_priv); } } static void drm_events_release(struct drm_file *file_priv) { struct drm_device *dev = file_priv->minor->dev; struct drm_pending_event *e, *et; struct drm_pending_vblank_event *v, *vt; unsigned long flags; 
spin_lock_irqsave(&dev->event_lock, flags); /* Remove pending flips */ list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link) if (v->base.file_priv == file_priv) { list_del(&v->base.link); drm_vblank_put(dev, v->pipe); v->base.destroy(&v->base); } /* Remove unconsumed events */ list_for_each_entry_safe(e, et, &file_priv->event_list, link) e->destroy(e); spin_unlock_irqrestore(&dev->event_lock, flags); } /** * Release file. * * \param inode device inode * \param file_priv DRM file private. * \return zero on success or a negative number on failure. * * If the hardware lock is held then free it, and take it again for the kernel * context since it's necessary to reclaim buffers. Unlink the file private * data from its list and free it. Decreases the open count and if it reaches * zero calls drm_lastclose(). */ int drm_release(struct inode *inode, struct file *filp) { struct drm_file *file_priv = filp->private_data; struct drm_device *dev = file_priv->minor->dev; int retcode = 0; mutex_lock(&drm_global_mutex); DRM_DEBUG("open_count = %d\n", dev->open_count); if (dev->driver->preclose) dev->driver->preclose(dev, file_priv); /* ======================================================== * Begin inline drm_release */ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", task_pid_nr(current), (long)old_encode_dev(file_priv->minor->device), dev->open_count); /* if the master has gone away we can't do anything with the lock */ if (file_priv->minor->master) drm_master_release(dev, filp); drm_events_release(file_priv); if (dev->driver->driver_features & DRIVER_GEM) drm_gem_release(dev, file_priv); if (dev->driver->driver_features & DRIVER_MODESET) drm_fb_release(file_priv); mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { if (pos->tag == file_priv && pos->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) dev->driver->context_dtor(dev, pos->handle); 
drm_ctxbitmap_free(dev, pos->handle); list_del(&pos->head); kfree(pos); --dev->ctx_count; } } } mutex_unlock(&dev->ctxlist_mutex); mutex_lock(&dev->struct_mutex); if (file_priv->is_master) { struct drm_master *master = file_priv->master; struct drm_file *temp; list_for_each_entry(temp, &dev->filelist, lhead) { if ((temp->master == file_priv->master) && (temp != file_priv)) temp->authenticated = 0; } /** * Since the master is disappearing, so is the * possibility to lock. */ if (master->lock.hw_lock) { if (dev->sigdata.lock == master->lock.hw_lock) dev->sigdata.lock = NULL; master->lock.hw_lock = NULL; master->lock.file_priv = NULL; wake_up_interruptible_all(&master->lock.lock_queue); } if (file_priv->minor->master == file_priv->master) { /* drop the reference held my the minor */ if (dev->driver->master_drop) dev->driver->master_drop(dev, file_priv, true); drm_master_put(&file_priv->minor->master); } } /* drop the reference held my the file priv */ drm_master_put(&file_priv->master); file_priv->is_master = 0; list_del(&file_priv->lhead); mutex_unlock(&dev->struct_mutex); if (dev->driver->postclose) dev->driver->postclose(dev, file_priv); kfree(file_priv); /* ======================================================== * End inline drm_release */ atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); if (!--dev->open_count) { if (atomic_read(&dev->ioctl_count)) { DRM_ERROR("Device busy: %d\n", atomic_read(&dev->ioctl_count)); retcode = -EBUSY; } else retcode = drm_lastclose(dev); } mutex_unlock(&drm_global_mutex); return retcode; } EXPORT_SYMBOL(drm_release); static bool drm_dequeue_event(struct drm_file *file_priv, size_t total, size_t max, struct drm_pending_event **out) { struct drm_device *dev = file_priv->minor->dev; struct drm_pending_event *e; unsigned long flags; bool ret = false; spin_lock_irqsave(&dev->event_lock, flags); *out = NULL; if (list_empty(&file_priv->event_list)) goto out; e = list_first_entry(&file_priv->event_list, struct drm_pending_event, link); if 
(e->event->length + total > max) goto out; file_priv->event_space += e->event->length; list_del(&e->link); *out = e; ret = true; out: spin_unlock_irqrestore(&dev->event_lock, flags); return ret; } ssize_t drm_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset) { struct drm_file *file_priv = filp->private_data; struct drm_pending_event *e; size_t total; ssize_t ret; ret = wait_event_interruptible(file_priv->event_wait, !list_empty(&file_priv->event_list)); if (ret < 0) return ret; total = 0; while (drm_dequeue_event(file_priv, total, count, &e)) { if (copy_to_user(buffer + total, e->event, e->event->length)) { total = -EFAULT; break; } total += e->event->length; e->destroy(e); } return total; } EXPORT_SYMBOL(drm_read); unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) { struct drm_file *file_priv = filp->private_data; unsigned int mask = 0; poll_wait(filp, &file_priv->event_wait, wait); if (!list_empty(&file_priv->event_list)) mask |= POLLIN | POLLRDNORM; return mask; } EXPORT_SYMBOL(drm_poll);
gpl-2.0
pinpong/enigma-i9100
arch/h8300/kernel/timer/timer8.c
1368
1928
/* * linux/arch/h8300/kernel/cpu/timer/timer8.c * * Yoshinori Sato <ysato@users.sourcefoge.jp> * * 8bit Timer Handler * */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/profile.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/timer.h> #if defined(CONFIG_CPU_H8300H) #include <asm/regs306x.h> #endif #if defined(CONFIG_CPU_H8S) #include <asm/regs267x.h> #endif /* 8bit timer x2 */ #define CMFA 6 #if defined(CONFIG_H8300_TIMER8_CH0) #define _8BASE _8TCR0 #ifdef CONFIG_CPU_H8300H #define _8IRQ 36 #endif #ifdef CONFIG_CPU_H8S #define _8IRQ 72 #endif #elif defined(CONFIG_H8300_TIMER8_CH2) #ifdef CONFIG_CPU_H8300H #define _8BASE _8TCR2 #define _8IRQ 40 #endif #endif #ifndef _8BASE #error Unknown timer channel. #endif #define _8TCR 0 #define _8TCSR 2 #define TCORA 4 #define TCORB 6 #define _8TCNT 8 #define CMIEA 0x40 #define CCLR_CMA 0x08 #define CKS2 0x04 /* * timer_interrupt() needs to keep up the real-time clock, * as well as call the "do_timer()" routine every clocktick */ static irqreturn_t timer_interrupt(int irq, void *dev_id) { h8300_timer_tick(); ctrl_bclr(CMFA, _8BASE + _8TCSR); return IRQ_HANDLED; } static struct irqaction timer8_irq = { .name = "timer-8", .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER, }; static const int __initdata divide_rate[] = {8, 64, 8192}; void __init h8300_timer_setup(void) { unsigned int div; unsigned int cnt; calc_param(cnt, div, divide_rate, 0x10000); div++; setup_irq(_8IRQ, &timer8_irq); #if defined(CONFIG_CPU_H8S) /* Timer module enable */ ctrl_bclr(0, MSTPCRL) #endif /* initalize timer */ ctrl_outw(cnt, _8BASE + TCORA); ctrl_outw(0x0000, _8BASE + _8TCSR); ctrl_outw((CMIEA|CCLR_CMA|CKS2) << 8 | div, _8BASE + _8TCR); }
gpl-2.0
rodrigues-daniel/linux
arch/powerpc/platforms/85xx/ge_imp3a.c
1624
5193
/*
 * GE IMP3A Board Setup
 *
 * Author Martyn Welch <martyn.welch@ge.com>
 *
 * Copyright 2010 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Based on: mpc85xx_ds.c (MPC85xx DS Board Setup)
 * Copyright 2007 Freescale Semiconductor Inc.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>

#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
#include <asm/nvram.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "smp.h"

#include "mpc85xx.h"

#include <sysdev/ge/ge_pic.h>

/* Mapped base of the board FPGA register block (set in setup_arch). */
void __iomem *imp3a_regs;

/*
 * ge_imp3a_pic_init - set up the MPIC and cascade the board FPGA PIC.
 *
 * CAMP (dual-kernel) configurations are detected from the flat
 * device tree root and get an MPIC allocated without a reset.
 */
void __init ge_imp3a_pic_init(void)
{
	struct mpic *mpic;
	struct device_node *np;
	struct device_node *cascade_node = NULL;
	unsigned long root = of_get_flat_dt_root();

	if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
		mpic = mpic_alloc(NULL, 0,
			MPIC_NO_RESET |
			MPIC_BIG_ENDIAN |
			MPIC_SINGLE_DEST_CPU,
			0, 256, " OpenPIC ");
	} else {
		mpic = mpic_alloc(NULL, 0,
			MPIC_BIG_ENDIAN |
			MPIC_SINGLE_DEST_CPU,
			0, 256, " OpenPIC ");
	}

	BUG_ON(mpic == NULL);
	mpic_init(mpic);
	/*
	 * There is a simple interrupt handler in the main FPGA, this needs
	 * to be cascaded into the MPIC
	 */
	for_each_node_by_type(np, "interrupt-controller")
		if (of_device_is_compatible(np, "gef,fpga-pic-1.00")) {
			cascade_node = np;
			break;
		}

	if (cascade_node == NULL) {
		printk(KERN_WARNING "IMP3A: No FPGA PIC\n");
		return;
	}

	gef_pic_init(cascade_node);
	of_node_put(cascade_node);
}

/*
 * Pick the primary PCI bridge: the Freescale PCI/PCIe node whose
 * register block sits at offset 0x9000 within its CCSR page.
 */
static void ge_imp3a_pci_assign_primary(void)
{
#ifdef CONFIG_PCI
	struct device_node *np;
	struct resource rsrc;

	for_each_node_by_type(np, "pci") {
		if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
		    of_device_is_compatible(np, "fsl,mpc8548-pcie") ||
		    of_device_is_compatible(np, "fsl,p2020-pcie")) {
			of_address_to_resource(np, 0, &rsrc);
			if ((rsrc.start & 0xfffff) == 0x9000)
				fsl_pci_primary = np;
		}
	}
#endif
}

/*
 * Setup the architecture
 */
static void __init ge_imp3a_setup_arch(void)
{
	struct device_node *regs;

	if (ppc_md.progress)
		ppc_md.progress("ge_imp3a_setup_arch()", 0);

	mpc85xx_smp_init();

	ge_imp3a_pci_assign_primary();

	swiotlb_detect_4g();

	/* Remap basic board registers */
	regs = of_find_compatible_node(NULL, NULL, "ge,imp3a-fpga-regs");
	if (regs) {
		imp3a_regs = of_iomap(regs, 0);
		if (imp3a_regs == NULL)
			printk(KERN_WARNING "Unable to map board registers\n");
		of_node_put(regs);
	}

#if defined(CONFIG_MMIO_NVRAM)
	mmio_nvram_init();
#endif

	printk(KERN_INFO "GE Intelligent Platforms IMP3A 3U cPCI SBC\n");
}

/* Return the PCB revision (high byte of FPGA register 0x0) */
static unsigned int ge_imp3a_get_pcb_rev(void)
{
	unsigned int reg;

	reg = ioread16(imp3a_regs);
	return (reg >> 8) & 0xff;
}

/* Return the board (software) revision (low byte of FPGA register 0x2) */
static unsigned int ge_imp3a_get_board_rev(void)
{
	unsigned int reg;

	reg = ioread16(imp3a_regs + 0x2);
	return reg & 0xff;
}

/* Return the FPGA revision (high byte of FPGA register 0x2) */
static unsigned int ge_imp3a_get_fpga_rev(void)
{
	unsigned int reg;

	reg = ioread16(imp3a_regs + 0x2);
	return (reg >> 8) & 0xff;
}

/* Return compactPCI Geographical Address (bits 8..11 of register 0x6) */
static unsigned int ge_imp3a_get_cpci_geo_addr(void)
{
	unsigned int reg;

	reg = ioread16(imp3a_regs + 0x6);
	return (reg & 0x0f00) >> 8;
}

/* Return compactPCI System Controller Status (bit 12, non-zero if syscon) */
static unsigned int ge_imp3a_get_cpci_is_syscon(void)
{
	unsigned int reg;

	reg = ioread16(imp3a_regs + 0x6);
	return reg & (1 << 12);
}

/* /proc/cpuinfo hook: report board/FPGA revisions and cPCI status. */
static void ge_imp3a_show_cpuinfo(struct seq_file *m)
{
	seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");

	seq_printf(m, "Revision\t: %u%c\n", ge_imp3a_get_pcb_rev(),
		('A' + ge_imp3a_get_board_rev() - 1));

	seq_printf(m, "FPGA Revision\t: %u\n", ge_imp3a_get_fpga_rev());

	seq_printf(m, "cPCI geo. addr\t: %u\n", ge_imp3a_get_cpci_geo_addr());

	seq_printf(m, "cPCI syscon\t: %s\n",
		ge_imp3a_get_cpci_is_syscon() ? "yes" : "no");
}

/*
 * Called very early, device-tree isn't unflattened
 */
static int __init ge_imp3a_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	return of_flat_dt_is_compatible(root, "ge,IMP3A");
}

machine_arch_initcall(ge_imp3a, mpc85xx_common_publish_devices);

machine_arch_initcall(ge_imp3a, swiotlb_setup_bus_notifier);

define_machine(ge_imp3a) {
	.name			= "GE_IMP3A",
	.probe			= ge_imp3a_probe,
	.setup_arch		= ge_imp3a_setup_arch,
	.init_IRQ		= ge_imp3a_pic_init,
	.show_cpuinfo		= ge_imp3a_show_cpuinfo,
#ifdef CONFIG_PCI
	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
	.pcibios_fixup_phb	= fsl_pcibios_fixup_phb,
#endif
	.get_irq		= mpic_get_irq,
	.restart		= fsl_rstcr_restart,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= udbg_progress,
};
gpl-2.0
Cardinal97/android_kernel_msm8939
arch/powerpc/kernel/signal.c
2136
5831
/*
 * Common signal handling code for both 32 and 64 bits
 *
 *    Copyright (c) 2007 Benjamin Herrenschmidt, IBM Coproration
 *    Extracted from signal_32.c and signal_64.c
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */

#include <linux/tracehook.h>
#include <linux/signal.h>
#include <linux/uprobes.h>
#include <linux/key.h>
#include <linux/context_tracking.h>
#include <asm/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/debug.h>
#include <asm/tm.h>

#include "signal.h"

/* Log an error when sending an unhandled signal to a process. Controlled
 * through debug.exception-trace sysctl. */

int show_unhandled_signals = 1;

/*
 * Allocate space for the signal frame
 *
 * Returns a 16-byte-aligned user pointer frame_size bytes below the
 * chosen stack (normal stack, or the alternate signal stack if
 * SA_ONSTACK is set and we are not already on it), or NULL when the
 * resulting range is not writable by the user.
 */
void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
			   size_t frame_size, int is_32)
{
        unsigned long oldsp, newsp;

        /* Default to using normal stack */
        oldsp = get_clean_sp(sp, is_32);

	/* Check for alt stack */
	if ((ka->sa.sa_flags & SA_ONSTACK) &&
	    current->sas_ss_size && !on_sig_stack(oldsp))
		oldsp = (current->sas_ss_sp + current->sas_ss_size);

	/* Get aligned frame */
	newsp = (oldsp - frame_size) & ~0xFUL;

	/* Check access */
	if (!access_ok(VERIFY_WRITE, (void __user *)newsp, oldsp - newsp))
		return NULL;

        return (void __user *)newsp;
}

/*
 * Decide whether an interrupted syscall should be restarted, and patch
 * the register state accordingly.  Only acts when the trap was a syscall
 * (0x0C00) that returned an error (CR0.SO set).
 */
static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
				  int has_handler)
{
	unsigned long ret = regs->gpr[3];
	int restart = 1;

	/* syscall ? */
	if (TRAP(regs) != 0x0C00)
		return;

	/* error signalled ? */
	if (!(regs->ccr & 0x10000000))
		return;

	switch (ret) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		/* ERESTARTNOHAND means that the syscall should only be
		 * restarted if there was no handler for the signal, and since
		 * we only get here if there is a handler, we dont restart.
		 */
		restart = !has_handler;
		break;
	case ERESTARTSYS:
		/* ERESTARTSYS means to restart the syscall if there is no
		 * handler or the handler was registered with SA_RESTART
		 */
		restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0;
		break;
	case ERESTARTNOINTR:
		/* ERESTARTNOINTR means that the syscall should be
		 * called again after the signal handler returns.
		 */
		break;
	default:
		return;
	}
	if (restart) {
		if (ret == ERESTART_RESTARTBLOCK)
			regs->gpr[0] = __NR_restart_syscall;
		else
			regs->gpr[3] = regs->orig_gpr3;
		/* back up NIP so the syscall instruction re-executes */
		regs->nip -= 4;
		regs->result = 0;
	} else {
		regs->result = -EINTR;
		regs->gpr[3] = EINTR;
		regs->ccr |= 0x10000000;
	}
}

/*
 * Deliver one pending signal (if any) to the current task.
 *
 * Handles syscall-restart bookkeeping, re-arms hardware breakpoints,
 * then dispatches to the 32-bit or 64-bit frame-setup routines.
 * Returns non-zero when a signal was delivered.
 */
static int do_signal(struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	siginfo_t info;
	int signr;
	struct k_sigaction ka;
	int ret;
	int is32 = is_32bit_task();

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	/* Is there any syscall restart business here ? */
	check_syscall_restart(regs, &ka, signr > 0);

	if (signr <= 0) {
		/* No signal to deliver -- put the saved sigmask back */
		restore_saved_sigmask();
		regs->trap = 0;
		return 0;               /* no signals delivered */
	}

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
        /*
	 * Reenable the DABR before delivering the signal to
	 * user space. The DABR will have been cleared if it
	 * triggered inside the kernel.
	 */
	if (current->thread.hw_brk.address &&
		current->thread.hw_brk.type)
		set_breakpoint(&current->thread.hw_brk);
#endif
	/* Re-enable the breakpoints for the signal stack */
	thread_change_pc(current, regs);

	if (is32) {
        	if (ka.sa.sa_flags & SA_SIGINFO)
			ret = handle_rt_signal32(signr, &ka, &info, oldset,
					regs);
		else
			ret = handle_signal32(signr, &ka, &info, oldset,
					regs);
	} else {
		ret = handle_rt_signal64(signr, &ka, &info, oldset, regs);
	}

	regs->trap = 0;
	if (ret) {
		signal_delivered(signr, &info, &ka, regs,
					 test_thread_flag(TIF_SINGLESTEP));
	}

	return ret;
}

/*
 * Entry point on return to user space: run any pending work flagged in
 * thread_info_flags (uprobes, signal delivery, notify-resume callbacks).
 */
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
	user_exit();

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	user_enter();
}

unsigned long get_tm_stackpointer(struct pt_regs *regs)
{
	/* When in an active transaction that takes a signal, we need to be
	 * careful with the stack. It's possible that the stack has moved back
	 * up after the tbegin. The obvious case here is when the tbegin is
	 * called inside a function that returns before a tend. In this case,
	 * the stack is part of the checkpointed transactional memory state.
	 * If we write over this non transactionally or in suspend, we are in
	 * trouble because if we get a tm abort, the program counter and stack
	 * pointer will be back at the tbegin but our in memory stack won't be
	 * valid anymore.
	 *
	 * To avoid this, when taking a signal in an active transaction, we
	 * need to use the stack pointer from the checkpointed state, rather
	 * than the speculated state.  This ensures that the signal context
	 * (written tm suspended) will be written below the stack required for
	 * the rollback.  The transaction is aborted becuase of the treclaim,
	 * so any memory written between the tbegin and the signal will be
	 * rolled back anyway.
	 *
	 * For signals taken in non-TM or suspended mode, we use the
	 * normal/non-checkpointed stack pointer.
	 */

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		tm_enable();
		tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
		if (MSR_TM_TRANSACTIONAL(regs->msr))
			return current->thread.ckpt_regs.gpr[1];
	}
#endif
	return regs->gpr[1];
}
gpl-2.0
maikelwever/android_kernel_htc_msm8660-caf
tools/perf/builtin-stat.c
2392
32767
/* * builtin-stat.c * * Builtin stat command: Give a precise performance counters summary * overview about any workload, CPU or specific PID. * * Sample output: $ perf stat ./hackbench 10 Time: 0.118 Performance counter stats for './hackbench 10': 1708.761321 task-clock # 11.037 CPUs utilized 41,190 context-switches # 0.024 M/sec 6,735 CPU-migrations # 0.004 M/sec 17,318 page-faults # 0.010 M/sec 5,205,202,243 cycles # 3.046 GHz 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle 2,603,501,247 instructions # 0.50 insns per cycle # 1.48 stalled cycles per insn 484,357,498 branches # 283.455 M/sec 6,388,934 branch-misses # 1.32% of all branches 0.154822978 seconds time elapsed * * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> * * Improvements and fixes by: * * Arjan van de Ven <arjan@linux.intel.com> * Yanmin Zhang <yanmin.zhang@intel.com> * Wu Fengguang <fengguang.wu@intel.com> * Mike Galbraith <efault@gmx.de> * Paul Mackerras <paulus@samba.org> * Jaswinder Singh Rajput <jaswinder@kernel.org> * * Released under the GPL v2. 
(and only v2, not any later version) */ #include "perf.h" #include "builtin.h" #include "util/util.h" #include "util/parse-options.h" #include "util/parse-events.h" #include "util/event.h" #include "util/evlist.h" #include "util/evsel.h" #include "util/debug.h" #include "util/color.h" #include "util/header.h" #include "util/cpumap.h" #include "util/thread.h" #include "util/thread_map.h" #include <sys/prctl.h> #include <math.h> #include <locale.h> #define DEFAULT_SEPARATOR " " static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, }; /* * Detailed stats (-d), covering the L1 and last level data caches: */ static struct perf_event_attr detailed_attrs[] = { { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_L1D << 0 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_L1D << 0 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_LL << 0 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_LL << 0 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, }; /* 
* Very detailed stats (-d -d), covering the instruction cache and the TLB caches: */ static struct perf_event_attr very_detailed_attrs[] = { { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_L1I << 0 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_L1I << 0 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_DTLB << 0 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_DTLB << 0 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_ITLB << 0 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_ITLB << 0 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, }; /* * Very, very detailed stats (-d -d -d), adding prefetch events: */ static struct perf_event_attr very_very_detailed_attrs[] = { { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_L1D << 0 | (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_L1D << 0 | (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, }; struct perf_evlist *evsel_list; static bool system_wide = false; static int run_idx = 0; static int run_count = 1; static bool no_inherit = false; static bool scale = true; static bool no_aggr = false; static pid_t target_pid = -1; static pid_t target_tid = -1; static pid_t child_pid = -1; static bool null_run = false; static int detailed_run = 0; static bool sync_run = false; static bool big_num = true; static int big_num_opt = -1; static const char *cpu_list; static const char *csv_sep 
= NULL; static bool csv_output = false; static volatile int done = 0; struct stats { double n, mean, M2; }; struct perf_stat { struct stats res_stats[3]; }; static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel) { evsel->priv = zalloc(sizeof(struct perf_stat)); return evsel->priv == NULL ? -ENOMEM : 0; } static void perf_evsel__free_stat_priv(struct perf_evsel *evsel) { free(evsel->priv); evsel->priv = NULL; } static void update_stats(struct stats *stats, u64 val) { double delta; stats->n++; delta = val - stats->mean; stats->mean += delta / stats->n; stats->M2 += delta*(val - stats->mean); } static double avg_stats(struct stats *stats) { return stats->mean; } /* * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance * * (\Sum n_i^2) - ((\Sum n_i)^2)/n * s^2 = ------------------------------- * n - 1 * * http://en.wikipedia.org/wiki/Stddev * * The std dev of the mean is related to the std dev by: * * s * s_mean = ------- * sqrt(n) * */ static double stddev_stats(struct stats *stats) { double variance = stats->M2 / (stats->n - 1); double variance_mean = variance / stats->n; return sqrt(variance_mean); } struct stats runtime_nsecs_stats[MAX_NR_CPUS]; struct stats runtime_cycles_stats[MAX_NR_CPUS]; struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS]; struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS]; struct stats runtime_branches_stats[MAX_NR_CPUS]; struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; struct stats runtime_l1_dcache_stats[MAX_NR_CPUS]; struct stats runtime_l1_icache_stats[MAX_NR_CPUS]; struct stats runtime_ll_cache_stats[MAX_NR_CPUS]; struct stats runtime_itlb_cache_stats[MAX_NR_CPUS]; struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS]; struct stats walltime_nsecs_stats; static int create_perf_stat_counter(struct perf_evsel *evsel) { struct perf_event_attr *attr = &evsel->attr; if (scale) attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; attr->inherit = !no_inherit; if 
(system_wide) return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false); if (target_pid == -1 && target_tid == -1) { attr->disabled = 1; attr->enable_on_exec = 1; } return perf_evsel__open_per_thread(evsel, evsel_list->threads, false); } /* * Does the counter have nsecs as a unit? */ static inline int nsec_counter(struct perf_evsel *evsel) { if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) || perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) return 1; return 0; } /* * Update various tracking values we maintain to print * more semantic information such as miss/hit ratios, * instruction rates, etc: */ static void update_shadow_stats(struct perf_evsel *counter, u64 *count) { if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) update_stats(&runtime_nsecs_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) update_stats(&runtime_cycles_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) update_stats(&runtime_stalled_cycles_front_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) update_stats(&runtime_stalled_cycles_back_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) update_stats(&runtime_branches_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) update_stats(&runtime_cacherefs_stats[0], count[0]); else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D)) update_stats(&runtime_l1_dcache_stats[0], count[0]); else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I)) update_stats(&runtime_l1_icache_stats[0], count[0]); else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL)) update_stats(&runtime_ll_cache_stats[0], count[0]); else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB)) update_stats(&runtime_dtlb_cache_stats[0], count[0]); else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB)) update_stats(&runtime_itlb_cache_stats[0], 
count[0]); } /* * Read out the results of a single counter: * aggregate counts across CPUs in system-wide mode */ static int read_counter_aggr(struct perf_evsel *counter) { struct perf_stat *ps = counter->priv; u64 *count = counter->counts->aggr.values; int i; if (__perf_evsel__read(counter, evsel_list->cpus->nr, evsel_list->threads->nr, scale) < 0) return -1; for (i = 0; i < 3; i++) update_stats(&ps->res_stats[i], count[i]); if (verbose) { fprintf(stderr, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", event_name(counter), count[0], count[1], count[2]); } /* * Save the full runtime - to allow normalization during printout: */ update_shadow_stats(counter, count); return 0; } /* * Read out the results of a single counter: * do not aggregate counts across CPUs in system-wide mode */ static int read_counter(struct perf_evsel *counter) { u64 *count; int cpu; for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) { if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0) return -1; count = counter->counts->cpu[cpu].values; update_shadow_stats(counter, count); } return 0; } static int run_perf_stat(int argc __used, const char **argv) { unsigned long long t0, t1; struct perf_evsel *counter; int status = 0; int child_ready_pipe[2], go_pipe[2]; const bool forks = (argc > 0); char buf; if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) { perror("failed to create pipes"); exit(1); } if (forks) { if ((child_pid = fork()) < 0) perror("failed to fork"); if (!child_pid) { close(child_ready_pipe[0]); close(go_pipe[1]); fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); /* * Do a dummy execvp to get the PLT entry resolved, * so we avoid the resolver overhead on the real * execvp call. */ execvp("", (char **)argv); /* * Tell the parent we're ready to go */ close(child_ready_pipe[1]); /* * Wait until the parent tells us to go. 
*/ if (read(go_pipe[0], &buf, 1) == -1) perror("unable to read pipe"); execvp(argv[0], (char **)argv); perror(argv[0]); exit(-1); } if (target_tid == -1 && target_pid == -1 && !system_wide) evsel_list->threads->map[0] = child_pid; /* * Wait for the child to be ready to exec. */ close(child_ready_pipe[1]); close(go_pipe[0]); if (read(child_ready_pipe[0], &buf, 1) == -1) perror("unable to read pipe"); close(child_ready_pipe[0]); } list_for_each_entry(counter, &evsel_list->entries, node) { if (create_perf_stat_counter(counter) < 0) { if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) { if (verbose) ui__warning("%s event is not supported by the kernel.\n", event_name(counter)); continue; } if (errno == EPERM || errno == EACCES) { error("You may not have permission to collect %sstats.\n" "\t Consider tweaking" " /proc/sys/kernel/perf_event_paranoid or running as root.", system_wide ? "system-wide " : ""); } else { error("open_counter returned with %d (%s). " "/bin/dmesg may provide additional information.\n", errno, strerror(errno)); } if (child_pid != -1) kill(child_pid, SIGTERM); die("Not all events could be opened.\n"); return -1; } } if (perf_evlist__set_filters(evsel_list)) { error("failed to set filter with %d (%s)\n", errno, strerror(errno)); return -1; } /* * Enable counters and exec the command: */ t0 = rdclock(); if (forks) { close(go_pipe[1]); wait(&status); } else { while(!done) sleep(1); } t1 = rdclock(); update_stats(&walltime_nsecs_stats, t1 - t0); if (no_aggr) { list_for_each_entry(counter, &evsel_list->entries, node) { read_counter(counter); perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1); } } else { list_for_each_entry(counter, &evsel_list->entries, node) { read_counter_aggr(counter); perf_evsel__close_fd(counter, evsel_list->cpus->nr, evsel_list->threads->nr); } } return WEXITSTATUS(status); } static void print_noise_pct(double total, double avg) { double pct = 0.0; if (avg) pct = 100.0*total/avg; fprintf(stderr, " ( +-%6.2f%% )", pct); 
} static void print_noise(struct perf_evsel *evsel, double avg) { struct perf_stat *ps; if (run_count == 1) return; ps = evsel->priv; print_noise_pct(stddev_stats(&ps->res_stats[0]), avg); } static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) { double msecs = avg / 1e6; char cpustr[16] = { '\0', }; const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s"; if (no_aggr) sprintf(cpustr, "CPU%*d%s", csv_output ? 0 : -4, evsel_list->cpus->map[cpu], csv_sep); fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(evsel)); if (evsel->cgrp) fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name); if (csv_output) return; if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) fprintf(stderr, " # %8.3f CPUs utilized ", avg / avg_stats(&walltime_nsecs_stats)); } static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg) { double total, ratio = 0.0; const char *color; total = avg_stats(&runtime_cycles_stats[cpu]); if (total) ratio = avg / total * 100.0; color = PERF_COLOR_NORMAL; if (ratio > 50.0) color = PERF_COLOR_RED; else if (ratio > 30.0) color = PERF_COLOR_MAGENTA; else if (ratio > 10.0) color = PERF_COLOR_YELLOW; fprintf(stderr, " # "); color_fprintf(stderr, color, "%6.2f%%", ratio); fprintf(stderr, " frontend cycles idle "); } static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg) { double total, ratio = 0.0; const char *color; total = avg_stats(&runtime_cycles_stats[cpu]); if (total) ratio = avg / total * 100.0; color = PERF_COLOR_NORMAL; if (ratio > 75.0) color = PERF_COLOR_RED; else if (ratio > 50.0) color = PERF_COLOR_MAGENTA; else if (ratio > 20.0) color = PERF_COLOR_YELLOW; fprintf(stderr, " # "); color_fprintf(stderr, color, "%6.2f%%", ratio); fprintf(stderr, " backend cycles idle "); } static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg) { double total, ratio = 0.0; const char *color; total = 
avg_stats(&runtime_branches_stats[cpu]); if (total) ratio = avg / total * 100.0; color = PERF_COLOR_NORMAL; if (ratio > 20.0) color = PERF_COLOR_RED; else if (ratio > 10.0) color = PERF_COLOR_MAGENTA; else if (ratio > 5.0) color = PERF_COLOR_YELLOW; fprintf(stderr, " # "); color_fprintf(stderr, color, "%6.2f%%", ratio); fprintf(stderr, " of all branches "); } static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg) { double total, ratio = 0.0; const char *color; total = avg_stats(&runtime_l1_dcache_stats[cpu]); if (total) ratio = avg / total * 100.0; color = PERF_COLOR_NORMAL; if (ratio > 20.0) color = PERF_COLOR_RED; else if (ratio > 10.0) color = PERF_COLOR_MAGENTA; else if (ratio > 5.0) color = PERF_COLOR_YELLOW; fprintf(stderr, " # "); color_fprintf(stderr, color, "%6.2f%%", ratio); fprintf(stderr, " of all L1-dcache hits "); } static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg) { double total, ratio = 0.0; const char *color; total = avg_stats(&runtime_l1_icache_stats[cpu]); if (total) ratio = avg / total * 100.0; color = PERF_COLOR_NORMAL; if (ratio > 20.0) color = PERF_COLOR_RED; else if (ratio > 10.0) color = PERF_COLOR_MAGENTA; else if (ratio > 5.0) color = PERF_COLOR_YELLOW; fprintf(stderr, " # "); color_fprintf(stderr, color, "%6.2f%%", ratio); fprintf(stderr, " of all L1-icache hits "); } static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) { double total, ratio = 0.0; const char *color; total = avg_stats(&runtime_dtlb_cache_stats[cpu]); if (total) ratio = avg / total * 100.0; color = PERF_COLOR_NORMAL; if (ratio > 20.0) color = PERF_COLOR_RED; else if (ratio > 10.0) color = PERF_COLOR_MAGENTA; else if (ratio > 5.0) color = PERF_COLOR_YELLOW; fprintf(stderr, " # "); color_fprintf(stderr, color, "%6.2f%%", ratio); fprintf(stderr, " of all dTLB cache hits "); } static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) { 
double total, ratio = 0.0; const char *color; total = avg_stats(&runtime_itlb_cache_stats[cpu]); if (total) ratio = avg / total * 100.0; color = PERF_COLOR_NORMAL; if (ratio > 20.0) color = PERF_COLOR_RED; else if (ratio > 10.0) color = PERF_COLOR_MAGENTA; else if (ratio > 5.0) color = PERF_COLOR_YELLOW; fprintf(stderr, " # "); color_fprintf(stderr, color, "%6.2f%%", ratio); fprintf(stderr, " of all iTLB cache hits "); } static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) { double total, ratio = 0.0; const char *color; total = avg_stats(&runtime_ll_cache_stats[cpu]); if (total) ratio = avg / total * 100.0; color = PERF_COLOR_NORMAL; if (ratio > 20.0) color = PERF_COLOR_RED; else if (ratio > 10.0) color = PERF_COLOR_MAGENTA; else if (ratio > 5.0) color = PERF_COLOR_YELLOW; fprintf(stderr, " # "); color_fprintf(stderr, color, "%6.2f%%", ratio); fprintf(stderr, " of all LL-cache hits "); } static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) { double total, ratio = 0.0; char cpustr[16] = { '\0', }; const char *fmt; if (csv_output) fmt = "%s%.0f%s%s"; else if (big_num) fmt = "%s%'18.0f%s%-25s"; else fmt = "%s%18.0f%s%-25s"; if (no_aggr) sprintf(cpustr, "CPU%*d%s", csv_output ? 
0 : -4, evsel_list->cpus->map[cpu], csv_sep); else cpu = 0; fprintf(stderr, fmt, cpustr, avg, csv_sep, event_name(evsel)); if (evsel->cgrp) fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name); if (csv_output) return; if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) { total = avg_stats(&runtime_cycles_stats[cpu]); if (total) ratio = avg / total; fprintf(stderr, " # %5.2f insns per cycle ", ratio); total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]); total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu])); if (total && avg) { ratio = total / avg; fprintf(stderr, "\n # %5.2f stalled cycles per insn", ratio); } } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && runtime_branches_stats[cpu].n != 0) { print_branch_misses(cpu, evsel, avg); } else if ( evsel->attr.type == PERF_TYPE_HW_CACHE && evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D | ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && runtime_l1_dcache_stats[cpu].n != 0) { print_l1_dcache_misses(cpu, evsel, avg); } else if ( evsel->attr.type == PERF_TYPE_HW_CACHE && evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I | ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && runtime_l1_icache_stats[cpu].n != 0) { print_l1_icache_misses(cpu, evsel, avg); } else if ( evsel->attr.type == PERF_TYPE_HW_CACHE && evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB | ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && runtime_dtlb_cache_stats[cpu].n != 0) { print_dtlb_cache_misses(cpu, evsel, avg); } else if ( evsel->attr.type == PERF_TYPE_HW_CACHE && evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB | ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && runtime_itlb_cache_stats[cpu].n != 0) { print_itlb_cache_misses(cpu, evsel, avg); } else if ( evsel->attr.type == PERF_TYPE_HW_CACHE && evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL | 
((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && runtime_ll_cache_stats[cpu].n != 0) { print_ll_cache_misses(cpu, evsel, avg); } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && runtime_cacherefs_stats[cpu].n != 0) { total = avg_stats(&runtime_cacherefs_stats[cpu]); if (total) ratio = avg * 100 / total; fprintf(stderr, " # %8.3f %% of all cache refs ", ratio); } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) { print_stalled_cycles_frontend(cpu, evsel, avg); } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) { print_stalled_cycles_backend(cpu, evsel, avg); } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { total = avg_stats(&runtime_nsecs_stats[cpu]); if (total) ratio = 1.0 * avg / total; fprintf(stderr, " # %8.3f GHz ", ratio); } else if (runtime_nsecs_stats[cpu].n != 0) { total = avg_stats(&runtime_nsecs_stats[cpu]); if (total) ratio = 1000.0 * avg / total; fprintf(stderr, " # %8.3f M/sec ", ratio); } else { fprintf(stderr, " "); } } /* * Print out the results of a single counter: * aggregated counts in system-wide mode */ static void print_counter_aggr(struct perf_evsel *counter) { struct perf_stat *ps = counter->priv; double avg = avg_stats(&ps->res_stats[0]); int scaled = counter->counts->scaled; if (scaled == -1) { fprintf(stderr, "%*s%s%*s", csv_output ? 0 : 18, "<not counted>", csv_sep, csv_output ? 
0 : -24, event_name(counter)); if (counter->cgrp) fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name); fputc('\n', stderr); return; } if (nsec_counter(counter)) nsec_printout(-1, counter, avg); else abs_printout(-1, counter, avg); if (csv_output) { fputc('\n', stderr); return; } print_noise(counter, avg); if (scaled) { double avg_enabled, avg_running; avg_enabled = avg_stats(&ps->res_stats[1]); avg_running = avg_stats(&ps->res_stats[2]); fprintf(stderr, " [%5.2f%%]", 100 * avg_running / avg_enabled); } fprintf(stderr, "\n"); } /* * Print out the results of a single counter: * does not use aggregated count in system-wide */ static void print_counter(struct perf_evsel *counter) { u64 ena, run, val; int cpu; for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) { val = counter->counts->cpu[cpu].val; ena = counter->counts->cpu[cpu].ena; run = counter->counts->cpu[cpu].run; if (run == 0 || ena == 0) { fprintf(stderr, "CPU%*d%s%*s%s%*s", csv_output ? 0 : -4, evsel_list->cpus->map[cpu], csv_sep, csv_output ? 0 : 18, "<not counted>", csv_sep, csv_output ? 
0 : -24, event_name(counter)); if (counter->cgrp) fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name); fputc('\n', stderr); continue; } if (nsec_counter(counter)) nsec_printout(cpu, counter, val); else abs_printout(cpu, counter, val); if (!csv_output) { print_noise(counter, 1.0); if (run != ena) fprintf(stderr, " (%.2f%%)", 100.0 * run / ena); } fputc('\n', stderr); } } static void print_stat(int argc, const char **argv) { struct perf_evsel *counter; int i; fflush(stdout); if (!csv_output) { fprintf(stderr, "\n"); fprintf(stderr, " Performance counter stats for "); if(target_pid == -1 && target_tid == -1) { fprintf(stderr, "\'%s", argv[0]); for (i = 1; i < argc; i++) fprintf(stderr, " %s", argv[i]); } else if (target_pid != -1) fprintf(stderr, "process id \'%d", target_pid); else fprintf(stderr, "thread id \'%d", target_tid); fprintf(stderr, "\'"); if (run_count > 1) fprintf(stderr, " (%d runs)", run_count); fprintf(stderr, ":\n\n"); } if (no_aggr) { list_for_each_entry(counter, &evsel_list->entries, node) print_counter(counter); } else { list_for_each_entry(counter, &evsel_list->entries, node) print_counter_aggr(counter); } if (!csv_output) { if (!null_run) fprintf(stderr, "\n"); fprintf(stderr, " %17.9f seconds time elapsed", avg_stats(&walltime_nsecs_stats)/1e9); if (run_count > 1) { fprintf(stderr, " "); print_noise_pct(stddev_stats(&walltime_nsecs_stats), avg_stats(&walltime_nsecs_stats)); } fprintf(stderr, "\n\n"); } } static volatile int signr = -1; static void skip_signal(int signo) { if(child_pid == -1) done = 1; signr = signo; } static void sig_atexit(void) { if (child_pid != -1) kill(child_pid, SIGTERM); if (signr == -1) return; signal(signr, SIG_DFL); kill(getpid(), signr); } static const char * const stat_usage[] = { "perf stat [<options>] [<command>]", NULL }; static int stat__set_big_num(const struct option *opt __used, const char *s __used, int unset) { big_num_opt = unset ? 
0 : 1; return 0; } static const struct option options[] = { OPT_CALLBACK('e', "event", &evsel_list, "event", "event selector. use 'perf list' to list available events", parse_events), OPT_CALLBACK(0, "filter", &evsel_list, "filter", "event filter", parse_filter), OPT_BOOLEAN('i', "no-inherit", &no_inherit, "child tasks do not inherit counters"), OPT_INTEGER('p', "pid", &target_pid, "stat events on existing process id"), OPT_INTEGER('t', "tid", &target_tid, "stat events on existing thread id"), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), OPT_BOOLEAN('c', "scale", &scale, "scale/normalize counters"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_INTEGER('r', "repeat", &run_count, "repeat command and print average + stddev (max: 100)"), OPT_BOOLEAN('n', "null", &null_run, "null run - dont start any counters"), OPT_INCR('d', "detailed", &detailed_run, "detailed run - start a lot of events"), OPT_BOOLEAN('S', "sync", &sync_run, "call sync() before starting a run"), OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, "print large numbers with thousands\' separators", stat__set_big_num), OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to monitor in system-wide"), OPT_BOOLEAN('A', "no-aggr", &no_aggr, "disable CPU count aggregation"), OPT_STRING('x', "field-separator", &csv_sep, "separator", "print counts with custom separator"), OPT_CALLBACK('G', "cgroup", &evsel_list, "name", "monitor event in cgroup name only", parse_cgroups), OPT_END() }; /* * Add default attributes, if there were no attributes specified or * if -d/--detailed, -d -d or -d -d -d is used: */ static int add_default_attributes(void) { struct perf_evsel *pos; size_t attr_nr = 0; size_t c; /* Set attrs if no event is selected and !null_run: */ if (null_run) return 0; if (!evsel_list->nr_entries) { for (c = 0; c < ARRAY_SIZE(default_attrs); c++) { pos = perf_evsel__new(default_attrs + c, c + attr_nr); if (pos == NULL) 
return -1; perf_evlist__add(evsel_list, pos); } attr_nr += c; } /* Detailed events get appended to the event list: */ if (detailed_run < 1) return 0; /* Append detailed run extra attributes: */ for (c = 0; c < ARRAY_SIZE(detailed_attrs); c++) { pos = perf_evsel__new(detailed_attrs + c, c + attr_nr); if (pos == NULL) return -1; perf_evlist__add(evsel_list, pos); } attr_nr += c; if (detailed_run < 2) return 0; /* Append very detailed run extra attributes: */ for (c = 0; c < ARRAY_SIZE(very_detailed_attrs); c++) { pos = perf_evsel__new(very_detailed_attrs + c, c + attr_nr); if (pos == NULL) return -1; perf_evlist__add(evsel_list, pos); } if (detailed_run < 3) return 0; /* Append very, very detailed run extra attributes: */ for (c = 0; c < ARRAY_SIZE(very_very_detailed_attrs); c++) { pos = perf_evsel__new(very_very_detailed_attrs + c, c + attr_nr); if (pos == NULL) return -1; perf_evlist__add(evsel_list, pos); } return 0; } int cmd_stat(int argc, const char **argv, const char *prefix __used) { struct perf_evsel *pos; int status = -ENOMEM; setlocale(LC_ALL, ""); evsel_list = perf_evlist__new(NULL, NULL); if (evsel_list == NULL) return -ENOMEM; argc = parse_options(argc, argv, options, stat_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (csv_sep) csv_output = true; else csv_sep = DEFAULT_SEPARATOR; /* * let the spreadsheet do the pretty-printing */ if (csv_output) { /* User explicitely passed -B? 
*/ if (big_num_opt == 1) { fprintf(stderr, "-B option not supported with -x\n"); usage_with_options(stat_usage, options); } else /* Nope, so disable big number formatting */ big_num = false; } else if (big_num_opt == 0) /* User passed --no-big-num */ big_num = false; if (!argc && target_pid == -1 && target_tid == -1) usage_with_options(stat_usage, options); if (run_count <= 0) usage_with_options(stat_usage, options); /* no_aggr, cgroup are for system-wide only */ if ((no_aggr || nr_cgroups) && !system_wide) { fprintf(stderr, "both cgroup and no-aggregation " "modes only available in system-wide mode\n"); usage_with_options(stat_usage, options); } if (add_default_attributes()) goto out; if (target_pid != -1) target_tid = target_pid; evsel_list->threads = thread_map__new(target_pid, target_tid); if (evsel_list->threads == NULL) { pr_err("Problems finding threads of monitor\n"); usage_with_options(stat_usage, options); } if (system_wide) evsel_list->cpus = cpu_map__new(cpu_list); else evsel_list->cpus = cpu_map__dummy_new(); if (evsel_list->cpus == NULL) { perror("failed to parse CPUs map"); usage_with_options(stat_usage, options); return -1; } list_for_each_entry(pos, &evsel_list->entries, node) { if (perf_evsel__alloc_stat_priv(pos) < 0 || perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0 || perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, evsel_list->threads->nr) < 0) goto out_free_fd; } /* * We dont want to block the signals - that would cause * child tasks to inherit that and Ctrl-C would not work. * What we want is for Ctrl-C to work in the exec()-ed * task, but being ignored by perf stat itself: */ atexit(sig_atexit); signal(SIGINT, skip_signal); signal(SIGALRM, skip_signal); signal(SIGABRT, skip_signal); status = 0; for (run_idx = 0; run_idx < run_count; run_idx++) { if (run_count != 1 && verbose) fprintf(stderr, "[ perf stat: executing run #%d ... 
]\n", run_idx + 1); if (sync_run) sync(); status = run_perf_stat(argc, argv); } if (status != -1) print_stat(argc, argv); out_free_fd: list_for_each_entry(pos, &evsel_list->entries, node) perf_evsel__free_stat_priv(pos); perf_evlist__delete_maps(evsel_list); out: perf_evlist__delete(evsel_list); return status; }
gpl-2.0
cholokei/msm8660_test_kernel-1
arch/powerpc/platforms/ps3/mm.c
3672
32570
/* * PS3 address space management. * * Copyright (C) 2006 Sony Computer Entertainment Inc. * Copyright 2006 Sony Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/memory_hotplug.h> #include <linux/memblock.h> #include <linux/slab.h> #include <asm/cell-regs.h> #include <asm/firmware.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/lv1call.h> #include "platform.h" #if defined(DEBUG) #define DBG udbg_printf #else #define DBG pr_devel #endif enum { #if defined(CONFIG_PS3_DYNAMIC_DMA) USE_DYNAMIC_DMA = 1, #else USE_DYNAMIC_DMA = 0, #endif }; enum { PAGE_SHIFT_4K = 12U, PAGE_SHIFT_64K = 16U, PAGE_SHIFT_16M = 24U, }; static unsigned long make_page_sizes(unsigned long a, unsigned long b) { return (a << 56) | (b << 48); } enum { ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04, ALLOCATE_MEMORY_ADDR_ZERO = 0X08, }; /* valid htab sizes are {18,19,20} = 256K, 512K, 1M */ enum { HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */ HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */ }; /*============================================================================*/ /* virtual address space routines */ /*============================================================================*/ /** * struct mem_region - memory region structure * @base: base address * @size: size in bytes * @offset: difference between base and rm.size */ struct 
mem_region { u64 base; u64 size; unsigned long offset; }; /** * struct map - address space state variables holder * @total: total memory available as reported by HV * @vas_id - HV virtual address space id * @htab_size: htab size in bytes * * The HV virtual address space (vas) allows for hotplug memory regions. * Memory regions can be created and destroyed in the vas at runtime. * @rm: real mode (bootmem) region * @r1: hotplug memory region(s) * * ps3 addresses * virt_addr: a cpu 'translated' effective address * phys_addr: an address in what Linux thinks is the physical address space * lpar_addr: an address in the HV virtual address space * bus_addr: an io controller 'translated' address on a device bus */ struct map { u64 total; u64 vas_id; u64 htab_size; struct mem_region rm; struct mem_region r1; }; #define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__) static void __maybe_unused _debug_dump_map(const struct map *m, const char *func, int line) { DBG("%s:%d: map.total = %llxh\n", func, line, m->total); DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size); DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id); DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size); DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base); DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset); DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size); } static struct map map; /** * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address * @phys_addr: linux physical address */ unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr) { BUG_ON(is_kernel_addr(phys_addr)); return (phys_addr < map.rm.size || phys_addr >= map.total) ? 
phys_addr : phys_addr + map.r1.offset; } EXPORT_SYMBOL(ps3_mm_phys_to_lpar); /** * ps3_mm_vas_create - create the virtual address space */ void __init ps3_mm_vas_create(unsigned long* htab_size) { int result; u64 start_address; u64 size; u64 access_right; u64 max_page_size; u64 flags; result = lv1_query_logical_partition_address_region_info(0, &start_address, &size, &access_right, &max_page_size, &flags); if (result) { DBG("%s:%d: lv1_query_logical_partition_address_region_info " "failed: %s\n", __func__, __LINE__, ps3_result(result)); goto fail; } if (max_page_size < PAGE_SHIFT_16M) { DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__, max_page_size); goto fail; } BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX); BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN); result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE, 2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K), &map.vas_id, &map.htab_size); if (result) { DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n", __func__, __LINE__, ps3_result(result)); goto fail; } result = lv1_select_virtual_address_space(map.vas_id); if (result) { DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n", __func__, __LINE__, ps3_result(result)); goto fail; } *htab_size = map.htab_size; debug_dump_map(&map); return; fail: panic("ps3_mm_vas_create failed"); } /** * ps3_mm_vas_destroy - */ void ps3_mm_vas_destroy(void) { int result; DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id); if (map.vas_id) { result = lv1_select_virtual_address_space(0); BUG_ON(result); result = lv1_destruct_virtual_address_space(map.vas_id); BUG_ON(result); map.vas_id = 0; } } /*============================================================================*/ /* memory hotplug routines */ /*============================================================================*/ /** * ps3_mm_region_create - create a memory region in the vas * @r: pointer to a struct mem_region to accept initialized values * @size: 
requested region size * * This implementation creates the region with the vas large page size. * @size is rounded down to a multiple of the vas large page size. */ static int ps3_mm_region_create(struct mem_region *r, unsigned long size) { int result; u64 muid; r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M); DBG("%s:%d requested %lxh\n", __func__, __LINE__, size); DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size); DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__, size - r->size, (size - r->size) / 1024 / 1024); if (r->size == 0) { DBG("%s:%d: size == 0\n", __func__, __LINE__); result = -1; goto zero_region; } result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0, ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid); if (result || r->base < map.rm.size) { DBG("%s:%d: lv1_allocate_memory failed: %s\n", __func__, __LINE__, ps3_result(result)); goto zero_region; } r->offset = r->base - map.rm.size; return result; zero_region: r->size = r->base = r->offset = 0; return result; } /** * ps3_mm_region_destroy - destroy a memory region * @r: pointer to struct mem_region */ static void ps3_mm_region_destroy(struct mem_region *r) { int result; DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base); if (r->base) { result = lv1_release_memory(r->base); BUG_ON(result); r->size = r->base = r->offset = 0; map.total = map.rm.size; } } /** * ps3_mm_add_memory - hot add memory */ static int __init ps3_mm_add_memory(void) { int result; unsigned long start_addr; unsigned long start_pfn; unsigned long nr_pages; if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) return -ENODEV; BUG_ON(!mem_init_done); start_addr = map.rm.size; start_pfn = start_addr >> PAGE_SHIFT; nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT; DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n", __func__, __LINE__, start_addr, start_pfn, nr_pages); result = add_memory(0, start_addr, map.r1.size); if (result) { pr_err("%s:%d: add_memory failed: (%d)\n", __func__, __LINE__, result); 
return result; } memblock_add(start_addr, map.r1.size); memblock_analyze(); result = online_pages(start_pfn, nr_pages); if (result) pr_err("%s:%d: online_pages failed: (%d)\n", __func__, __LINE__, result); return result; } device_initcall(ps3_mm_add_memory); /*============================================================================*/ /* dma routines */ /*============================================================================*/ /** * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address. * @r: pointer to dma region structure * @lpar_addr: HV lpar address */ static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r, unsigned long lpar_addr) { if (lpar_addr >= map.rm.size) lpar_addr -= map.r1.offset; BUG_ON(lpar_addr < r->offset); BUG_ON(lpar_addr >= r->offset + r->len); return r->bus_addr + lpar_addr - r->offset; } #define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__) static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r, const char *func, int line) { DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id, r->dev->dev_id); DBG("%s:%d: page_size %u\n", func, line, r->page_size); DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr); DBG("%s:%d: len %lxh\n", func, line, r->len); DBG("%s:%d: offset %lxh\n", func, line, r->offset); } /** * dma_chunk - A chunk of dma pages mapped by the io controller. * @region - The dma region that owns this chunk. * @lpar_addr: Starting lpar address of the area to map. * @bus_addr: Starting ioc bus address of the area to map. * @len: Length in bytes of the area to map. * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the * list of all chuncks owned by the region. * * This implementation uses a very simple dma page manager * based on the dma_chunk structure. This scheme assumes * that all drivers use very well behaved dma ops. 
*/ struct dma_chunk { struct ps3_dma_region *region; unsigned long lpar_addr; unsigned long bus_addr; unsigned long len; struct list_head link; unsigned int usage_count; }; #define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__) static void _dma_dump_chunk (const struct dma_chunk* c, const char* func, int line) { DBG("%s:%d: r.dev %llu:%llu\n", func, line, c->region->dev->bus_id, c->region->dev->dev_id); DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr); DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size); DBG("%s:%d: r.len %lxh\n", func, line, c->region->len); DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset); DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr); DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr); DBG("%s:%d: c.len %lxh\n", func, line, c->len); } static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r, unsigned long bus_addr, unsigned long len) { struct dma_chunk *c; unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus, 1 << r->page_size); list_for_each_entry(c, &r->chunk_list.head, link) { /* intersection */ if (aligned_bus >= c->bus_addr && aligned_bus + aligned_len <= c->bus_addr + c->len) return c; /* below */ if (aligned_bus + aligned_len <= c->bus_addr) continue; /* above */ if (aligned_bus >= c->bus_addr + c->len) continue; /* we don't handle the multi-chunk case for now */ dma_dump_chunk(c); BUG(); } return NULL; } static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r, unsigned long lpar_addr, unsigned long len) { struct dma_chunk *c; unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar, 1 << r->page_size); list_for_each_entry(c, &r->chunk_list.head, link) { /* intersection */ if (c->lpar_addr <= aligned_lpar && aligned_lpar < c->lpar_addr + c->len) { if (aligned_lpar + aligned_len <= 
c->lpar_addr + c->len) return c; else { dma_dump_chunk(c); BUG(); } } /* below */ if (aligned_lpar + aligned_len <= c->lpar_addr) { continue; } /* above */ if (c->lpar_addr + c->len <= aligned_lpar) { continue; } } return NULL; } static int dma_sb_free_chunk(struct dma_chunk *c) { int result = 0; if (c->bus_addr) { result = lv1_unmap_device_dma_region(c->region->dev->bus_id, c->region->dev->dev_id, c->bus_addr, c->len); BUG_ON(result); } kfree(c); return result; } static int dma_ioc0_free_chunk(struct dma_chunk *c) { int result = 0; int iopage; unsigned long offset; struct ps3_dma_region *r = c->region; DBG("%s:start\n", __func__); for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) { offset = (1 << r->page_size) * iopage; /* put INVALID entry */ result = lv1_put_iopte(0, c->bus_addr + offset, c->lpar_addr + offset, r->ioid, 0); DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__, c->bus_addr + offset, c->lpar_addr + offset, r->ioid); if (result) { DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__, __LINE__, ps3_result(result)); } } kfree(c); DBG("%s:end\n", __func__); return result; } /** * dma_sb_map_pages - Maps dma pages into the io controller bus address space. * @r: Pointer to a struct ps3_dma_region. * @phys_addr: Starting physical address of the area to map. * @len: Length in bytes of the area to map. * c_out: A pointer to receive an allocated struct dma_chunk for this area. * * This is the lowest level dma mapping routine, and is the one that will * make the HV call to add the pages into the io controller address space. 
*/ static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr, unsigned long len, struct dma_chunk **c_out, u64 iopte_flag) { int result; struct dma_chunk *c; c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC); if (!c) { result = -ENOMEM; goto fail_alloc; } c->region = r; c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr); c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr); c->len = len; BUG_ON(iopte_flag != 0xf800000000000000UL); result = lv1_map_device_dma_region(c->region->dev->bus_id, c->region->dev->dev_id, c->lpar_addr, c->bus_addr, c->len, iopte_flag); if (result) { DBG("%s:%d: lv1_map_device_dma_region failed: %s\n", __func__, __LINE__, ps3_result(result)); goto fail_map; } list_add(&c->link, &r->chunk_list.head); *c_out = c; return 0; fail_map: kfree(c); fail_alloc: *c_out = NULL; DBG(" <- %s:%d\n", __func__, __LINE__); return result; } static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr, unsigned long len, struct dma_chunk **c_out, u64 iopte_flag) { int result; struct dma_chunk *c, *last; int iopage, pages; unsigned long offset; DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__, phys_addr, ps3_mm_phys_to_lpar(phys_addr), len); c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC); if (!c) { result = -ENOMEM; goto fail_alloc; } c->region = r; c->len = len; c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr); /* allocate IO address */ if (list_empty(&r->chunk_list.head)) { /* first one */ c->bus_addr = r->bus_addr; } else { /* derive from last bus addr*/ last = list_entry(r->chunk_list.head.next, struct dma_chunk, link); c->bus_addr = last->bus_addr + last->len; DBG("%s: last bus=%#lx, len=%#lx\n", __func__, last->bus_addr, last->len); } /* FIXME: check whether length exceeds region size */ /* build ioptes for the area */ pages = len >> r->page_size; DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__, r->page_size, r->len, pages, iopte_flag); for (iopage = 0; iopage < pages; iopage++) { offset = (1 << 
r->page_size) * iopage; result = lv1_put_iopte(0, c->bus_addr + offset, c->lpar_addr + offset, r->ioid, iopte_flag); if (result) { pr_warning("%s:%d: lv1_put_iopte failed: %s\n", __func__, __LINE__, ps3_result(result)); goto fail_map; } DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__, iopage, c->bus_addr + offset, c->lpar_addr + offset, r->ioid); } /* be sure that last allocated one is inserted at head */ list_add(&c->link, &r->chunk_list.head); *c_out = c; DBG("%s: end\n", __func__); return 0; fail_map: for (iopage--; 0 <= iopage; iopage--) { lv1_put_iopte(0, c->bus_addr + offset, c->lpar_addr + offset, r->ioid, 0); } kfree(c); fail_alloc: *c_out = NULL; return result; } /** * dma_sb_region_create - Create a device dma region. * @r: Pointer to a struct ps3_dma_region. * * This is the lowest level dma region create routine, and is the one that * will make the HV call to create the region. */ static int dma_sb_region_create(struct ps3_dma_region *r) { int result; u64 bus_addr; DBG(" -> %s:%d:\n", __func__, __LINE__); BUG_ON(!r); if (!r->dev->bus_id) { pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__, r->dev->bus_id, r->dev->dev_id); return 0; } DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__, __LINE__, r->len, r->page_size, r->offset); BUG_ON(!r->len); BUG_ON(!r->page_size); BUG_ON(!r->region_ops); INIT_LIST_HEAD(&r->chunk_list.head); spin_lock_init(&r->chunk_list.lock); result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id, roundup_pow_of_two(r->len), r->page_size, r->region_type, &bus_addr); r->bus_addr = bus_addr; if (result) { DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n", __func__, __LINE__, ps3_result(result)); r->len = r->bus_addr = 0; } return result; } static int dma_ioc0_region_create(struct ps3_dma_region *r) { int result; u64 bus_addr; INIT_LIST_HEAD(&r->chunk_list.head); spin_lock_init(&r->chunk_list.lock); result = lv1_allocate_io_segment(0, r->len, r->page_size, &bus_addr); r->bus_addr 
= bus_addr; if (result) { DBG("%s:%d: lv1_allocate_io_segment failed: %s\n", __func__, __LINE__, ps3_result(result)); r->len = r->bus_addr = 0; } DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__, r->len, r->page_size, r->bus_addr); return result; } /** * dma_region_free - Free a device dma region. * @r: Pointer to a struct ps3_dma_region. * * This is the lowest level dma region free routine, and is the one that * will make the HV call to free the region. */ static int dma_sb_region_free(struct ps3_dma_region *r) { int result; struct dma_chunk *c; struct dma_chunk *tmp; BUG_ON(!r); if (!r->dev->bus_id) { pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__, r->dev->bus_id, r->dev->dev_id); return 0; } list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) { list_del(&c->link); dma_sb_free_chunk(c); } result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id, r->bus_addr); if (result) DBG("%s:%d: lv1_free_device_dma_region failed: %s\n", __func__, __LINE__, ps3_result(result)); r->bus_addr = 0; return result; } static int dma_ioc0_region_free(struct ps3_dma_region *r) { int result; struct dma_chunk *c, *n; DBG("%s: start\n", __func__); list_for_each_entry_safe(c, n, &r->chunk_list.head, link) { list_del(&c->link); dma_ioc0_free_chunk(c); } result = lv1_release_io_segment(0, r->bus_addr); if (result) DBG("%s:%d: lv1_free_device_dma_region failed: %s\n", __func__, __LINE__, ps3_result(result)); r->bus_addr = 0; DBG("%s: end\n", __func__); return result; } /** * dma_sb_map_area - Map an area of memory into a device dma region. * @r: Pointer to a struct ps3_dma_region. * @virt_addr: Starting virtual address of the area to map. * @len: Length in bytes of the area to map. * @bus_addr: A pointer to return the starting ioc bus address of the area to * map. * * This is the common dma mapping routine. 
*/ static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr, u64 iopte_flag) { int result; unsigned long flags; struct dma_chunk *c; unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) : virt_addr; unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys, 1 << r->page_size); *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr)); if (!USE_DYNAMIC_DMA) { unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr); DBG(" -> %s:%d\n", __func__, __LINE__); DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__, virt_addr); DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__, phys_addr); DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__, lpar_addr); DBG("%s:%d len %lxh\n", __func__, __LINE__, len); DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__, *bus_addr, len); } spin_lock_irqsave(&r->chunk_list.lock, flags); c = dma_find_chunk(r, *bus_addr, len); if (c) { DBG("%s:%d: reusing mapped chunk", __func__, __LINE__); dma_dump_chunk(c); c->usage_count++; spin_unlock_irqrestore(&r->chunk_list.lock, flags); return 0; } result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag); if (result) { *bus_addr = 0; DBG("%s:%d: dma_sb_map_pages failed (%d)\n", __func__, __LINE__, result); spin_unlock_irqrestore(&r->chunk_list.lock, flags); return result; } c->usage_count = 1; spin_unlock_irqrestore(&r->chunk_list.lock, flags); return result; } static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr, u64 iopte_flag) { int result; unsigned long flags; struct dma_chunk *c; unsigned long phys_addr = is_kernel_addr(virt_addr) ? 
__pa(virt_addr) : virt_addr; unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys, 1 << r->page_size); DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__, virt_addr, len); DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__, phys_addr, aligned_phys, aligned_len); spin_lock_irqsave(&r->chunk_list.lock, flags); c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len); if (c) { /* FIXME */ BUG(); *bus_addr = c->bus_addr + phys_addr - aligned_phys; c->usage_count++; spin_unlock_irqrestore(&r->chunk_list.lock, flags); return 0; } result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag); if (result) { *bus_addr = 0; DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n", __func__, __LINE__, result); spin_unlock_irqrestore(&r->chunk_list.lock, flags); return result; } *bus_addr = c->bus_addr + phys_addr - aligned_phys; DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__, virt_addr, phys_addr, aligned_phys, *bus_addr); c->usage_count = 1; spin_unlock_irqrestore(&r->chunk_list.lock, flags); return result; } /** * dma_sb_unmap_area - Unmap an area of memory from a device dma region. * @r: Pointer to a struct ps3_dma_region. * @bus_addr: The starting ioc bus address of the area to unmap. * @len: Length in bytes of the area to unmap. * * This is the common dma unmap routine. 
*/ static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr, unsigned long len) { unsigned long flags; struct dma_chunk *c; spin_lock_irqsave(&r->chunk_list.lock, flags); c = dma_find_chunk(r, bus_addr, len); if (!c) { unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + bus_addr - aligned_bus, 1 << r->page_size); DBG("%s:%d: not found: bus_addr %llxh\n", __func__, __LINE__, bus_addr); DBG("%s:%d: not found: len %lxh\n", __func__, __LINE__, len); DBG("%s:%d: not found: aligned_bus %lxh\n", __func__, __LINE__, aligned_bus); DBG("%s:%d: not found: aligned_len %lxh\n", __func__, __LINE__, aligned_len); BUG(); } c->usage_count--; if (!c->usage_count) { list_del(&c->link); dma_sb_free_chunk(c); } spin_unlock_irqrestore(&r->chunk_list.lock, flags); return 0; } static int dma_ioc0_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr, unsigned long len) { unsigned long flags; struct dma_chunk *c; DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len); spin_lock_irqsave(&r->chunk_list.lock, flags); c = dma_find_chunk(r, bus_addr, len); if (!c) { unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + bus_addr - aligned_bus, 1 << r->page_size); DBG("%s:%d: not found: bus_addr %llxh\n", __func__, __LINE__, bus_addr); DBG("%s:%d: not found: len %lxh\n", __func__, __LINE__, len); DBG("%s:%d: not found: aligned_bus %lxh\n", __func__, __LINE__, aligned_bus); DBG("%s:%d: not found: aligned_len %lxh\n", __func__, __LINE__, aligned_len); BUG(); } c->usage_count--; if (!c->usage_count) { list_del(&c->link); dma_ioc0_free_chunk(c); } spin_unlock_irqrestore(&r->chunk_list.lock, flags); DBG("%s: end\n", __func__); return 0; } /** * dma_sb_region_create_linear - Setup a linear dma mapping for a device. * @r: Pointer to a struct ps3_dma_region. 
* * This routine creates an HV dma region for the device and maps all available * ram into the io controller bus address space. */ static int dma_sb_region_create_linear(struct ps3_dma_region *r) { int result; unsigned long virt_addr, len; dma_addr_t tmp; if (r->len > 16*1024*1024) { /* FIXME: need proper fix */ /* force 16M dma pages for linear mapping */ if (r->page_size != PS3_DMA_16M) { pr_info("%s:%d: forcing 16M pages for linear map\n", __func__, __LINE__); r->page_size = PS3_DMA_16M; r->len = _ALIGN_UP(r->len, 1 << r->page_size); } } result = dma_sb_region_create(r); BUG_ON(result); if (r->offset < map.rm.size) { /* Map (part of) 1st RAM chunk */ virt_addr = map.rm.base + r->offset; len = map.rm.size - r->offset; if (len > r->len) len = r->len; result = dma_sb_map_area(r, virt_addr, len, &tmp, CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW | CBE_IOPTE_M); BUG_ON(result); } if (r->offset + r->len > map.rm.size) { /* Map (part of) 2nd RAM chunk */ virt_addr = map.rm.size; len = r->len; if (r->offset >= map.rm.size) virt_addr += r->offset - map.rm.size; else len -= map.rm.size - r->offset; result = dma_sb_map_area(r, virt_addr, len, &tmp, CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW | CBE_IOPTE_M); BUG_ON(result); } return result; } /** * dma_sb_region_free_linear - Free a linear dma mapping for a device. * @r: Pointer to a struct ps3_dma_region. * * This routine will unmap all mapped areas and free the HV dma region. 
*/ static int dma_sb_region_free_linear(struct ps3_dma_region *r) { int result; dma_addr_t bus_addr; unsigned long len, lpar_addr; if (r->offset < map.rm.size) { /* Unmap (part of) 1st RAM chunk */ lpar_addr = map.rm.base + r->offset; len = map.rm.size - r->offset; if (len > r->len) len = r->len; bus_addr = dma_sb_lpar_to_bus(r, lpar_addr); result = dma_sb_unmap_area(r, bus_addr, len); BUG_ON(result); } if (r->offset + r->len > map.rm.size) { /* Unmap (part of) 2nd RAM chunk */ lpar_addr = map.r1.base; len = r->len; if (r->offset >= map.rm.size) lpar_addr += r->offset - map.rm.size; else len -= map.rm.size - r->offset; bus_addr = dma_sb_lpar_to_bus(r, lpar_addr); result = dma_sb_unmap_area(r, bus_addr, len); BUG_ON(result); } result = dma_sb_region_free(r); BUG_ON(result); return result; } /** * dma_sb_map_area_linear - Map an area of memory into a device dma region. * @r: Pointer to a struct ps3_dma_region. * @virt_addr: Starting virtual address of the area to map. * @len: Length in bytes of the area to map. * @bus_addr: A pointer to return the starting ioc bus address of the area to * map. * * This routine just returns the corresponding bus address. Actual mapping * occurs in dma_region_create_linear(). */ static int dma_sb_map_area_linear(struct ps3_dma_region *r, unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr, u64 iopte_flag) { unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) : virt_addr; *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr)); return 0; } /** * dma_unmap_area_linear - Unmap an area of memory from a device dma region. * @r: Pointer to a struct ps3_dma_region. * @bus_addr: The starting ioc bus address of the area to unmap. * @len: Length in bytes of the area to unmap. * * This routine does nothing. Unmapping occurs in dma_sb_region_free_linear(). 
*/ static int dma_sb_unmap_area_linear(struct ps3_dma_region *r, dma_addr_t bus_addr, unsigned long len) { return 0; }; static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = { .create = dma_sb_region_create, .free = dma_sb_region_free, .map = dma_sb_map_area, .unmap = dma_sb_unmap_area }; static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = { .create = dma_sb_region_create_linear, .free = dma_sb_region_free_linear, .map = dma_sb_map_area_linear, .unmap = dma_sb_unmap_area_linear }; static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = { .create = dma_ioc0_region_create, .free = dma_ioc0_region_free, .map = dma_ioc0_map_area, .unmap = dma_ioc0_unmap_area }; int ps3_dma_region_init(struct ps3_system_bus_device *dev, struct ps3_dma_region *r, enum ps3_dma_page_size page_size, enum ps3_dma_region_type region_type, void *addr, unsigned long len) { unsigned long lpar_addr; lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0; r->dev = dev; r->page_size = page_size; r->region_type = region_type; r->offset = lpar_addr; if (r->offset >= map.rm.size) r->offset -= map.r1.offset; r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size); switch (dev->dev_type) { case PS3_DEVICE_TYPE_SB: r->region_ops = (USE_DYNAMIC_DMA) ? 
&ps3_dma_sb_region_ops : &ps3_dma_sb_region_linear_ops; break; case PS3_DEVICE_TYPE_IOC0: r->region_ops = &ps3_dma_ioc0_region_ops; break; default: BUG(); return -EINVAL; } return 0; } EXPORT_SYMBOL(ps3_dma_region_init); int ps3_dma_region_create(struct ps3_dma_region *r) { BUG_ON(!r); BUG_ON(!r->region_ops); BUG_ON(!r->region_ops->create); return r->region_ops->create(r); } EXPORT_SYMBOL(ps3_dma_region_create); int ps3_dma_region_free(struct ps3_dma_region *r) { BUG_ON(!r); BUG_ON(!r->region_ops); BUG_ON(!r->region_ops->free); return r->region_ops->free(r); } EXPORT_SYMBOL(ps3_dma_region_free); int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr, u64 iopte_flag) { return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag); } int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr, unsigned long len) { return r->region_ops->unmap(r, bus_addr, len); } /*============================================================================*/ /* system startup routines */ /*============================================================================*/ /** * ps3_mm_init - initialize the address space state variables */ void __init ps3_mm_init(void) { int result; DBG(" -> %s:%d\n", __func__, __LINE__); result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size, &map.total); if (result) panic("ps3_repository_read_mm_info() failed"); map.rm.offset = map.rm.base; map.vas_id = map.htab_size = 0; /* this implementation assumes map.rm.base is zero */ BUG_ON(map.rm.base); BUG_ON(!map.rm.size); /* arrange to do this in ps3_mm_add_memory */ ps3_mm_region_create(&map.r1, map.total - map.rm.size); /* correct map.total for the real total amount of memory we use */ map.total = map.rm.size + map.r1.size; DBG(" <- %s:%d\n", __func__, __LINE__); } /** * ps3_mm_shutdown - final cleanup of address space */ void ps3_mm_shutdown(void) { ps3_mm_region_destroy(&map.r1); }
gpl-2.0
draekko/android_kernel_ba2x_2.0
sound/isa/es1688/es1688_lib.c
3928
30973
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Routines for control of ESS ES1688/688/488 chip * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/ioport.h> #include <sound/core.h> #include <sound/es1688.h> #include <sound/initval.h> #include <asm/io.h> #include <asm/dma.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("ESS ESx688 lowlevel module"); MODULE_LICENSE("GPL"); static int snd_es1688_dsp_command(struct snd_es1688 *chip, unsigned char val) { int i; for (i = 10000; i; i--) if ((inb(ES1688P(chip, STATUS)) & 0x80) == 0) { outb(val, ES1688P(chip, COMMAND)); return 1; } #ifdef CONFIG_SND_DEBUG printk(KERN_DEBUG "snd_es1688_dsp_command: timeout (0x%x)\n", val); #endif return 0; } static int snd_es1688_dsp_get_byte(struct snd_es1688 *chip) { int i; for (i = 1000; i; i--) if (inb(ES1688P(chip, DATA_AVAIL)) & 0x80) return inb(ES1688P(chip, READ)); snd_printd("es1688 get byte failed: 0x%lx = 0x%x!!!\n", ES1688P(chip, DATA_AVAIL), inb(ES1688P(chip, DATA_AVAIL))); return -ENODEV; } static int snd_es1688_write(struct snd_es1688 *chip, unsigned char reg, unsigned char data) { if (!snd_es1688_dsp_command(chip, reg)) return 0; return snd_es1688_dsp_command(chip, data); } static int 
snd_es1688_read(struct snd_es1688 *chip, unsigned char reg) { /* Read a byte from an extended mode register of ES1688 */ if (!snd_es1688_dsp_command(chip, 0xc0)) return -1; if (!snd_es1688_dsp_command(chip, reg)) return -1; return snd_es1688_dsp_get_byte(chip); } void snd_es1688_mixer_write(struct snd_es1688 *chip, unsigned char reg, unsigned char data) { outb(reg, ES1688P(chip, MIXER_ADDR)); udelay(10); outb(data, ES1688P(chip, MIXER_DATA)); udelay(10); } static unsigned char snd_es1688_mixer_read(struct snd_es1688 *chip, unsigned char reg) { unsigned char result; outb(reg, ES1688P(chip, MIXER_ADDR)); udelay(10); result = inb(ES1688P(chip, MIXER_DATA)); udelay(10); return result; } int snd_es1688_reset(struct snd_es1688 *chip) { int i; outb(3, ES1688P(chip, RESET)); /* valid only for ESS chips, SB -> 1 */ udelay(10); outb(0, ES1688P(chip, RESET)); udelay(30); for (i = 0; i < 1000 && !(inb(ES1688P(chip, DATA_AVAIL)) & 0x80); i++); if (inb(ES1688P(chip, READ)) != 0xaa) { snd_printd("ess_reset at 0x%lx: failed!!!\n", chip->port); return -ENODEV; } snd_es1688_dsp_command(chip, 0xc6); /* enable extended mode */ return 0; } EXPORT_SYMBOL(snd_es1688_reset); static int snd_es1688_probe(struct snd_es1688 *chip) { unsigned long flags; unsigned short major, minor, hw; int i; /* * initialization sequence */ spin_lock_irqsave(&chip->reg_lock, flags); /* Some ESS1688 cards need this */ inb(ES1688P(chip, ENABLE1)); /* ENABLE1 */ inb(ES1688P(chip, ENABLE1)); /* ENABLE1 */ inb(ES1688P(chip, ENABLE1)); /* ENABLE1 */ inb(ES1688P(chip, ENABLE2)); /* ENABLE2 */ inb(ES1688P(chip, ENABLE1)); /* ENABLE1 */ inb(ES1688P(chip, ENABLE2)); /* ENABLE2 */ inb(ES1688P(chip, ENABLE1)); /* ENABLE1 */ inb(ES1688P(chip, ENABLE1)); /* ENABLE1 */ inb(ES1688P(chip, ENABLE2)); /* ENABLE2 */ inb(ES1688P(chip, ENABLE1)); /* ENABLE1 */ inb(ES1688P(chip, ENABLE0)); /* ENABLE0 */ if (snd_es1688_reset(chip) < 0) { snd_printdd("ESS: [0x%lx] reset failed... 
0x%x\n", chip->port, inb(ES1688P(chip, READ))); spin_unlock_irqrestore(&chip->reg_lock, flags); return -ENODEV; } snd_es1688_dsp_command(chip, 0xe7); /* return identification */ for (i = 1000, major = minor = 0; i; i--) { if (inb(ES1688P(chip, DATA_AVAIL)) & 0x80) { if (major == 0) { major = inb(ES1688P(chip, READ)); } else { minor = inb(ES1688P(chip, READ)); } } } spin_unlock_irqrestore(&chip->reg_lock, flags); snd_printdd("ESS: [0x%lx] found.. major = 0x%x, minor = 0x%x\n", chip->port, major, minor); chip->version = (major << 8) | minor; if (!chip->version) return -ENODEV; /* probably SB */ hw = ES1688_HW_AUTO; switch (chip->version & 0xfff0) { case 0x4880: snd_printk(KERN_ERR "[0x%lx] ESS: AudioDrive ES488 detected, " "but driver is in another place\n", chip->port); return -ENODEV; case 0x6880: hw = (chip->version & 0x0f) >= 8 ? ES1688_HW_1688 : ES1688_HW_688; break; default: snd_printk(KERN_ERR "[0x%lx] ESS: unknown AudioDrive chip " "with version 0x%x (Jazz16 soundcard?)\n", chip->port, chip->version); return -ENODEV; } spin_lock_irqsave(&chip->reg_lock, flags); snd_es1688_write(chip, 0xb1, 0x10); /* disable IRQ */ snd_es1688_write(chip, 0xb2, 0x00); /* disable DMA */ spin_unlock_irqrestore(&chip->reg_lock, flags); /* enable joystick, but disable OPL3 */ spin_lock_irqsave(&chip->mixer_lock, flags); snd_es1688_mixer_write(chip, 0x40, 0x01); spin_unlock_irqrestore(&chip->mixer_lock, flags); return 0; } static int snd_es1688_init(struct snd_es1688 * chip, int enable) { static int irqs[16] = {-1, -1, 0, -1, -1, 1, -1, 2, -1, 0, 3, -1, -1, -1, -1, -1}; unsigned long flags; int cfg, irq_bits, dma, dma_bits, tmp, tmp1; /* ok.. 
setup MPU-401 port and joystick and OPL3 */ cfg = 0x01; /* enable joystick, but disable OPL3 */ if (enable && chip->mpu_port >= 0x300 && chip->mpu_irq > 0 && chip->hardware != ES1688_HW_688) { tmp = (chip->mpu_port & 0x0f0) >> 4; if (tmp <= 3) { switch (chip->mpu_irq) { case 9: tmp1 = 4; break; case 5: tmp1 = 5; break; case 7: tmp1 = 6; break; case 10: tmp1 = 7; break; default: tmp1 = 0; } if (tmp1) { cfg |= (tmp << 3) | (tmp1 << 5); } } } #if 0 snd_printk(KERN_DEBUG "mpu cfg = 0x%x\n", cfg); #endif spin_lock_irqsave(&chip->reg_lock, flags); snd_es1688_mixer_write(chip, 0x40, cfg); spin_unlock_irqrestore(&chip->reg_lock, flags); /* --- */ spin_lock_irqsave(&chip->reg_lock, flags); snd_es1688_read(chip, 0xb1); snd_es1688_read(chip, 0xb2); spin_unlock_irqrestore(&chip->reg_lock, flags); if (enable) { cfg = 0xf0; /* enable only DMA counter interrupt */ irq_bits = irqs[chip->irq & 0x0f]; if (irq_bits < 0) { snd_printk(KERN_ERR "[0x%lx] ESS: bad IRQ %d " "for ES1688 chip!!\n", chip->port, chip->irq); #if 0 irq_bits = 0; cfg = 0x10; #endif return -EINVAL; } spin_lock_irqsave(&chip->reg_lock, flags); snd_es1688_write(chip, 0xb1, cfg | (irq_bits << 2)); spin_unlock_irqrestore(&chip->reg_lock, flags); cfg = 0xf0; /* extended mode DMA enable */ dma = chip->dma8; if (dma > 3 || dma == 2) { snd_printk(KERN_ERR "[0x%lx] ESS: bad DMA channel %d " "for ES1688 chip!!\n", chip->port, dma); #if 0 dma_bits = 0; cfg = 0x00; /* disable all DMA */ #endif return -EINVAL; } else { dma_bits = dma; if (dma != 3) dma_bits++; } spin_lock_irqsave(&chip->reg_lock, flags); snd_es1688_write(chip, 0xb2, cfg | (dma_bits << 2)); spin_unlock_irqrestore(&chip->reg_lock, flags); } else { spin_lock_irqsave(&chip->reg_lock, flags); snd_es1688_write(chip, 0xb1, 0x10); /* disable IRQ */ snd_es1688_write(chip, 0xb2, 0x00); /* disable DMA */ spin_unlock_irqrestore(&chip->reg_lock, flags); } spin_lock_irqsave(&chip->reg_lock, flags); snd_es1688_read(chip, 0xb1); snd_es1688_read(chip, 0xb2); 
snd_es1688_reset(chip); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } /* */ static struct snd_ratnum clocks[2] = { { .num = 795444, .den_min = 1, .den_max = 128, .den_step = 1, }, { .num = 397722, .den_min = 1, .den_max = 128, .den_step = 1, } }; static struct snd_pcm_hw_constraint_ratnums hw_constraints_clocks = { .nrats = 2, .rats = clocks, }; static void snd_es1688_set_rate(struct snd_es1688 *chip, struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; unsigned int bits, divider; if (runtime->rate_num == clocks[0].num) bits = 256 - runtime->rate_den; else bits = 128 - runtime->rate_den; /* set filter register */ divider = 256 - 7160000*20/(8*82*runtime->rate); /* write result to hardware */ snd_es1688_write(chip, 0xa1, bits); snd_es1688_write(chip, 0xa2, divider); } static int snd_es1688_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg) { return snd_pcm_lib_ioctl(substream, cmd, arg); } static int snd_es1688_trigger(struct snd_es1688 *chip, int cmd, unsigned char value) { int val; if (cmd == SNDRV_PCM_TRIGGER_STOP) { value = 0x00; } else if (cmd != SNDRV_PCM_TRIGGER_START) { return -EINVAL; } spin_lock(&chip->reg_lock); chip->trigger_value = value; val = snd_es1688_read(chip, 0xb8); if ((val < 0) || (val & 0x0f) == value) { spin_unlock(&chip->reg_lock); return -EINVAL; /* something is wrong */ } #if 0 printk(KERN_DEBUG "trigger: val = 0x%x, value = 0x%x\n", val, value); printk(KERN_DEBUG "trigger: pointer = 0x%x\n", snd_dma_pointer(chip->dma8, chip->dma_size)); #endif snd_es1688_write(chip, 0xb8, (val & 0xf0) | value); spin_unlock(&chip->reg_lock); return 0; } static int snd_es1688_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_es1688_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static int 
snd_es1688_playback_prepare(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_es1688 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); chip->dma_size = size; spin_lock_irqsave(&chip->reg_lock, flags); snd_es1688_reset(chip); snd_es1688_set_rate(chip, substream); snd_es1688_write(chip, 0xb8, 4); /* auto init DMA mode */ snd_es1688_write(chip, 0xa8, (snd_es1688_read(chip, 0xa8) & ~0x03) | (3 - runtime->channels)); snd_es1688_write(chip, 0xb9, 2); /* demand mode (4 bytes/request) */ if (runtime->channels == 1) { if (snd_pcm_format_width(runtime->format) == 8) { /* 8. bit mono */ snd_es1688_write(chip, 0xb6, 0x80); snd_es1688_write(chip, 0xb7, 0x51); snd_es1688_write(chip, 0xb7, 0xd0); } else { /* 16. bit mono */ snd_es1688_write(chip, 0xb6, 0x00); snd_es1688_write(chip, 0xb7, 0x71); snd_es1688_write(chip, 0xb7, 0xf4); } } else { if (snd_pcm_format_width(runtime->format) == 8) { /* 8. bit stereo */ snd_es1688_write(chip, 0xb6, 0x80); snd_es1688_write(chip, 0xb7, 0x51); snd_es1688_write(chip, 0xb7, 0x98); } else { /* 16. 
bit stereo */ snd_es1688_write(chip, 0xb6, 0x00); snd_es1688_write(chip, 0xb7, 0x71); snd_es1688_write(chip, 0xb7, 0xbc); } } snd_es1688_write(chip, 0xb1, (snd_es1688_read(chip, 0xb1) & 0x0f) | 0x50); snd_es1688_write(chip, 0xb2, (snd_es1688_read(chip, 0xb2) & 0x0f) | 0x50); snd_es1688_dsp_command(chip, ES1688_DSP_CMD_SPKON); spin_unlock_irqrestore(&chip->reg_lock, flags); /* --- */ count = -count; snd_dma_program(chip->dma8, runtime->dma_addr, size, DMA_MODE_WRITE | DMA_AUTOINIT); spin_lock_irqsave(&chip->reg_lock, flags); snd_es1688_write(chip, 0xa4, (unsigned char) count); snd_es1688_write(chip, 0xa5, (unsigned char) (count >> 8)); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_es1688_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_es1688 *chip = snd_pcm_substream_chip(substream); return snd_es1688_trigger(chip, cmd, 0x05); } static int snd_es1688_capture_prepare(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_es1688 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); chip->dma_size = size; spin_lock_irqsave(&chip->reg_lock, flags); snd_es1688_reset(chip); snd_es1688_set_rate(chip, substream); snd_es1688_dsp_command(chip, ES1688_DSP_CMD_SPKOFF); snd_es1688_write(chip, 0xb8, 0x0e); /* auto init DMA mode */ snd_es1688_write(chip, 0xa8, (snd_es1688_read(chip, 0xa8) & ~0x03) | (3 - runtime->channels)); snd_es1688_write(chip, 0xb9, 2); /* demand mode (4 bytes/request) */ if (runtime->channels == 1) { if (snd_pcm_format_width(runtime->format) == 8) { /* 8. bit mono */ snd_es1688_write(chip, 0xb7, 0x51); snd_es1688_write(chip, 0xb7, 0xd0); } else { /* 16. bit mono */ snd_es1688_write(chip, 0xb7, 0x71); snd_es1688_write(chip, 0xb7, 0xf4); } } else { if (snd_pcm_format_width(runtime->format) == 8) { /* 8. 
bit stereo */ snd_es1688_write(chip, 0xb7, 0x51); snd_es1688_write(chip, 0xb7, 0x98); } else { /* 16. bit stereo */ snd_es1688_write(chip, 0xb7, 0x71); snd_es1688_write(chip, 0xb7, 0xbc); } } snd_es1688_write(chip, 0xb1, (snd_es1688_read(chip, 0xb1) & 0x0f) | 0x50); snd_es1688_write(chip, 0xb2, (snd_es1688_read(chip, 0xb2) & 0x0f) | 0x50); spin_unlock_irqrestore(&chip->reg_lock, flags); /* --- */ count = -count; snd_dma_program(chip->dma8, runtime->dma_addr, size, DMA_MODE_READ | DMA_AUTOINIT); spin_lock_irqsave(&chip->reg_lock, flags); snd_es1688_write(chip, 0xa4, (unsigned char) count); snd_es1688_write(chip, 0xa5, (unsigned char) (count >> 8)); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_es1688_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_es1688 *chip = snd_pcm_substream_chip(substream); return snd_es1688_trigger(chip, cmd, 0x0f); } static irqreturn_t snd_es1688_interrupt(int irq, void *dev_id) { struct snd_es1688 *chip = dev_id; if (chip->trigger_value == 0x05) /* ok.. playback is active */ snd_pcm_period_elapsed(chip->playback_substream); if (chip->trigger_value == 0x0f) /* ok.. 
capture is active */ snd_pcm_period_elapsed(chip->capture_substream); inb(ES1688P(chip, DATA_AVAIL)); /* ack interrupt */ return IRQ_HANDLED; } static snd_pcm_uframes_t snd_es1688_playback_pointer(struct snd_pcm_substream *substream) { struct snd_es1688 *chip = snd_pcm_substream_chip(substream); size_t ptr; if (chip->trigger_value != 0x05) return 0; ptr = snd_dma_pointer(chip->dma8, chip->dma_size); return bytes_to_frames(substream->runtime, ptr); } static snd_pcm_uframes_t snd_es1688_capture_pointer(struct snd_pcm_substream *substream) { struct snd_es1688 *chip = snd_pcm_substream_chip(substream); size_t ptr; if (chip->trigger_value != 0x0f) return 0; ptr = snd_dma_pointer(chip->dma8, chip->dma_size); return bytes_to_frames(substream->runtime, ptr); } /* */ static struct snd_pcm_hardware snd_es1688_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 65536, .period_bytes_min = 64, .period_bytes_max = 65536, .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware snd_es1688_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 65536, .period_bytes_min = 64, .period_bytes_max = 65536, .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; /* */ static int snd_es1688_playback_open(struct snd_pcm_substream *substream) { struct snd_es1688 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; if (chip->capture_substream != NULL) return -EAGAIN; chip->playback_substream = 
substream; runtime->hw = snd_es1688_playback; snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_clocks); return 0; } static int snd_es1688_capture_open(struct snd_pcm_substream *substream) { struct snd_es1688 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; if (chip->playback_substream != NULL) return -EAGAIN; chip->capture_substream = substream; runtime->hw = snd_es1688_capture; snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_clocks); return 0; } static int snd_es1688_playback_close(struct snd_pcm_substream *substream) { struct snd_es1688 *chip = snd_pcm_substream_chip(substream); chip->playback_substream = NULL; return 0; } static int snd_es1688_capture_close(struct snd_pcm_substream *substream) { struct snd_es1688 *chip = snd_pcm_substream_chip(substream); chip->capture_substream = NULL; return 0; } static int snd_es1688_free(struct snd_es1688 *chip) { if (chip->res_port) { snd_es1688_init(chip, 0); release_and_free_resource(chip->res_port); } if (chip->irq >= 0) free_irq(chip->irq, (void *) chip); if (chip->dma8 >= 0) { disable_dma(chip->dma8); free_dma(chip->dma8); } return 0; } static int snd_es1688_dev_free(struct snd_device *device) { struct snd_es1688 *chip = device->device_data; return snd_es1688_free(chip); } static const char *snd_es1688_chip_id(struct snd_es1688 *chip) { static char tmp[16]; sprintf(tmp, "ES%s688 rev %i", chip->hardware == ES1688_HW_688 ? 
"" : "1", chip->version & 0x0f); return tmp; } int snd_es1688_create(struct snd_card *card, struct snd_es1688 *chip, unsigned long port, unsigned long mpu_port, int irq, int mpu_irq, int dma8, unsigned short hardware) { static struct snd_device_ops ops = { .dev_free = snd_es1688_dev_free, }; int err; if (chip == NULL) return -ENOMEM; chip->irq = -1; chip->dma8 = -1; if ((chip->res_port = request_region(port + 4, 12, "ES1688")) == NULL) { snd_printk(KERN_ERR "es1688: can't grab port 0x%lx\n", port + 4); return -EBUSY; } if (request_irq(irq, snd_es1688_interrupt, IRQF_DISABLED, "ES1688", (void *) chip)) { snd_printk(KERN_ERR "es1688: can't grab IRQ %d\n", irq); return -EBUSY; } chip->irq = irq; if (request_dma(dma8, "ES1688")) { snd_printk(KERN_ERR "es1688: can't grab DMA8 %d\n", dma8); return -EBUSY; } chip->dma8 = dma8; spin_lock_init(&chip->reg_lock); spin_lock_init(&chip->mixer_lock); chip->port = port; mpu_port &= ~0x000f; if (mpu_port < 0x300 || mpu_port > 0x330) mpu_port = 0; chip->mpu_port = mpu_port; chip->mpu_irq = mpu_irq; chip->hardware = hardware; err = snd_es1688_probe(chip); if (err < 0) return err; err = snd_es1688_init(chip, 1); if (err < 0) return err; /* Register device */ return snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); } static struct snd_pcm_ops snd_es1688_playback_ops = { .open = snd_es1688_playback_open, .close = snd_es1688_playback_close, .ioctl = snd_es1688_ioctl, .hw_params = snd_es1688_hw_params, .hw_free = snd_es1688_hw_free, .prepare = snd_es1688_playback_prepare, .trigger = snd_es1688_playback_trigger, .pointer = snd_es1688_playback_pointer, }; static struct snd_pcm_ops snd_es1688_capture_ops = { .open = snd_es1688_capture_open, .close = snd_es1688_capture_close, .ioctl = snd_es1688_ioctl, .hw_params = snd_es1688_hw_params, .hw_free = snd_es1688_hw_free, .prepare = snd_es1688_capture_prepare, .trigger = snd_es1688_capture_trigger, .pointer = snd_es1688_capture_pointer, }; int snd_es1688_pcm(struct snd_card *card, struct 
snd_es1688 *chip, int device, struct snd_pcm **rpcm) { struct snd_pcm *pcm; int err; err = snd_pcm_new(card, "ESx688", device, 1, 1, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_es1688_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_es1688_capture_ops); pcm->private_data = chip; pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX; sprintf(pcm->name, snd_es1688_chip_id(chip)); chip->pcm = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_isa_data(), 64*1024, 64*1024); if (rpcm) *rpcm = pcm; return 0; } /* * MIXER part */ static int snd_es1688_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[9] = { "Mic", "Mic Master", "CD", "AOUT", "Mic1", "Mix", "Line", "Master" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 8; if (uinfo->value.enumerated.item > 7) uinfo->value.enumerated.item = 7; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_es1688_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_es1688 *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.enumerated.item[0] = snd_es1688_mixer_read(chip, ES1688_REC_DEV) & 7; return 0; } static int snd_es1688_put_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_es1688 *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; unsigned char oval, nval; int change; if (ucontrol->value.enumerated.item[0] > 8) return -EINVAL; spin_lock_irqsave(&chip->reg_lock, flags); oval = snd_es1688_mixer_read(chip, ES1688_REC_DEV); nval = (ucontrol->value.enumerated.item[0] & 7) | (oval & ~15); change = nval != oval; if (change) snd_es1688_mixer_write(chip, ES1688_REC_DEV, nval); spin_unlock_irqrestore(&chip->reg_lock, flags); return change; } #define ES1688_SINGLE(xname, xindex, reg, shift, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 
.name = xname, .index = xindex, \ .info = snd_es1688_info_single, \ .get = snd_es1688_get_single, .put = snd_es1688_put_single, \ .private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) } static int snd_es1688_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 16) & 0xff; uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_es1688_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_es1688 *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; spin_lock_irqsave(&chip->reg_lock, flags); ucontrol->value.integer.value[0] = (snd_es1688_mixer_read(chip, reg) >> shift) & mask; spin_unlock_irqrestore(&chip->reg_lock, flags); if (invert) ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; return 0; } static int snd_es1688_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_es1688 *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; int change; unsigned char oval, nval; nval = (ucontrol->value.integer.value[0] & mask); if (invert) nval = mask - nval; nval <<= shift; spin_lock_irqsave(&chip->reg_lock, flags); oval = snd_es1688_mixer_read(chip, reg); nval = (oval & ~(mask << shift)) | nval; change = nval != oval; if (change) snd_es1688_mixer_write(chip, reg, nval); spin_unlock_irqrestore(&chip->reg_lock, flags); return change; } #define ES1688_DOUBLE(xname, xindex, 
left_reg, right_reg, shift_left, shift_right, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_es1688_info_double, \ .get = snd_es1688_get_double, .put = snd_es1688_put_double, \ .private_value = left_reg | (right_reg << 8) | (shift_left << 16) | (shift_right << 19) | (mask << 24) | (invert << 22) }

/* Element info for a two-channel control.
 * private_value layout (per ES1688_DOUBLE above): bits 0-7 left_reg,
 * 8-15 right_reg, 16-18 shift_left, 19-21 shift_right, 22 invert,
 * 24-31 mask. */
static int snd_es1688_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	int mask = (kcontrol->private_value >> 24) & 0xff;

	uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = mask;
	return 0;
}

/* Read both channels of a stereo control.  Registers below 0xa0 are
 * accessed through the mixer port, the rest through the controller
 * port. */
static int snd_es1688_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_es1688 *chip = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int left_reg = kcontrol->private_value & 0xff;
	int right_reg = (kcontrol->private_value >> 8) & 0xff;
	int shift_left = (kcontrol->private_value >> 16) & 0x07;
	int shift_right = (kcontrol->private_value >> 19) & 0x07;
	int mask = (kcontrol->private_value >> 24) & 0xff;
	int invert = (kcontrol->private_value >> 22) & 1;
	unsigned char left, right;

	spin_lock_irqsave(&chip->reg_lock, flags);
	if (left_reg < 0xa0)
		left = snd_es1688_mixer_read(chip, left_reg);
	else
		left = snd_es1688_read(chip, left_reg);
	if (left_reg != right_reg) {
		if (right_reg < 0xa0)
			right = snd_es1688_mixer_read(chip, right_reg);
		else
			right = snd_es1688_read(chip, right_reg);
	} else
		/* both channels packed into one register: read it once */
		right = left;
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	ucontrol->value.integer.value[0] = (left >> shift_left) & mask;
	ucontrol->value.integer.value[1] = (right >> shift_right) & mask;
	if (invert) {
		ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0];
		ucontrol->value.integer.value[1] = mask - ucontrol->value.integer.value[1];
	}
	return 0;
}

static int snd_es1688_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct 
snd_es1688 *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; int change; unsigned char val1, val2, oval1, oval2; val1 = ucontrol->value.integer.value[0] & mask; val2 = ucontrol->value.integer.value[1] & mask; if (invert) { val1 = mask - val1; val2 = mask - val2; } val1 <<= shift_left; val2 <<= shift_right; spin_lock_irqsave(&chip->reg_lock, flags); if (left_reg != right_reg) { if (left_reg < 0xa0) oval1 = snd_es1688_mixer_read(chip, left_reg); else oval1 = snd_es1688_read(chip, left_reg); if (right_reg < 0xa0) oval2 = snd_es1688_mixer_read(chip, right_reg); else oval2 = snd_es1688_read(chip, right_reg); val1 = (oval1 & ~(mask << shift_left)) | val1; val2 = (oval2 & ~(mask << shift_right)) | val2; change = val1 != oval1 || val2 != oval2; if (change) { if (left_reg < 0xa0) snd_es1688_mixer_write(chip, left_reg, val1); else snd_es1688_write(chip, left_reg, val1); if (right_reg < 0xa0) snd_es1688_mixer_write(chip, right_reg, val1); else snd_es1688_write(chip, right_reg, val1); } } else { if (left_reg < 0xa0) oval1 = snd_es1688_mixer_read(chip, left_reg); else oval1 = snd_es1688_read(chip, left_reg); val1 = (oval1 & ~((mask << shift_left) | (mask << shift_right))) | val1 | val2; change = val1 != oval1; if (change) { if (left_reg < 0xa0) snd_es1688_mixer_write(chip, left_reg, val1); else snd_es1688_write(chip, left_reg, val1); } } spin_unlock_irqrestore(&chip->reg_lock, flags); return change; } static struct snd_kcontrol_new snd_es1688_controls[] = { ES1688_DOUBLE("Master Playback Volume", 0, ES1688_MASTER_DEV, ES1688_MASTER_DEV, 4, 0, 15, 0), ES1688_DOUBLE("PCM Playback Volume", 0, ES1688_PCM_DEV, ES1688_PCM_DEV, 4, 0, 15, 0), ES1688_DOUBLE("Line 
Playback Volume", 0, ES1688_LINE_DEV, ES1688_LINE_DEV, 4, 0, 15, 0), ES1688_DOUBLE("CD Playback Volume", 0, ES1688_CD_DEV, ES1688_CD_DEV, 4, 0, 15, 0), ES1688_DOUBLE("FM Playback Volume", 0, ES1688_FM_DEV, ES1688_FM_DEV, 4, 0, 15, 0), ES1688_DOUBLE("Mic Playback Volume", 0, ES1688_MIC_DEV, ES1688_MIC_DEV, 4, 0, 15, 0), ES1688_DOUBLE("Aux Playback Volume", 0, ES1688_AUX_DEV, ES1688_AUX_DEV, 4, 0, 15, 0), ES1688_SINGLE("Beep Playback Volume", 0, ES1688_SPEAKER_DEV, 0, 7, 0), ES1688_DOUBLE("Capture Volume", 0, ES1688_RECLEV_DEV, ES1688_RECLEV_DEV, 4, 0, 15, 0), ES1688_SINGLE("Capture Switch", 0, ES1688_REC_DEV, 4, 1, 1), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Source", .info = snd_es1688_info_mux, .get = snd_es1688_get_mux, .put = snd_es1688_put_mux, }, }; #define ES1688_INIT_TABLE_SIZE (sizeof(snd_es1688_init_table)/2) static unsigned char snd_es1688_init_table[][2] = { { ES1688_MASTER_DEV, 0 }, { ES1688_PCM_DEV, 0 }, { ES1688_LINE_DEV, 0 }, { ES1688_CD_DEV, 0 }, { ES1688_FM_DEV, 0 }, { ES1688_MIC_DEV, 0 }, { ES1688_AUX_DEV, 0 }, { ES1688_SPEAKER_DEV, 0 }, { ES1688_RECLEV_DEV, 0 }, { ES1688_REC_DEV, 0x17 } }; int snd_es1688_mixer(struct snd_card *card, struct snd_es1688 *chip) { unsigned int idx; int err; unsigned char reg, val; if (snd_BUG_ON(!chip || !card)) return -EINVAL; strcpy(card->mixername, snd_es1688_chip_id(chip)); for (idx = 0; idx < ARRAY_SIZE(snd_es1688_controls); idx++) { if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es1688_controls[idx], chip))) < 0) return err; } for (idx = 0; idx < ES1688_INIT_TABLE_SIZE; idx++) { reg = snd_es1688_init_table[idx][0]; val = snd_es1688_init_table[idx][1]; if (reg < 0xa0) snd_es1688_mixer_write(chip, reg, val); else snd_es1688_write(chip, reg, val); } return 0; } EXPORT_SYMBOL(snd_es1688_mixer_write); EXPORT_SYMBOL(snd_es1688_create); EXPORT_SYMBOL(snd_es1688_pcm); EXPORT_SYMBOL(snd_es1688_mixer); /* * INIT part */ static int __init alsa_es1688_init(void) { return 0; } static void __exit 
alsa_es1688_exit(void)
{
	/* nothing to tear down: the init hook registered nothing */
}

module_init(alsa_es1688_init)
module_exit(alsa_es1688_exit)
gpl-2.0
oppo-source/N1-4.2-kernel-source
drivers/usb/gadget/file_storage.c
4952
106153
/* * file_storage.c -- File-backed USB Storage Gadget, for USB development * * Copyright (C) 2003-2008 Alan Stern * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the above-listed copyright holders may not be used * to endorse or promote products derived from this software without * specific prior written permission. * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * The File-backed Storage Gadget acts as a USB Mass Storage device, * appearing to the host as a disk drive or as a CD-ROM drive. 
In addition * to providing an example of a genuinely useful gadget driver for a USB * device, it also illustrates a technique of double-buffering for increased * throughput. Last but not least, it gives an easy way to probe the * behavior of the Mass Storage drivers in a USB host. * * Backing storage is provided by a regular file or a block device, specified * by the "file" module parameter. Access can be limited to read-only by * setting the optional "ro" module parameter. (For CD-ROM emulation, * access is always read-only.) The gadget will indicate that it has * removable media if the optional "removable" module parameter is set. * * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI), * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected * by the optional "transport" module parameter. It also supports the * following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0c03), * UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by * the optional "protocol" module parameter. In addition, the default * Vendor ID, Product ID, release number and serial number can be overridden. * * There is support for multiple logical units (LUNs), each of which has * its own backing file. The number of LUNs can be set using the optional * "luns" module parameter (anywhere from 1 to 8), and the corresponding * files are specified using comma-separated lists for "file" and "ro". * The default number of LUNs is taken from the number of "file" elements; * it is 1 if "file" is not given. If "removable" is not set then a backing * file must be specified for each LUN. If it is set, then an unspecified * or empty backing filename means the LUN's medium is not loaded. Ideally * each LUN would be settable independently as a disk drive or a CD-ROM * drive, but currently all LUNs have to be the same type. The CD-ROM * emulation includes a single data track and no audio tracks; hence there * need be only one backing file per LUN. 
* * Requirements are modest; only a bulk-in and a bulk-out endpoint are * needed (an interrupt-out endpoint is also needed for CBI). The memory * requirement amounts to two 16K buffers, size configurable by a parameter. * Support is included for both full-speed and high-speed operation. * * Note that the driver is slightly non-portable in that it assumes a * single memory/DMA buffer will be useable for bulk-in, bulk-out, and * interrupt-in endpoints. With most device controllers this isn't an * issue, but there may be some with hardware restrictions that prevent * a buffer from being used by more than one endpoint. * * Module options: * * file=filename[,filename...] * Required if "removable" is not set, names of * the files or block devices used for * backing storage * serial=HHHH... Required serial number (string of hex chars) * ro=b[,b...] Default false, booleans for read-only access * removable Default false, boolean for removable media * luns=N Default N = number of filenames, number of * LUNs to support * nofua=b[,b...] Default false, booleans for ignore FUA flag * in SCSI WRITE(10,12) commands * stall Default determined according to the type of * USB device controller (usually true), * boolean to permit the driver to halt * bulk endpoints * cdrom Default false, boolean for whether to emulate * a CD-ROM drive * transport=XXX Default BBB, transport name (CB, CBI, or BBB) * protocol=YYY Default SCSI, protocol name (RBC, 8020 or * ATAPI, QIC, UFI, 8070, or SCSI; * also 1 - 6) * vendor=0xVVVV Default 0x0525 (NetChip), USB Vendor ID * product=0xPPPP Default 0xa4a5 (FSG), USB Product ID * release=0xRRRR Override the USB release number (bcdDevice) * buflen=N Default N=16384, buffer size used (will be * rounded down to a multiple of * PAGE_CACHE_SIZE) * * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "serial", "ro", * "removable", "luns", "nofua", "stall", and "cdrom" options are available; * default values are used for everything else. 
* * The pathnames of the backing files and the ro settings are available in * the attribute files "file", "nofua", and "ro" in the lun<n> subdirectory of * the gadget's sysfs directory. If the "removable" option is set, writing to * these files will simulate ejecting/loading the medium (writing an empty * line means eject) and adjusting a write-enable tab. Changes to the ro * setting are not allowed when the medium is loaded or if CD-ROM emulation * is being used. * * This gadget driver is heavily based on "Gadget Zero" by David Brownell. * The driver's SCSI command interface was based on the "Information * technology - Small Computer System Interface - 2" document from * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. The single exception * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the * "Universal Serial Bus Mass Storage Class UFI Command Specification" * document, Revision 1.0, December 14, 1998, available at * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>. */ /* * Driver Design * * The FSG driver is fairly straightforward. There is a main kernel * thread that handles most of the work. Interrupt routines field * callbacks from the controller driver: bulk- and interrupt-request * completion notifications, endpoint-0 events, and disconnect events. * Completion events are passed to the main thread by wakeup calls. Many * ep0 requests are handled at interrupt time, but SetInterface, * SetConfiguration, and device reset requests are forwarded to the * thread in the form of "exceptions" using SIGUSR1 signals (since they * should interrupt any ongoing file I/O operations). * * The thread's main routine implements the standard command/data/status * parts of a SCSI interaction. It and its subroutines are full of tests * for pending signals/exceptions -- all this polling is necessary since * the kernel has no setjmp/longjmp equivalents. 
(Maybe this is an * indication that the driver really wants to be running in userspace.) * An important point is that so long as the thread is alive it keeps an * open reference to the backing file. This will prevent unmounting * the backing file's underlying filesystem and could cause problems * during system shutdown, for example. To prevent such problems, the * thread catches INT, TERM, and KILL signals and converts them into * an EXIT exception. * * In normal operation the main thread is started during the gadget's * fsg_bind() callback and stopped during fsg_unbind(). But it can also * exit when it receives a signal, and there's no point leaving the * gadget running when the thread is dead. So just before the thread * exits, it deregisters the gadget driver. This makes things a little * tricky: The driver is deregistered at two places, and the exiting * thread can indirectly call fsg_unbind() which in turn can tell the * thread to exit. The first problem is resolved through the use of the * REGISTERED atomic bitflag; the driver will only be deregistered once. * The second problem is resolved by having fsg_unbind() check * fsg->state; it won't try to stop the thread if the state is already * FSG_STATE_TERMINATED. * * To provide maximum throughput, the driver uses a circular pipeline of * buffer heads (struct fsg_buffhd). In principle the pipeline can be * arbitrarily long; in practice the benefits don't justify having more * than 2 stages (i.e., double buffering). But it helps to think of the * pipeline as being a long one. Each buffer head contains a bulk-in and * a bulk-out request pointer (since the buffer can be used for both * output and input -- directions always are given from the host's * point of view) as well as a pointer to the buffer and various state * variables. * * Use of the pipeline follows a simple protocol. There is a variable * (fsg->next_buffhd_to_fill) that points to the next buffer head to use. 
* At any time that buffer head may still be in use from an earlier * request, so each buffer head has a state variable indicating whether * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the * buffer head to be EMPTY, filling the buffer either by file I/O or by * USB I/O (during which the buffer head is BUSY), and marking the buffer * head FULL when the I/O is complete. Then the buffer will be emptied * (again possibly by USB I/O, during which it is marked BUSY) and * finally marked EMPTY again (possibly by a completion routine). * * A module parameter tells the driver to avoid stalling the bulk * endpoints wherever the transport specification allows. This is * necessary for some UDCs like the SuperH, which cannot reliably clear a * halt on a bulk endpoint. However, under certain circumstances the * Bulk-only specification requires a stall. In such cases the driver * will halt the endpoint and set a flag indicating that it should clear * the halt in software during the next device reset. Hopefully this * will permit everything to work correctly. Furthermore, although the * specification allows the bulk-out endpoint to halt when the host sends * too much data, implementing this would cause an unavoidable race. * The driver will always use the "no-stall" approach for OUT transfers. * * One subtle point concerns sending status-stage responses for ep0 * requests. Some of these requests, such as device reset, can involve * interrupting an ongoing file I/O operation, which might take an * arbitrarily long time. During that delay the host might give up on * the original ep0 request and issue a new one. When that happens the * driver should not notify the host about completion of the original * request, as the host will no longer be waiting for it. So the driver * assigns to each ep0 request a unique tag, and it keeps track of the * tag value of the request associated with a long-running exception * (device-reset, interface-change, or configuration-change). 
When the * exception handler is finished, the status-stage response is submitted * only if the current ep0 request tag is equal to the exception request * tag. Thus only the most recently received ep0 request will get a * status-stage response. * * Warning: This driver source file is too long. It ought to be split up * into a header file plus about 3 separate .c files, to handle the details * of the Gadget, USB Mass Storage, and SCSI protocols. */ /* #define VERBOSE_DEBUG */ /* #define DUMP_MSGS */ #include <linux/blkdev.h> #include <linux/completion.h> #include <linux/dcache.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/fcntl.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/kref.h> #include <linux/kthread.h> #include <linux/limits.h> #include <linux/module.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/freezer.h> #include <linux/utsname.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include "gadget_chips.h" /* * Kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
*/ #include "usbstring.c" #include "config.c" #include "epautoconf.c" /*-------------------------------------------------------------------------*/ #define DRIVER_DESC "File-backed Storage Gadget" #define DRIVER_NAME "g_file_storage" #define DRIVER_VERSION "1 September 2010" static char fsg_string_manufacturer[64]; static const char fsg_string_product[] = DRIVER_DESC; static const char fsg_string_config[] = "Self-powered"; static const char fsg_string_interface[] = "Mass Storage"; #include "storage_common.c" MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Alan Stern"); MODULE_LICENSE("Dual BSD/GPL"); /* * This driver assumes self-powered hardware and has no way for users to * trigger remote wakeup. It uses autoconfiguration to select endpoints * and endpoint addresses. */ /*-------------------------------------------------------------------------*/ /* Encapsulate the module parameter settings */ static struct { char *file[FSG_MAX_LUNS]; char *serial; bool ro[FSG_MAX_LUNS]; bool nofua[FSG_MAX_LUNS]; unsigned int num_filenames; unsigned int num_ros; unsigned int num_nofuas; unsigned int nluns; bool removable; bool can_stall; bool cdrom; char *transport_parm; char *protocol_parm; unsigned short vendor; unsigned short product; unsigned short release; unsigned int buflen; int transport_type; char *transport_name; int protocol_type; char *protocol_name; } mod_data = { // Default values .transport_parm = "BBB", .protocol_parm = "SCSI", .removable = 0, .can_stall = 1, .cdrom = 0, .vendor = FSG_VENDOR_ID, .product = FSG_PRODUCT_ID, .release = 0xffff, // Use controller chip type .buflen = 16384, }; module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames, S_IRUGO); MODULE_PARM_DESC(file, "names of backing files or devices"); module_param_named(serial, mod_data.serial, charp, S_IRUGO); MODULE_PARM_DESC(serial, "USB serial number"); module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO); MODULE_PARM_DESC(ro, "true to force read-only"); 
module_param_array_named(nofua, mod_data.nofua, bool, &mod_data.num_nofuas, S_IRUGO); MODULE_PARM_DESC(nofua, "true to ignore SCSI WRITE(10,12) FUA bit"); module_param_named(luns, mod_data.nluns, uint, S_IRUGO); MODULE_PARM_DESC(luns, "number of LUNs"); module_param_named(removable, mod_data.removable, bool, S_IRUGO); MODULE_PARM_DESC(removable, "true to simulate removable media"); module_param_named(stall, mod_data.can_stall, bool, S_IRUGO); MODULE_PARM_DESC(stall, "false to prevent bulk stalls"); module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO); MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk"); /* In the non-TEST version, only the module parameters listed above * are available. */ #ifdef CONFIG_USB_FILE_STORAGE_TEST module_param_named(transport, mod_data.transport_parm, charp, S_IRUGO); MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)"); module_param_named(protocol, mod_data.protocol_parm, charp, S_IRUGO); MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, " "8070, or SCSI)"); module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO); MODULE_PARM_DESC(vendor, "USB Vendor ID"); module_param_named(product, mod_data.product, ushort, S_IRUGO); MODULE_PARM_DESC(product, "USB Product ID"); module_param_named(release, mod_data.release, ushort, S_IRUGO); MODULE_PARM_DESC(release, "USB release number"); module_param_named(buflen, mod_data.buflen, uint, S_IRUGO); MODULE_PARM_DESC(buflen, "I/O buffer size"); #endif /* CONFIG_USB_FILE_STORAGE_TEST */ /* * These definitions will permit the compiler to avoid generating code for * parts of the driver that aren't used in the non-TEST version. Even gcc * can recognize when a test of a constant expression yields a dead code * path. 
 */
#ifdef CONFIG_USB_FILE_STORAGE_TEST

#define transport_is_bbb()	(mod_data.transport_type == USB_PR_BULK)
#define transport_is_cbi()	(mod_data.transport_type == USB_PR_CBI)
#define protocol_is_scsi()	(mod_data.protocol_type == USB_SC_SCSI)

#else

/* Non-TEST build: Bulk-only transport with SCSI protocol, hard-wired */
#define transport_is_bbb()	1
#define transport_is_cbi()	0
#define protocol_is_scsi()	1

#endif /* CONFIG_USB_FILE_STORAGE_TEST */


/*-------------------------------------------------------------------------*/

/* Per-gadget driver state.  One of these exists for the whole function
 * (see the_fsg below); the worker thread and the IRQ-context callbacks
 * both operate on it, synchronized via the spinlock and atomic bitflags. */
struct fsg_dev {
	/* lock protects: state, all the req_busy's, and cbbuf_cmnd */
	spinlock_t		lock;
	struct usb_gadget	*gadget;

	/* filesem protects: backing files in use */
	struct rw_semaphore	filesem;

	/* reference counting: wait until all LUNs are released */
	struct kref		ref;

	struct usb_ep		*ep0;		// Handy copy of gadget->ep0
	struct usb_request	*ep0req;	// For control responses
	unsigned int		ep0_req_tag;
	const char		*ep0req_name;

	struct usb_request	*intreq;	// For interrupt responses
	int			intreq_busy;
	struct fsg_buffhd	*intr_buffhd;

	unsigned int		bulk_out_maxpacket;
	enum fsg_state		state;		// For exception handling
	unsigned int		exception_req_tag;

	u8			config, new_config;

	unsigned int		running : 1;
	unsigned int		bulk_in_enabled : 1;
	unsigned int		bulk_out_enabled : 1;
	unsigned int		intr_in_enabled : 1;
	unsigned int		phase_error : 1;
	unsigned int		short_packet_received : 1;
	unsigned int		bad_lun_okay : 1;

	/* Bit numbers for atomic_bitflags, manipulated with set/test_bit */
	unsigned long		atomic_bitflags;
#define REGISTERED		0
#define IGNORE_BULK_OUT		1
#define SUSPENDED		2

	struct usb_ep		*bulk_in;
	struct usb_ep		*bulk_out;
	struct usb_ep		*intr_in;

	struct fsg_buffhd	*next_buffhd_to_fill;
	struct fsg_buffhd	*next_buffhd_to_drain;

	int			thread_wakeup_needed;
	struct completion	thread_notifier;
	struct task_struct	*thread_task;

	/* Current SCSI command being processed by the worker thread */
	int			cmnd_size;
	u8			cmnd[MAX_COMMAND_SIZE];
	enum data_direction	data_dir;
	u32			data_size;
	u32			data_size_from_cmnd;
	u32			tag;
	unsigned int		lun;
	u32			residue;
	u32			usb_amount_left;

	/* The CB protocol offers no way for a host to know when a command
	 * has completed.  As a result the next command may arrive early,
	 * and we will still have to handle it.  For that reason we need
	 * a buffer to store new commands when using CB (or CBI, which
	 * does not oblige a host to wait for command completion either). */
	int			cbbuf_cmnd_size;
	u8			cbbuf_cmnd[MAX_COMMAND_SIZE];

	unsigned int		nluns;
	struct fsg_lun		*luns;
	struct fsg_lun		*curlun;
	/* Must be the last entry */
	struct fsg_buffhd	buffhds[];
};

typedef void (*fsg_routine_t)(struct fsg_dev *);

/* Nonzero while an exception (reset, config change, ...) is being handled */
static int exception_in_progress(struct fsg_dev *fsg)
{
	return (fsg->state > FSG_STATE_IDLE);
}

/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_dev *fsg,
		struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int	rem;

	/* Remember how much we actually wanted; the padded value goes to
	 * the UDC so a short packet can be detected. */
	bh->bulk_out_intended_length = length;
	rem = length % fsg->bulk_out_maxpacket;
	if (rem > 0)
		length += fsg->bulk_out_maxpacket - rem;
	bh->outreq->length = length;
}

static struct fsg_dev			*the_fsg;
static struct usb_gadget_driver		fsg_driver;


/*-------------------------------------------------------------------------*/

/* Halt an endpoint, logging a friendly name for the bulk endpoints */
static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char	*name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	else
		name = ep->name;
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}


/*-------------------------------------------------------------------------*/

/*
 * DESCRIPTORS ... most are static, but strings and (full) configuration
 * descriptors are built on demand.  Also the (static) config and interface
 * descriptors are adjusted during fsg_bind().
 */

/* There is only one configuration.
 */
#define CONFIG_VALUE		1

static struct usb_device_descriptor
device_desc = {
	.bLength =		sizeof device_desc,
	.bDescriptorType =	USB_DT_DEVICE,

	.bcdUSB =		cpu_to_le16(0x0200),
	.bDeviceClass =		USB_CLASS_PER_INTERFACE,

	/* The next three values can be overridden by module parameters */
	.idVendor =		cpu_to_le16(FSG_VENDOR_ID),
	.idProduct =		cpu_to_le16(FSG_PRODUCT_ID),
	.bcdDevice =		cpu_to_le16(0xffff),

	.iManufacturer =	FSG_STRING_MANUFACTURER,
	.iProduct =		FSG_STRING_PRODUCT,
	.iSerialNumber =	FSG_STRING_SERIAL,
	.bNumConfigurations =	1,
};

static struct usb_config_descriptor
config_desc = {
	.bLength =		sizeof config_desc,
	.bDescriptorType =	USB_DT_CONFIG,

	/* wTotalLength computed by usb_gadget_config_buf() */
	.bNumInterfaces =	1,
	.bConfigurationValue =	CONFIG_VALUE,
	.iConfiguration =	FSG_STRING_CONFIG,
	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
	.bMaxPower =		CONFIG_USB_GADGET_VBUS_DRAW / 2,
};

/* Reported in response to GET_DESCRIPTOR(DEVICE_QUALIFIER) on
 * dual-speed gadgets */
static struct usb_qualifier_descriptor
dev_qualifier = {
	.bLength =		sizeof dev_qualifier,
	.bDescriptorType =	USB_DT_DEVICE_QUALIFIER,

	.bcdUSB =		cpu_to_le16(0x0200),
	.bDeviceClass =		USB_CLASS_PER_INTERFACE,

	.bNumConfigurations =	1,
};

/* Assemble the SuperSpeed BOS descriptor (BOS header + USB 2.0 extension
 * + SS capability) into buf; returns the total length copied. */
static int populate_bos(struct fsg_dev *fsg, u8 *buf)
{
	memcpy(buf, &fsg_bos_desc, USB_DT_BOS_SIZE);
	buf += USB_DT_BOS_SIZE;

	memcpy(buf, &fsg_ext_cap_desc, USB_DT_USB_EXT_CAP_SIZE);
	buf += USB_DT_USB_EXT_CAP_SIZE;

	memcpy(buf, &fsg_ss_cap_desc, USB_DT_USB_SS_CAP_SIZE);

	return USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE
		+ USB_DT_USB_EXT_CAP_SIZE;
}

/*
 * Config descriptors must agree with the code that sets configurations
 * and with code managing interfaces and their altsettings.  They must
 * also handle different speeds and other-speed requests.
 */
static int populate_config_buf(struct usb_gadget *gadget,
		u8 *buf, u8 type, unsigned index)
{
	enum usb_device_speed			speed = gadget->speed;
	int					len;
	const struct usb_descriptor_header	**function;

	/* Only one configuration exists */
	if (index > 0)
		return -EINVAL;

	/* For OTHER_SPEED_CONFIG, describe the speed we are NOT running at */
	if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
		speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
	function = gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH
		? (const struct usb_descriptor_header **)fsg_hs_function
		: (const struct usb_descriptor_header **)fsg_fs_function;

	/* for now, don't advertise srp-only devices */
	if (!gadget_is_otg(gadget))
		function++;

	len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
	((struct usb_config_descriptor *) buf)->bDescriptorType = type;
	return len;
}


/*-------------------------------------------------------------------------*/

/* These routines may be called in process context or in_irq */

/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_dev *fsg)
{
	/* Tell the main thread that something has happened */
	fsg->thread_wakeup_needed = 1;
	if (fsg->thread_task)
		wake_up_process(fsg->thread_task);
}

/* Request that the worker thread switch to handling new_state.
 * Exceptions are prioritized by enum value; see the comment below. */
static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
{
	unsigned long		flags;

	/* Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal. */
	spin_lock_irqsave(&fsg->lock, flags);
	if (fsg->state <= new_state) {
		fsg->exception_req_tag = fsg->ep0_req_tag;
		fsg->state = new_state;
		if (fsg->thread_task)
			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
					fsg->thread_task);
	}
	spin_unlock_irqrestore(&fsg->lock, flags);
}


/*-------------------------------------------------------------------------*/

/* The disconnect callback and ep0 routines.
 * These always run in_irq,
 * except that ep0_queue() is called in the main thread to acknowledge
 * completion of various requests: set config, set interface, and
 * Bulk-only device reset. */

static void fsg_disconnect(struct usb_gadget *gadget)
{
	struct fsg_dev		*fsg = get_gadget_data(gadget);

	DBG(fsg, "disconnect or port reset\n");
	raise_exception(fsg, FSG_STATE_DISCONNECT);
}

/* Submit the prepared ep0 request; length/zero were set by the caller */
static int ep0_queue(struct fsg_dev *fsg)
{
	int	rc;

	rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
	if (rc != 0 && rc != -ESHUTDOWN) {

		/* We can't do much more than wait for a reset */
		WARNING(fsg, "error in submission: %s --> %d\n",
				fsg->ep0->name, rc);
	}
	return rc;
}

/* Completion callback for ep0 requests.  If the request carried a
 * deferred-work pointer in req->context, invoke it on success. */
static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_dev		*fsg = ep->driver_data;

	if (req->actual > 0)
		dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
	if (req->status || req->actual != req->length)
		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
				req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		// Request was cancelled
		usb_ep_fifo_flush(ep);

	if (req->status == 0 && req->context)
		((fsg_routine_t) (req->context))(fsg);
}


/*-------------------------------------------------------------------------*/

/* Bulk and interrupt endpoint completion handlers.
 * These always run in_irq.
*/ static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) { struct fsg_dev *fsg = ep->driver_data; struct fsg_buffhd *bh = req->context; if (req->status || req->actual != req->length) DBG(fsg, "%s --> %d, %u/%u\n", __func__, req->status, req->actual, req->length); if (req->status == -ECONNRESET) // Request was cancelled usb_ep_fifo_flush(ep); /* Hold the lock while we update the request and buffer states */ smp_wmb(); spin_lock(&fsg->lock); bh->inreq_busy = 0; bh->state = BUF_STATE_EMPTY; wakeup_thread(fsg); spin_unlock(&fsg->lock); } static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req) { struct fsg_dev *fsg = ep->driver_data; struct fsg_buffhd *bh = req->context; dump_msg(fsg, "bulk-out", req->buf, req->actual); if (req->status || req->actual != bh->bulk_out_intended_length) DBG(fsg, "%s --> %d, %u/%u\n", __func__, req->status, req->actual, bh->bulk_out_intended_length); if (req->status == -ECONNRESET) // Request was cancelled usb_ep_fifo_flush(ep); /* Hold the lock while we update the request and buffer states */ smp_wmb(); spin_lock(&fsg->lock); bh->outreq_busy = 0; bh->state = BUF_STATE_FULL; wakeup_thread(fsg); spin_unlock(&fsg->lock); } #ifdef CONFIG_USB_FILE_STORAGE_TEST static void intr_in_complete(struct usb_ep *ep, struct usb_request *req) { struct fsg_dev *fsg = ep->driver_data; struct fsg_buffhd *bh = req->context; if (req->status || req->actual != req->length) DBG(fsg, "%s --> %d, %u/%u\n", __func__, req->status, req->actual, req->length); if (req->status == -ECONNRESET) // Request was cancelled usb_ep_fifo_flush(ep); /* Hold the lock while we update the request and buffer states */ smp_wmb(); spin_lock(&fsg->lock); fsg->intreq_busy = 0; bh->state = BUF_STATE_EMPTY; wakeup_thread(fsg); spin_unlock(&fsg->lock); } #else static void intr_in_complete(struct usb_ep *ep, struct usb_request *req) {} #endif /* CONFIG_USB_FILE_STORAGE_TEST */ /*-------------------------------------------------------------------------*/ /* 
Ep0 class-specific handlers. These always run in_irq. */ #ifdef CONFIG_USB_FILE_STORAGE_TEST static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh) { struct usb_request *req = fsg->ep0req; static u8 cbi_reset_cmnd[6] = { SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff}; /* Error in command transfer? */ if (req->status || req->length != req->actual || req->actual < 6 || req->actual > MAX_COMMAND_SIZE) { /* Not all controllers allow a protocol stall after * receiving control-out data, but we'll try anyway. */ fsg_set_halt(fsg, fsg->ep0); return; // Wait for reset } /* Is it the special reset command? */ if (req->actual >= sizeof cbi_reset_cmnd && memcmp(req->buf, cbi_reset_cmnd, sizeof cbi_reset_cmnd) == 0) { /* Raise an exception to stop the current operation * and reinitialize our state. */ DBG(fsg, "cbi reset request\n"); raise_exception(fsg, FSG_STATE_RESET); return; } VDBG(fsg, "CB[I] accept device-specific command\n"); spin_lock(&fsg->lock); /* Save the command for later */ if (fsg->cbbuf_cmnd_size) WARNING(fsg, "CB[I] overwriting previous command\n"); fsg->cbbuf_cmnd_size = req->actual; memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size); wakeup_thread(fsg); spin_unlock(&fsg->lock); } #else static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh) {} #endif /* CONFIG_USB_FILE_STORAGE_TEST */ static int class_setup_req(struct fsg_dev *fsg, const struct usb_ctrlrequest *ctrl) { struct usb_request *req = fsg->ep0req; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); if (!fsg->config) return value; /* Handle Bulk-only class-specific requests */ if (transport_is_bbb()) { switch (ctrl->bRequest) { case US_BULK_RESET_REQUEST: if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) break; if (w_index != 0 || w_value != 0 || w_length != 0) { value = -EDOM; break; } /* Raise an exception to stop the current 
operation * and reinitialize our state. */ DBG(fsg, "bulk reset request\n"); raise_exception(fsg, FSG_STATE_RESET); value = DELAYED_STATUS; break; case US_BULK_GET_MAX_LUN: if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) break; if (w_index != 0 || w_value != 0 || w_length != 1) { value = -EDOM; break; } VDBG(fsg, "get max LUN\n"); *(u8 *) req->buf = fsg->nluns - 1; value = 1; break; } } /* Handle CBI class-specific requests */ else { switch (ctrl->bRequest) { case USB_CBI_ADSC_REQUEST: if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) break; if (w_index != 0 || w_value != 0) { value = -EDOM; break; } if (w_length > MAX_COMMAND_SIZE) { value = -EOVERFLOW; break; } value = w_length; fsg->ep0req->context = received_cbi_adsc; break; } } if (value == -EOPNOTSUPP) VDBG(fsg, "unknown class-specific control req " "%02x.%02x v%04x i%04x l%u\n", ctrl->bRequestType, ctrl->bRequest, le16_to_cpu(ctrl->wValue), w_index, w_length); return value; } /*-------------------------------------------------------------------------*/ /* Ep0 standard request handlers. These always run in_irq. */ static int standard_setup_req(struct fsg_dev *fsg, const struct usb_ctrlrequest *ctrl) { struct usb_request *req = fsg->ep0req; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); /* Usually this just stores reply data in the pre-allocated ep0 buffer, * but config change events will also reconfigure hardware. 
*/ switch (ctrl->bRequest) { case USB_REQ_GET_DESCRIPTOR: if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)) break; switch (w_value >> 8) { case USB_DT_DEVICE: VDBG(fsg, "get device descriptor\n"); device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket; value = sizeof device_desc; memcpy(req->buf, &device_desc, value); break; case USB_DT_DEVICE_QUALIFIER: VDBG(fsg, "get device qualifier\n"); if (!gadget_is_dualspeed(fsg->gadget) || fsg->gadget->speed == USB_SPEED_SUPER) break; /* * Assume ep0 uses the same maxpacket value for both * speeds */ dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket; value = sizeof dev_qualifier; memcpy(req->buf, &dev_qualifier, value); break; case USB_DT_OTHER_SPEED_CONFIG: VDBG(fsg, "get other-speed config descriptor\n"); if (!gadget_is_dualspeed(fsg->gadget) || fsg->gadget->speed == USB_SPEED_SUPER) break; goto get_config; case USB_DT_CONFIG: VDBG(fsg, "get configuration descriptor\n"); get_config: value = populate_config_buf(fsg->gadget, req->buf, w_value >> 8, w_value & 0xff); break; case USB_DT_STRING: VDBG(fsg, "get string descriptor\n"); /* wIndex == language code */ value = usb_gadget_get_string(&fsg_stringtab, w_value & 0xff, req->buf); break; case USB_DT_BOS: VDBG(fsg, "get bos descriptor\n"); if (gadget_is_superspeed(fsg->gadget)) value = populate_bos(fsg, req->buf); break; } break; /* One config, two speeds */ case USB_REQ_SET_CONFIGURATION: if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE)) break; VDBG(fsg, "set configuration\n"); if (w_value == CONFIG_VALUE || w_value == 0) { fsg->new_config = w_value; /* Raise an exception to wipe out previous transaction * state (queued bufs, etc) and set the new config. 
*/ raise_exception(fsg, FSG_STATE_CONFIG_CHANGE); value = DELAYED_STATUS; } break; case USB_REQ_GET_CONFIGURATION: if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)) break; VDBG(fsg, "get configuration\n"); *(u8 *) req->buf = fsg->config; value = 1; break; case USB_REQ_SET_INTERFACE: if (ctrl->bRequestType != (USB_DIR_OUT| USB_TYPE_STANDARD | USB_RECIP_INTERFACE)) break; if (fsg->config && w_index == 0) { /* Raise an exception to wipe out previous transaction * state (queued bufs, etc) and install the new * interface altsetting. */ raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE); value = DELAYED_STATUS; } break; case USB_REQ_GET_INTERFACE: if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE)) break; if (!fsg->config) break; if (w_index != 0) { value = -EDOM; break; } VDBG(fsg, "get interface\n"); *(u8 *) req->buf = 0; value = 1; break; default: VDBG(fsg, "unknown control req %02x.%02x v%04x i%04x l%u\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, le16_to_cpu(ctrl->wLength)); } return value; } static int fsg_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) { struct fsg_dev *fsg = get_gadget_data(gadget); int rc; int w_length = le16_to_cpu(ctrl->wLength); ++fsg->ep0_req_tag; // Record arrival of a new request fsg->ep0req->context = NULL; fsg->ep0req->length = 0; dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl)); if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) rc = class_setup_req(fsg, ctrl); else rc = standard_setup_req(fsg, ctrl); /* Respond with data/status or defer until later? */ if (rc >= 0 && rc != DELAYED_STATUS) { rc = min(rc, w_length); fsg->ep0req->length = rc; fsg->ep0req->zero = rc < w_length; fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ? 
"ep0-in" : "ep0-out"); rc = ep0_queue(fsg); } /* Device either stalls (rc < 0) or reports success */ return rc; } /*-------------------------------------------------------------------------*/ /* All the following routines run in process context */ /* Use this for bulk or interrupt transfers, not ep0 */ static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, struct usb_request *req, int *pbusy, enum fsg_buffer_state *state) { int rc; if (ep == fsg->bulk_in) dump_msg(fsg, "bulk-in", req->buf, req->length); else if (ep == fsg->intr_in) dump_msg(fsg, "intr-in", req->buf, req->length); spin_lock_irq(&fsg->lock); *pbusy = 1; *state = BUF_STATE_BUSY; spin_unlock_irq(&fsg->lock); rc = usb_ep_queue(ep, req, GFP_KERNEL); if (rc != 0) { *pbusy = 0; *state = BUF_STATE_EMPTY; /* We can't do much more than wait for a reset */ /* Note: currently the net2280 driver fails zero-length * submissions if DMA is enabled. */ if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && req->length == 0)) WARNING(fsg, "error in submission: %s --> %d\n", ep->name, rc); } } static int sleep_thread(struct fsg_dev *fsg) { int rc = 0; /* Wait until a signal arrives or we are woken up */ for (;;) { try_to_freeze(); set_current_state(TASK_INTERRUPTIBLE); if (signal_pending(current)) { rc = -EINTR; break; } if (fsg->thread_wakeup_needed) break; schedule(); } __set_current_state(TASK_RUNNING); fsg->thread_wakeup_needed = 0; return rc; } /*-------------------------------------------------------------------------*/ static int do_read(struct fsg_dev *fsg) { struct fsg_lun *curlun = fsg->curlun; u32 lba; struct fsg_buffhd *bh; int rc; u32 amount_left; loff_t file_offset, file_offset_tmp; unsigned int amount; ssize_t nread; /* Get the starting Logical Block Address and check that it's * not too big */ if (fsg->cmnd[0] == READ_6) lba = get_unaligned_be24(&fsg->cmnd[1]); else { lba = get_unaligned_be32(&fsg->cmnd[2]); /* We allow DPO (Disable Page Out = don't save data in the * cache) and FUA (Force Unit 
Access = don't read from the * cache), but we don't implement them. */ if ((fsg->cmnd[1] & ~0x18) != 0) { curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } } if (lba >= curlun->num_sectors) { curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; return -EINVAL; } file_offset = ((loff_t) lba) << curlun->blkbits; /* Carry out the file reads */ amount_left = fsg->data_size_from_cmnd; if (unlikely(amount_left == 0)) return -EIO; // No default reply for (;;) { /* Figure out how much we need to read: * Try to read the remaining amount. * But don't read more than the buffer size. * And don't try to read past the end of the file. */ amount = min((unsigned int) amount_left, mod_data.buflen); amount = min((loff_t) amount, curlun->file_length - file_offset); /* Wait for the next buffer to become available */ bh = fsg->next_buffhd_to_fill; while (bh->state != BUF_STATE_EMPTY) { rc = sleep_thread(fsg); if (rc) return rc; } /* If we were asked to read past the end of file, * end with an empty buffer. */ if (amount == 0) { curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; curlun->sense_data_info = file_offset >> curlun->blkbits; curlun->info_valid = 1; bh->inreq->length = 0; bh->state = BUF_STATE_FULL; break; } /* Perform the read */ file_offset_tmp = file_offset; nread = vfs_read(curlun->filp, (char __user *) bh->buf, amount, &file_offset_tmp); VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, (unsigned long long) file_offset, (int) nread); if (signal_pending(current)) return -EINTR; if (nread < 0) { LDBG(curlun, "error in file read: %d\n", (int) nread); nread = 0; } else if (nread < amount) { LDBG(curlun, "partial file read: %d/%u\n", (int) nread, amount); nread = round_down(nread, curlun->blksize); } file_offset += nread; amount_left -= nread; fsg->residue -= nread; /* Except at the end of the transfer, nread will be * equal to the buffer size, which is divisible by the * bulk-in maxpacket size. 
*/ bh->inreq->length = nread; bh->state = BUF_STATE_FULL; /* If an error occurred, report it and its position */ if (nread < amount) { curlun->sense_data = SS_UNRECOVERED_READ_ERROR; curlun->sense_data_info = file_offset >> curlun->blkbits; curlun->info_valid = 1; break; } if (amount_left == 0) break; // No more left to read /* Send this buffer and go read some more */ bh->inreq->zero = 0; start_transfer(fsg, fsg->bulk_in, bh->inreq, &bh->inreq_busy, &bh->state); fsg->next_buffhd_to_fill = bh->next; } return -EIO; // No default reply } /*-------------------------------------------------------------------------*/ static int do_write(struct fsg_dev *fsg) { struct fsg_lun *curlun = fsg->curlun; u32 lba; struct fsg_buffhd *bh; int get_some_more; u32 amount_left_to_req, amount_left_to_write; loff_t usb_offset, file_offset, file_offset_tmp; unsigned int amount; ssize_t nwritten; int rc; if (curlun->ro) { curlun->sense_data = SS_WRITE_PROTECTED; return -EINVAL; } spin_lock(&curlun->filp->f_lock); curlun->filp->f_flags &= ~O_SYNC; // Default is not to wait spin_unlock(&curlun->filp->f_lock); /* Get the starting Logical Block Address and check that it's * not too big */ if (fsg->cmnd[0] == WRITE_6) lba = get_unaligned_be24(&fsg->cmnd[1]); else { lba = get_unaligned_be32(&fsg->cmnd[2]); /* We allow DPO (Disable Page Out = don't save data in the * cache) and FUA (Force Unit Access = write directly to the * medium). We don't implement DPO; we implement FUA by * performing synchronous output. 
*/ if ((fsg->cmnd[1] & ~0x18) != 0) { curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } /* FUA */ if (!curlun->nofua && (fsg->cmnd[1] & 0x08)) { spin_lock(&curlun->filp->f_lock); curlun->filp->f_flags |= O_DSYNC; spin_unlock(&curlun->filp->f_lock); } } if (lba >= curlun->num_sectors) { curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; return -EINVAL; } /* Carry out the file writes */ get_some_more = 1; file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits; amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd; while (amount_left_to_write > 0) { /* Queue a request for more data from the host */ bh = fsg->next_buffhd_to_fill; if (bh->state == BUF_STATE_EMPTY && get_some_more) { /* Figure out how much we want to get: * Try to get the remaining amount, * but not more than the buffer size. */ amount = min(amount_left_to_req, mod_data.buflen); /* Beyond the end of the backing file? */ if (usb_offset >= curlun->file_length) { get_some_more = 0; curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; curlun->sense_data_info = usb_offset >> curlun->blkbits; curlun->info_valid = 1; continue; } /* Get the next buffer */ usb_offset += amount; fsg->usb_amount_left -= amount; amount_left_to_req -= amount; if (amount_left_to_req == 0) get_some_more = 0; /* Except at the end of the transfer, amount will be * equal to the buffer size, which is divisible by * the bulk-out maxpacket size. */ set_bulk_out_req_length(fsg, bh, amount); start_transfer(fsg, fsg->bulk_out, bh->outreq, &bh->outreq_busy, &bh->state); fsg->next_buffhd_to_fill = bh->next; continue; } /* Write the received data to the backing file */ bh = fsg->next_buffhd_to_drain; if (bh->state == BUF_STATE_EMPTY && !get_some_more) break; // We stopped early if (bh->state == BUF_STATE_FULL) { smp_rmb(); fsg->next_buffhd_to_drain = bh->next; bh->state = BUF_STATE_EMPTY; /* Did something go wrong with the transfer? 
*/ if (bh->outreq->status != 0) { curlun->sense_data = SS_COMMUNICATION_FAILURE; curlun->sense_data_info = file_offset >> curlun->blkbits; curlun->info_valid = 1; break; } amount = bh->outreq->actual; if (curlun->file_length - file_offset < amount) { LERROR(curlun, "write %u @ %llu beyond end %llu\n", amount, (unsigned long long) file_offset, (unsigned long long) curlun->file_length); amount = curlun->file_length - file_offset; } /* Don't accept excess data. The spec doesn't say * what to do in this case. We'll ignore the error. */ amount = min(amount, bh->bulk_out_intended_length); /* Don't write a partial block */ amount = round_down(amount, curlun->blksize); if (amount == 0) goto empty_write; /* Perform the write */ file_offset_tmp = file_offset; nwritten = vfs_write(curlun->filp, (char __user *) bh->buf, amount, &file_offset_tmp); VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, (unsigned long long) file_offset, (int) nwritten); if (signal_pending(current)) return -EINTR; // Interrupted! if (nwritten < 0) { LDBG(curlun, "error in file write: %d\n", (int) nwritten); nwritten = 0; } else if (nwritten < amount) { LDBG(curlun, "partial file write: %d/%u\n", (int) nwritten, amount); nwritten = round_down(nwritten, curlun->blksize); } file_offset += nwritten; amount_left_to_write -= nwritten; fsg->residue -= nwritten; /* If an error occurred, report it and its position */ if (nwritten < amount) { curlun->sense_data = SS_WRITE_ERROR; curlun->sense_data_info = file_offset >> curlun->blkbits; curlun->info_valid = 1; break; } empty_write: /* Did the host decide to stop early? 
 */
			if (bh->outreq->actual < bh->bulk_out_intended_length) {
				fsg->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(fsg);
		if (rc)
			return rc;
	}

	return -EIO;		// No default reply
}


/*-------------------------------------------------------------------------*/

/* SYNCHRONIZE CACHE: flush the backing file's dirty pages.  A flush
 * failure is recorded as WRITE ERROR sense data but the command itself
 * still completes (returns 0). */
static int do_synchronize_cache(struct fsg_dev *fsg)
{
	struct fsg_lun	*curlun = fsg->curlun;
	int		rc;

	/* We ignore the requested LBA and write out all file's
	 * dirty data buffers. */
	rc = fsg_lun_fsync_sub(curlun);
	if (rc)
		curlun->sense_data = SS_WRITE_ERROR;
	return 0;
}


/*-------------------------------------------------------------------------*/

/* Drop the backing file's page-cache pages so the following reads hit
 * the medium again (used by VERIFY). */
static void invalidate_sub(struct fsg_lun *curlun)
{
	struct file	*filp = curlun->filp;
	struct inode	*inode = filp->f_path.dentry->d_inode;
	unsigned long	rc;

	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}

/* VERIFY(10): read the requested blocks from the backing file, reporting
 * any errors through sense data; the data itself is discarded. */
static int do_verify(struct fsg_dev *fsg)
{
	struct fsg_lun		*curlun = fsg->curlun;
	u32			lba;
	u32			verification_length;
	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
	loff_t			file_offset, file_offset_tmp;
	u32			amount_left;
	unsigned int		amount;
	ssize_t			nread;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	lba = get_unaligned_be32(&fsg->cmnd[2]);
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it.
*/ if ((fsg->cmnd[1] & ~0x10) != 0) { curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } verification_length = get_unaligned_be16(&fsg->cmnd[7]); if (unlikely(verification_length == 0)) return -EIO; // No default reply /* Prepare to carry out the file verify */ amount_left = verification_length << curlun->blkbits; file_offset = ((loff_t) lba) << curlun->blkbits; /* Write out all the dirty buffers before invalidating them */ fsg_lun_fsync_sub(curlun); if (signal_pending(current)) return -EINTR; invalidate_sub(curlun); if (signal_pending(current)) return -EINTR; /* Just try to read the requested blocks */ while (amount_left > 0) { /* Figure out how much we need to read: * Try to read the remaining amount, but not more than * the buffer size. * And don't try to read past the end of the file. */ amount = min((unsigned int) amount_left, mod_data.buflen); amount = min((loff_t) amount, curlun->file_length - file_offset); if (amount == 0) { curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; curlun->sense_data_info = file_offset >> curlun->blkbits; curlun->info_valid = 1; break; } /* Perform the read */ file_offset_tmp = file_offset; nread = vfs_read(curlun->filp, (char __user *) bh->buf, amount, &file_offset_tmp); VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, (unsigned long long) file_offset, (int) nread); if (signal_pending(current)) return -EINTR; if (nread < 0) { LDBG(curlun, "error in file verify: %d\n", (int) nread); nread = 0; } else if (nread < amount) { LDBG(curlun, "partial file verify: %d/%u\n", (int) nread, amount); nread = round_down(nread, curlun->blksize); } if (nread == 0) { curlun->sense_data = SS_UNRECOVERED_READ_ERROR; curlun->sense_data_info = file_offset >> curlun->blkbits; curlun->info_valid = 1; break; } file_offset += nread; amount_left -= nread; } return 0; } /*-------------------------------------------------------------------------*/ static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh) { u8 *buf = (u8 *) 
bh->buf; static char vendor_id[] = "Linux "; static char product_disk_id[] = "File-Stor Gadget"; static char product_cdrom_id[] = "File-CD Gadget "; if (!fsg->curlun) { // Unsupported LUNs are okay fsg->bad_lun_okay = 1; memset(buf, 0, 36); buf[0] = 0x7f; // Unsupported, no device-type buf[4] = 31; // Additional length return 36; } memset(buf, 0, 8); buf[0] = (mod_data.cdrom ? TYPE_ROM : TYPE_DISK); if (mod_data.removable) buf[1] = 0x80; buf[2] = 2; // ANSI SCSI level 2 buf[3] = 2; // SCSI-2 INQUIRY data format buf[4] = 31; // Additional length // No special options sprintf(buf + 8, "%-8s%-16s%04x", vendor_id, (mod_data.cdrom ? product_cdrom_id : product_disk_id), mod_data.release); return 36; } static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) { struct fsg_lun *curlun = fsg->curlun; u8 *buf = (u8 *) bh->buf; u32 sd, sdinfo; int valid; /* * From the SCSI-2 spec., section 7.9 (Unit attention condition): * * If a REQUEST SENSE command is received from an initiator * with a pending unit attention condition (before the target * generates the contingent allegiance condition), then the * target shall either: * a) report any pending sense data and preserve the unit * attention condition on the logical unit, or, * b) report the unit attention condition, may discard any * pending sense data, and clear the unit attention * condition on the logical unit for that initiator. * * FSG normally uses option a); enable this code to use option b). 
 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		// Unsupported LUNs are okay
		fsg->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		sdinfo = 0;
		valid = 0;
	} else {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
		valid = curlun->info_valid << 7;
		/* Reading sense data clears it */
		curlun->sense_data = SS_NO_SENSE;
		curlun->sense_data_info = 0;
		curlun->info_valid = 0;
	}

	/* Build an 18-byte fixed-format (0x70) sense response */
	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			// Valid, current error
	buf[2] = SK(sd);
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
	buf[7] = 18 - 8;			// Additional sense length
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}

/* READ CAPACITY(10): report the last LBA and the block length */
static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;
	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
	int		pmi = fsg->cmnd[8];
	u8		*buf = (u8 *) bh->buf;

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
						/* Max logical block */
	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
	return 8;
}

/* READ HEADER (CD-ROM emulation): report the data type and address of
 * the requested block */
static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;
	int		msf = fsg->cmnd[1] & 0x02;
	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
	u8		*buf = (u8 *) bh->buf;

	if ((fsg->cmnd[1] & ~0x02) != 0) {	/* Mask away MSF */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	memset(buf, 0, 8);
	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
	store_cdrom_address(&buf[4], msf, lba);
	return 8;
}

/* READ TOC (CD-ROM emulation): report a single-track table of contents */
static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;
	int		msf = fsg->cmnd[1] & 0x02;
	int		start_track = fsg->cmnd[6];
	u8		*buf = (u8 *) bh->buf;

	if
((fsg->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ start_track > 1) { curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } memset(buf, 0, 20); buf[1] = (20-2); /* TOC data length */ buf[2] = 1; /* First track number */ buf[3] = 1; /* Last track number */ buf[5] = 0x16; /* Data track, copying allowed */ buf[6] = 0x01; /* Only track is number 1 */ store_cdrom_address(&buf[8], msf, 0); buf[13] = 0x16; /* Lead-out track is data */ buf[14] = 0xAA; /* Lead-out track number */ store_cdrom_address(&buf[16], msf, curlun->num_sectors); return 20; } static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) { struct fsg_lun *curlun = fsg->curlun; int mscmnd = fsg->cmnd[0]; u8 *buf = (u8 *) bh->buf; u8 *buf0 = buf; int pc, page_code; int changeable_values, all_pages; int valid_page = 0; int len, limit; if ((fsg->cmnd[1] & ~0x08) != 0) { // Mask away DBD curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } pc = fsg->cmnd[2] >> 6; page_code = fsg->cmnd[2] & 0x3f; if (pc == 3) { curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED; return -EINVAL; } changeable_values = (pc == 1); all_pages = (page_code == 0x3f); /* Write the mode parameter header. Fixed values are: default * medium type, no cache control (DPOFUA), and no block descriptors. * The only variable value is the WriteProtect bit. We will fill in * the mode data length later. */ memset(buf, 0, 8); if (mscmnd == MODE_SENSE) { buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA buf += 4; limit = 255; } else { // MODE_SENSE_10 buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA buf += 8; limit = 65535; // Should really be mod_data.buflen } /* No block descriptors */ /* The mode pages, in numerical order. The only page we support * is the Caching page. 
*/ if (page_code == 0x08 || all_pages) { valid_page = 1; buf[0] = 0x08; // Page code buf[1] = 10; // Page length memset(buf+2, 0, 10); // None of the fields are changeable if (!changeable_values) { buf[2] = 0x04; // Write cache enable, // Read cache not disabled // No cache retention priorities put_unaligned_be16(0xffff, &buf[4]); /* Don't disable prefetch */ /* Minimum prefetch = 0 */ put_unaligned_be16(0xffff, &buf[8]); /* Maximum prefetch */ put_unaligned_be16(0xffff, &buf[10]); /* Maximum prefetch ceiling */ } buf += 12; } /* Check that a valid page was requested and the mode data length * isn't too long. */ len = buf - buf0; if (!valid_page || len > limit) { curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } /* Store the mode data length */ if (mscmnd == MODE_SENSE) buf0[0] = len - 1; else put_unaligned_be16(len - 2, buf0); return len; } static int do_start_stop(struct fsg_dev *fsg) { struct fsg_lun *curlun = fsg->curlun; int loej, start; if (!mod_data.removable) { curlun->sense_data = SS_INVALID_COMMAND; return -EINVAL; } // int immed = fsg->cmnd[1] & 0x01; loej = fsg->cmnd[4] & 0x02; start = fsg->cmnd[4] & 0x01; #ifdef CONFIG_USB_FILE_STORAGE_TEST if ((fsg->cmnd[1] & ~0x01) != 0 || // Mask away Immed (fsg->cmnd[4] & ~0x03) != 0) { // Mask LoEj, Start curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } if (!start) { /* Are we allowed to unload the media? */ if (curlun->prevent_medium_removal) { LDBG(curlun, "unload attempt prevented\n"); curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED; return -EINVAL; } if (loej) { // Simulate an unload/eject up_read(&fsg->filesem); down_write(&fsg->filesem); fsg_lun_close(curlun); up_write(&fsg->filesem); down_read(&fsg->filesem); } } else { /* Our emulation doesn't support mounting; the medium is * available for use as soon as it is loaded. 
 */
		if (!fsg_lun_is_open(curlun)) {
			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
			return -EINVAL;
		}
	}
#endif
	return 0;
}


/* Handle PREVENT ALLOW MEDIUM REMOVAL: lock/unlock the (removable)
 * medium; a transition back to "allow" flushes the backing file. */
static int do_prevent_allow(struct fsg_dev *fsg)
{
	struct fsg_lun	*curlun = fsg->curlun;
	int		prevent;

	if (!mod_data.removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	prevent = fsg->cmnd[4] & 0x01;
	if ((fsg->cmnd[4] & ~0x01) != 0) {		// Mask away Prevent
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	if (curlun->prevent_medium_removal && !prevent)
		fsg_lun_fsync_sub(curlun);
	curlun->prevent_medium_removal = prevent;
	return 0;
}


/* Handle READ FORMAT CAPACITIES: report a single Current/Maximum
 * Capacity descriptor for the LUN.  Note the descriptor-type code
 * (0x02) deliberately overwrites the high byte of the 32-bit block
 * length, leaving the 24-bit block length field intact. */
static int do_read_format_capacities(struct fsg_dev *fsg,
			struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;
	u8		*buf = (u8 *) bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
	buf[3] = 8;	// Only the Current/Maximum Capacity Descriptor
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
	buf[4] = 0x02;				/* Current capacity */
	return 12;
}


/* MODE SELECT is not supported; fail with INVALID COMMAND sense. */
static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;

	/* We don't support MODE SELECT */
	curlun->sense_data = SS_INVALID_COMMAND;
	return -EINVAL;
}


/*-------------------------------------------------------------------------*/

/* Halt the bulk-in endpoint, retrying every 100 ms while the UDC
 * reports -EAGAIN.  Returns 0 (other errors are logged and dropped)
 * or -EINTR if the sleep was interrupted by a signal. */
static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	rc = fsg_set_halt(fsg, fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint halt\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
			rc = 0;
			break;
		}

		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_halt(fsg->bulk_in);
	}
	return rc;
}

/* Wedge the bulk-in endpoint (a halt that ignores Clear-Feature),
 * with the same -EAGAIN retry loop as above. */
static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	DBG(fsg, "bulk-in set wedge\n");
	rc = usb_ep_set_wedge(fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
			rc = 0;
			break;
		}

		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_wedge(fsg->bulk_in);
	}
	return rc;
}


/* Discard data the host sends after we've taken all we want: drain
 * filled buffers and keep queueing bulk-out requests until
 * usb_amount_left reaches zero.  A short packet or transfer error
 * raises an abort exception and returns -EINTR. */
static int throw_away_data(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh;
	u32			amount;
	int			rc;

	while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
			fsg->usb_amount_left > 0) {

		/* Throw away the data in a filled buffer */
		if (bh->state == BUF_STATE_FULL) {
			smp_rmb();
			bh->state = BUF_STATE_EMPTY;
			fsg->next_buffhd_to_drain = bh->next;

			/* A short packet or an error ends everything */
			if (bh->outreq->actual < bh->bulk_out_intended_length ||
					bh->outreq->status != 0) {
				raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
				return -EINTR;
			}
			continue;
		}

		/* Try to submit another request if we need one */
		bh = fsg->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
			amount = min(fsg->usb_amount_left,
					(u32) mod_data.buflen);

			/* Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
			 * the bulk-out maxpacket size.
			 */
			set_bulk_out_req_length(fsg, bh, amount);
			start_transfer(fsg, fsg->bulk_out, bh->outreq,
					&bh->outreq_busy, &bh->state);
			fsg->next_buffhd_to_fill = bh->next;
			fsg->usb_amount_left -= amount;
			continue;
		}

		/* Otherwise wait for something to happen */
		rc = sleep_thread(fsg);
		if (rc)
			return rc;
	}
	return 0;
}


/* Finish the data phase of the current command: send the last buffer
 * (or signal a residue with a short packet / halt) for IN transfers,
 * or drain leftover OUT data. */
static int finish_reply(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
	int			rc = 0;

	switch (fsg->data_dir) {
	case DATA_DIR_NONE:
		break;			// Nothing to send

	/* If we don't know whether the host wants to read or write,
	 * this must be CB or CBI with an unknown command.  We mustn't
	 * try to send or receive any data.  So stall both bulk pipes
	 * if we can and wait for a reset.
	 */
	case DATA_DIR_UNKNOWN:
		if (mod_data.can_stall) {
			fsg_set_halt(fsg, fsg->bulk_out);
			rc = halt_bulk_in_endpoint(fsg);
		}
		break;

	/* All but the last buffer of data must have already been sent */
	case DATA_DIR_TO_HOST:
		if (fsg->data_size == 0)
			;		// Nothing to send

		/* If there's no residue, simply send the last buffer */
		else if (fsg->residue == 0) {
			bh->inreq->zero = 0;
			start_transfer(fsg, fsg->bulk_in, bh->inreq,
					&bh->inreq_busy, &bh->state);
			fsg->next_buffhd_to_fill = bh->next;
		}

		/* There is a residue.  For CB and CBI, simply mark the end
		 * of the data with a short packet.  However, if we are
		 * allowed to stall, there was no data at all (residue ==
		 * data_size), and the command failed (invalid LUN or
		 * sense data is set), then halt the bulk-in endpoint
		 * instead. */
		else if (!transport_is_bbb()) {
			if (mod_data.can_stall &&
					fsg->residue == fsg->data_size &&
					(!fsg->curlun ||
					fsg->curlun->sense_data != SS_NO_SENSE)) {
				bh->state = BUF_STATE_EMPTY;
				rc = halt_bulk_in_endpoint(fsg);
			} else {
				bh->inreq->zero = 1;
				start_transfer(fsg, fsg->bulk_in, bh->inreq,
						&bh->inreq_busy, &bh->state);
				fsg->next_buffhd_to_fill = bh->next;
			}
		}

		/*
		 * For Bulk-only, mark the end of the data with a short
		 * packet.  If we are allowed to stall, halt the bulk-in
		 * endpoint.  (Note: This violates the Bulk-Only Transport
		 * specification, which requires us to pad the data if we
		 * don't halt the endpoint.  Presumably nobody will mind.)
		 */
		else {
			bh->inreq->zero = 1;
			start_transfer(fsg, fsg->bulk_in, bh->inreq,
					&bh->inreq_busy, &bh->state);
			fsg->next_buffhd_to_fill = bh->next;
			if (mod_data.can_stall)
				rc = halt_bulk_in_endpoint(fsg);
		}
		break;

	/* We have processed all we want from the data the host has sent.
	 * There may still be outstanding bulk-out requests. */
	case DATA_DIR_FROM_HOST:
		if (fsg->residue == 0)
			;		// Nothing to receive

		/* Did the host stop sending unexpectedly early? */
		else if (fsg->short_packet_received) {
			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
		}

		/* We haven't processed all the incoming data.  Even though
		 * we may be allowed to stall, doing so would cause a race.
		 * The controller may already have ACK'ed all the remaining
		 * bulk-out packets, in which case the host wouldn't see a
		 * STALL.  Not realizing the endpoint was halted, it wouldn't
		 * clear the halt -- leading to problems later on. */
#if 0
		else if (mod_data.can_stall) {
			fsg_set_halt(fsg, fsg->bulk_out);
			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
		}
#endif

		/* We can't stall.  Read in the excess data and throw it
		 * all away. */
		else
			rc = throw_away_data(fsg);
		break;
	}
	return rc;
}


/* Send the status for the just-completed command: a Bulk-only CSW,
 * nothing for Control-Bulk, or an interrupt packet for CBI. */
static int send_status(struct fsg_dev *fsg)
{
	struct fsg_lun		*curlun = fsg->curlun;
	struct fsg_buffhd	*bh;
	int			rc;
	u8			status = US_BULK_STAT_OK;
	u32			sd, sdinfo = 0;

	/* Wait for the next buffer to become available */
	bh = fsg->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(fsg);
		if (rc)
			return rc;
	}

	if (curlun) {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
	} else if (fsg->bad_lun_okay)
		sd = SS_NO_SENSE;
	else
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;

	if (fsg->phase_error) {
		DBG(fsg, "sending phase-error status\n");
		status = US_BULK_STAT_PHASE;
		sd = SS_INVALID_COMMAND;
	} else if (sd != SS_NO_SENSE) {
		DBG(fsg, "sending command-failure status\n");
		status = US_BULK_STAT_FAIL;
		VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
				" info x%x\n",
				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
	}

	if (transport_is_bbb()) {
		struct bulk_cs_wrap	*csw = bh->buf;

		/* Store and send the Bulk-only CSW */
		csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
		csw->Tag = fsg->tag;
		csw->Residue = cpu_to_le32(fsg->residue);
		csw->Status = status;

		bh->inreq->length = US_BULK_CS_WRAP_LEN;
		bh->inreq->zero = 0;
		start_transfer(fsg, fsg->bulk_in, bh->inreq,
				&bh->inreq_busy, &bh->state);

	} else if (mod_data.transport_type == USB_PR_CB) {

		/* Control-Bulk transport has no status phase! */
		return 0;

	} else {			// USB_PR_CBI
		struct interrupt_data	*buf = bh->buf;

		/* Store and send the Interrupt data.  UFI sends the ASC
		 * and ASCQ bytes.  Everything else sends a Type (which
		 * is always 0) and the status Value. */
		if (mod_data.protocol_type == USB_SC_UFI) {
			buf->bType = ASC(sd);
			buf->bValue = ASCQ(sd);
		} else {
			buf->bType = 0;
			buf->bValue = status;
		}
		fsg->intreq->length = CBI_INTERRUPT_DATA_LEN;

		fsg->intr_buffhd = bh;		// Point to the right buffhd
		fsg->intreq->buf = bh->inreq->buf;
		fsg->intreq->context = bh;
		start_transfer(fsg, fsg->intr_in, fsg->intreq,
				&fsg->intreq_busy, &bh->state);
	}

	fsg->next_buffhd_to_fill = bh->next;
	return 0;
}


/*-------------------------------------------------------------------------*/

/* Check whether the command is properly formed and whether its data size
 * and direction agree with the values we already have. */
static int check_command(struct fsg_dev *fsg, int cmnd_size,
		enum data_direction data_dir, unsigned int mask,
		int needs_medium, const char *name)
{
	int			i;
	int			lun = fsg->cmnd[1] >> 5;
	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
	char			hdlen[20];
	struct fsg_lun		*curlun;

	/* Adjust the expected cmnd_size for protocol encapsulation padding.
	 * Transparent SCSI doesn't pad. */
	if (protocol_is_scsi())
		;

	/* There's some disagreement as to whether RBC pads commands or not.
	 * We'll play it safe and accept either form. */
	else if (mod_data.protocol_type == USB_SC_RBC) {
		if (fsg->cmnd_size == 12)
			cmnd_size = 12;

	/* All the other protocols pad to 12 bytes */
	} else
		cmnd_size = 12;

	hdlen[0] = 0;
	if (fsg->data_dir != DATA_DIR_UNKNOWN)
		sprintf(hdlen, ", H%c=%u",
				dirletter[(int) fsg->data_dir],
				fsg->data_size);
	VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
			name, cmnd_size, dirletter[(int) data_dir],
			fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);

	/* We can't reply at all until we know the correct data direction
	 * and size.
	 */
	if (fsg->data_size_from_cmnd == 0)
		data_dir = DATA_DIR_NONE;
	if (fsg->data_dir == DATA_DIR_UNKNOWN) {	// CB or CBI
		fsg->data_dir = data_dir;
		fsg->data_size = fsg->data_size_from_cmnd;

	} else {					// Bulk-only
		if (fsg->data_size < fsg->data_size_from_cmnd) {

			/* Host data size < Device data size is a phase error.
			 * Carry out the command, but only transfer as much
			 * as we are allowed. */
			fsg->data_size_from_cmnd = fsg->data_size;
			fsg->phase_error = 1;
		}
	}
	fsg->residue = fsg->usb_amount_left = fsg->data_size;

	/* Conflicting data directions is a phase error */
	if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
		fsg->phase_error = 1;
		return -EINVAL;
	}

	/* Verify the length of the command itself */
	if (cmnd_size != fsg->cmnd_size) {

		/* Special case workaround: There are plenty of buggy SCSI
		 * implementations.  Many have issues with cbw->Length
		 * field passing a wrong command size.  For those cases we
		 * always try to work around the problem by using the length
		 * sent by the host side provided it is at least as large
		 * as the correct command length.
		 * Examples of such cases would be MS-Windows, which issues
		 * REQUEST SENSE with cbw->Length == 12 where it should
		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
		 * REQUEST SENSE with cbw->Length == 10 where it should
		 * be 6 as well.
		 */
		if (cmnd_size <= fsg->cmnd_size) {
			DBG(fsg, "%s is buggy! Expected length %d "
					"but we got %d\n", name,
					cmnd_size, fsg->cmnd_size);
			cmnd_size = fsg->cmnd_size;
		} else {
			fsg->phase_error = 1;
			return -EINVAL;
		}
	}

	/* Check that the LUN values are consistent */
	if (transport_is_bbb()) {
		if (fsg->lun != lun)
			DBG(fsg, "using LUN %d from CBW, "
					"not LUN %d from CDB\n",
					fsg->lun, lun);
	}

	/* Check the LUN */
	curlun = fsg->curlun;
	if (curlun) {
		if (fsg->cmnd[0] != REQUEST_SENSE) {
			curlun->sense_data = SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
	} else {
		fsg->bad_lun_okay = 0;

		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
		 * to use unsupported LUNs; all others may not. */
		if (fsg->cmnd[0] != INQUIRY &&
				fsg->cmnd[0] != REQUEST_SENSE) {
			DBG(fsg, "unsupported LUN %d\n", fsg->lun);
			return -EINVAL;
		}
	}

	/* If a unit attention condition exists, only INQUIRY and
	 * REQUEST SENSE commands are allowed; anything else must fail. */
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
			fsg->cmnd[0] != INQUIRY &&
			fsg->cmnd[0] != REQUEST_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
		return -EINVAL;
	}

	/* Check that only command bytes listed in the mask are non-zero */
	fsg->cmnd[1] &= 0x1f;			// Mask away the LUN
	for (i = 1; i < cmnd_size; ++i) {
		if (fsg->cmnd[i] && !(mask & (1 << i))) {
			if (curlun)
				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}

	/* If the medium isn't mounted and the command needs to access
	 * it, return an error. */
	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
		return -EINVAL;
	}

	return 0;
}

/* wrapper of check_command for data size in blocks handling */
static int check_command_size_in_blocks(struct fsg_dev *fsg, int cmnd_size,
		enum data_direction data_dir, unsigned int mask,
		int needs_medium, const char *name)
{
	/* Convert a block count from the CDB into a byte count */
	if (fsg->curlun)
		fsg->data_size_from_cmnd <<= fsg->curlun->blkbits;
	return check_command(fsg, cmnd_size, data_dir,
			mask, needs_medium, name);
}

/* Parse and dispatch the current SCSI command: validate it with
 * check_command() and invoke the matching do_* handler; set up the
 * reply buffer for finish_reply(). */
static int do_scsi_command(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh;
	int			rc;
	int			reply = -EINVAL;
	int			i;
	static char		unknown[16];

	dump_cdb(fsg);

	/* Wait for the next buffer to become available for data or status */
	bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(fsg);
		if (rc)
			return rc;
	}
	fsg->phase_error = 0;
	fsg->short_packet_received = 0;

	down_read(&fsg->filesem);	// We're using the backing file
	switch (fsg->cmnd[0]) {

	case INQUIRY:
		fsg->data_size_from_cmnd = fsg->cmnd[4];
		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
				(1<<4), 0,
				"INQUIRY")) == 0)
			reply = do_inquiry(fsg, bh);
		break;

	case MODE_SELECT:
		fsg->data_size_from_cmnd = fsg->cmnd[4];
		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
				(1<<1) | (1<<4), 0,
				"MODE SELECT(6)")) == 0)
			reply = do_mode_select(fsg, bh);
		break;

	case MODE_SELECT_10:
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
				(1<<1) | (3<<7), 0,
				"MODE SELECT(10)")) == 0)
			reply = do_mode_select(fsg, bh);
		break;

	case MODE_SENSE:
		fsg->data_size_from_cmnd = fsg->cmnd[4];
		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
				(1<<1) | (1<<2) | (1<<4), 0,
				"MODE SENSE(6)")) == 0)
			reply = do_mode_sense(fsg, bh);
		break;

	case MODE_SENSE_10:
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
				(1<<1) | (1<<2) | (3<<7), 0,
				"MODE SENSE(10)")) == 0)
			reply
			= do_mode_sense(fsg, bh);
		break;

	case ALLOW_MEDIUM_REMOVAL:
		fsg->data_size_from_cmnd = 0;
		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
				(1<<4), 0,
				"PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
			reply = do_prevent_allow(fsg);
		break;

	case READ_6:
		i = fsg->cmnd[4];
		/* A transfer length of 0 means 256 blocks (SBC READ(6)) */
		fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
		if ((reply = check_command_size_in_blocks(fsg, 6,
				DATA_DIR_TO_HOST,
				(7<<1) | (1<<4), 1,
				"READ(6)")) == 0)
			reply = do_read(fsg);
		break;

	case READ_10:
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command_size_in_blocks(fsg, 10,
				DATA_DIR_TO_HOST,
				(1<<1) | (0xf<<2) | (3<<7), 1,
				"READ(10)")) == 0)
			reply = do_read(fsg);
		break;

	case READ_12:
		fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
		if ((reply = check_command_size_in_blocks(fsg, 12,
				DATA_DIR_TO_HOST,
				(1<<1) | (0xf<<2) | (0xf<<6), 1,
				"READ(12)")) == 0)
			reply = do_read(fsg);
		break;

	case READ_CAPACITY:
		fsg->data_size_from_cmnd = 8;
		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
				(0xf<<2) | (1<<8), 1,
				"READ CAPACITY")) == 0)
			reply = do_read_capacity(fsg, bh);
		break;

	case READ_HEADER:
		if (!mod_data.cdrom)
			goto unknown_cmnd;
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
				(3<<7) | (0x1f<<1), 1,
				"READ HEADER")) == 0)
			reply = do_read_header(fsg, bh);
		break;

	case READ_TOC:
		if (!mod_data.cdrom)
			goto unknown_cmnd;
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
				(7<<6) | (1<<1), 1,
				"READ TOC")) == 0)
			reply = do_read_toc(fsg, bh);
		break;

	case READ_FORMAT_CAPACITIES:
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
				(3<<7), 1,
				"READ FORMAT CAPACITIES")) == 0)
			reply = do_read_format_capacities(fsg, bh);
		break;

	case REQUEST_SENSE:
		fsg->data_size_from_cmnd = fsg->cmnd[4];
		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
				(1<<4), 0,
				"REQUEST SENSE")) == 0)
			reply = do_request_sense(fsg, bh);
		break;

	case START_STOP:
		fsg->data_size_from_cmnd = 0;
		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
				(1<<1) | (1<<4), 0,
				"START-STOP UNIT")) == 0)
			reply = do_start_stop(fsg);
		break;

	case SYNCHRONIZE_CACHE:
		fsg->data_size_from_cmnd = 0;
		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
				(0xf<<2) | (3<<7), 1,
				"SYNCHRONIZE CACHE")) == 0)
			reply = do_synchronize_cache(fsg);
		break;

	case TEST_UNIT_READY:
		fsg->data_size_from_cmnd = 0;
		reply = check_command(fsg, 6, DATA_DIR_NONE,
				0, 1,
				"TEST UNIT READY");
		break;

	/* Although optional, this command is used by MS-Windows.  We
	 * support a minimal version: BytChk must be 0. */
	case VERIFY:
		fsg->data_size_from_cmnd = 0;
		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
				(1<<1) | (0xf<<2) | (3<<7), 1,
				"VERIFY")) == 0)
			reply = do_verify(fsg);
		break;

	case WRITE_6:
		i = fsg->cmnd[4];
		/* A transfer length of 0 means 256 blocks (SBC WRITE(6)) */
		fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
		if ((reply = check_command_size_in_blocks(fsg, 6,
				DATA_DIR_FROM_HOST,
				(7<<1) | (1<<4), 1,
				"WRITE(6)")) == 0)
			reply = do_write(fsg);
		break;

	case WRITE_10:
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command_size_in_blocks(fsg, 10,
				DATA_DIR_FROM_HOST,
				(1<<1) | (0xf<<2) | (3<<7), 1,
				"WRITE(10)")) == 0)
			reply = do_write(fsg);
		break;

	case WRITE_12:
		fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
		if ((reply = check_command_size_in_blocks(fsg, 12,
				DATA_DIR_FROM_HOST,
				(1<<1) | (0xf<<2) | (0xf<<6), 1,
				"WRITE(12)")) == 0)
			reply = do_write(fsg);
		break;

	/* Some mandatory commands that we recognize but don't implement.
	 * They don't mean much in this setting.  It's left as an exercise
	 * for anyone interested to implement RESERVE and RELEASE in terms
	 * of Posix locks.
	 */
	case FORMAT_UNIT:
	case RELEASE:
	case RESERVE:
	case SEND_DIAGNOSTIC:
		// Fall through

	default:
 unknown_cmnd:
		fsg->data_size_from_cmnd = 0;
		sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
		if ((reply = check_command(fsg, fsg->cmnd_size,
				DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) {
			fsg->curlun->sense_data = SS_INVALID_COMMAND;
			reply = -EINVAL;
		}
		break;
	}
	up_read(&fsg->filesem);

	if (reply == -EINTR || signal_pending(current))
		return -EINTR;

	/* Set up the single reply buffer for finish_reply() */
	if (reply == -EINVAL)
		reply = 0;		// Error reply length
	if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
		reply = min((u32) reply, fsg->data_size_from_cmnd);
		bh->inreq->length = reply;
		bh->state = BUF_STATE_FULL;
		fsg->residue -= reply;
	}				// Otherwise it's already set

	return 0;
}


/*-------------------------------------------------------------------------*/

/* Parse and validate a newly received Bulk-only CBW; save the command,
 * direction, length, tag and LUN for the main loop.  An invalid CBW
 * wedges the bulk-in endpoint per the Bulk-Only spec. */
static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct usb_request	*req = bh->outreq;
	struct bulk_cb_wrap	*cbw = req->buf;

	/* Was this a real packet?  Should it be ignored? */
	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
		return -EINVAL;

	/* Is the CBW valid? */
	if (req->actual != US_BULK_CB_WRAP_LEN ||
			cbw->Signature != cpu_to_le32(
				US_BULK_CB_SIGN)) {
		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
				req->actual,
				le32_to_cpu(cbw->Signature));

		/* The Bulk-only spec says we MUST stall the IN endpoint
		 * (6.6.1), so it's unavoidable.  It also says we must
		 * retain this state until the next reset, but there's
		 * no way to tell the controller driver it should ignore
		 * Clear-Feature(HALT) requests.
		 *
		 * We aren't required to halt the OUT endpoint; instead
		 * we can simply accept and discard any data received
		 * until the next reset. */
		wedge_bulk_in_endpoint(fsg);
		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
		return -EINVAL;
	}

	/* Is the CBW meaningful?
	 */
	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
				"cmdlen %u\n",
				cbw->Lun, cbw->Flags, cbw->Length);

		/* We can do anything we want here, so let's stall the
		 * bulk pipes if we are allowed to. */
		if (mod_data.can_stall) {
			fsg_set_halt(fsg, fsg->bulk_out);
			halt_bulk_in_endpoint(fsg);
		}
		return -EINVAL;
	}

	/* Save the command for later */
	fsg->cmnd_size = cbw->Length;
	memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
	if (cbw->Flags & US_BULK_FLAG_IN)
		fsg->data_dir = DATA_DIR_TO_HOST;
	else
		fsg->data_dir = DATA_DIR_FROM_HOST;
	fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
	if (fsg->data_size == 0)
		fsg->data_dir = DATA_DIR_NONE;
	fsg->lun = cbw->Lun;
	fsg->tag = cbw->Tag;
	return 0;
}


/* Wait for and fetch the next command: read a CBW for Bulk-only, or
 * pick up the command stashed by the ep0 handler for CB/CBI.  Also
 * updates fsg->curlun from the selected LUN. */
static int get_next_command(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh;
	int			rc = 0;

	if (transport_is_bbb()) {

		/* Wait for the next buffer to become available */
		bh = fsg->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(fsg);
			if (rc)
				return rc;
		}

		/* Queue a request to read a Bulk-only CBW */
		set_bulk_out_req_length(fsg, bh, US_BULK_CB_WRAP_LEN);
		start_transfer(fsg, fsg->bulk_out, bh->outreq,
				&bh->outreq_busy, &bh->state);

		/* We will drain the buffer in software, which means we
		 * can reuse it for the next filling.  No need to advance
		 * next_buffhd_to_fill. */

		/* Wait for the CBW to arrive */
		while (bh->state != BUF_STATE_FULL) {
			rc = sleep_thread(fsg);
			if (rc)
				return rc;
		}
		smp_rmb();
		rc = received_cbw(fsg, bh);
		bh->state = BUF_STATE_EMPTY;

	} else {		// USB_PR_CB or USB_PR_CBI

		/* Wait for the next command to arrive */
		while (fsg->cbbuf_cmnd_size == 0) {
			rc = sleep_thread(fsg);
			if (rc)
				return rc;
		}

		/* Is the previous status interrupt request still busy?
		 * The host is allowed to skip reading the status,
		 * so we must cancel it. */
		if (fsg->intreq_busy)
			usb_ep_dequeue(fsg->intr_in, fsg->intreq);

		/* Copy the command and mark the buffer empty */
		fsg->data_dir = DATA_DIR_UNKNOWN;
		spin_lock_irq(&fsg->lock);
		fsg->cmnd_size = fsg->cbbuf_cmnd_size;
		memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
		fsg->cbbuf_cmnd_size = 0;
		spin_unlock_irq(&fsg->lock);

		/* Use LUN from the command */
		fsg->lun = fsg->cmnd[1] >> 5;
	}

	/* Update current lun */
	if (fsg->lun >= 0 && fsg->lun < fsg->nluns)
		fsg->curlun = &fsg->luns[fsg->lun];
	else
		fsg->curlun = NULL;

	return rc;
}


/*-------------------------------------------------------------------------*/

/* Enable an endpoint with the given descriptor; logs on failure. */
static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
		const struct usb_endpoint_descriptor *d)
{
	int	rc;

	ep->driver_data = fsg;
	ep->desc = d;
	rc = usb_ep_enable(ep);
	if (rc)
		ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
	return rc;
}

/* Allocate a usb_request for the given endpoint; logs on failure. */
static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
		struct usb_request **preq)
{
	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (*preq)
		return 0;
	ERROR(fsg, "can't allocate request for %s\n", ep->name);
	return -ENOMEM;
}

/*
 * Reset interface setting and re-init endpoint state (toggle etc).
 * Call with altsetting < 0 to disable the interface.  The only other
 * available altsetting is 0, which enables the interface.
 */
static int do_set_interface(struct fsg_dev *fsg, int altsetting)
{
	int	rc = 0;
	int	i;
	const struct usb_endpoint_descriptor	*d;

	if (fsg->running)
		DBG(fsg, "reset interface\n");

reset:
	/* Deallocate the requests */
	for (i = 0; i < fsg_num_buffers; ++i) {
		struct fsg_buffhd *bh = &fsg->buffhds[i];

		if (bh->inreq) {
			usb_ep_free_request(fsg->bulk_in, bh->inreq);
			bh->inreq = NULL;
		}
		if (bh->outreq) {
			usb_ep_free_request(fsg->bulk_out, bh->outreq);
			bh->outreq = NULL;
		}
	}
	if (fsg->intreq) {
		usb_ep_free_request(fsg->intr_in, fsg->intreq);
		fsg->intreq = NULL;
	}

	/* Disable the endpoints */
	if (fsg->bulk_in_enabled) {
		usb_ep_disable(fsg->bulk_in);
		fsg->bulk_in_enabled = 0;
	}
	if (fsg->bulk_out_enabled) {
		usb_ep_disable(fsg->bulk_out);
		fsg->bulk_out_enabled = 0;
	}
	if (fsg->intr_in_enabled) {
		usb_ep_disable(fsg->intr_in);
		fsg->intr_in_enabled = 0;
	}

	fsg->running = 0;
	if (altsetting < 0 || rc != 0)
		return rc;

	DBG(fsg, "set interface %d\n", altsetting);

	/* Enable the endpoints */
	d = fsg_ep_desc(fsg->gadget,
			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc,
			&fsg_ss_bulk_in_desc);
	if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
		goto reset;
	fsg->bulk_in_enabled = 1;

	d = fsg_ep_desc(fsg->gadget,
			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc,
			&fsg_ss_bulk_out_desc);
	if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
		goto reset;
	fsg->bulk_out_enabled = 1;
	fsg->bulk_out_maxpacket = usb_endpoint_maxp(d);
	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);

	if (transport_is_cbi()) {
		d = fsg_ep_desc(fsg->gadget,
				&fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc,
				&fsg_ss_intr_in_desc);
		if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
			goto reset;
		fsg->intr_in_enabled = 1;
	}

	/* Allocate the requests */
	for (i = 0; i < fsg_num_buffers; ++i) {
		struct fsg_buffhd	*bh = &fsg->buffhds[i];

		if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
			goto reset;
		if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
			goto reset;
		bh->inreq->buf = bh->outreq->buf = bh->buf;
		bh->inreq->context = bh->outreq->context = bh;
		bh->inreq->complete = bulk_in_complete;
		bh->outreq->complete = bulk_out_complete;
	}
	if (transport_is_cbi()) {
		if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0)
			goto reset;
		fsg->intreq->complete = intr_in_complete;
	}

	fsg->running = 1;
	for (i = 0; i < fsg->nluns; ++i)
		fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
	return rc;
}


/*
 * Change our operational configuration.  This code must agree with the code
 * that returns config descriptors, and with interface altsetting code.
 *
 * It's also responsible for power management interactions.  Some
 * configurations might not work with our current power sources.
 * For now we just assume the gadget is always self-powered.
 */
static int do_set_config(struct fsg_dev *fsg, u8 new_config)
{
	int	rc = 0;

	/* Disable the single interface */
	if (fsg->config != 0) {
		DBG(fsg, "reset config\n");
		fsg->config = 0;
		rc = do_set_interface(fsg, -1);
	}

	/* Enable the interface */
	if (new_config != 0) {
		fsg->config = new_config;
		if ((rc = do_set_interface(fsg, 0)) != 0)
			fsg->config = 0;	// Reset on errors
		else
			INFO(fsg, "%s config #%d\n",
					usb_speed_string(fsg->gadget->speed),
					fsg->config);
	}
	return rc;
}


/*-------------------------------------------------------------------------*/

/* Run the exception handler for the main thread: quiesce all pending
 * transfers, reset the buffer and SCSI state, then carry out whatever
 * action the exception calls for (abort, reset, config change,
 * disconnect, exit). */
static void handle_exception(struct fsg_dev *fsg)
{
	siginfo_t		info;
	int			sig;
	int			i;
	int			num_active;
	struct fsg_buffhd	*bh;
	enum fsg_state		old_state;
	u8			new_config;
	struct fsg_lun		*curlun;
	unsigned int		exception_req_tag;
	int			rc;

	/* Clear the existing signals.  Anything but SIGUSR1 is converted
	 * into a high-priority EXIT exception. */
	for (;;) {
		sig = dequeue_signal_lock(current, &current->blocked, &info);
		if (!sig)
			break;
		if (sig != SIGUSR1) {
			if (fsg->state < FSG_STATE_EXIT)
				DBG(fsg, "Main thread exiting on signal\n");
			raise_exception(fsg, FSG_STATE_EXIT);
		}
	}

	/* Cancel all the pending transfers */
	if (fsg->intreq_busy)
		usb_ep_dequeue(fsg->intr_in, fsg->intreq);
	for (i = 0; i < fsg_num_buffers; ++i) {
		bh = &fsg->buffhds[i];
		if (bh->inreq_busy)
			usb_ep_dequeue(fsg->bulk_in, bh->inreq);
		if (bh->outreq_busy)
			usb_ep_dequeue(fsg->bulk_out, bh->outreq);
	}

	/* Wait until everything is idle */
	for (;;) {
		num_active = fsg->intreq_busy;
		for (i = 0; i < fsg_num_buffers; ++i) {
			bh = &fsg->buffhds[i];
			num_active += bh->inreq_busy + bh->outreq_busy;
		}
		if (num_active == 0)
			break;
		if (sleep_thread(fsg))
			return;
	}

	/* Clear out the controller's fifos */
	if (fsg->bulk_in_enabled)
		usb_ep_fifo_flush(fsg->bulk_in);
	if (fsg->bulk_out_enabled)
		usb_ep_fifo_flush(fsg->bulk_out);
	if (fsg->intr_in_enabled)
		usb_ep_fifo_flush(fsg->intr_in);

	/* Reset the I/O buffer states and pointers, the SCSI
	 * state, and the exception.  Then invoke the handler. */
	spin_lock_irq(&fsg->lock);

	for (i = 0; i < fsg_num_buffers; ++i) {
		bh = &fsg->buffhds[i];
		bh->state = BUF_STATE_EMPTY;
	}
	fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
			&fsg->buffhds[0];

	exception_req_tag = fsg->exception_req_tag;
	new_config = fsg->new_config;
	old_state = fsg->state;

	if (old_state == FSG_STATE_ABORT_BULK_OUT)
		fsg->state = FSG_STATE_STATUS_PHASE;
	else {
		for (i = 0; i < fsg->nluns; ++i) {
			curlun = &fsg->luns[i];
			curlun->prevent_medium_removal = 0;
			curlun->sense_data = curlun->unit_attention_data =
					SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
		fsg->state = FSG_STATE_IDLE;
	}
	spin_unlock_irq(&fsg->lock);

	/* Carry out any extra actions required for the exception */
	switch (old_state) {
	default:
		break;

	case FSG_STATE_ABORT_BULK_OUT:
		send_status(fsg);
		spin_lock_irq(&fsg->lock);
		if (fsg->state == FSG_STATE_STATUS_PHASE)
			fsg->state = FSG_STATE_IDLE;
		spin_unlock_irq(&fsg->lock);
		break;

	case FSG_STATE_RESET:
		/* In case we were forced against our will to halt a
		 * bulk endpoint, clear the halt now.  (The SuperH UDC
		 * requires this.) */
		if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
			usb_ep_clear_halt(fsg->bulk_in);

		if (transport_is_bbb()) {
			if (fsg->ep0_req_tag == exception_req_tag)
				ep0_queue(fsg);	// Complete the status stage

		} else if (transport_is_cbi())
			send_status(fsg);	// Status by interrupt pipe

		/* Technically this should go here, but it would only be
		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
		 * CONFIG_CHANGE cases. */
		// for (i = 0; i < fsg->nluns; ++i)
		//	fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
		break;

	case FSG_STATE_INTERFACE_CHANGE:
		rc = do_set_interface(fsg, 0);
		if (fsg->ep0_req_tag != exception_req_tag)
			break;
		if (rc != 0)			// STALL on errors
			fsg_set_halt(fsg, fsg->ep0);
		else				// Complete the status stage
			ep0_queue(fsg);
		break;

	case FSG_STATE_CONFIG_CHANGE:
		rc = do_set_config(fsg, new_config);
		if (fsg->ep0_req_tag != exception_req_tag)
			break;
		if (rc != 0)			// STALL on errors
			fsg_set_halt(fsg, fsg->ep0);
		else				// Complete the status stage
			ep0_queue(fsg);
		break;

	case FSG_STATE_DISCONNECT:
		for (i = 0; i < fsg->nluns; ++i)
			fsg_lun_fsync_sub(fsg->luns + i);
		do_set_config(fsg, 0);		// Unconfigured state
		break;

	case FSG_STATE_EXIT:
	case FSG_STATE_TERMINATED:
		do_set_config(fsg, 0);			// Free resources
		spin_lock_irq(&fsg->lock);
		fsg->state = FSG_STATE_TERMINATED;	// Stop the thread
		spin_unlock_irq(&fsg->lock);
		break;
	}
}


/*-------------------------------------------------------------------------*/

/* The gadget's main kernel thread: loop fetching commands, carrying
 * out the data phase, and sending status until told to terminate. */
static int fsg_main_thread(void *fsg_)
{
	struct fsg_dev		*fsg = fsg_;

	/* Allow the thread to be killed by a signal, but set the signal mask
	 * to block everything but INT, TERM, KILL, and USR1. */
	allow_signal(SIGINT);
	allow_signal(SIGTERM);
	allow_signal(SIGKILL);
	allow_signal(SIGUSR1);

	/* Allow the thread to be frozen */
	set_freezable();

	/* Arrange for userspace references to be interpreted as kernel
	 * pointers.  That way we can pass a kernel pointer to a routine
	 * that expects a __user pointer and it will work okay.
*/ set_fs(get_ds()); /* The main loop */ while (fsg->state != FSG_STATE_TERMINATED) { if (exception_in_progress(fsg) || signal_pending(current)) { handle_exception(fsg); continue; } if (!fsg->running) { sleep_thread(fsg); continue; } if (get_next_command(fsg)) continue; spin_lock_irq(&fsg->lock); if (!exception_in_progress(fsg)) fsg->state = FSG_STATE_DATA_PHASE; spin_unlock_irq(&fsg->lock); if (do_scsi_command(fsg) || finish_reply(fsg)) continue; spin_lock_irq(&fsg->lock); if (!exception_in_progress(fsg)) fsg->state = FSG_STATE_STATUS_PHASE; spin_unlock_irq(&fsg->lock); if (send_status(fsg)) continue; spin_lock_irq(&fsg->lock); if (!exception_in_progress(fsg)) fsg->state = FSG_STATE_IDLE; spin_unlock_irq(&fsg->lock); } spin_lock_irq(&fsg->lock); fsg->thread_task = NULL; spin_unlock_irq(&fsg->lock); /* If we are exiting because of a signal, unregister the * gadget driver. */ if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) usb_gadget_unregister_driver(&fsg_driver); /* Let the unbind and cleanup routines know the thread has exited */ complete_and_exit(&fsg->thread_notifier, 0); } /*-------------------------------------------------------------------------*/ /* The write permissions and store_xxx pointers are set in fsg_bind() */ static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL); static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, NULL); static DEVICE_ATTR(file, 0444, fsg_show_file, NULL); /*-------------------------------------------------------------------------*/ static void fsg_release(struct kref *ref) { struct fsg_dev *fsg = container_of(ref, struct fsg_dev, ref); kfree(fsg->luns); kfree(fsg); } static void lun_release(struct device *dev) { struct rw_semaphore *filesem = dev_get_drvdata(dev); struct fsg_dev *fsg = container_of(filesem, struct fsg_dev, filesem); kref_put(&fsg->ref, fsg_release); } static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget) { struct fsg_dev *fsg = get_gadget_data(gadget); int i; struct fsg_lun *curlun; struct 
usb_request *req = fsg->ep0req; DBG(fsg, "unbind\n"); clear_bit(REGISTERED, &fsg->atomic_bitflags); /* If the thread isn't already dead, tell it to exit now */ if (fsg->state != FSG_STATE_TERMINATED) { raise_exception(fsg, FSG_STATE_EXIT); wait_for_completion(&fsg->thread_notifier); /* The cleanup routine waits for this completion also */ complete(&fsg->thread_notifier); } /* Unregister the sysfs attribute files and the LUNs */ for (i = 0; i < fsg->nluns; ++i) { curlun = &fsg->luns[i]; if (curlun->registered) { device_remove_file(&curlun->dev, &dev_attr_nofua); device_remove_file(&curlun->dev, &dev_attr_ro); device_remove_file(&curlun->dev, &dev_attr_file); fsg_lun_close(curlun); device_unregister(&curlun->dev); curlun->registered = 0; } } /* Free the data buffers */ for (i = 0; i < fsg_num_buffers; ++i) kfree(fsg->buffhds[i].buf); /* Free the request and buffer for endpoint 0 */ if (req) { kfree(req->buf); usb_ep_free_request(fsg->ep0, req); } set_gadget_data(gadget, NULL); } static int __init check_parameters(struct fsg_dev *fsg) { int prot; int gcnum; /* Store the default values */ mod_data.transport_type = USB_PR_BULK; mod_data.transport_name = "Bulk-only"; mod_data.protocol_type = USB_SC_SCSI; mod_data.protocol_name = "Transparent SCSI"; /* Some peripheral controllers are known not to be able to * halt bulk endpoints correctly. If one of them is present, * disable stalls. 
*/ if (gadget_is_at91(fsg->gadget)) mod_data.can_stall = 0; if (mod_data.release == 0xffff) { // Parameter wasn't set gcnum = usb_gadget_controller_number(fsg->gadget); if (gcnum >= 0) mod_data.release = 0x0300 + gcnum; else { WARNING(fsg, "controller '%s' not recognized\n", fsg->gadget->name); mod_data.release = 0x0399; } } prot = simple_strtol(mod_data.protocol_parm, NULL, 0); #ifdef CONFIG_USB_FILE_STORAGE_TEST if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) { ; // Use default setting } else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) { mod_data.transport_type = USB_PR_CB; mod_data.transport_name = "Control-Bulk"; } else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) { mod_data.transport_type = USB_PR_CBI; mod_data.transport_name = "Control-Bulk-Interrupt"; } else { ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm); return -EINVAL; } if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 || prot == USB_SC_SCSI) { ; // Use default setting } else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 || prot == USB_SC_RBC) { mod_data.protocol_type = USB_SC_RBC; mod_data.protocol_name = "RBC"; } else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 || strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 || prot == USB_SC_8020) { mod_data.protocol_type = USB_SC_8020; mod_data.protocol_name = "8020i (ATAPI)"; } else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 || prot == USB_SC_QIC) { mod_data.protocol_type = USB_SC_QIC; mod_data.protocol_name = "QIC-157"; } else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 || prot == USB_SC_UFI) { mod_data.protocol_type = USB_SC_UFI; mod_data.protocol_name = "UFI"; } else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 || prot == USB_SC_8070) { mod_data.protocol_type = USB_SC_8070; mod_data.protocol_name = "8070i"; } else { ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm); return -EINVAL; } mod_data.buflen &= PAGE_CACHE_MASK; if (mod_data.buflen <= 0) { ERROR(fsg, 
"invalid buflen\n"); return -ETOOSMALL; } #endif /* CONFIG_USB_FILE_STORAGE_TEST */ /* Serial string handling. * On a real device, the serial string would be loaded * from permanent storage. */ if (mod_data.serial) { const char *ch; unsigned len = 0; /* Sanity check : * The CB[I] specification limits the serial string to * 12 uppercase hexadecimal characters. * BBB need at least 12 uppercase hexadecimal characters, * with a maximum of 126. */ for (ch = mod_data.serial; *ch; ++ch) { ++len; if ((*ch < '0' || *ch > '9') && (*ch < 'A' || *ch > 'F')) { /* not uppercase hex */ WARNING(fsg, "Invalid serial string character: %c\n", *ch); goto no_serial; } } if (len > 126 || (mod_data.transport_type == USB_PR_BULK && len < 12) || (mod_data.transport_type != USB_PR_BULK && len > 12)) { WARNING(fsg, "Invalid serial string length!\n"); goto no_serial; } fsg_strings[FSG_STRING_SERIAL - 1].s = mod_data.serial; } else { WARNING(fsg, "No serial-number string provided!\n"); no_serial: device_desc.iSerialNumber = 0; } return 0; } static int __init fsg_bind(struct usb_gadget *gadget) { struct fsg_dev *fsg = the_fsg; int rc; int i; struct fsg_lun *curlun; struct usb_ep *ep; struct usb_request *req; char *pathbuf, *p; fsg->gadget = gadget; set_gadget_data(gadget, fsg); fsg->ep0 = gadget->ep0; fsg->ep0->driver_data = fsg; if ((rc = check_parameters(fsg)) != 0) goto out; if (mod_data.removable) { // Enable the store_xxx attributes dev_attr_file.attr.mode = 0644; dev_attr_file.store = fsg_store_file; if (!mod_data.cdrom) { dev_attr_ro.attr.mode = 0644; dev_attr_ro.store = fsg_store_ro; } } /* Only for removable media? */ dev_attr_nofua.attr.mode = 0644; dev_attr_nofua.store = fsg_store_nofua; /* Find out how many LUNs there should be */ i = mod_data.nluns; if (i == 0) i = max(mod_data.num_filenames, 1u); if (i > FSG_MAX_LUNS) { ERROR(fsg, "invalid number of LUNs: %d\n", i); rc = -EINVAL; goto out; } /* Create the LUNs, open their backing files, and register the * LUN devices in sysfs. 
*/ fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL); if (!fsg->luns) { rc = -ENOMEM; goto out; } fsg->nluns = i; for (i = 0; i < fsg->nluns; ++i) { curlun = &fsg->luns[i]; curlun->cdrom = !!mod_data.cdrom; curlun->ro = mod_data.cdrom || mod_data.ro[i]; curlun->initially_ro = curlun->ro; curlun->removable = mod_data.removable; curlun->nofua = mod_data.nofua[i]; curlun->dev.release = lun_release; curlun->dev.parent = &gadget->dev; curlun->dev.driver = &fsg_driver.driver; dev_set_drvdata(&curlun->dev, &fsg->filesem); dev_set_name(&curlun->dev,"%s-lun%d", dev_name(&gadget->dev), i); kref_get(&fsg->ref); rc = device_register(&curlun->dev); if (rc) { INFO(fsg, "failed to register LUN%d: %d\n", i, rc); put_device(&curlun->dev); goto out; } curlun->registered = 1; rc = device_create_file(&curlun->dev, &dev_attr_ro); if (rc) goto out; rc = device_create_file(&curlun->dev, &dev_attr_nofua); if (rc) goto out; rc = device_create_file(&curlun->dev, &dev_attr_file); if (rc) goto out; if (mod_data.file[i] && *mod_data.file[i]) { rc = fsg_lun_open(curlun, mod_data.file[i]); if (rc) goto out; } else if (!mod_data.removable) { ERROR(fsg, "no file given for LUN%d\n", i); rc = -EINVAL; goto out; } } /* Find all the endpoints we will use */ usb_ep_autoconfig_reset(gadget); ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc); if (!ep) goto autoconf_fail; ep->driver_data = fsg; // claim the endpoint fsg->bulk_in = ep; ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc); if (!ep) goto autoconf_fail; ep->driver_data = fsg; // claim the endpoint fsg->bulk_out = ep; if (transport_is_cbi()) { ep = usb_ep_autoconfig(gadget, &fsg_fs_intr_in_desc); if (!ep) goto autoconf_fail; ep->driver_data = fsg; // claim the endpoint fsg->intr_in = ep; } /* Fix up the descriptors */ device_desc.idVendor = cpu_to_le16(mod_data.vendor); device_desc.idProduct = cpu_to_le16(mod_data.product); device_desc.bcdDevice = cpu_to_le16(mod_data.release); i = (transport_is_cbi() ? 
3 : 2); // Number of endpoints fsg_intf_desc.bNumEndpoints = i; fsg_intf_desc.bInterfaceSubClass = mod_data.protocol_type; fsg_intf_desc.bInterfaceProtocol = mod_data.transport_type; fsg_fs_function[i + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL; if (gadget_is_dualspeed(gadget)) { fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL; /* Assume endpoint addresses are the same for both speeds */ fsg_hs_bulk_in_desc.bEndpointAddress = fsg_fs_bulk_in_desc.bEndpointAddress; fsg_hs_bulk_out_desc.bEndpointAddress = fsg_fs_bulk_out_desc.bEndpointAddress; fsg_hs_intr_in_desc.bEndpointAddress = fsg_fs_intr_in_desc.bEndpointAddress; } if (gadget_is_superspeed(gadget)) { unsigned max_burst; fsg_ss_function[i + FSG_SS_FUNCTION_PRE_EP_ENTRIES] = NULL; /* Calculate bMaxBurst, we know packet size is 1024 */ max_burst = min_t(unsigned, mod_data.buflen / 1024, 15); /* Assume endpoint addresses are the same for both speeds */ fsg_ss_bulk_in_desc.bEndpointAddress = fsg_fs_bulk_in_desc.bEndpointAddress; fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst; fsg_ss_bulk_out_desc.bEndpointAddress = fsg_fs_bulk_out_desc.bEndpointAddress; fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst; } if (gadget_is_otg(gadget)) fsg_otg_desc.bmAttributes |= USB_OTG_HNP; rc = -ENOMEM; /* Allocate the request and buffer for endpoint 0 */ fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL); if (!req) goto out; req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL); if (!req->buf) goto out; req->complete = ep0_complete; /* Allocate the data buffers */ for (i = 0; i < fsg_num_buffers; ++i) { struct fsg_buffhd *bh = &fsg->buffhds[i]; /* Allocate for the bulk-in endpoint. We assume that * the buffer will also work with the bulk-out (and * interrupt-in) endpoint. 
*/ bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL); if (!bh->buf) goto out; bh->next = bh + 1; } fsg->buffhds[fsg_num_buffers - 1].next = &fsg->buffhds[0]; /* This should reflect the actual gadget power source */ usb_gadget_set_selfpowered(gadget); snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer, "%s %s with %s", init_utsname()->sysname, init_utsname()->release, gadget->name); fsg->thread_task = kthread_create(fsg_main_thread, fsg, "file-storage-gadget"); if (IS_ERR(fsg->thread_task)) { rc = PTR_ERR(fsg->thread_task); goto out; } INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n"); INFO(fsg, "NOTE: This driver is deprecated. " "Consider using g_mass_storage instead.\n"); INFO(fsg, "Number of LUNs=%d\n", fsg->nluns); pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); for (i = 0; i < fsg->nluns; ++i) { curlun = &fsg->luns[i]; if (fsg_lun_is_open(curlun)) { p = NULL; if (pathbuf) { p = d_path(&curlun->filp->f_path, pathbuf, PATH_MAX); if (IS_ERR(p)) p = NULL; } LINFO(curlun, "ro=%d, nofua=%d, file: %s\n", curlun->ro, curlun->nofua, (p ? 
p : "(error)")); } } kfree(pathbuf); DBG(fsg, "transport=%s (x%02x)\n", mod_data.transport_name, mod_data.transport_type); DBG(fsg, "protocol=%s (x%02x)\n", mod_data.protocol_name, mod_data.protocol_type); DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n", mod_data.vendor, mod_data.product, mod_data.release); DBG(fsg, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n", mod_data.removable, mod_data.can_stall, mod_data.cdrom, mod_data.buflen); DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task)); set_bit(REGISTERED, &fsg->atomic_bitflags); /* Tell the thread to start working */ wake_up_process(fsg->thread_task); return 0; autoconf_fail: ERROR(fsg, "unable to autoconfigure all endpoints\n"); rc = -ENOTSUPP; out: fsg->state = FSG_STATE_TERMINATED; // The thread is dead fsg_unbind(gadget); complete(&fsg->thread_notifier); return rc; } /*-------------------------------------------------------------------------*/ static void fsg_suspend(struct usb_gadget *gadget) { struct fsg_dev *fsg = get_gadget_data(gadget); DBG(fsg, "suspend\n"); set_bit(SUSPENDED, &fsg->atomic_bitflags); } static void fsg_resume(struct usb_gadget *gadget) { struct fsg_dev *fsg = get_gadget_data(gadget); DBG(fsg, "resume\n"); clear_bit(SUSPENDED, &fsg->atomic_bitflags); } /*-------------------------------------------------------------------------*/ static struct usb_gadget_driver fsg_driver = { .max_speed = USB_SPEED_SUPER, .function = (char *) fsg_string_product, .unbind = fsg_unbind, .disconnect = fsg_disconnect, .setup = fsg_setup, .suspend = fsg_suspend, .resume = fsg_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, // .release = ... // .suspend = ... // .resume = ... 
}, }; static int __init fsg_alloc(void) { struct fsg_dev *fsg; fsg = kzalloc(sizeof *fsg + fsg_num_buffers * sizeof *(fsg->buffhds), GFP_KERNEL); if (!fsg) return -ENOMEM; spin_lock_init(&fsg->lock); init_rwsem(&fsg->filesem); kref_init(&fsg->ref); init_completion(&fsg->thread_notifier); the_fsg = fsg; return 0; } static int __init fsg_init(void) { int rc; struct fsg_dev *fsg; rc = fsg_num_buffers_validate(); if (rc != 0) return rc; if ((rc = fsg_alloc()) != 0) return rc; fsg = the_fsg; if ((rc = usb_gadget_probe_driver(&fsg_driver, fsg_bind)) != 0) kref_put(&fsg->ref, fsg_release); return rc; } module_init(fsg_init); static void __exit fsg_cleanup(void) { struct fsg_dev *fsg = the_fsg; /* Unregister the driver iff the thread hasn't already done so */ if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) usb_gadget_unregister_driver(&fsg_driver); /* Wait for the thread to finish up */ wait_for_completion(&fsg->thread_notifier); kref_put(&fsg->ref, fsg_release); } module_exit(fsg_cleanup);
gpl-2.0
binkybear/kernel_msm
arch/arm/mach-orion5x/net2big-setup.c
4952
11642
/* * arch/arm/mach-orion5x/net2big-setup.c * * LaCie 2Big Network NAS setup * * Copyright (C) 2009 Simon Guinot <sguinot@lacie.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/leds.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/ata_platform.h> #include <linux/gpio.h> #include <linux/delay.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" /***************************************************************************** * LaCie 2Big Network Info ****************************************************************************/ /* * 512KB NOR flash Device bus boot chip select */ #define NET2BIG_NOR_BOOT_BASE 0xfff80000 #define NET2BIG_NOR_BOOT_SIZE SZ_512K /***************************************************************************** * 512KB NOR Flash on Boot Device ****************************************************************************/ /* * TODO: Check write support on flash MX29LV400CBTC-70G */ static struct mtd_partition net2big_partitions[] = { { .name = "Full512kb", .size = MTDPART_SIZ_FULL, .offset = 0x00000000, .mask_flags = MTD_WRITEABLE, }, }; static struct physmap_flash_data net2big_nor_flash_data = { .width = 1, .parts = net2big_partitions, .nr_parts = ARRAY_SIZE(net2big_partitions), }; static struct resource net2big_nor_flash_resource = { .flags = IORESOURCE_MEM, .start = NET2BIG_NOR_BOOT_BASE, .end = NET2BIG_NOR_BOOT_BASE + NET2BIG_NOR_BOOT_SIZE - 1, }; static struct platform_device net2big_nor_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &net2big_nor_flash_data, }, .num_resources = 1, .resource = 
&net2big_nor_flash_resource, }; /***************************************************************************** * Ethernet ****************************************************************************/ static struct mv643xx_eth_platform_data net2big_eth_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; /***************************************************************************** * I2C devices ****************************************************************************/ /* * i2c addr | chip | description * 0x32 | Ricoh 5C372b | RTC * 0x50 | HT24LC08 | eeprom (1kB) */ static struct i2c_board_info __initdata net2big_i2c_devices[] = { { I2C_BOARD_INFO("rs5c372b", 0x32), }, { I2C_BOARD_INFO("24c08", 0x50), }, }; /***************************************************************************** * SATA ****************************************************************************/ static struct mv_sata_platform_data net2big_sata_data = { .n_ports = 2, }; #define NET2BIG_GPIO_SATA_POWER_REQ 19 #define NET2BIG_GPIO_SATA0_POWER 23 #define NET2BIG_GPIO_SATA1_POWER 25 static void __init net2big_sata_power_init(void) { int err; /* Configure GPIOs over MPP max number. 
*/ orion_gpio_set_valid(NET2BIG_GPIO_SATA0_POWER, 1); orion_gpio_set_valid(NET2BIG_GPIO_SATA1_POWER, 1); err = gpio_request(NET2BIG_GPIO_SATA0_POWER, "SATA0 power status"); if (err == 0) { err = gpio_direction_input(NET2BIG_GPIO_SATA0_POWER); if (err) gpio_free(NET2BIG_GPIO_SATA0_POWER); } if (err) { pr_err("net2big: failed to setup SATA0 power GPIO\n"); return; } err = gpio_request(NET2BIG_GPIO_SATA1_POWER, "SATA1 power status"); if (err == 0) { err = gpio_direction_input(NET2BIG_GPIO_SATA1_POWER); if (err) gpio_free(NET2BIG_GPIO_SATA1_POWER); } if (err) { pr_err("net2big: failed to setup SATA1 power GPIO\n"); goto err_free_1; } err = gpio_request(NET2BIG_GPIO_SATA_POWER_REQ, "SATA power request"); if (err == 0) { err = gpio_direction_output(NET2BIG_GPIO_SATA_POWER_REQ, 0); if (err) gpio_free(NET2BIG_GPIO_SATA_POWER_REQ); } if (err) { pr_err("net2big: failed to setup SATA power request GPIO\n"); goto err_free_2; } if (gpio_get_value(NET2BIG_GPIO_SATA0_POWER) && gpio_get_value(NET2BIG_GPIO_SATA1_POWER)) { return; } /* * SATA power up on both disk is done by pulling high the CPLD power * request line. The 300ms delay is related to the CPLD clock and is * needed to be sure that the CPLD has take into account the low line * status. */ msleep(300); gpio_set_value(NET2BIG_GPIO_SATA_POWER_REQ, 1); pr_info("net2big: power up SATA hard disks\n"); return; err_free_2: gpio_free(NET2BIG_GPIO_SATA1_POWER); err_free_1: gpio_free(NET2BIG_GPIO_SATA0_POWER); return; } /***************************************************************************** * GPIO LEDs ****************************************************************************/ /* * The power front LEDs (blue and red) and SATA red LEDs are controlled via a * single GPIO line and are compatible with the leds-gpio driver. * * The SATA blue LEDs have some hardware blink capabilities which are detailed * in the following array: * * SATAx blue LED | SATAx activity | LED state * | | * 0 | 0 | blink (rate 300ms) * 1 | 0 | off * ? 
| 1 | on * * Notes: The blue and the red front LED's can't be on at the same time. * Blue LED have priority. */ #define NET2BIG_GPIO_PWR_RED_LED 6 #define NET2BIG_GPIO_PWR_BLUE_LED 16 #define NET2BIG_GPIO_PWR_LED_BLINK_STOP 7 #define NET2BIG_GPIO_SATA0_RED_LED 11 #define NET2BIG_GPIO_SATA1_RED_LED 10 #define NET2BIG_GPIO_SATA0_BLUE_LED 17 #define NET2BIG_GPIO_SATA1_BLUE_LED 13 static struct gpio_led net2big_leds[] = { { .name = "net2big:red:power", .gpio = NET2BIG_GPIO_PWR_RED_LED, }, { .name = "net2big:blue:power", .gpio = NET2BIG_GPIO_PWR_BLUE_LED, }, { .name = "net2big:red:sata0", .gpio = NET2BIG_GPIO_SATA0_RED_LED, }, { .name = "net2big:red:sata1", .gpio = NET2BIG_GPIO_SATA1_RED_LED, }, }; static struct gpio_led_platform_data net2big_led_data = { .num_leds = ARRAY_SIZE(net2big_leds), .leds = net2big_leds, }; static struct platform_device net2big_gpio_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &net2big_led_data, }, }; static void __init net2big_gpio_leds_init(void) { int err; /* Stop initial CPLD slow red/blue blinking on power LED. */ err = gpio_request(NET2BIG_GPIO_PWR_LED_BLINK_STOP, "Power LED blink stop"); if (err == 0) { err = gpio_direction_output(NET2BIG_GPIO_PWR_LED_BLINK_STOP, 1); if (err) gpio_free(NET2BIG_GPIO_PWR_LED_BLINK_STOP); } if (err) pr_err("net2big: failed to setup power LED blink GPIO\n"); /* * Configure SATA0 and SATA1 blue LEDs to blink in relation with the * hard disk activity. 
*/ err = gpio_request(NET2BIG_GPIO_SATA0_BLUE_LED, "SATA0 blue LED control"); if (err == 0) { err = gpio_direction_output(NET2BIG_GPIO_SATA0_BLUE_LED, 1); if (err) gpio_free(NET2BIG_GPIO_SATA0_BLUE_LED); } if (err) pr_err("net2big: failed to setup SATA0 blue LED GPIO\n"); err = gpio_request(NET2BIG_GPIO_SATA1_BLUE_LED, "SATA1 blue LED control"); if (err == 0) { err = gpio_direction_output(NET2BIG_GPIO_SATA1_BLUE_LED, 1); if (err) gpio_free(NET2BIG_GPIO_SATA1_BLUE_LED); } if (err) pr_err("net2big: failed to setup SATA1 blue LED GPIO\n"); platform_device_register(&net2big_gpio_leds); } /**************************************************************************** * GPIO keys ****************************************************************************/ #define NET2BIG_GPIO_PUSH_BUTTON 18 #define NET2BIG_GPIO_POWER_SWITCH_ON 8 #define NET2BIG_GPIO_POWER_SWITCH_OFF 9 #define NET2BIG_SWITCH_POWER_ON 0x1 #define NET2BIG_SWITCH_POWER_OFF 0x2 static struct gpio_keys_button net2big_buttons[] = { { .type = EV_SW, .code = NET2BIG_SWITCH_POWER_OFF, .gpio = NET2BIG_GPIO_POWER_SWITCH_OFF, .desc = "Power rocker switch (auto|off)", .active_low = 0, }, { .type = EV_SW, .code = NET2BIG_SWITCH_POWER_ON, .gpio = NET2BIG_GPIO_POWER_SWITCH_ON, .desc = "Power rocker switch (on|auto)", .active_low = 0, }, { .type = EV_KEY, .code = KEY_POWER, .gpio = NET2BIG_GPIO_PUSH_BUTTON, .desc = "Front Push Button", .active_low = 0, }, }; static struct gpio_keys_platform_data net2big_button_data = { .buttons = net2big_buttons, .nbuttons = ARRAY_SIZE(net2big_buttons), }; static struct platform_device net2big_gpio_buttons = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &net2big_button_data, }, }; /***************************************************************************** * General Setup ****************************************************************************/ static unsigned int net2big_mpp_modes[] __initdata = { MPP0_GPIO, /* Raid mode (bit 0) */ MPP1_GPIO, /* USB port 2 fuse (0 = 
Fail, 1 = Ok) */ MPP2_GPIO, /* Raid mode (bit 1) */ MPP3_GPIO, /* Board ID (bit 0) */ MPP4_GPIO, /* Fan activity (0 = Off, 1 = On) */ MPP5_GPIO, /* Fan fail detection */ MPP6_GPIO, /* Red front LED (0 = Off, 1 = On) */ MPP7_GPIO, /* Disable initial blinking on front LED */ MPP8_GPIO, /* Rear power switch (on|auto) */ MPP9_GPIO, /* Rear power switch (auto|off) */ MPP10_GPIO, /* SATA 1 red LED (0 = Off, 1 = On) */ MPP11_GPIO, /* SATA 0 red LED (0 = Off, 1 = On) */ MPP12_GPIO, /* Board ID (bit 1) */ MPP13_GPIO, /* SATA 1 blue LED blink control */ MPP14_SATA_LED, MPP15_SATA_LED, MPP16_GPIO, /* Blue front LED control */ MPP17_GPIO, /* SATA 0 blue LED blink control */ MPP18_GPIO, /* Front button (0 = Released, 1 = Pushed ) */ MPP19_GPIO, /* SATA{0,1} power On/Off request */ 0, /* 22: USB port 1 fuse (0 = Fail, 1 = Ok) */ /* 23: SATA 0 power status */ /* 24: Board power off */ /* 25: SATA 1 power status */ }; #define NET2BIG_GPIO_POWER_OFF 24 static void net2big_power_off(void) { gpio_set_value(NET2BIG_GPIO_POWER_OFF, 1); } static void __init net2big_init(void) { /* * Setup basic Orion functions. Need to be called early. */ orion5x_init(); orion5x_mpp_conf(net2big_mpp_modes); /* * Configure peripherals. 
*/ orion5x_ehci0_init(); orion5x_ehci1_init(); orion5x_eth_init(&net2big_eth_data); orion5x_i2c_init(); orion5x_uart0_init(); orion5x_xor_init(); net2big_sata_power_init(); orion5x_sata_init(&net2big_sata_data); orion5x_setup_dev_boot_win(NET2BIG_NOR_BOOT_BASE, NET2BIG_NOR_BOOT_SIZE); platform_device_register(&net2big_nor_flash); platform_device_register(&net2big_gpio_buttons); net2big_gpio_leds_init(); i2c_register_board_info(0, net2big_i2c_devices, ARRAY_SIZE(net2big_i2c_devices)); orion_gpio_set_valid(NET2BIG_GPIO_POWER_OFF, 1); if (gpio_request(NET2BIG_GPIO_POWER_OFF, "power-off") == 0 && gpio_direction_output(NET2BIG_GPIO_POWER_OFF, 0) == 0) pm_power_off = net2big_power_off; else pr_err("net2big: failed to configure power-off GPIO\n"); pr_notice("net2big: Flash writing is not yet supported.\n"); } /* Warning: LaCie use a wrong mach-type (0x20e=526) in their bootloader. */ MACHINE_START(NET2BIG, "LaCie 2Big Network") .atag_offset = 0x100, .init_machine = net2big_init, .map_io = orion5x_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .timer = &orion5x_timer, .fixup = tag_fixup_mem32, .restart = orion5x_restart, MACHINE_END
gpl-2.0
vm03/android_kernel_lge_msm8610
fs/ncpfs/mmap.c
4952
2972
/* * mmap.c * * Copyright (C) 1995, 1996 by Volker Lendecke * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache * */ #include <linux/stat.h> #include <linux/time.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/shm.h> #include <linux/errno.h> #include <linux/mman.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/memcontrol.h> #include <asm/uaccess.h> #include "ncp_fs.h" /* * Fill in the supplied page for mmap * XXX: how are we excluding truncate/invalidate here? Maybe need to lock * page? */ static int ncp_file_mmap_fault(struct vm_area_struct *area, struct vm_fault *vmf) { struct file *file = area->vm_file; struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; char *pg_addr; unsigned int already_read; unsigned int count; int bufsize; int pos; /* XXX: loff_t ? */ /* * ncpfs has nothing against high pages as long * as recvmsg and memset works on it */ vmf->page = alloc_page(GFP_HIGHUSER); if (!vmf->page) return VM_FAULT_OOM; pg_addr = kmap(vmf->page); pos = vmf->pgoff << PAGE_SHIFT; count = PAGE_SIZE; /* what we can read in one go */ bufsize = NCP_SERVER(inode)->buffer_size; already_read = 0; if (ncp_make_open(inode, O_RDONLY) >= 0) { while (already_read < count) { int read_this_time; int to_read; to_read = bufsize - (pos % bufsize); to_read = min_t(unsigned int, to_read, count - already_read); if (ncp_read_kernel(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle, pos, to_read, pg_addr + already_read, &read_this_time) != 0) { read_this_time = 0; } pos += read_this_time; already_read += read_this_time; if (read_this_time < to_read) { break; } } ncp_inode_close(inode); } if (already_read < PAGE_SIZE) memset(pg_addr + already_read, 0, PAGE_SIZE - already_read); flush_dcache_page(vmf->page); kunmap(vmf->page); /* * If I understand ncp_read_kernel() properly, the above always * fetches from the network, here the analogue of disk. 
* -- wli */ count_vm_event(PGMAJFAULT); mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT); return VM_FAULT_MAJOR; } static const struct vm_operations_struct ncp_file_mmap = { .fault = ncp_file_mmap_fault, }; /* This is used for a general mmap of a ncp file */ int ncp_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file->f_path.dentry->d_inode; DPRINTK("ncp_mmap: called\n"); if (!ncp_conn_valid(NCP_SERVER(inode))) return -EIO; /* only PAGE_COW or read-only supported now */ if (vma->vm_flags & VM_SHARED) return -EINVAL; /* we do not support files bigger than 4GB... We eventually supports just 4GB... */ if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff > (1U << (32 - PAGE_SHIFT))) return -EFBIG; vma->vm_ops = &ncp_file_mmap; file_accessed(file); return 0; }
gpl-2.0
Tk-Glitch/Glitch_Flo_AOSP
sound/pci/ctxfi/ctpcm.c
5208
11289
/*
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * @File	ctpcm.c
 *
 * @Brief
 * This file contains the definition of the pcm device functions.
 *
 * @Author	Liu Chun
 * @Date	Apr 2 2008
 */

#include "ctpcm.h"
#include "cttimer.h"
#include <linux/slab.h>
#include <sound/pcm.h>

/* Hardware description for analog/PCM playback substreams. */
static struct snd_pcm_hardware ct_pcm_playback_hw = {
	.info			= (SNDRV_PCM_INFO_MMAP |
				   SNDRV_PCM_INFO_INTERLEAVED |
				   SNDRV_PCM_INFO_BLOCK_TRANSFER |
				   SNDRV_PCM_INFO_MMAP_VALID |
				   SNDRV_PCM_INFO_PAUSE),
	.formats		= (SNDRV_PCM_FMTBIT_U8 |
				   SNDRV_PCM_FMTBIT_S16_LE |
				   SNDRV_PCM_FMTBIT_S24_3LE |
				   SNDRV_PCM_FMTBIT_S32_LE |
				   SNDRV_PCM_FMTBIT_FLOAT_LE),
	.rates			= (SNDRV_PCM_RATE_CONTINUOUS |
				   SNDRV_PCM_RATE_8000_192000),
	.rate_min		= 8000,
	.rate_max		= 192000,
	.channels_min		= 1,
	.channels_max		= 2,
	.buffer_bytes_max	= (128*1024),
	.period_bytes_min	= (64),
	.period_bytes_max	= (128*1024),
	.periods_min		= 2,
	.periods_max		= 1024,
	.fifo_size		= 0,
};

/* Hardware description for the IEC958 (S/PDIF) pass-through substream. */
static struct snd_pcm_hardware ct_spdif_passthru_playback_hw = {
	.info			= (SNDRV_PCM_INFO_MMAP |
				   SNDRV_PCM_INFO_INTERLEAVED |
				   SNDRV_PCM_INFO_BLOCK_TRANSFER |
				   SNDRV_PCM_INFO_MMAP_VALID |
				   SNDRV_PCM_INFO_PAUSE),
	.formats		= SNDRV_PCM_FMTBIT_S16_LE,
	.rates			= (SNDRV_PCM_RATE_48000 |
				   SNDRV_PCM_RATE_44100 |
				   SNDRV_PCM_RATE_32000),
	.rate_min		= 32000,
	.rate_max		= 48000,
	.channels_min		= 2,
	.channels_max		= 2,
	.buffer_bytes_max	= (128*1024),
	.period_bytes_min	= (64),
	.period_bytes_max	= (128*1024),
	.periods_min		= 2,
	.periods_max		= 1024,
	.fifo_size		= 0,
};

/* Hardware description for capture substreams. */
static struct snd_pcm_hardware ct_pcm_capture_hw = {
	.info			= (SNDRV_PCM_INFO_MMAP |
				   SNDRV_PCM_INFO_INTERLEAVED |
				   SNDRV_PCM_INFO_BLOCK_TRANSFER |
				   SNDRV_PCM_INFO_PAUSE |
				   SNDRV_PCM_INFO_MMAP_VALID),
	.formats		= (SNDRV_PCM_FMTBIT_U8 |
				   SNDRV_PCM_FMTBIT_S16_LE |
				   SNDRV_PCM_FMTBIT_S24_3LE |
				   SNDRV_PCM_FMTBIT_S32_LE |
				   SNDRV_PCM_FMTBIT_FLOAT_LE),
	.rates			= (SNDRV_PCM_RATE_CONTINUOUS |
				   SNDRV_PCM_RATE_8000_96000),
	.rate_min		= 8000,
	.rate_max		= 96000,
	.channels_min		= 1,
	.channels_max		= 2,
	.buffer_bytes_max	= (128*1024),
	.period_bytes_min	= (384),
	.period_bytes_max	= (64*1024),
	.periods_min		= 2,
	.periods_max		= 1024,
	.fifo_size		= 0,
};

/*
 * Period interrupt callback, invoked from the timer/irq path.
 * Tells ALSA that another period has elapsed on this substream.
 */
static void ct_atc_pcm_interrupt(struct ct_atc_pcm *atc_pcm)
{
	struct ct_atc_pcm *apcm = atc_pcm;

	if (!apcm->substream)
		return;

	snd_pcm_period_elapsed(apcm->substream);
}

/*
 * runtime->private_free callback: release hardware resources, the timer
 * instance and the ct_atc_pcm object attached to a closing substream.
 */
static void ct_atc_pcm_free_substream(struct snd_pcm_runtime *runtime)
{
	struct ct_atc_pcm *apcm = runtime->private_data;
	struct ct_atc *atc = snd_pcm_substream_chip(apcm->substream);

	atc->pcm_release_resources(atc, apcm);
	ct_timer_instance_free(apcm->timer);
	kfree(apcm);
	runtime->private_data = NULL;
}

/* Playback open: allocate per-substream state and install hw limits. */
static int ct_pcm_playback_open(struct snd_pcm_substream *substream)
{
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm;
	int err;

	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (!apcm)
		return -ENOMEM;

	apcm->substream = substream;
	apcm->interrupt = ct_atc_pcm_interrupt;

	if (substream->pcm->device == IEC958) {
		/* S/PDIF pass-through device has a restricted hw profile. */
		runtime->hw = ct_spdif_passthru_playback_hw;
		atc->spdif_out_passthru(atc, 1);
	} else {
		runtime->hw = ct_pcm_playback_hw;
		if (substream->pcm->device == FRONT)
			runtime->hw.channels_max = 8;
	}

	err = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (err < 0) {
		kfree(apcm);
		return err;
	}
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					   1024, UINT_MAX);
	if (err < 0) {
		kfree(apcm);
		return err;
	}

	apcm->timer = ct_timer_instance_new(atc->timer, apcm);
	if (!apcm->timer) {
		kfree(apcm);
		return -ENOMEM;
	}
	runtime->private_data = apcm;
	runtime->private_free = ct_atc_pcm_free_substream;

	return 0;
}

/* Playback close: disable pass-through mode if this was the S/PDIF device. */
static int ct_pcm_playback_close(struct snd_pcm_substream *substream)
{
	struct ct_atc *atc = snd_pcm_substream_chip(substream);

	/* TODO: Notify mixer inactive. */
	if (substream->pcm->device == IEC958)
		atc->spdif_out_passthru(atc, 0);

	/* The ct_atc_pcm object will be freed by runtime->private_free */

	return 0;
}

/* Allocate the DMA buffer and drop any resources from a previous setup. */
static int ct_pcm_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *hw_params)
{
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct ct_atc_pcm *apcm = substream->runtime->private_data;
	int err;

	err = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(hw_params));
	if (err < 0)
		return err;
	/* clear previous resources */
	atc->pcm_release_resources(atc, apcm);
	return err;
}

/* Release hardware resources and the DMA buffer. */
static int ct_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct ct_atc_pcm *apcm = substream->runtime->private_data;

	/* clear previous resources */
	atc->pcm_release_resources(atc, apcm);
	/* Free snd-allocated pages */
	return snd_pcm_lib_free_pages(substream);
}

/* Prepare playback: route through the pass-through path for IEC958. */
static int ct_pcm_playback_prepare(struct snd_pcm_substream *substream)
{
	int err;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm = runtime->private_data;

	if (substream->pcm->device == IEC958)
		err = atc->spdif_passthru_playback_prepare(atc, apcm);
	else
		err = atc->pcm_playback_prepare(atc, apcm);

	if (err < 0) {
		printk(KERN_ERR "ctxfi: Preparing pcm playback failed!!!\n");
		return err;
	}

	return 0;
}

/* Start/stop playback on trigger commands; unknown commands are ignored. */
static int
ct_pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm = runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		atc->pcm_playback_start(atc, apcm);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		atc->pcm_playback_stop(atc, apcm);
		break;
	default:
		break;
	}

	return 0;
}

/* Report the current playback position, wrapped to the buffer size. */
static snd_pcm_uframes_t
ct_pcm_playback_pointer(struct snd_pcm_substream *substream)
{
	unsigned long pos;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm = runtime->private_data;

	/* Read out playback position */
	pos = atc->pcm_playback_position(atc, apcm);
	pos = bytes_to_frames(runtime, pos);
	if (pos >= runtime->buffer_size)
		pos = 0;
	return pos;
}

/* Capture open: allocate per-substream state; max rate scales with msr. */
static int ct_pcm_capture_open(struct snd_pcm_substream *substream)
{
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm;
	int err;

	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (!apcm)
		return -ENOMEM;

	apcm->started = 0;
	apcm->substream = substream;
	apcm->interrupt = ct_atc_pcm_interrupt;
	runtime->hw = ct_pcm_capture_hw;
	runtime->hw.rate_max = atc->rsr * atc->msr;

	err = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (err < 0) {
		kfree(apcm);
		return err;
	}
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					   1024, UINT_MAX);
	if (err < 0) {
		kfree(apcm);
		return err;
	}

	apcm->timer = ct_timer_instance_new(atc->timer, apcm);
	if (!apcm->timer) {
		kfree(apcm);
		return -ENOMEM;
	}
	runtime->private_data = apcm;
	runtime->private_free = ct_atc_pcm_free_substream;

	return 0;
}

/* Capture close: nothing to do here; cleanup runs via private_free. */
static int ct_pcm_capture_close(struct snd_pcm_substream *substream)
{
	/* The ct_atc_pcm object will be freed by runtime->private_free */
	/* TODO: Notify mixer inactive. */
	return 0;
}

/* Prepare the capture path in the hardware. */
static int ct_pcm_capture_prepare(struct snd_pcm_substream *substream)
{
	int err;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm = runtime->private_data;

	err = atc->pcm_capture_prepare(atc, apcm);
	if (err < 0) {
		printk(KERN_ERR "ctxfi: Preparing pcm capture failed!!!\n");
		return err;
	}

	return 0;
}

/* Start/stop capture; any unrecognized command stops the stream. */
static int
ct_pcm_capture_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm = runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		atc->pcm_capture_start(atc, apcm);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	default:
		atc->pcm_capture_stop(atc, apcm);
		break;
	}

	return 0;
}

/* Report the current capture position, wrapped to the buffer size. */
static snd_pcm_uframes_t
ct_pcm_capture_pointer(struct snd_pcm_substream *substream)
{
	unsigned long pos;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm = runtime->private_data;

	/* Read out capture position */
	pos = atc->pcm_capture_position(atc, apcm);
	pos = bytes_to_frames(runtime, pos);
	if (pos >= runtime->buffer_size)
		pos = 0;
	return pos;
}

/* PCM operators for playback */
static struct snd_pcm_ops ct_pcm_playback_ops = {
	.open		= ct_pcm_playback_open,
	.close		= ct_pcm_playback_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= ct_pcm_hw_params,
	.hw_free	= ct_pcm_hw_free,
	.prepare	= ct_pcm_playback_prepare,
	.trigger	= ct_pcm_playback_trigger,
	.pointer	= ct_pcm_playback_pointer,
	.page		= snd_pcm_sgbuf_ops_page,
};

/* PCM operators for capture */
static struct snd_pcm_ops ct_pcm_capture_ops = {
	.open		= ct_pcm_capture_open,
	.close		= ct_pcm_capture_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= ct_pcm_hw_params,
	.hw_free	= ct_pcm_hw_free,
	.prepare	= ct_pcm_capture_prepare,
	.trigger	= ct_pcm_capture_trigger,
	.pointer	= ct_pcm_capture_pointer,
	.page		= snd_pcm_sgbuf_ops_page,
};

/*
 * Create the ALSA pcm device @device named @device_name on @atc's card.
 * Only FRONT gets a capture stream; IEC958 gets a single playback stream.
 * Returns 0 on success or a negative error code from the ALSA core.
 */
int ct_alsa_pcm_create(struct ct_atc *atc,
		       enum CTALSADEVS device,
		       const char *device_name)
{
	struct snd_pcm *pcm;
	int err;
	int playback_count, capture_count;

	playback_count = (device == IEC958) ? 1 : 256;
	capture_count = (device == FRONT) ? 1 : 0;
	err = snd_pcm_new(atc->card, "ctxfi", device,
			  playback_count, capture_count, &pcm);
	if (err < 0) {
		printk(KERN_ERR "ctxfi: snd_pcm_new failed!! Err=%d\n", err);
		return err;
	}

	pcm->private_data = atc;
	pcm->info_flags = 0;
	pcm->dev_subclass = SNDRV_PCM_SUBCLASS_GENERIC_MIX;
	strlcpy(pcm->name, device_name, sizeof(pcm->name));

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &ct_pcm_playback_ops);

	if (device == FRONT)
		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
				&ct_pcm_capture_ops);

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
			snd_dma_pci_data(atc->pci), 128*1024, 128*1024);

#ifdef CONFIG_PM
	atc->pcms[device] = pcm;
#endif

	return 0;
}
gpl-2.0
matthiasdiener/kmaf
drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
8024
4766
/*--------------------------------------------------------------------------- FT1000 driver for Flarion Flash OFDM NIC Device Copyright (C) 1999 David A. Hinds. All Rights Reserved. Copyright (C) 2002 Flarion Technologies, All rights reserved. Copyright (C) 2006 Patrik Ostrihon, All rights reserved. Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved. The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds. This file was modified to support the Flarion Flash OFDM NIC Device by Wai Chan (w.chan@flarion.com). Port for kernel 2.6 created by Patrik Ostrihon (patrik.ostrihon@pwc.sk) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-----------------------------------------------------------------------------*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> /*====================================================================*/ MODULE_AUTHOR("Wai Chan"); MODULE_DESCRIPTION("FT1000 PCMCIA driver"); MODULE_LICENSE("GPL"); /*====================================================================*/ static int ft1000_config(struct pcmcia_device *link); static void ft1000_detach(struct pcmcia_device *link); static int ft1000_attach(struct pcmcia_device *link); #include "ft1000.h" /*====================================================================*/ static void ft1000_reset(struct pcmcia_device *link) { pcmcia_reset_card(link->socket); } static int ft1000_attach(struct pcmcia_device *link) { link->priv = NULL; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; return ft1000_config(link); } static void ft1000_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (dev) stop_ft1000_card(dev); pcmcia_disable_device(link); free_netdev(dev); } static int ft1000_confcheck(struct pcmcia_device *link, void *priv_data) { return pcmcia_request_io(link); } /*====================================================================== ft1000_config() is scheduled to run after a CARD_INSERTION event is received, to configure the PCMCIA socket, and to make the device available to the system. 
======================================================================*/ static int ft1000_config(struct pcmcia_device *link) { int ret; dev_dbg(&link->dev, "ft1000_cs: ft1000_config(0x%p)\n", link); /* setup IO window */ ret = pcmcia_loop_config(link, ft1000_confcheck, NULL); if (ret) { printk(KERN_INFO "ft1000: Could not configure pcmcia\n"); return -ENODEV; } /* configure device */ ret = pcmcia_enable_device(link); if (ret) { printk(KERN_INFO "ft1000: could not enable pcmcia\n"); goto failed; } link->priv = init_ft1000_card(link, &ft1000_reset); if (!link->priv) { printk(KERN_INFO "ft1000: Could not register as network device\n"); goto failed; } /* Finally, report what we've done */ return 0; failed: pcmcia_disable_device(link); return -ENODEV; } static int ft1000_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) netif_device_detach(dev); return 0; } static int ft1000_resume(struct pcmcia_device *link) { return 0; } /*====================================================================*/ static const struct pcmcia_device_id ft1000_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x0100), PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x1000), PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x1300), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, ft1000_ids); static struct pcmcia_driver ft1000_cs_driver = { .owner = THIS_MODULE, .name = "ft1000_cs", .probe = ft1000_attach, .remove = ft1000_detach, .id_table = ft1000_ids, .suspend = ft1000_suspend, .resume = ft1000_resume, }; static int __init init_ft1000_cs(void) { return pcmcia_register_driver(&ft1000_cs_driver); } static void __exit exit_ft1000_cs(void) { pcmcia_unregister_driver(&ft1000_cs_driver); } module_init(init_ft1000_cs); module_exit(exit_ft1000_cs);
gpl-2.0
atniptw/PonyBuntu
arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c
9304
77737
/* * SH7724 Pinmux * * Copyright (C) 2009 Renesas Solutions Corp. * * Kuninori Morimoto <morimoto.kuninori@renesas.com> * * Based on SH7723 Pinmux * Copyright (C) 2008 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <cpu/sh7724.h> enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA, PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA, PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA, PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA, PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA, PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA, PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA, PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA, PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA, PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA, PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA, PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA, PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA, PTQ7_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA, PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA, PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA, PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA, 
PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA, PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA, PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA, PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA, PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA, PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN, PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN, PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN, PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN, PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN, PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN, PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN, PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN, PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN, PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN, PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN, PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN, PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN, PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN, PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN, PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN, PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN, PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN, PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN, PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN, PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN, PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN, PTQ7_IN, PTQ6_IN, PTQ5_IN, PTQ4_IN, PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN, PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN, PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN, PTS6_IN, PTS5_IN, PTS4_IN, PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN, PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN, PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN, PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN, PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN, PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN, PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN, PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN, PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN, PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN, PTZ7_IN, 
PTZ6_IN, PTZ5_IN, PTZ4_IN, PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN, PINMUX_INPUT_END, PINMUX_INPUT_PULLUP_BEGIN, PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU, PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU, PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU, PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU, PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU, PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU, PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU, PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU, PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU, PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU, PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU, PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU, PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU, PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU, PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU, PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU, PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU, PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU, PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU, PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU, PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU, PTN7_IN_PU, PTN6_IN_PU, PTN5_IN_PU, PTN4_IN_PU, PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU, PTQ7_IN_PU, PTQ6_IN_PU, PTQ5_IN_PU, PTQ4_IN_PU, PTQ3_IN_PU, PTQ2_IN_PU, PTQ1_IN_PU, PTQ0_IN_PU, PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU, PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU, PTS6_IN_PU, PTS5_IN_PU, PTS4_IN_PU, PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU, PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU, PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU, PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU, PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU, PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU, PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU, PTW7_IN_PU, PTW6_IN_PU, PTW5_IN_PU, PTW4_IN_PU, PTW3_IN_PU, PTW2_IN_PU, PTW1_IN_PU, PTW0_IN_PU, PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU, PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU, PTY7_IN_PU, PTY6_IN_PU, 
PTY5_IN_PU, PTY4_IN_PU, PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU, PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU, PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU, PINMUX_INPUT_PULLUP_END, PINMUX_OUTPUT_BEGIN, PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT, PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT, PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT, PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT, PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT, PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT, PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT, PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT, PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT, PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT, PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT, PTG5_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT, PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT, PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT, PTJ7_OUT, PTJ6_OUT, PTJ5_OUT, PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT, PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT, PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT, PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT, PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT, PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT, PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT, PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT, PTQ7_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT, PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT, PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT, PTR1_OUT, PTR0_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT, PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT, PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT, PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT, PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT, PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT, PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT, PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT, PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT, PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT, PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT, PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT, PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT, 
PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN, PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN, PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN, PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN, PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN, PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN, PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN, PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN, PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN, PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN, PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN, PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN, PTG5_FN, PTG4_FN, PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN, PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN, PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN, PTJ7_FN, PTJ6_FN, PTJ5_FN, PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN, PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN, PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN, PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN, PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN, PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN, PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN, PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN, PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN, PTQ7_FN, PTQ6_FN, PTQ5_FN, PTQ4_FN, PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN, PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN, PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN, PTS6_FN, PTS5_FN, PTS4_FN, PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN, PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN, PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN, PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN, PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN, PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN, PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN, PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN, PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN, PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN, PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN, PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN, PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN, PSA15_0, PSA15_1, PSA14_0, PSA14_1, PSA13_0, PSA13_1, PSA12_0, PSA12_1, PSA10_0, PSA10_1, PSA9_0, PSA9_1, PSA8_0, PSA8_1, PSA7_0, PSA7_1, PSA6_0, PSA6_1, PSA5_0, PSA5_1, PSA3_0, PSA3_1, PSA2_0, PSA2_1, PSA1_0, PSA1_1, PSA0_0, PSA0_1, PSB14_0, PSB14_1, PSB13_0, PSB13_1, PSB12_0, PSB12_1, PSB11_0, PSB11_1, PSB10_0, PSB10_1, PSB9_0, 
PSB9_1, PSB8_0, PSB8_1, PSB7_0, PSB7_1, PSB6_0, PSB6_1, PSB5_0, PSB5_1, PSB4_0, PSB4_1, PSB3_0, PSB3_1, PSB2_0, PSB2_1, PSB1_0, PSB1_1, PSB0_0, PSB0_1, PSC15_0, PSC15_1, PSC14_0, PSC14_1, PSC13_0, PSC13_1, PSC12_0, PSC12_1, PSC11_0, PSC11_1, PSC10_0, PSC10_1, PSC9_0, PSC9_1, PSC8_0, PSC8_1, PSC7_0, PSC7_1, PSC6_0, PSC6_1, PSC5_0, PSC5_1, PSC4_0, PSC4_1, PSC2_0, PSC2_1, PSC1_0, PSC1_1, PSC0_0, PSC0_1, PSD15_0, PSD15_1, PSD14_0, PSD14_1, PSD13_0, PSD13_1, PSD12_0, PSD12_1, PSD11_0, PSD11_1, PSD10_0, PSD10_1, PSD9_0, PSD9_1, PSD8_0, PSD8_1, PSD7_0, PSD7_1, PSD6_0, PSD6_1, PSD5_0, PSD5_1, PSD4_0, PSD4_1, PSD3_0, PSD3_1, PSD2_0, PSD2_1, PSD1_0, PSD1_1, PSD0_0, PSD0_1, PSE15_0, PSE15_1, PSE14_0, PSE14_1, PSE13_0, PSE13_1, PSE12_0, PSE12_1, PSE11_0, PSE11_1, PSE10_0, PSE10_1, PSE9_0, PSE9_1, PSE8_0, PSE8_1, PSE7_0, PSE7_1, PSE6_0, PSE6_1, PSE5_0, PSE5_1, PSE4_0, PSE4_1, PSE3_0, PSE3_1, PSE2_0, PSE2_1, PSE1_0, PSE1_1, PSE0_0, PSE0_1, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, /*PTA*/ D23_MARK, KEYOUT2_MARK, IDED15_MARK, D22_MARK, KEYOUT1_MARK, IDED14_MARK, D21_MARK, KEYOUT0_MARK, IDED13_MARK, D20_MARK, KEYIN4_MARK, IDED12_MARK, D19_MARK, KEYIN3_MARK, IDED11_MARK, D18_MARK, KEYIN2_MARK, IDED10_MARK, D17_MARK, KEYIN1_MARK, IDED9_MARK, D16_MARK, KEYIN0_MARK, IDED8_MARK, /*PTB*/ D31_MARK, TPUTO1_MARK, IDEA1_MARK, D30_MARK, TPUTO0_MARK, IDEA0_MARK, D29_MARK, IODREQ_MARK, D28_MARK, IDECS0_MARK, D27_MARK, IDECS1_MARK, D26_MARK, KEYOUT5_IN5_MARK, IDEIORD_MARK, D25_MARK, KEYOUT4_IN6_MARK, IDEIOWR_MARK, D24_MARK, KEYOUT3_MARK, IDEINT_MARK, /*PTC*/ LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK, LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK, /*PTD*/ LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK, LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK, /*PTE*/ FSIMCKB_MARK, FSIMCKA_MARK, LCDD21_MARK, SCIF2_L_TXD_MARK, LCDD20_MARK, SCIF4_SCK_MARK, LCDD19_MARK, SCIF4_RXD_MARK, LCDD18_MARK, SCIF4_TXD_MARK, LCDD17_MARK, LCDD16_MARK, /*PTF*/ LCDVSYN_MARK, LCDDISP_MARK, LCDRS_MARK, 
LCDHSYN_MARK, LCDCS_MARK, LCDDON_MARK, LCDDCK_MARK, LCDWR_MARK, LCDVEPWC_MARK, SCIF0_TXD_MARK, LCDD23_MARK, SCIF2_L_SCK_MARK, LCDD22_MARK, SCIF2_L_RXD_MARK, /*PTG*/ AUDCK_MARK, AUDSYNC_MARK, AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK, /*PTH*/ VIO0_VD_MARK, VIO0_CLK_MARK, VIO0_D7_MARK, VIO0_D6_MARK, VIO0_D5_MARK, VIO0_D4_MARK, VIO0_D3_MARK, VIO0_D2_MARK, /*PTJ*/ PDSTATUS_MARK, STATUS2_MARK, STATUS0_MARK, A25_MARK, BS_MARK, A24_MARK, A23_MARK, A22_MARK, /*PTK*/ VIO1_D5_MARK, VIO0_D13_MARK, IDED5_MARK, VIO1_D4_MARK, VIO0_D12_MARK, IDED4_MARK, VIO1_D3_MARK, VIO0_D11_MARK, IDED3_MARK, VIO1_D2_MARK, VIO0_D10_MARK, IDED2_MARK, VIO1_D1_MARK, VIO0_D9_MARK, IDED1_MARK, VIO1_D0_MARK, VIO0_D8_MARK, IDED0_MARK, VIO0_FLD_MARK, VIO0_HD_MARK, /*PTL*/ DV_D5_MARK, SCIF3_V_SCK_MARK, RMII_RXD0_MARK, DV_D4_MARK, SCIF3_V_RXD_MARK, RMII_RXD1_MARK, DV_D3_MARK, SCIF3_V_TXD_MARK, RMII_REF_CLK_MARK, DV_D2_MARK, SCIF1_SCK_MARK, RMII_TX_EN_MARK, DV_D1_MARK, SCIF1_RXD_MARK, RMII_TXD0_MARK, DV_D0_MARK, SCIF1_TXD_MARK, RMII_TXD1_MARK, DV_D15_MARK, DV_D14_MARK, MSIOF0_MCK_MARK, /*PTM*/ DV_D13_MARK, MSIOF0_TSCK_MARK, DV_D12_MARK, MSIOF0_RXD_MARK, DV_D11_MARK, MSIOF0_TXD_MARK, DV_D10_MARK, MSIOF0_TSYNC_MARK, DV_D9_MARK, MSIOF0_SS1_MARK, MSIOF0_RSCK_MARK, DV_D8_MARK, MSIOF0_SS2_MARK, MSIOF0_RSYNC_MARK, LCDVCPWC_MARK, SCIF0_RXD_MARK, LCDRD_MARK, SCIF0_SCK_MARK, /*PTN*/ VIO0_D1_MARK, VIO0_D0_MARK, DV_CLKI_MARK, DV_CLK_MARK, SCIF2_V_SCK_MARK, DV_VSYNC_MARK, SCIF2_V_RXD_MARK, DV_HSYNC_MARK, SCIF2_V_TXD_MARK, DV_D7_MARK, SCIF3_V_CTS_MARK, RMII_RX_ER_MARK, DV_D6_MARK, SCIF3_V_RTS_MARK, RMII_CRS_DV_MARK, /*PTQ*/ D7_MARK, D6_MARK, D5_MARK, D4_MARK, D3_MARK, D2_MARK, D1_MARK, D0_MARK, /*PTR*/ CS6B_CE1B_MARK, CS6A_CE2B_MARK, CS5B_CE1A_MARK, CS5A_CE2A_MARK, IOIS16_MARK, LCDLCLK_MARK, WAIT_MARK, WE3_ICIOWR_MARK, TPUTO3_MARK, TPUTI3_MARK, WE2_ICIORD_MARK, TPUTO2_MARK, IDEA2_MARK, /*PTS*/ VIO_CKO_MARK, VIO1_FLD_MARK, TPUTI2_MARK, IDEIORDY_MARK, VIO1_HD_MARK, SCIF5_SCK_MARK, VIO1_VD_MARK, 
SCIF5_RXD_MARK, VIO1_CLK_MARK, SCIF5_TXD_MARK, VIO1_D7_MARK, VIO0_D15_MARK, IDED7_MARK, VIO1_D6_MARK, VIO0_D14_MARK, IDED6_MARK, /*PTT*/ D15_MARK, D14_MARK, D13_MARK, D12_MARK, D11_MARK, D10_MARK, D9_MARK, D8_MARK, /*PTU*/ DMAC_DACK0_MARK, DMAC_DREQ0_MARK, FSIOASD_MARK, FSIIABCK_MARK, FSIIALRCK_MARK, FSIOABCK_MARK, FSIOALRCK_MARK, CLKAUDIOAO_MARK, /*PTV*/ FSIIBSD_MARK, MSIOF1_SS2_MARK, MSIOF1_RSYNC_MARK, FSIOBSD_MARK, MSIOF1_SS1_MARK, MSIOF1_RSCK_MARK, FSIIBBCK_MARK, MSIOF1_RXD_MARK, FSIIBLRCK_MARK, MSIOF1_TSYNC_MARK, FSIOBBCK_MARK, MSIOF1_TSCK_MARK, FSIOBLRCK_MARK, MSIOF1_TXD_MARK, CLKAUDIOBO_MARK, MSIOF1_MCK_MARK, FSIIASD_MARK, /*PTW*/ MMC_D7_MARK, SDHI1CD_MARK, IODACK_MARK, MMC_D6_MARK, SDHI1WP_MARK, IDERST_MARK, MMC_D5_MARK, SDHI1D3_MARK, EXBUF_ENB_MARK, MMC_D4_MARK, SDHI1D2_MARK, DIRECTION_MARK, MMC_D3_MARK, SDHI1D1_MARK, MMC_D2_MARK, SDHI1D0_MARK, MMC_D1_MARK, SDHI1CMD_MARK, MMC_D0_MARK, SDHI1CLK_MARK, /*PTX*/ DMAC_DACK1_MARK, IRDA_OUT_MARK, DMAC_DREQ1_MARK, IRDA_IN_MARK, TSIF_TS0_SDAT_MARK, LNKSTA_MARK, TSIF_TS0_SCK_MARK, MDIO_MARK, TSIF_TS0_SDEN_MARK, MDC_MARK, TSIF_TS0_SPSYNC_MARK, MMC_CLK_MARK, MMC_CMD_MARK, /*PTY*/ SDHI0CD_MARK, SDHI0WP_MARK, SDHI0D3_MARK, SDHI0D2_MARK, SDHI0D1_MARK, SDHI0D0_MARK, SDHI0CMD_MARK, SDHI0CLK_MARK, /*PTZ*/ INTC_IRQ7_MARK, SCIF3_I_CTS_MARK, INTC_IRQ6_MARK, SCIF3_I_RTS_MARK, INTC_IRQ5_MARK, SCIF3_I_SCK_MARK, INTC_IRQ4_MARK, SCIF3_I_RXD_MARK, INTC_IRQ3_MARK, SCIF3_I_TXD_MARK, INTC_IRQ2_MARK, INTC_IRQ1_MARK, INTC_IRQ0_MARK, PINMUX_MARK_END, }; static pinmux_enum_t pinmux_data[] = { /* PTA GPIO */ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU), PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU), PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU), PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU), PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU), PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU), PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU), PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU), 
/* PTB GPIO */ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU), PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU), PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU), PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU), PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU), PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU), PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU), PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU), /* PTC GPIO */ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU), PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU), PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU), PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU), PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU), PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU), PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT, PTC1_IN_PU), PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU), /* PTD GPIO */ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU), PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU), PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU), PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU), PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU), PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU), PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU), PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU), /* PTE GPIO */ PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT, PTE7_IN_PU), PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT, PTE6_IN_PU), PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT, PTE5_IN_PU), PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU), PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU), PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU), PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU), PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU), /* PTF GPIO */ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT, PTF7_IN_PU), PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT, PTF6_IN_PU), PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT, PTF5_IN_PU), 
PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT, PTF4_IN_PU), PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT, PTF3_IN_PU), PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT, PTF2_IN_PU), PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT, PTF1_IN_PU), PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU), /* PTG GPIO */ PINMUX_DATA(PTG5_DATA, PTG5_OUT), PINMUX_DATA(PTG4_DATA, PTG4_OUT), PINMUX_DATA(PTG3_DATA, PTG3_OUT), PINMUX_DATA(PTG2_DATA, PTG2_OUT), PINMUX_DATA(PTG1_DATA, PTG1_OUT), PINMUX_DATA(PTG0_DATA, PTG0_OUT), /* PTH GPIO */ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT, PTH7_IN_PU), PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU), PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU), PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU), PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU), PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU), PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU), PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU), /* PTJ GPIO */ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT), PINMUX_DATA(PTJ6_DATA, PTJ6_OUT), PINMUX_DATA(PTJ5_DATA, PTJ5_OUT), PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU), PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU), PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU), PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU), /* PTK GPIO */ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT, PTK7_IN_PU), PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT, PTK6_IN_PU), PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT, PTK5_IN_PU), PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT, PTK4_IN_PU), PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU), PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU), PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU), PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU), /* PTL GPIO */ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU), PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU), PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU), PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU), PINMUX_DATA(PTL3_DATA, 
PTL3_IN, PTL3_OUT, PTL3_IN_PU), PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT, PTL2_IN_PU), PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT, PTL1_IN_PU), PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT, PTL0_IN_PU), /* PTM GPIO */ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU), PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU), PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU), PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU), PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU), PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU), PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU), PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU), /* PTN GPIO */ PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT, PTN7_IN_PU), PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT, PTN6_IN_PU), PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT, PTN5_IN_PU), PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT, PTN4_IN_PU), PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT, PTN3_IN_PU), PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT, PTN2_IN_PU), PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT, PTN1_IN_PU), PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT, PTN0_IN_PU), /* PTQ GPIO */ PINMUX_DATA(PTQ7_DATA, PTQ7_IN, PTQ7_OUT, PTQ7_IN_PU), PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT, PTQ6_IN_PU), PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT, PTQ5_IN_PU), PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT, PTQ4_IN_PU), PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT, PTQ3_IN_PU), PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT, PTQ2_IN_PU), PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT, PTQ1_IN_PU), PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT, PTQ0_IN_PU), /* PTR GPIO */ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU), PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU), PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU), PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU), PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_IN_PU), PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU), PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU), PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU), /* 
PTS GPIO */ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT, PTS6_IN_PU), PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT, PTS5_IN_PU), PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU), PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU), PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU), PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU), PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU), /* PTT GPIO */ PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT, PTT7_IN_PU), PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT, PTT6_IN_PU), PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT, PTT5_IN_PU), PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU), PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU), PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU), PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU), PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU), /* PTU GPIO */ PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT, PTU7_IN_PU), PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT, PTU6_IN_PU), PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT, PTU5_IN_PU), PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU), PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU), PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU), PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU), PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU), /* PTV GPIO */ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT, PTV7_IN_PU), PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT, PTV6_IN_PU), PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT, PTV5_IN_PU), PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU), PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU), PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU), PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU), PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU), /* PTW GPIO */ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT, PTW7_IN_PU), PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT, PTW6_IN_PU), PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT, PTW5_IN_PU), PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT, PTW4_IN_PU), 
PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT, PTW3_IN_PU), PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT, PTW2_IN_PU), PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT, PTW1_IN_PU), PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT, PTW0_IN_PU), /* PTX GPIO */ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT, PTX7_IN_PU), PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT, PTX6_IN_PU), PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT, PTX5_IN_PU), PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT, PTX4_IN_PU), PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT, PTX3_IN_PU), PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT, PTX2_IN_PU), PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT, PTX1_IN_PU), PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT, PTX0_IN_PU), /* PTY GPIO */ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT, PTY7_IN_PU), PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT, PTY6_IN_PU), PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT, PTY5_IN_PU), PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT, PTY4_IN_PU), PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT, PTY3_IN_PU), PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT, PTY2_IN_PU), PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT, PTY1_IN_PU), PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT, PTY0_IN_PU), /* PTZ GPIO */ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT, PTZ7_IN_PU), PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT, PTZ6_IN_PU), PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT, PTZ5_IN_PU), PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT, PTZ4_IN_PU), PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT, PTZ3_IN_PU), PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT, PTZ2_IN_PU), PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT, PTZ1_IN_PU), PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT, PTZ0_IN_PU), /* PTA FN */ PINMUX_DATA(D23_MARK, PSA15_0, PSA14_0, PTA7_FN), PINMUX_DATA(D22_MARK, PSA15_0, PSA14_0, PTA6_FN), PINMUX_DATA(D21_MARK, PSA15_0, PSA14_0, PTA5_FN), PINMUX_DATA(D20_MARK, PSA15_0, PSA14_0, PTA4_FN), PINMUX_DATA(D19_MARK, PSA15_0, PSA14_0, PTA3_FN), PINMUX_DATA(D18_MARK, PSA15_0, PSA14_0, PTA2_FN), PINMUX_DATA(D17_MARK, PSA15_0, PSA14_0, PTA1_FN), PINMUX_DATA(D16_MARK, PSA15_0, PSA14_0, PTA0_FN), 
PINMUX_DATA(KEYOUT2_MARK, PSA15_0, PSA14_1, PTA7_FN), PINMUX_DATA(KEYOUT1_MARK, PSA15_0, PSA14_1, PTA6_FN), PINMUX_DATA(KEYOUT0_MARK, PSA15_0, PSA14_1, PTA5_FN), PINMUX_DATA(KEYIN4_MARK, PSA15_0, PSA14_1, PTA4_FN), PINMUX_DATA(KEYIN3_MARK, PSA15_0, PSA14_1, PTA3_FN), PINMUX_DATA(KEYIN2_MARK, PSA15_0, PSA14_1, PTA2_FN), PINMUX_DATA(KEYIN1_MARK, PSA15_0, PSA14_1, PTA1_FN), PINMUX_DATA(KEYIN0_MARK, PSA15_0, PSA14_1, PTA0_FN), PINMUX_DATA(IDED15_MARK, PSA15_1, PSA14_0, PTA7_FN), PINMUX_DATA(IDED14_MARK, PSA15_1, PSA14_0, PTA6_FN), PINMUX_DATA(IDED13_MARK, PSA15_1, PSA14_0, PTA5_FN), PINMUX_DATA(IDED12_MARK, PSA15_1, PSA14_0, PTA4_FN), PINMUX_DATA(IDED11_MARK, PSA15_1, PSA14_0, PTA3_FN), PINMUX_DATA(IDED10_MARK, PSA15_1, PSA14_0, PTA2_FN), PINMUX_DATA(IDED9_MARK, PSA15_1, PSA14_0, PTA1_FN), PINMUX_DATA(IDED8_MARK, PSA15_1, PSA14_0, PTA0_FN), /* PTB FN */ PINMUX_DATA(D31_MARK, PSE15_0, PSE14_0, PTB7_FN), PINMUX_DATA(D30_MARK, PSE15_0, PSE14_0, PTB6_FN), PINMUX_DATA(D29_MARK, PSE11_0, PTB5_FN), PINMUX_DATA(D28_MARK, PSE11_0, PTB4_FN), PINMUX_DATA(D27_MARK, PSE11_0, PTB3_FN), PINMUX_DATA(D26_MARK, PSA15_0, PSA14_0, PTB2_FN), PINMUX_DATA(D25_MARK, PSA15_0, PSA14_0, PTB1_FN), PINMUX_DATA(D24_MARK, PSA15_0, PSA14_0, PTB0_FN), PINMUX_DATA(IDEA1_MARK, PSE15_1, PSE14_0, PTB7_FN), PINMUX_DATA(IDEA0_MARK, PSE15_1, PSE14_0, PTB6_FN), PINMUX_DATA(IODREQ_MARK, PSE11_1, PTB5_FN), PINMUX_DATA(IDECS0_MARK, PSE11_1, PTB4_FN), PINMUX_DATA(IDECS1_MARK, PSE11_1, PTB3_FN), PINMUX_DATA(IDEIORD_MARK, PSA15_1, PSA14_0, PTB2_FN), PINMUX_DATA(IDEIOWR_MARK, PSA15_1, PSA14_0, PTB1_FN), PINMUX_DATA(IDEINT_MARK, PSA15_1, PSA14_0, PTB0_FN), PINMUX_DATA(TPUTO1_MARK, PSE15_0, PSE14_1, PTB7_FN), PINMUX_DATA(TPUTO0_MARK, PSE15_0, PSE14_1, PTB6_FN), PINMUX_DATA(KEYOUT5_IN5_MARK, PSA15_0, PSA14_1, PTB2_FN), PINMUX_DATA(KEYOUT4_IN6_MARK, PSA15_0, PSA14_1, PTB1_FN), PINMUX_DATA(KEYOUT3_MARK, PSA15_0, PSA14_1, PTB0_FN), /* PTC FN */ PINMUX_DATA(LCDD7_MARK, PSD5_0, PTC7_FN), PINMUX_DATA(LCDD6_MARK, PSD5_0, 
PTC6_FN), PINMUX_DATA(LCDD5_MARK, PSD5_0, PTC5_FN), PINMUX_DATA(LCDD4_MARK, PSD5_0, PTC4_FN), PINMUX_DATA(LCDD3_MARK, PSD5_0, PTC3_FN), PINMUX_DATA(LCDD2_MARK, PSD5_0, PTC2_FN), PINMUX_DATA(LCDD1_MARK, PSD5_0, PTC1_FN), PINMUX_DATA(LCDD0_MARK, PSD5_0, PTC0_FN), /* PTD FN */ PINMUX_DATA(LCDD15_MARK, PSD5_0, PTD7_FN), PINMUX_DATA(LCDD14_MARK, PSD5_0, PTD6_FN), PINMUX_DATA(LCDD13_MARK, PSD5_0, PTD5_FN), PINMUX_DATA(LCDD12_MARK, PSD5_0, PTD4_FN), PINMUX_DATA(LCDD11_MARK, PSD5_0, PTD3_FN), PINMUX_DATA(LCDD10_MARK, PSD5_0, PTD2_FN), PINMUX_DATA(LCDD9_MARK, PSD5_0, PTD1_FN), PINMUX_DATA(LCDD8_MARK, PSD5_0, PTD0_FN), /* PTE FN */ PINMUX_DATA(FSIMCKB_MARK, PTE7_FN), PINMUX_DATA(FSIMCKA_MARK, PTE6_FN), PINMUX_DATA(LCDD21_MARK, PSC5_0, PSC4_0, PTE5_FN), PINMUX_DATA(LCDD20_MARK, PSD3_0, PSD2_0, PTE4_FN), PINMUX_DATA(LCDD19_MARK, PSA3_0, PSA2_0, PTE3_FN), PINMUX_DATA(LCDD18_MARK, PSA3_0, PSA2_0, PTE2_FN), PINMUX_DATA(LCDD17_MARK, PSD5_0, PTE1_FN), PINMUX_DATA(LCDD16_MARK, PSD5_0, PTE0_FN), PINMUX_DATA(SCIF2_L_TXD_MARK, PSC5_0, PSC4_1, PTE5_FN), PINMUX_DATA(SCIF4_SCK_MARK, PSD3_0, PSD2_1, PTE4_FN), PINMUX_DATA(SCIF4_RXD_MARK, PSA3_0, PSA2_1, PTE3_FN), PINMUX_DATA(SCIF4_TXD_MARK, PSA3_0, PSA2_1, PTE2_FN), /* PTF FN */ PINMUX_DATA(LCDVSYN_MARK, PSD8_0, PTF7_FN), PINMUX_DATA(LCDDISP_MARK, PSD10_0, PSD9_0, PTF6_FN), PINMUX_DATA(LCDHSYN_MARK, PSD10_0, PSD9_0, PTF5_FN), PINMUX_DATA(LCDDON_MARK, PSD8_0, PTF4_FN), PINMUX_DATA(LCDDCK_MARK, PSD10_0, PSD9_0, PTF3_FN), PINMUX_DATA(LCDVEPWC_MARK, PSA6_0, PTF2_FN), PINMUX_DATA(LCDD23_MARK, PSC7_0, PSC6_0, PTF1_FN), PINMUX_DATA(LCDD22_MARK, PSC5_0, PSC4_0, PTF0_FN), PINMUX_DATA(LCDRS_MARK, PSD10_0, PSD9_1, PTF6_FN), PINMUX_DATA(LCDCS_MARK, PSD10_0, PSD9_1, PTF5_FN), PINMUX_DATA(LCDWR_MARK, PSD10_0, PSD9_1, PTF3_FN), PINMUX_DATA(SCIF0_TXD_MARK, PSA6_1, PTF2_FN), PINMUX_DATA(SCIF2_L_SCK_MARK, PSC7_0, PSC6_1, PTF1_FN), PINMUX_DATA(SCIF2_L_RXD_MARK, PSC5_0, PSC4_1, PTF0_FN), /* PTG FN */ PINMUX_DATA(AUDCK_MARK, PTG5_FN), PINMUX_DATA(AUDSYNC_MARK, 
PTG4_FN), PINMUX_DATA(AUDATA3_MARK, PTG3_FN), PINMUX_DATA(AUDATA2_MARK, PTG2_FN), PINMUX_DATA(AUDATA1_MARK, PTG1_FN), PINMUX_DATA(AUDATA0_MARK, PTG0_FN), /* PTH FN */ PINMUX_DATA(VIO0_VD_MARK, PTH7_FN), PINMUX_DATA(VIO0_CLK_MARK, PTH6_FN), PINMUX_DATA(VIO0_D7_MARK, PTH5_FN), PINMUX_DATA(VIO0_D6_MARK, PTH4_FN), PINMUX_DATA(VIO0_D5_MARK, PTH3_FN), PINMUX_DATA(VIO0_D4_MARK, PTH2_FN), PINMUX_DATA(VIO0_D3_MARK, PTH1_FN), PINMUX_DATA(VIO0_D2_MARK, PTH0_FN), /* PTJ FN */ PINMUX_DATA(PDSTATUS_MARK, PTJ7_FN), PINMUX_DATA(STATUS2_MARK, PTJ6_FN), PINMUX_DATA(STATUS0_MARK, PTJ5_FN), PINMUX_DATA(A25_MARK, PSA8_0, PTJ3_FN), PINMUX_DATA(BS_MARK, PSA8_1, PTJ3_FN), PINMUX_DATA(A24_MARK, PTJ2_FN), PINMUX_DATA(A23_MARK, PTJ1_FN), PINMUX_DATA(A22_MARK, PTJ0_FN), /* PTK FN */ PINMUX_DATA(VIO1_D5_MARK, PSB7_0, PSB6_0, PTK7_FN), PINMUX_DATA(VIO1_D4_MARK, PSB7_0, PSB6_0, PTK6_FN), PINMUX_DATA(VIO1_D3_MARK, PSB7_0, PSB6_0, PTK5_FN), PINMUX_DATA(VIO1_D2_MARK, PSB7_0, PSB6_0, PTK4_FN), PINMUX_DATA(VIO1_D1_MARK, PSB7_0, PSB6_0, PTK3_FN), PINMUX_DATA(VIO1_D0_MARK, PSB7_0, PSB6_0, PTK2_FN), PINMUX_DATA(VIO0_D13_MARK, PSB7_0, PSB6_1, PTK7_FN), PINMUX_DATA(VIO0_D12_MARK, PSB7_0, PSB6_1, PTK6_FN), PINMUX_DATA(VIO0_D11_MARK, PSB7_0, PSB6_1, PTK5_FN), PINMUX_DATA(VIO0_D10_MARK, PSB7_0, PSB6_1, PTK4_FN), PINMUX_DATA(VIO0_D9_MARK, PSB7_0, PSB6_1, PTK3_FN), PINMUX_DATA(VIO0_D8_MARK, PSB7_0, PSB6_1, PTK2_FN), PINMUX_DATA(IDED5_MARK, PSB7_1, PSB6_0, PTK7_FN), PINMUX_DATA(IDED4_MARK, PSB7_1, PSB6_0, PTK6_FN), PINMUX_DATA(IDED3_MARK, PSB7_1, PSB6_0, PTK5_FN), PINMUX_DATA(IDED2_MARK, PSB7_1, PSB6_0, PTK4_FN), PINMUX_DATA(IDED1_MARK, PSB7_1, PSB6_0, PTK3_FN), PINMUX_DATA(IDED0_MARK, PSB7_1, PSB6_0, PTK2_FN), PINMUX_DATA(VIO0_FLD_MARK, PTK1_FN), PINMUX_DATA(VIO0_HD_MARK, PTK0_FN), /* PTL FN */ PINMUX_DATA(DV_D5_MARK, PSB9_0, PSB8_0, PTL7_FN), PINMUX_DATA(DV_D4_MARK, PSB9_0, PSB8_0, PTL6_FN), PINMUX_DATA(DV_D3_MARK, PSE7_0, PSE6_0, PTL5_FN), PINMUX_DATA(DV_D2_MARK, PSC9_0, PSC8_0, PTL4_FN), 
PINMUX_DATA(DV_D1_MARK, PSC9_0, PSC8_0, PTL3_FN), PINMUX_DATA(DV_D0_MARK, PSC9_0, PSC8_0, PTL2_FN), PINMUX_DATA(DV_D15_MARK, PSD4_0, PTL1_FN), PINMUX_DATA(DV_D14_MARK, PSE5_0, PSE4_0, PTL0_FN), PINMUX_DATA(SCIF3_V_SCK_MARK, PSB9_0, PSB8_1, PTL7_FN), PINMUX_DATA(SCIF3_V_RXD_MARK, PSB9_0, PSB8_1, PTL6_FN), PINMUX_DATA(SCIF3_V_TXD_MARK, PSE7_0, PSE6_1, PTL5_FN), PINMUX_DATA(SCIF1_SCK_MARK, PSC9_0, PSC8_1, PTL4_FN), PINMUX_DATA(SCIF1_RXD_MARK, PSC9_0, PSC8_1, PTL3_FN), PINMUX_DATA(SCIF1_TXD_MARK, PSC9_0, PSC8_1, PTL2_FN), PINMUX_DATA(RMII_RXD0_MARK, PSB9_1, PSB8_0, PTL7_FN), PINMUX_DATA(RMII_RXD1_MARK, PSB9_1, PSB8_0, PTL6_FN), PINMUX_DATA(RMII_REF_CLK_MARK, PSE7_1, PSE6_0, PTL5_FN), PINMUX_DATA(RMII_TX_EN_MARK, PSC9_1, PSC8_0, PTL4_FN), PINMUX_DATA(RMII_TXD0_MARK, PSC9_1, PSC8_0, PTL3_FN), PINMUX_DATA(RMII_TXD1_MARK, PSC9_1, PSC8_0, PTL2_FN), PINMUX_DATA(MSIOF0_MCK_MARK, PSE5_0, PSE4_1, PTL0_FN), /* PTM FN */ PINMUX_DATA(DV_D13_MARK, PSC13_0, PSC12_0, PTM7_FN), PINMUX_DATA(DV_D12_MARK, PSC13_0, PSC12_0, PTM6_FN), PINMUX_DATA(DV_D11_MARK, PSC13_0, PSC12_0, PTM5_FN), PINMUX_DATA(DV_D10_MARK, PSC13_0, PSC12_0, PTM4_FN), PINMUX_DATA(DV_D9_MARK, PSC11_0, PSC10_0, PTM3_FN), PINMUX_DATA(DV_D8_MARK, PSC11_0, PSC10_0, PTM2_FN), PINMUX_DATA(MSIOF0_TSCK_MARK, PSC13_0, PSC12_1, PTM7_FN), PINMUX_DATA(MSIOF0_RXD_MARK, PSC13_0, PSC12_1, PTM6_FN), PINMUX_DATA(MSIOF0_TXD_MARK, PSC13_0, PSC12_1, PTM5_FN), PINMUX_DATA(MSIOF0_TSYNC_MARK, PSC13_0, PSC12_1, PTM4_FN), PINMUX_DATA(MSIOF0_SS1_MARK, PSC11_0, PSC10_1, PTM3_FN), PINMUX_DATA(MSIOF0_RSCK_MARK, PSC11_1, PSC10_0, PTM3_FN), PINMUX_DATA(MSIOF0_SS2_MARK, PSC11_0, PSC10_1, PTM2_FN), PINMUX_DATA(MSIOF0_RSYNC_MARK, PSC11_1, PSC10_0, PTM2_FN), PINMUX_DATA(LCDVCPWC_MARK, PSA6_0, PTM1_FN), PINMUX_DATA(LCDRD_MARK, PSA7_0, PTM0_FN), PINMUX_DATA(SCIF0_RXD_MARK, PSA6_1, PTM1_FN), PINMUX_DATA(SCIF0_SCK_MARK, PSA7_1, PTM0_FN), /* PTN FN */ PINMUX_DATA(VIO0_D1_MARK, PTN7_FN), PINMUX_DATA(VIO0_D0_MARK, PTN6_FN), PINMUX_DATA(DV_CLKI_MARK, PSD11_0, 
PTN5_FN), PINMUX_DATA(DV_CLK_MARK, PSD13_0, PSD12_0, PTN4_FN), PINMUX_DATA(DV_VSYNC_MARK, PSD15_0, PSD14_0, PTN3_FN), PINMUX_DATA(DV_HSYNC_MARK, PSB5_0, PSB4_0, PTN2_FN), PINMUX_DATA(DV_D7_MARK, PSB3_0, PSB2_0, PTN1_FN), PINMUX_DATA(DV_D6_MARK, PSB1_0, PSB0_0, PTN0_FN), PINMUX_DATA(SCIF2_V_SCK_MARK, PSD13_0, PSD12_1, PTN4_FN), PINMUX_DATA(SCIF2_V_RXD_MARK, PSD15_0, PSD14_1, PTN3_FN), PINMUX_DATA(SCIF2_V_TXD_MARK, PSB5_0, PSB4_1, PTN2_FN), PINMUX_DATA(SCIF3_V_CTS_MARK, PSB3_0, PSB2_1, PTN1_FN), PINMUX_DATA(SCIF3_V_RTS_MARK, PSB1_0, PSB0_1, PTN0_FN), PINMUX_DATA(RMII_RX_ER_MARK, PSB3_1, PSB2_0, PTN1_FN), PINMUX_DATA(RMII_CRS_DV_MARK, PSB1_1, PSB0_0, PTN0_FN), /* PTQ FN */ PINMUX_DATA(D7_MARK, PTQ7_FN), PINMUX_DATA(D6_MARK, PTQ6_FN), PINMUX_DATA(D5_MARK, PTQ5_FN), PINMUX_DATA(D4_MARK, PTQ4_FN), PINMUX_DATA(D3_MARK, PTQ3_FN), PINMUX_DATA(D2_MARK, PTQ2_FN), PINMUX_DATA(D1_MARK, PTQ1_FN), PINMUX_DATA(D0_MARK, PTQ0_FN), /* PTR FN */ PINMUX_DATA(CS6B_CE1B_MARK, PTR7_FN), PINMUX_DATA(CS6A_CE2B_MARK, PTR6_FN), PINMUX_DATA(CS5B_CE1A_MARK, PTR5_FN), PINMUX_DATA(CS5A_CE2A_MARK, PTR4_FN), PINMUX_DATA(IOIS16_MARK, PSA5_0, PTR3_FN), PINMUX_DATA(WAIT_MARK, PTR2_FN), PINMUX_DATA(WE3_ICIOWR_MARK, PSA1_0, PSA0_0, PTR1_FN), PINMUX_DATA(WE2_ICIORD_MARK, PSD1_0, PSD0_0, PTR0_FN), PINMUX_DATA(LCDLCLK_MARK, PSA5_1, PTR3_FN), PINMUX_DATA(IDEA2_MARK, PSD1_1, PSD0_0, PTR0_FN), PINMUX_DATA(TPUTO3_MARK, PSA1_0, PSA0_1, PTR1_FN), PINMUX_DATA(TPUTI3_MARK, PSA1_1, PSA0_0, PTR1_FN), PINMUX_DATA(TPUTO2_MARK, PSD1_0, PSD0_1, PTR0_FN), /* PTS FN */ PINMUX_DATA(VIO_CKO_MARK, PTS6_FN), PINMUX_DATA(TPUTI2_MARK, PSE9_0, PSE8_1, PTS5_FN), PINMUX_DATA(IDEIORDY_MARK, PSE9_1, PSE8_0, PTS5_FN), PINMUX_DATA(VIO1_FLD_MARK, PSE9_0, PSE8_0, PTS5_FN), PINMUX_DATA(VIO1_HD_MARK, PSA10_0, PTS4_FN), PINMUX_DATA(VIO1_VD_MARK, PSA9_0, PTS3_FN), PINMUX_DATA(VIO1_CLK_MARK, PSA9_0, PTS2_FN), PINMUX_DATA(VIO1_D7_MARK, PSB7_0, PSB6_0, PTS1_FN), PINMUX_DATA(VIO1_D6_MARK, PSB7_0, PSB6_0, PTS0_FN), PINMUX_DATA(SCIF5_SCK_MARK, 
PSA10_1, PTS4_FN), PINMUX_DATA(SCIF5_RXD_MARK, PSA9_1, PTS3_FN), PINMUX_DATA(SCIF5_TXD_MARK, PSA9_1, PTS2_FN), PINMUX_DATA(VIO0_D15_MARK, PSB7_0, PSB6_1, PTS1_FN), PINMUX_DATA(VIO0_D14_MARK, PSB7_0, PSB6_1, PTS0_FN), PINMUX_DATA(IDED7_MARK, PSB7_1, PSB6_0, PTS1_FN), PINMUX_DATA(IDED6_MARK, PSB7_1, PSB6_0, PTS0_FN), /* PTT FN */ PINMUX_DATA(D15_MARK, PTT7_FN), PINMUX_DATA(D14_MARK, PTT6_FN), PINMUX_DATA(D13_MARK, PTT5_FN), PINMUX_DATA(D12_MARK, PTT4_FN), PINMUX_DATA(D11_MARK, PTT3_FN), PINMUX_DATA(D10_MARK, PTT2_FN), PINMUX_DATA(D9_MARK, PTT1_FN), PINMUX_DATA(D8_MARK, PTT0_FN), /* PTU FN */ PINMUX_DATA(DMAC_DACK0_MARK, PTU7_FN), PINMUX_DATA(DMAC_DREQ0_MARK, PTU6_FN), PINMUX_DATA(FSIOASD_MARK, PSE1_0, PTU5_FN), PINMUX_DATA(FSIIABCK_MARK, PSE1_0, PTU4_FN), PINMUX_DATA(FSIIALRCK_MARK, PSE1_0, PTU3_FN), PINMUX_DATA(FSIOABCK_MARK, PSE1_0, PTU2_FN), PINMUX_DATA(FSIOALRCK_MARK, PSE1_0, PTU1_FN), PINMUX_DATA(CLKAUDIOAO_MARK, PSE0_0, PTU0_FN), /* PTV FN */ PINMUX_DATA(FSIIBSD_MARK, PSD7_0, PSD6_0, PTV7_FN), PINMUX_DATA(FSIOBSD_MARK, PSD7_0, PSD6_0, PTV6_FN), PINMUX_DATA(FSIIBBCK_MARK, PSC15_0, PSC14_0, PTV5_FN), PINMUX_DATA(FSIIBLRCK_MARK, PSC15_0, PSC14_0, PTV4_FN), PINMUX_DATA(FSIOBBCK_MARK, PSC15_0, PSC14_0, PTV3_FN), PINMUX_DATA(FSIOBLRCK_MARK, PSC15_0, PSC14_0, PTV2_FN), PINMUX_DATA(CLKAUDIOBO_MARK, PSE3_0, PSE2_0, PTV1_FN), PINMUX_DATA(FSIIASD_MARK, PSE10_0, PTV0_FN), PINMUX_DATA(MSIOF1_SS2_MARK, PSD7_0, PSD6_1, PTV7_FN), PINMUX_DATA(MSIOF1_RSYNC_MARK, PSD7_1, PSD6_0, PTV7_FN), PINMUX_DATA(MSIOF1_SS1_MARK, PSD7_0, PSD6_1, PTV6_FN), PINMUX_DATA(MSIOF1_RSCK_MARK, PSD7_1, PSD6_0, PTV6_FN), PINMUX_DATA(MSIOF1_RXD_MARK, PSC15_0, PSC14_1, PTV5_FN), PINMUX_DATA(MSIOF1_TSYNC_MARK, PSC15_0, PSC14_1, PTV4_FN), PINMUX_DATA(MSIOF1_TSCK_MARK, PSC15_0, PSC14_1, PTV3_FN), PINMUX_DATA(MSIOF1_TXD_MARK, PSC15_0, PSC14_1, PTV2_FN), PINMUX_DATA(MSIOF1_MCK_MARK, PSE3_0, PSE2_1, PTV1_FN), /* PTW FN */ PINMUX_DATA(MMC_D7_MARK, PSE13_0, PSE12_0, PTW7_FN), PINMUX_DATA(MMC_D6_MARK, PSE13_0, 
PSE12_0, PTW6_FN), PINMUX_DATA(MMC_D5_MARK, PSE13_0, PSE12_0, PTW5_FN), PINMUX_DATA(MMC_D4_MARK, PSE13_0, PSE12_0, PTW4_FN), PINMUX_DATA(MMC_D3_MARK, PSA13_0, PTW3_FN), PINMUX_DATA(MMC_D2_MARK, PSA13_0, PTW2_FN), PINMUX_DATA(MMC_D1_MARK, PSA13_0, PTW1_FN), PINMUX_DATA(MMC_D0_MARK, PSA13_0, PTW0_FN), PINMUX_DATA(SDHI1CD_MARK, PSE13_0, PSE12_1, PTW7_FN), PINMUX_DATA(SDHI1WP_MARK, PSE13_0, PSE12_1, PTW6_FN), PINMUX_DATA(SDHI1D3_MARK, PSE13_0, PSE12_1, PTW5_FN), PINMUX_DATA(SDHI1D2_MARK, PSE13_0, PSE12_1, PTW4_FN), PINMUX_DATA(SDHI1D1_MARK, PSA13_1, PTW3_FN), PINMUX_DATA(SDHI1D0_MARK, PSA13_1, PTW2_FN), PINMUX_DATA(SDHI1CMD_MARK, PSA13_1, PTW1_FN), PINMUX_DATA(SDHI1CLK_MARK, PSA13_1, PTW0_FN), PINMUX_DATA(IODACK_MARK, PSE13_1, PSE12_0, PTW7_FN), PINMUX_DATA(IDERST_MARK, PSE13_1, PSE12_0, PTW6_FN), PINMUX_DATA(EXBUF_ENB_MARK, PSE13_1, PSE12_0, PTW5_FN), PINMUX_DATA(DIRECTION_MARK, PSE13_1, PSE12_0, PTW4_FN), /* PTX FN */ PINMUX_DATA(DMAC_DACK1_MARK, PSA12_0, PTX7_FN), PINMUX_DATA(DMAC_DREQ1_MARK, PSA12_0, PTX6_FN), PINMUX_DATA(IRDA_OUT_MARK, PSA12_1, PTX7_FN), PINMUX_DATA(IRDA_IN_MARK, PSA12_1, PTX6_FN), PINMUX_DATA(TSIF_TS0_SDAT_MARK, PSC0_0, PTX5_FN), PINMUX_DATA(TSIF_TS0_SCK_MARK, PSC1_0, PTX4_FN), PINMUX_DATA(TSIF_TS0_SDEN_MARK, PSC2_0, PTX3_FN), PINMUX_DATA(TSIF_TS0_SPSYNC_MARK, PTX2_FN), PINMUX_DATA(LNKSTA_MARK, PSC0_1, PTX5_FN), PINMUX_DATA(MDIO_MARK, PSC1_1, PTX4_FN), PINMUX_DATA(MDC_MARK, PSC2_1, PTX3_FN), PINMUX_DATA(MMC_CLK_MARK, PTX1_FN), PINMUX_DATA(MMC_CMD_MARK, PTX0_FN), /* PTY FN */ PINMUX_DATA(SDHI0CD_MARK, PTY7_FN), PINMUX_DATA(SDHI0WP_MARK, PTY6_FN), PINMUX_DATA(SDHI0D3_MARK, PTY5_FN), PINMUX_DATA(SDHI0D2_MARK, PTY4_FN), PINMUX_DATA(SDHI0D1_MARK, PTY3_FN), PINMUX_DATA(SDHI0D0_MARK, PTY2_FN), PINMUX_DATA(SDHI0CMD_MARK, PTY1_FN), PINMUX_DATA(SDHI0CLK_MARK, PTY0_FN), /* PTZ FN */ PINMUX_DATA(INTC_IRQ7_MARK, PSB10_0, PTZ7_FN), PINMUX_DATA(INTC_IRQ6_MARK, PSB11_0, PTZ6_FN), PINMUX_DATA(INTC_IRQ5_MARK, PSB12_0, PTZ5_FN), PINMUX_DATA(INTC_IRQ4_MARK, PSB13_0, 
PTZ4_FN), PINMUX_DATA(INTC_IRQ3_MARK, PSB14_0, PTZ3_FN), PINMUX_DATA(INTC_IRQ2_MARK, PTZ2_FN), PINMUX_DATA(INTC_IRQ1_MARK, PTZ1_FN), PINMUX_DATA(INTC_IRQ0_MARK, PTZ0_FN), PINMUX_DATA(SCIF3_I_CTS_MARK, PSB10_1, PTZ7_FN), PINMUX_DATA(SCIF3_I_RTS_MARK, PSB11_1, PTZ6_FN), PINMUX_DATA(SCIF3_I_SCK_MARK, PSB12_1, PTZ5_FN), PINMUX_DATA(SCIF3_I_RXD_MARK, PSB13_1, PTZ4_FN), PINMUX_DATA(SCIF3_I_TXD_MARK, PSB14_1, PTZ3_FN), }; static struct pinmux_gpio pinmux_gpios[] = { /* PTA */ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA), PINMUX_GPIO(GPIO_PTA6, PTA6_DATA), PINMUX_GPIO(GPIO_PTA5, PTA5_DATA), PINMUX_GPIO(GPIO_PTA4, PTA4_DATA), PINMUX_GPIO(GPIO_PTA3, PTA3_DATA), PINMUX_GPIO(GPIO_PTA2, PTA2_DATA), PINMUX_GPIO(GPIO_PTA1, PTA1_DATA), PINMUX_GPIO(GPIO_PTA0, PTA0_DATA), /* PTB */ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA), PINMUX_GPIO(GPIO_PTB6, PTB6_DATA), PINMUX_GPIO(GPIO_PTB5, PTB5_DATA), PINMUX_GPIO(GPIO_PTB4, PTB4_DATA), PINMUX_GPIO(GPIO_PTB3, PTB3_DATA), PINMUX_GPIO(GPIO_PTB2, PTB2_DATA), PINMUX_GPIO(GPIO_PTB1, PTB1_DATA), PINMUX_GPIO(GPIO_PTB0, PTB0_DATA), /* PTC */ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA), PINMUX_GPIO(GPIO_PTC6, PTC6_DATA), PINMUX_GPIO(GPIO_PTC5, PTC5_DATA), PINMUX_GPIO(GPIO_PTC4, PTC4_DATA), PINMUX_GPIO(GPIO_PTC3, PTC3_DATA), PINMUX_GPIO(GPIO_PTC2, PTC2_DATA), PINMUX_GPIO(GPIO_PTC1, PTC1_DATA), PINMUX_GPIO(GPIO_PTC0, PTC0_DATA), /* PTD */ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA), PINMUX_GPIO(GPIO_PTD6, PTD6_DATA), PINMUX_GPIO(GPIO_PTD5, PTD5_DATA), PINMUX_GPIO(GPIO_PTD4, PTD4_DATA), PINMUX_GPIO(GPIO_PTD3, PTD3_DATA), PINMUX_GPIO(GPIO_PTD2, PTD2_DATA), PINMUX_GPIO(GPIO_PTD1, PTD1_DATA), PINMUX_GPIO(GPIO_PTD0, PTD0_DATA), /* PTE */ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA), PINMUX_GPIO(GPIO_PTE6, PTE6_DATA), PINMUX_GPIO(GPIO_PTE5, PTE5_DATA), PINMUX_GPIO(GPIO_PTE4, PTE4_DATA), PINMUX_GPIO(GPIO_PTE3, PTE3_DATA), PINMUX_GPIO(GPIO_PTE2, PTE2_DATA), PINMUX_GPIO(GPIO_PTE1, PTE1_DATA), PINMUX_GPIO(GPIO_PTE0, PTE0_DATA), /* PTF */ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA), PINMUX_GPIO(GPIO_PTF6, PTF6_DATA), 
PINMUX_GPIO(GPIO_PTF5, PTF5_DATA), PINMUX_GPIO(GPIO_PTF4, PTF4_DATA), PINMUX_GPIO(GPIO_PTF3, PTF3_DATA), PINMUX_GPIO(GPIO_PTF2, PTF2_DATA), PINMUX_GPIO(GPIO_PTF1, PTF1_DATA), PINMUX_GPIO(GPIO_PTF0, PTF0_DATA), /* PTG */ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA), PINMUX_GPIO(GPIO_PTG4, PTG4_DATA), PINMUX_GPIO(GPIO_PTG3, PTG3_DATA), PINMUX_GPIO(GPIO_PTG2, PTG2_DATA), PINMUX_GPIO(GPIO_PTG1, PTG1_DATA), PINMUX_GPIO(GPIO_PTG0, PTG0_DATA), /* PTH */ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA), PINMUX_GPIO(GPIO_PTH6, PTH6_DATA), PINMUX_GPIO(GPIO_PTH5, PTH5_DATA), PINMUX_GPIO(GPIO_PTH4, PTH4_DATA), PINMUX_GPIO(GPIO_PTH3, PTH3_DATA), PINMUX_GPIO(GPIO_PTH2, PTH2_DATA), PINMUX_GPIO(GPIO_PTH1, PTH1_DATA), PINMUX_GPIO(GPIO_PTH0, PTH0_DATA), /* PTJ */ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA), PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA), PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA), PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA), PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA), PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA), PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA), /* PTK */ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA), PINMUX_GPIO(GPIO_PTK6, PTK6_DATA), PINMUX_GPIO(GPIO_PTK5, PTK5_DATA), PINMUX_GPIO(GPIO_PTK4, PTK4_DATA), PINMUX_GPIO(GPIO_PTK3, PTK3_DATA), PINMUX_GPIO(GPIO_PTK2, PTK2_DATA), PINMUX_GPIO(GPIO_PTK1, PTK1_DATA), PINMUX_GPIO(GPIO_PTK0, PTK0_DATA), /* PTL */ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA), PINMUX_GPIO(GPIO_PTL6, PTL6_DATA), PINMUX_GPIO(GPIO_PTL5, PTL5_DATA), PINMUX_GPIO(GPIO_PTL4, PTL4_DATA), PINMUX_GPIO(GPIO_PTL3, PTL3_DATA), PINMUX_GPIO(GPIO_PTL2, PTL2_DATA), PINMUX_GPIO(GPIO_PTL1, PTL1_DATA), PINMUX_GPIO(GPIO_PTL0, PTL0_DATA), /* PTM */ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA), PINMUX_GPIO(GPIO_PTM6, PTM6_DATA), PINMUX_GPIO(GPIO_PTM5, PTM5_DATA), PINMUX_GPIO(GPIO_PTM4, PTM4_DATA), PINMUX_GPIO(GPIO_PTM3, PTM3_DATA), PINMUX_GPIO(GPIO_PTM2, PTM2_DATA), PINMUX_GPIO(GPIO_PTM1, PTM1_DATA), PINMUX_GPIO(GPIO_PTM0, PTM0_DATA), /* PTN */ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA), PINMUX_GPIO(GPIO_PTN6, PTN6_DATA), PINMUX_GPIO(GPIO_PTN5, PTN5_DATA), PINMUX_GPIO(GPIO_PTN4, PTN4_DATA), 
PINMUX_GPIO(GPIO_PTN3, PTN3_DATA), PINMUX_GPIO(GPIO_PTN2, PTN2_DATA), PINMUX_GPIO(GPIO_PTN1, PTN1_DATA), PINMUX_GPIO(GPIO_PTN0, PTN0_DATA), /* PTQ */ PINMUX_GPIO(GPIO_PTQ7, PTQ7_DATA), PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA), PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA), PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA), PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA), PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA), PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA), PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA), /* PTR */ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA), PINMUX_GPIO(GPIO_PTR6, PTR6_DATA), PINMUX_GPIO(GPIO_PTR5, PTR5_DATA), PINMUX_GPIO(GPIO_PTR4, PTR4_DATA), PINMUX_GPIO(GPIO_PTR3, PTR3_DATA), PINMUX_GPIO(GPIO_PTR2, PTR2_DATA), PINMUX_GPIO(GPIO_PTR1, PTR1_DATA), PINMUX_GPIO(GPIO_PTR0, PTR0_DATA), /* PTS */ PINMUX_GPIO(GPIO_PTS6, PTS6_DATA), PINMUX_GPIO(GPIO_PTS5, PTS5_DATA), PINMUX_GPIO(GPIO_PTS4, PTS4_DATA), PINMUX_GPIO(GPIO_PTS3, PTS3_DATA), PINMUX_GPIO(GPIO_PTS2, PTS2_DATA), PINMUX_GPIO(GPIO_PTS1, PTS1_DATA), PINMUX_GPIO(GPIO_PTS0, PTS0_DATA), /* PTT */ PINMUX_GPIO(GPIO_PTT7, PTT7_DATA), PINMUX_GPIO(GPIO_PTT6, PTT6_DATA), PINMUX_GPIO(GPIO_PTT5, PTT5_DATA), PINMUX_GPIO(GPIO_PTT4, PTT4_DATA), PINMUX_GPIO(GPIO_PTT3, PTT3_DATA), PINMUX_GPIO(GPIO_PTT2, PTT2_DATA), PINMUX_GPIO(GPIO_PTT1, PTT1_DATA), PINMUX_GPIO(GPIO_PTT0, PTT0_DATA), /* PTU */ PINMUX_GPIO(GPIO_PTU7, PTU7_DATA), PINMUX_GPIO(GPIO_PTU6, PTU6_DATA), PINMUX_GPIO(GPIO_PTU5, PTU5_DATA), PINMUX_GPIO(GPIO_PTU4, PTU4_DATA), PINMUX_GPIO(GPIO_PTU3, PTU3_DATA), PINMUX_GPIO(GPIO_PTU2, PTU2_DATA), PINMUX_GPIO(GPIO_PTU1, PTU1_DATA), PINMUX_GPIO(GPIO_PTU0, PTU0_DATA), /* PTV */ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA), PINMUX_GPIO(GPIO_PTV6, PTV6_DATA), PINMUX_GPIO(GPIO_PTV5, PTV5_DATA), PINMUX_GPIO(GPIO_PTV4, PTV4_DATA), PINMUX_GPIO(GPIO_PTV3, PTV3_DATA), PINMUX_GPIO(GPIO_PTV2, PTV2_DATA), PINMUX_GPIO(GPIO_PTV1, PTV1_DATA), PINMUX_GPIO(GPIO_PTV0, PTV0_DATA), /* PTW */ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA), PINMUX_GPIO(GPIO_PTW6, PTW6_DATA), PINMUX_GPIO(GPIO_PTW5, PTW5_DATA), PINMUX_GPIO(GPIO_PTW4, PTW4_DATA), 
PINMUX_GPIO(GPIO_PTW3, PTW3_DATA), PINMUX_GPIO(GPIO_PTW2, PTW2_DATA), PINMUX_GPIO(GPIO_PTW1, PTW1_DATA), PINMUX_GPIO(GPIO_PTW0, PTW0_DATA), /* PTX */ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA), PINMUX_GPIO(GPIO_PTX6, PTX6_DATA), PINMUX_GPIO(GPIO_PTX5, PTX5_DATA), PINMUX_GPIO(GPIO_PTX4, PTX4_DATA), PINMUX_GPIO(GPIO_PTX3, PTX3_DATA), PINMUX_GPIO(GPIO_PTX2, PTX2_DATA), PINMUX_GPIO(GPIO_PTX1, PTX1_DATA), PINMUX_GPIO(GPIO_PTX0, PTX0_DATA), /* PTY */ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA), PINMUX_GPIO(GPIO_PTY6, PTY6_DATA), PINMUX_GPIO(GPIO_PTY5, PTY5_DATA), PINMUX_GPIO(GPIO_PTY4, PTY4_DATA), PINMUX_GPIO(GPIO_PTY3, PTY3_DATA), PINMUX_GPIO(GPIO_PTY2, PTY2_DATA), PINMUX_GPIO(GPIO_PTY1, PTY1_DATA), PINMUX_GPIO(GPIO_PTY0, PTY0_DATA), /* PTZ */ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA), PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA), PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA), PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA), PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA), PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA), PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA), PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA), /* BSC */ PINMUX_GPIO(GPIO_FN_D31, D31_MARK), PINMUX_GPIO(GPIO_FN_D30, D30_MARK), PINMUX_GPIO(GPIO_FN_D29, D29_MARK), PINMUX_GPIO(GPIO_FN_D28, D28_MARK), PINMUX_GPIO(GPIO_FN_D27, D27_MARK), PINMUX_GPIO(GPIO_FN_D26, D26_MARK), PINMUX_GPIO(GPIO_FN_D25, D25_MARK), PINMUX_GPIO(GPIO_FN_D24, D24_MARK), PINMUX_GPIO(GPIO_FN_D23, D23_MARK), PINMUX_GPIO(GPIO_FN_D22, D22_MARK), PINMUX_GPIO(GPIO_FN_D21, D21_MARK), PINMUX_GPIO(GPIO_FN_D20, D20_MARK), PINMUX_GPIO(GPIO_FN_D19, D19_MARK), PINMUX_GPIO(GPIO_FN_D18, D18_MARK), PINMUX_GPIO(GPIO_FN_D17, D17_MARK), PINMUX_GPIO(GPIO_FN_D16, D16_MARK), PINMUX_GPIO(GPIO_FN_D15, D15_MARK), PINMUX_GPIO(GPIO_FN_D14, D14_MARK), PINMUX_GPIO(GPIO_FN_D13, D13_MARK), PINMUX_GPIO(GPIO_FN_D12, D12_MARK), PINMUX_GPIO(GPIO_FN_D11, D11_MARK), PINMUX_GPIO(GPIO_FN_D10, D10_MARK), PINMUX_GPIO(GPIO_FN_D9, D9_MARK), PINMUX_GPIO(GPIO_FN_D8, D8_MARK), PINMUX_GPIO(GPIO_FN_D7, D7_MARK), PINMUX_GPIO(GPIO_FN_D6, D6_MARK), PINMUX_GPIO(GPIO_FN_D5, D5_MARK), 
PINMUX_GPIO(GPIO_FN_D4, D4_MARK), PINMUX_GPIO(GPIO_FN_D3, D3_MARK), PINMUX_GPIO(GPIO_FN_D2, D2_MARK), PINMUX_GPIO(GPIO_FN_D1, D1_MARK), PINMUX_GPIO(GPIO_FN_D0, D0_MARK), PINMUX_GPIO(GPIO_FN_A25, A25_MARK), PINMUX_GPIO(GPIO_FN_A24, A24_MARK), PINMUX_GPIO(GPIO_FN_A23, A23_MARK), PINMUX_GPIO(GPIO_FN_A22, A22_MARK), PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK), PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK), PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK), PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK), PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK), PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK), PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK), PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK), PINMUX_GPIO(GPIO_FN_BS, BS_MARK), /* KEYSC */ PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK), PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK), PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK), PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK), PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK), PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK), /* ATAPI */ PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK), PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK), PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK), PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK), PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK), PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK), PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK), PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK), PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK), PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK), PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK), PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK), PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK), PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK), PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK), PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK), PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK), PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK), PINMUX_GPIO(GPIO_FN_IDEA0, 
IDEA0_MARK), PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK), PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK), PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK), PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK), PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK), PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK), PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK), PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK), PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK), PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK), PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK), /* TPU */ PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK), PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK), PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK), PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK), PINMUX_GPIO(GPIO_FN_TPUTI3, TPUTI3_MARK), PINMUX_GPIO(GPIO_FN_TPUTI2, TPUTI2_MARK), /* LCDC */ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK), PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK), PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK), PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK), PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK), PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK), PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK), PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK), PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK), PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK), PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK), PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK), PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK), PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK), PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK), PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK), PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK), PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK), PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK), PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK), PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK), PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK), PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK), PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK), PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK), PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK), PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK), PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK), PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK), PINMUX_GPIO(GPIO_FN_LCDDON, 
LCDDON_MARK), PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK), PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK), PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK), PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK), PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK), PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK), /* SCIF0 */ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK), /* SCIF1 */ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK), /* SCIF2 */ PINMUX_GPIO(GPIO_FN_SCIF2_L_TXD, SCIF2_L_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_L_SCK, SCIF2_L_SCK_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_L_RXD, SCIF2_L_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_V_TXD, SCIF2_V_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_V_SCK, SCIF2_V_SCK_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_V_RXD, SCIF2_V_RXD_MARK), /* SCIF3 */ PINMUX_GPIO(GPIO_FN_SCIF3_V_SCK, SCIF3_V_SCK_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_V_RXD, SCIF3_V_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_V_TXD, SCIF3_V_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_V_CTS, SCIF3_V_CTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_V_RTS, SCIF3_V_RTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_I_SCK, SCIF3_I_SCK_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_I_RXD, SCIF3_I_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_I_TXD, SCIF3_I_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_I_CTS, SCIF3_I_CTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_I_RTS, SCIF3_I_RTS_MARK), /* SCIF4 */ PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK), PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK), /* SCIF5 */ PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK), PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK), /* FSI */ PINMUX_GPIO(GPIO_FN_FSIMCKB, FSIMCKB_MARK), PINMUX_GPIO(GPIO_FN_FSIMCKA, FSIMCKA_MARK), PINMUX_GPIO(GPIO_FN_FSIOASD, FSIOASD_MARK), PINMUX_GPIO(GPIO_FN_FSIIABCK, FSIIABCK_MARK), PINMUX_GPIO(GPIO_FN_FSIIALRCK, FSIIALRCK_MARK), 
PINMUX_GPIO(GPIO_FN_FSIOABCK, FSIOABCK_MARK), PINMUX_GPIO(GPIO_FN_FSIOALRCK, FSIOALRCK_MARK), PINMUX_GPIO(GPIO_FN_CLKAUDIOAO, CLKAUDIOAO_MARK), PINMUX_GPIO(GPIO_FN_FSIIBSD, FSIIBSD_MARK), PINMUX_GPIO(GPIO_FN_FSIOBSD, FSIOBSD_MARK), PINMUX_GPIO(GPIO_FN_FSIIBBCK, FSIIBBCK_MARK), PINMUX_GPIO(GPIO_FN_FSIIBLRCK, FSIIBLRCK_MARK), PINMUX_GPIO(GPIO_FN_FSIOBBCK, FSIOBBCK_MARK), PINMUX_GPIO(GPIO_FN_FSIOBLRCK, FSIOBLRCK_MARK), PINMUX_GPIO(GPIO_FN_CLKAUDIOBO, CLKAUDIOBO_MARK), PINMUX_GPIO(GPIO_FN_FSIIASD, FSIIASD_MARK), /* AUD */ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK), PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK), PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK), PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK), PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK), PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK), /* VIO */ PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK), /* VIO0 */ PINMUX_GPIO(GPIO_FN_VIO0_D15, VIO0_D15_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D14, VIO0_D14_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D13, VIO0_D13_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D12, VIO0_D12_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D11, VIO0_D11_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D10, VIO0_D10_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D9, VIO0_D9_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D8, VIO0_D8_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D7, VIO0_D7_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D6, VIO0_D6_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D5, VIO0_D5_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D4, VIO0_D4_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D3, VIO0_D3_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D2, VIO0_D2_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D1, VIO0_D1_MARK), PINMUX_GPIO(GPIO_FN_VIO0_D0, VIO0_D0_MARK), PINMUX_GPIO(GPIO_FN_VIO0_VD, VIO0_VD_MARK), PINMUX_GPIO(GPIO_FN_VIO0_CLK, VIO0_CLK_MARK), PINMUX_GPIO(GPIO_FN_VIO0_FLD, VIO0_FLD_MARK), PINMUX_GPIO(GPIO_FN_VIO0_HD, VIO0_HD_MARK), /* VIO1 */ PINMUX_GPIO(GPIO_FN_VIO1_D7, VIO1_D7_MARK), PINMUX_GPIO(GPIO_FN_VIO1_D6, VIO1_D6_MARK), PINMUX_GPIO(GPIO_FN_VIO1_D5, VIO1_D5_MARK), PINMUX_GPIO(GPIO_FN_VIO1_D4, VIO1_D4_MARK), PINMUX_GPIO(GPIO_FN_VIO1_D3, VIO1_D3_MARK), 
PINMUX_GPIO(GPIO_FN_VIO1_D2, VIO1_D2_MARK), PINMUX_GPIO(GPIO_FN_VIO1_D1, VIO1_D1_MARK), PINMUX_GPIO(GPIO_FN_VIO1_D0, VIO1_D0_MARK), PINMUX_GPIO(GPIO_FN_VIO1_FLD, VIO1_FLD_MARK), PINMUX_GPIO(GPIO_FN_VIO1_HD, VIO1_HD_MARK), PINMUX_GPIO(GPIO_FN_VIO1_VD, VIO1_VD_MARK), PINMUX_GPIO(GPIO_FN_VIO1_CLK, VIO1_CLK_MARK), /* Eth */ PINMUX_GPIO(GPIO_FN_RMII_RXD0, RMII_RXD0_MARK), PINMUX_GPIO(GPIO_FN_RMII_RXD1, RMII_RXD1_MARK), PINMUX_GPIO(GPIO_FN_RMII_TXD0, RMII_TXD0_MARK), PINMUX_GPIO(GPIO_FN_RMII_TXD1, RMII_TXD1_MARK), PINMUX_GPIO(GPIO_FN_RMII_REF_CLK, RMII_REF_CLK_MARK), PINMUX_GPIO(GPIO_FN_RMII_TX_EN, RMII_TX_EN_MARK), PINMUX_GPIO(GPIO_FN_RMII_RX_ER, RMII_RX_ER_MARK), PINMUX_GPIO(GPIO_FN_RMII_CRS_DV, RMII_CRS_DV_MARK), PINMUX_GPIO(GPIO_FN_LNKSTA, LNKSTA_MARK), PINMUX_GPIO(GPIO_FN_MDIO, MDIO_MARK), PINMUX_GPIO(GPIO_FN_MDC, MDC_MARK), /* System */ PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK), PINMUX_GPIO(GPIO_FN_STATUS2, STATUS2_MARK), PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK), /* VOU */ PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK), PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK), PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK), PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK), PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK), PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK), PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK), PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK), PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK), PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK), PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK), PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK), PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK), PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK), PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK), PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK), PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK), PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK), PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK), PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK), /* MSIOF0 */ PINMUX_GPIO(GPIO_FN_MSIOF0_RXD, MSIOF0_RXD_MARK), PINMUX_GPIO(GPIO_FN_MSIOF0_TXD, MSIOF0_TXD_MARK), PINMUX_GPIO(GPIO_FN_MSIOF0_MCK, MSIOF0_MCK_MARK), 
PINMUX_GPIO(GPIO_FN_MSIOF0_TSCK, MSIOF0_TSCK_MARK), PINMUX_GPIO(GPIO_FN_MSIOF0_SS1, MSIOF0_SS1_MARK), PINMUX_GPIO(GPIO_FN_MSIOF0_SS2, MSIOF0_SS2_MARK), PINMUX_GPIO(GPIO_FN_MSIOF0_TSYNC, MSIOF0_TSYNC_MARK), PINMUX_GPIO(GPIO_FN_MSIOF0_RSCK, MSIOF0_RSCK_MARK), PINMUX_GPIO(GPIO_FN_MSIOF0_RSYNC, MSIOF0_RSYNC_MARK), /* MSIOF1 */ PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK), PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK), PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK), PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK), PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK), PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK), PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK), PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK), PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK), /* DMAC */ PINMUX_GPIO(GPIO_FN_DMAC_DACK0, DMAC_DACK0_MARK), PINMUX_GPIO(GPIO_FN_DMAC_DREQ0, DMAC_DREQ0_MARK), PINMUX_GPIO(GPIO_FN_DMAC_DACK1, DMAC_DACK1_MARK), PINMUX_GPIO(GPIO_FN_DMAC_DREQ1, DMAC_DREQ1_MARK), /* SDHI0 */ PINMUX_GPIO(GPIO_FN_SDHI0CD, SDHI0CD_MARK), PINMUX_GPIO(GPIO_FN_SDHI0WP, SDHI0WP_MARK), PINMUX_GPIO(GPIO_FN_SDHI0CMD, SDHI0CMD_MARK), PINMUX_GPIO(GPIO_FN_SDHI0CLK, SDHI0CLK_MARK), PINMUX_GPIO(GPIO_FN_SDHI0D3, SDHI0D3_MARK), PINMUX_GPIO(GPIO_FN_SDHI0D2, SDHI0D2_MARK), PINMUX_GPIO(GPIO_FN_SDHI0D1, SDHI0D1_MARK), PINMUX_GPIO(GPIO_FN_SDHI0D0, SDHI0D0_MARK), /* SDHI1 */ PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK), PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK), PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK), PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK), PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK), PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK), PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK), PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK), /* MMC */ PINMUX_GPIO(GPIO_FN_MMC_D7, MMC_D7_MARK), PINMUX_GPIO(GPIO_FN_MMC_D6, MMC_D6_MARK), PINMUX_GPIO(GPIO_FN_MMC_D5, MMC_D5_MARK), PINMUX_GPIO(GPIO_FN_MMC_D4, MMC_D4_MARK), PINMUX_GPIO(GPIO_FN_MMC_D3, MMC_D3_MARK), PINMUX_GPIO(GPIO_FN_MMC_D2, MMC_D2_MARK), 
PINMUX_GPIO(GPIO_FN_MMC_D1, MMC_D1_MARK), PINMUX_GPIO(GPIO_FN_MMC_D0, MMC_D0_MARK), PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK), PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK), /* IrDA */ PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK), PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK), /* TSIF */ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDAT, TSIF_TS0_SDAT_MARK), PINMUX_GPIO(GPIO_FN_TSIF_TS0_SCK, TSIF_TS0_SCK_MARK), PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDEN, TSIF_TS0_SDEN_MARK), PINMUX_GPIO(GPIO_FN_TSIF_TS0_SPSYNC, TSIF_TS0_SPSYNC_MARK), /* IRQ */ PINMUX_GPIO(GPIO_FN_INTC_IRQ7, INTC_IRQ7_MARK), PINMUX_GPIO(GPIO_FN_INTC_IRQ6, INTC_IRQ6_MARK), PINMUX_GPIO(GPIO_FN_INTC_IRQ5, INTC_IRQ5_MARK), PINMUX_GPIO(GPIO_FN_INTC_IRQ4, INTC_IRQ4_MARK), PINMUX_GPIO(GPIO_FN_INTC_IRQ3, INTC_IRQ3_MARK), PINMUX_GPIO(GPIO_FN_INTC_IRQ2, INTC_IRQ2_MARK), PINMUX_GPIO(GPIO_FN_INTC_IRQ1, INTC_IRQ1_MARK), PINMUX_GPIO(GPIO_FN_INTC_IRQ0, INTC_IRQ0_MARK), }; static struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) { PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN, PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN, PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN, PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN, PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN, PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN, PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN, PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN } }, { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) { PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN, PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN, PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN, PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN, PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN, PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN, PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN, PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN } }, { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) { PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN, PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN, PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN, PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN, PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN, PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN, PTC1_FN, 
PTC1_OUT, PTC1_IN_PU, PTC1_IN, PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN } }, { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) { PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN, PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN, PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN, PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN, PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN, PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN, PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN, PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN } }, { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) { PTE7_FN, PTE7_OUT, PTE7_IN_PU, PTE7_IN, PTE6_FN, PTE6_OUT, PTE6_IN_PU, PTE6_IN, PTE5_FN, PTE5_OUT, PTE5_IN_PU, PTE5_IN, PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN, PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN, PTE2_FN, PTE2_OUT, PTE2_IN_PU, PTE2_IN, PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN, PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN } }, { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) { PTF7_FN, PTF7_OUT, PTF7_IN_PU, PTF7_IN, PTF6_FN, PTF6_OUT, PTF6_IN_PU, PTF6_IN, PTF5_FN, PTF5_OUT, PTF5_IN_PU, PTF5_IN, PTF4_FN, PTF4_OUT, PTF4_IN_PU, PTF4_IN, PTF3_FN, PTF3_OUT, PTF3_IN_PU, PTF3_IN, PTF2_FN, PTF2_OUT, PTF2_IN_PU, PTF2_IN, PTF1_FN, PTF1_OUT, PTF1_IN_PU, PTF1_IN, PTF0_FN, PTF0_OUT, PTF0_IN_PU, PTF0_IN } }, { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, PTG5_FN, PTG5_OUT, 0, 0, PTG4_FN, PTG4_OUT, 0, 0, PTG3_FN, PTG3_OUT, 0, 0, PTG2_FN, PTG2_OUT, 0, 0, PTG1_FN, PTG1_OUT, 0, 0, PTG0_FN, PTG0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) { PTH7_FN, PTH7_OUT, PTH7_IN_PU, PTH7_IN, PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN, PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN, PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN, PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN, PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN, PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN, PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN } }, { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) { PTJ7_FN, PTJ7_OUT, 0, 0, PTJ6_FN, PTJ6_OUT, 0, 0, PTJ5_FN, PTJ5_OUT, 0, 0, 0, 0, 0, 0, PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN, PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN, 
PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN, PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN } }, { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) { PTK7_FN, PTK7_OUT, PTK7_IN_PU, PTK7_IN, PTK6_FN, PTK6_OUT, PTK6_IN_PU, PTK6_IN, PTK5_FN, PTK5_OUT, PTK5_IN_PU, PTK5_IN, PTK4_FN, PTK4_OUT, PTK4_IN_PU, PTK4_IN, PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN, PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN, PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN, PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN } }, { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) { PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN, PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN, PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN, PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN, PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN, PTL2_FN, PTL2_OUT, PTL2_IN_PU, PTL2_IN, PTL1_FN, PTL1_OUT, PTL1_IN_PU, PTL1_IN, PTL0_FN, PTL0_OUT, PTL0_IN_PU, PTL0_IN } }, { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) { PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN, PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN, PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN, PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN, PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN, PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN, PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN, PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN } }, { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) { PTN7_FN, PTN7_OUT, PTN7_IN_PU, PTN7_IN, PTN6_FN, PTN6_OUT, PTN6_IN_PU, PTN6_IN, PTN5_FN, PTN5_OUT, PTN5_IN_PU, PTN5_IN, PTN4_FN, PTN4_OUT, PTN4_IN_PU, PTN4_IN, PTN3_FN, PTN3_OUT, PTN3_IN_PU, PTN3_IN, PTN2_FN, PTN2_OUT, PTN2_IN_PU, PTN2_IN, PTN1_FN, PTN1_OUT, PTN1_IN_PU, PTN1_IN, PTN0_FN, PTN0_OUT, PTN0_IN_PU, PTN0_IN } }, { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) { PTQ7_FN, PTQ7_OUT, PTQ7_IN_PU, PTQ7_IN, PTQ6_FN, PTQ6_OUT, PTQ6_IN_PU, PTQ6_IN, PTQ5_FN, PTQ5_OUT, PTQ5_IN_PU, PTQ5_IN, PTQ4_FN, PTQ4_OUT, PTQ4_IN_PU, PTQ4_IN, PTQ3_FN, PTQ3_OUT, PTQ3_IN_PU, PTQ3_IN, PTQ2_FN, PTQ2_OUT, PTQ2_IN_PU, PTQ2_IN, PTQ1_FN, PTQ1_OUT, PTQ1_IN_PU, PTQ1_IN, PTQ0_FN, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN } }, { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) { PTR7_FN, PTR7_OUT, 
PTR7_IN_PU, PTR7_IN, PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN, PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN, PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN, PTR3_FN, 0, PTR3_IN_PU, PTR3_IN, PTR2_FN, 0, PTR2_IN_PU, PTR2_IN, PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN, PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN } }, { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) { 0, 0, 0, 0, PTS6_FN, PTS6_OUT, PTS6_IN_PU, PTS6_IN, PTS5_FN, PTS5_OUT, PTS5_IN_PU, PTS5_IN, PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN, PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN, PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN, PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN, PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN } }, { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) { PTT7_FN, PTT7_OUT, PTT7_IN_PU, PTT7_IN, PTT6_FN, PTT6_OUT, PTT6_IN_PU, PTT6_IN, PTT5_FN, PTT5_OUT, PTT5_IN_PU, PTT5_IN, PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN, PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN, PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN, PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN, PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN } }, { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) { PTU7_FN, PTU7_OUT, PTU7_IN_PU, PTU7_IN, PTU6_FN, PTU6_OUT, PTU6_IN_PU, PTU6_IN, PTU5_FN, PTU5_OUT, PTU5_IN_PU, PTU5_IN, PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN, PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN, PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN, PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN, PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN } }, { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) { PTV7_FN, PTV7_OUT, PTV7_IN_PU, PTV7_IN, PTV6_FN, PTV6_OUT, PTV6_IN_PU, PTV6_IN, PTV5_FN, PTV5_OUT, PTV5_IN_PU, PTV5_IN, PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN, PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN, PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN, PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN, PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN } }, { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) { PTW7_FN, PTW7_OUT, PTW7_IN_PU, PTW7_IN, PTW6_FN, PTW6_OUT, PTW6_IN_PU, PTW6_IN, PTW5_FN, PTW5_OUT, PTW5_IN_PU, PTW5_IN, PTW4_FN, PTW4_OUT, PTW4_IN_PU, PTW4_IN, PTW3_FN, PTW3_OUT, PTW3_IN_PU, PTW3_IN, PTW2_FN, 
PTW2_OUT, PTW2_IN_PU, PTW2_IN, PTW1_FN, PTW1_OUT, PTW1_IN_PU, PTW1_IN, PTW0_FN, PTW0_OUT, PTW0_IN_PU, PTW0_IN } }, { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) { PTX7_FN, PTX7_OUT, PTX7_IN_PU, PTX7_IN, PTX6_FN, PTX6_OUT, PTX6_IN_PU, PTX6_IN, PTX5_FN, PTX5_OUT, PTX5_IN_PU, PTX5_IN, PTX4_FN, PTX4_OUT, PTX4_IN_PU, PTX4_IN, PTX3_FN, PTX3_OUT, PTX3_IN_PU, PTX3_IN, PTX2_FN, PTX2_OUT, PTX2_IN_PU, PTX2_IN, PTX1_FN, PTX1_OUT, PTX1_IN_PU, PTX1_IN, PTX0_FN, PTX0_OUT, PTX0_IN_PU, PTX0_IN } }, { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) { PTY7_FN, PTY7_OUT, PTY7_IN_PU, PTY7_IN, PTY6_FN, PTY6_OUT, PTY6_IN_PU, PTY6_IN, PTY5_FN, PTY5_OUT, PTY5_IN_PU, PTY5_IN, PTY4_FN, PTY4_OUT, PTY4_IN_PU, PTY4_IN, PTY3_FN, PTY3_OUT, PTY3_IN_PU, PTY3_IN, PTY2_FN, PTY2_OUT, PTY2_IN_PU, PTY2_IN, PTY1_FN, PTY1_OUT, PTY1_IN_PU, PTY1_IN, PTY0_FN, PTY0_OUT, PTY0_IN_PU, PTY0_IN } }, { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) { PTZ7_FN, PTZ7_OUT, PTZ7_IN_PU, PTZ7_IN, PTZ6_FN, PTZ6_OUT, PTZ6_IN_PU, PTZ6_IN, PTZ5_FN, PTZ5_OUT, PTZ5_IN_PU, PTZ5_IN, PTZ4_FN, PTZ4_OUT, PTZ4_IN_PU, PTZ4_IN, PTZ3_FN, PTZ3_OUT, PTZ3_IN_PU, PTZ3_IN, PTZ2_FN, PTZ2_OUT, PTZ2_IN_PU, PTZ2_IN, PTZ1_FN, PTZ1_OUT, PTZ1_IN_PU, PTZ1_IN, PTZ0_FN, PTZ0_OUT, PTZ0_IN_PU, PTZ0_IN } }, { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) { PSA15_0, PSA15_1, PSA14_0, PSA14_1, PSA13_0, PSA13_1, PSA12_0, PSA12_1, 0, 0, PSA10_0, PSA10_1, PSA9_0, PSA9_1, PSA8_0, PSA8_1, PSA7_0, PSA7_1, PSA6_0, PSA6_1, PSA5_0, PSA5_1, 0, 0, PSA3_0, PSA3_1, PSA2_0, PSA2_1, PSA1_0, PSA1_1, PSA0_0, PSA0_1} }, { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) { 0, 0, PSB14_0, PSB14_1, PSB13_0, PSB13_1, PSB12_0, PSB12_1, PSB11_0, PSB11_1, PSB10_0, PSB10_1, PSB9_0, PSB9_1, PSB8_0, PSB8_1, PSB7_0, PSB7_1, PSB6_0, PSB6_1, PSB5_0, PSB5_1, PSB4_0, PSB4_1, PSB3_0, PSB3_1, PSB2_0, PSB2_1, PSB1_0, PSB1_1, PSB0_0, PSB0_1} }, { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) { PSC15_0, PSC15_1, PSC14_0, PSC14_1, PSC13_0, PSC13_1, PSC12_0, PSC12_1, PSC11_0, PSC11_1, PSC10_0, PSC10_1, PSC9_0, PSC9_1, 
PSC8_0, PSC8_1, PSC7_0, PSC7_1, PSC6_0, PSC6_1, PSC5_0, PSC5_1, PSC4_0, PSC4_1, 0, 0, PSC2_0, PSC2_1, PSC1_0, PSC1_1, PSC0_0, PSC0_1} }, { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) { PSD15_0, PSD15_1, PSD14_0, PSD14_1, PSD13_0, PSD13_1, PSD12_0, PSD12_1, PSD11_0, PSD11_1, PSD10_0, PSD10_1, PSD9_0, PSD9_1, PSD8_0, PSD8_1, PSD7_0, PSD7_1, PSD6_0, PSD6_1, PSD5_0, PSD5_1, PSD4_0, PSD4_1, PSD3_0, PSD3_1, PSD2_0, PSD2_1, PSD1_0, PSD1_1, PSD0_0, PSD0_1} }, { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) { PSE15_0, PSE15_1, PSE14_0, PSE14_1, PSE13_0, PSE13_1, PSE12_0, PSE12_1, PSE11_0, PSE11_1, PSE10_0, PSE10_1, PSE9_0, PSE9_1, PSE8_0, PSE8_1, PSE7_0, PSE7_1, PSE6_0, PSE6_1, PSE5_0, PSE5_1, PSE4_0, PSE4_1, PSE3_0, PSE3_1, PSE2_0, PSE2_1, PSE1_0, PSE1_1, PSE0_0, PSE0_1} }, {} }; static struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADR", 0xa4050120, 8) { PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA } }, { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) { PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA } }, { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) { PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA } }, { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) { PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA } }, { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) { PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA } }, { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) { PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA } }, { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) { 0, 0, PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA } }, { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) { PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA } }, { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) { PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0, PTJ3_DATA, PTJ2_DATA, 
PTJ1_DATA, PTJ0_DATA } }, { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) { PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA } }, { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) { PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA } }, { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) { PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA } }, { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) { PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA } }, { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) { PTQ7_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA } }, { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) { PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA } }, { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) { 0, PTS6_DATA, PTS5_DATA, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA } }, { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) { PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA } }, { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) { PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA } }, { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) { PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA } }, { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) { PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA } }, { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) { PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA } }, { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) { PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA } }, { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) { PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA } }, { }, }; static struct pinmux_info sh7724_pinmux_info = { .name = "sh7724_pfc", .reserved_id 
= PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .first_gpio = GPIO_PTA7, .last_gpio = GPIO_FN_INTC_IRQ0, .gpios = pinmux_gpios, .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), }; static int __init plat_pinmux_setup(void) { return register_pinmux(&sh7724_pinmux_info); } arch_initcall(plat_pinmux_setup);
gpl-2.0
davidmueller13/flo-1
kernel/gcov/fs.c
10584
19303
/* * This code exports profiling data as debugfs files to userspace. * * Copyright IBM Corp. 2009 * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> * * Uses gcc-internal data definitions. * Based on the gcov-kernel patch by: * Hubertus Franke <frankeh@us.ibm.com> * Nigel Hinds <nhinds@us.ibm.com> * Rajan Ravindran <rajancr@us.ibm.com> * Peter Oberparleiter <oberpar@linux.vnet.ibm.com> * Paul Larson * Yi CDL Yang */ #define pr_fmt(fmt) "gcov: " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/debugfs.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/seq_file.h> #include "gcov.h" /** * struct gcov_node - represents a debugfs entry * @list: list head for child node list * @children: child nodes * @all: list head for list of all nodes * @parent: parent node * @loaded_info: array of pointers to profiling data sets for loaded object * files. * @num_loaded: number of profiling data sets for loaded object files. * @unloaded_info: accumulated copy of profiling data sets for unloaded * object files. Used only when gcov_persist=1. * @dentry: main debugfs entry, either a directory or data file * @links: associated symbolic links * @name: data file basename * * struct gcov_node represents an entity within the gcov/ subdirectory * of debugfs. There are directory and data file nodes. The latter represent * the actual synthesized data file plus any associated symbolic links which * are needed by the gcov tool to work correctly. 
*/ struct gcov_node { struct list_head list; struct list_head children; struct list_head all; struct gcov_node *parent; struct gcov_info **loaded_info; struct gcov_info *unloaded_info; struct dentry *dentry; struct dentry **links; int num_loaded; char name[0]; }; static const char objtree[] = OBJTREE; static const char srctree[] = SRCTREE; static struct gcov_node root_node; static struct dentry *reset_dentry; static LIST_HEAD(all_head); static DEFINE_MUTEX(node_lock); /* If non-zero, keep copies of profiling data for unloaded modules. */ static int gcov_persist = 1; static int __init gcov_persist_setup(char *str) { unsigned long val; if (strict_strtoul(str, 0, &val)) { pr_warning("invalid gcov_persist parameter '%s'\n", str); return 0; } gcov_persist = val; pr_info("setting gcov_persist to %d\n", gcov_persist); return 1; } __setup("gcov_persist=", gcov_persist_setup); /* * seq_file.start() implementation for gcov data files. Note that the * gcov_iterator interface is designed to be more restrictive than seq_file * (no start from arbitrary position, etc.), to simplify the iterator * implementation. */ static void *gcov_seq_start(struct seq_file *seq, loff_t *pos) { loff_t i; gcov_iter_start(seq->private); for (i = 0; i < *pos; i++) { if (gcov_iter_next(seq->private)) return NULL; } return seq->private; } /* seq_file.next() implementation for gcov data files. */ static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos) { struct gcov_iterator *iter = data; if (gcov_iter_next(iter)) return NULL; (*pos)++; return iter; } /* seq_file.show() implementation for gcov data files. */ static int gcov_seq_show(struct seq_file *seq, void *data) { struct gcov_iterator *iter = data; if (gcov_iter_write(iter, seq)) return -EINVAL; return 0; } static void gcov_seq_stop(struct seq_file *seq, void *data) { /* Unused. 
*/ } static const struct seq_operations gcov_seq_ops = { .start = gcov_seq_start, .next = gcov_seq_next, .show = gcov_seq_show, .stop = gcov_seq_stop, }; /* * Return a profiling data set associated with the given node. This is * either a data set for a loaded object file or a data set copy in case * all associated object files have been unloaded. */ static struct gcov_info *get_node_info(struct gcov_node *node) { if (node->num_loaded > 0) return node->loaded_info[0]; return node->unloaded_info; } /* * Return a newly allocated profiling data set which contains the sum of * all profiling data associated with the given node. */ static struct gcov_info *get_accumulated_info(struct gcov_node *node) { struct gcov_info *info; int i = 0; if (node->unloaded_info) info = gcov_info_dup(node->unloaded_info); else info = gcov_info_dup(node->loaded_info[i++]); if (!info) return NULL; for (; i < node->num_loaded; i++) gcov_info_add(info, node->loaded_info[i]); return info; } /* * open() implementation for gcov data files. Create a copy of the profiling * data set and initialize the iterator and seq_file interface. */ static int gcov_seq_open(struct inode *inode, struct file *file) { struct gcov_node *node = inode->i_private; struct gcov_iterator *iter; struct seq_file *seq; struct gcov_info *info; int rc = -ENOMEM; mutex_lock(&node_lock); /* * Read from a profiling data copy to minimize reference tracking * complexity and concurrent access and to keep accumulating multiple * profiling data sets associated with one node simple. */ info = get_accumulated_info(node); if (!info) goto out_unlock; iter = gcov_iter_new(info); if (!iter) goto err_free_info; rc = seq_open(file, &gcov_seq_ops); if (rc) goto err_free_iter_info; seq = file->private_data; seq->private = iter; out_unlock: mutex_unlock(&node_lock); return rc; err_free_iter_info: gcov_iter_free(iter); err_free_info: gcov_info_free(info); goto out_unlock; } /* * release() implementation for gcov data files. 
 * Release resources allocated
 * by open().
 */
static int gcov_seq_release(struct inode *inode, struct file *file)
{
	struct gcov_iterator *iter;
	struct gcov_info *info;
	struct seq_file *seq;

	seq = file->private_data;
	iter = seq->private;
	/* The iterator owns the accumulated info copy made in open(). */
	info = gcov_iter_get_info(iter);
	gcov_iter_free(iter);
	gcov_info_free(info);
	seq_release(inode, file);

	return 0;
}

/*
 * Find a node by the associated data file name. Needs to be called with
 * node_lock held.
 */
static struct gcov_node *get_node_by_name(const char *name)
{
	struct gcov_node *node;
	struct gcov_info *info;

	list_for_each_entry(node, &all_head, all) {
		info = get_node_info(node);
		if (info && (strcmp(info->filename, name) == 0))
			return node;
	}
	return NULL;
}

/*
 * Reset all profiling data associated with the specified node, both the
 * unloaded copy (if any) and every loaded data set.
 */
static void reset_node(struct gcov_node *node)
{
	int i;

	if (node->unloaded_info)
		gcov_info_reset(node->unloaded_info);
	for (i = 0; i < node->num_loaded; i++)
		gcov_info_reset(node->loaded_info[i]);
}

static void remove_node(struct gcov_node *node);

/*
 * write() implementation for gcov data files. Reset profiling data for the
 * corresponding file. If all associated object files have been unloaded,
 * remove the debugfs node as well.
 */
static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
			      size_t len, loff_t *pos)
{
	struct seq_file *seq;
	struct gcov_info *info;
	struct gcov_node *node;

	seq = file->private_data;
	info = gcov_iter_get_info(seq->private);
	mutex_lock(&node_lock);
	node = get_node_by_name(info->filename);
	if (node) {
		/* Reset counts or remove node for unloaded modules. */
		if (node->num_loaded == 0)
			remove_node(node);
		else
			reset_node(node);
	}
	/* Reset counts for open file.
 */
	gcov_info_reset(info);
	mutex_unlock(&node_lock);

	return len;
}

/*
 * Given a string <path> representing a file path of format:
 *   path/to/file.gcda
 * construct and return a new string:
 *   <dir/>path/to/file.<ext>
 *
 * Returns NULL on allocation failure; caller frees the result.
 */
static char *link_target(const char *dir, const char *path, const char *ext)
{
	char *target;
	char *old_ext;
	char *copy;

	copy = kstrdup(path, GFP_KERNEL);
	if (!copy)
		return NULL;
	/* Strip the old extension before appending the new one. */
	old_ext = strrchr(copy, '.');
	if (old_ext)
		*old_ext = '\0';
	if (dir)
		target = kasprintf(GFP_KERNEL, "%s/%s.%s", dir, copy, ext);
	else
		target = kasprintf(GFP_KERNEL, "%s.%s", copy, ext);
	kfree(copy);

	return target;
}

/*
 * Construct a string representing the symbolic link target for the given
 * gcov data file name and link type. Depending on the link type and the
 * location of the data file, the link target can either point to a
 * subdirectory of srctree, objtree or in an external location.
 */
static char *get_link_target(const char *filename, const struct gcov_link *ext)
{
	const char *rel;
	char *result;

	if (strncmp(filename, objtree, strlen(objtree)) == 0) {
		rel = filename + strlen(objtree) + 1;
		if (ext->dir == SRC_TREE)
			result = link_target(srctree, rel, ext->ext);
		else
			result = link_target(objtree, rel, ext->ext);
	} else {
		/* External compilation. */
		result = link_target(NULL, filename, ext->ext);
	}

	return result;
}

#define SKEW_PREFIX ".tmp_"

/*
 * For a filename .tmp_filename.ext return filename.ext. Needed to compensate
 * for filename skewing caused by the mod-versioning mechanism.
 */
static const char *deskew(const char *basename)
{
	if (strncmp(basename, SKEW_PREFIX, sizeof(SKEW_PREFIX) - 1) == 0)
		return basename + sizeof(SKEW_PREFIX) - 1;
	return basename;
}

/*
 * Create links to additional files (usually .c and .gcno files) which the
 * gcov tool expects to find in the same directory as the gcov data file.
 */
static void add_links(struct gcov_node *node, struct dentry *parent)
{
	char *basename;
	char *target;
	int num;
	int i;

	/* Count entries in the NULL-terminated gcov_link table. */
	for (num = 0; gcov_link[num].ext; num++)
		/* Nothing.
*/; node->links = kcalloc(num, sizeof(struct dentry *), GFP_KERNEL); if (!node->links) return; for (i = 0; i < num; i++) { target = get_link_target(get_node_info(node)->filename, &gcov_link[i]); if (!target) goto out_err; basename = strrchr(target, '/'); if (!basename) goto out_err; basename++; node->links[i] = debugfs_create_symlink(deskew(basename), parent, target); if (!node->links[i]) goto out_err; kfree(target); } return; out_err: kfree(target); while (i-- > 0) debugfs_remove(node->links[i]); kfree(node->links); node->links = NULL; } static const struct file_operations gcov_data_fops = { .open = gcov_seq_open, .release = gcov_seq_release, .read = seq_read, .llseek = seq_lseek, .write = gcov_seq_write, }; /* Basic initialization of a new node. */ static void init_node(struct gcov_node *node, struct gcov_info *info, const char *name, struct gcov_node *parent) { INIT_LIST_HEAD(&node->list); INIT_LIST_HEAD(&node->children); INIT_LIST_HEAD(&node->all); if (node->loaded_info) { node->loaded_info[0] = info; node->num_loaded = 1; } node->parent = parent; if (name) strcpy(node->name, name); } /* * Create a new node and associated debugfs entry. Needs to be called with * node_lock held. */ static struct gcov_node *new_node(struct gcov_node *parent, struct gcov_info *info, const char *name) { struct gcov_node *node; node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); if (!node) goto err_nomem; if (info) { node->loaded_info = kcalloc(1, sizeof(struct gcov_info *), GFP_KERNEL); if (!node->loaded_info) goto err_nomem; } init_node(node, info, name, parent); /* Differentiate between gcov data file nodes and directory nodes. 
*/ if (info) { node->dentry = debugfs_create_file(deskew(node->name), 0600, parent->dentry, node, &gcov_data_fops); } else node->dentry = debugfs_create_dir(node->name, parent->dentry); if (!node->dentry) { pr_warning("could not create file\n"); kfree(node); return NULL; } if (info) add_links(node, parent->dentry); list_add(&node->list, &parent->children); list_add(&node->all, &all_head); return node; err_nomem: kfree(node); pr_warning("out of memory\n"); return NULL; } /* Remove symbolic links associated with node. */ static void remove_links(struct gcov_node *node) { int i; if (!node->links) return; for (i = 0; gcov_link[i].ext; i++) debugfs_remove(node->links[i]); kfree(node->links); node->links = NULL; } /* * Remove node from all lists and debugfs and release associated resources. * Needs to be called with node_lock held. */ static void release_node(struct gcov_node *node) { list_del(&node->list); list_del(&node->all); debugfs_remove(node->dentry); remove_links(node); kfree(node->loaded_info); if (node->unloaded_info) gcov_info_free(node->unloaded_info); kfree(node); } /* Release node and empty parents. Needs to be called with node_lock held. */ static void remove_node(struct gcov_node *node) { struct gcov_node *parent; while ((node != &root_node) && list_empty(&node->children)) { parent = node->parent; release_node(node); node = parent; } } /* * Find child node with given basename. Needs to be called with node_lock * held. */ static struct gcov_node *get_child_by_name(struct gcov_node *parent, const char *name) { struct gcov_node *node; list_for_each_entry(node, &parent->children, list) { if (strcmp(node->name, name) == 0) return node; } return NULL; } /* * write() implementation for reset file. Reset all profiling data to zero * and remove nodes for which all associated object files are unloaded. 
 */
static ssize_t reset_write(struct file *file, const char __user *addr,
			   size_t len, loff_t *pos)
{
	struct gcov_node *node;

	mutex_lock(&node_lock);
restart:
	list_for_each_entry(node, &all_head, all) {
		if (node->num_loaded > 0)
			reset_node(node);
		else if (list_empty(&node->children)) {
			remove_node(node);
			/* Several nodes may have gone - restart loop. */
			goto restart;
		}
	}
	mutex_unlock(&node_lock);

	return len;
}

/* read() implementation for reset file. Unused. */
static ssize_t reset_read(struct file *file, char __user *addr, size_t len,
			  loff_t *pos)
{
	/* Allow read operation so that a recursive copy won't fail. */
	return 0;
}

static const struct file_operations gcov_reset_fops = {
	.write	= reset_write,
	.read	= reset_read,
	.llseek = noop_llseek,
};

/*
 * Create a node for a given profiling data set and add it to all lists and
 * debugfs. Needs to be called with node_lock held.
 */
static void add_node(struct gcov_info *info)
{
	char *filename;
	char *curr;
	char *next;
	struct gcov_node *parent;
	struct gcov_node *node;

	filename = kstrdup(info->filename, GFP_KERNEL);
	if (!filename)
		return;
	parent = &root_node;
	/* Create directory nodes along the path. */
	for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {
		if (curr == next)
			continue;
		*next = 0;
		if (strcmp(curr, ".") == 0)
			continue;
		if (strcmp(curr, "..") == 0) {
			/* ".." above the root is malformed - bail out. */
			if (!parent->parent)
				goto err_remove;
			parent = parent->parent;
			continue;
		}
		node = get_child_by_name(parent, curr);
		if (!node) {
			node = new_node(parent, NULL, curr);
			if (!node)
				goto err_remove;
		}
		parent = node;
	}
	/* Create file node. */
	node = new_node(parent, info, curr);
	if (!node)
		goto err_remove;
out:
	kfree(filename);
	return;

err_remove:
	/* Prune any now-empty directory nodes created above. */
	remove_node(parent);
	goto out;
}

/*
 * Associate a profiling data set with an existing node. Needs to be called
 * with node_lock held.
 */
static void add_info(struct gcov_node *node, struct gcov_info *info)
{
	struct gcov_info **loaded_info;
	int num = node->num_loaded;

	/*
	 * Prepare new array.
 This is done first to simplify cleanup in
	 * case the new data set is incompatible, the node only contains
	 * unloaded data sets and there's not enough memory for the array.
	 */
	loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
	if (!loaded_info) {
		pr_warning("could not add '%s' (out of memory)\n",
			   info->filename);
		return;
	}
	memcpy(loaded_info, node->loaded_info,
	       num * sizeof(struct gcov_info *));
	loaded_info[num] = info;
	/* Check if the new data set is compatible. */
	if (num == 0) {
		/*
		 * A module was unloaded, modified and reloaded. The new
		 * data set replaces the copy of the last one.
		 */
		if (!gcov_info_is_compatible(node->unloaded_info, info)) {
			pr_warning("discarding saved data for %s "
				   "(incompatible version)\n", info->filename);
			gcov_info_free(node->unloaded_info);
			node->unloaded_info = NULL;
		}
	} else {
		/*
		 * Two different versions of the same object file are loaded.
		 * The initial one takes precedence.
		 */
		if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
			pr_warning("could not add '%s' (incompatible "
				   "version)\n", info->filename);
			kfree(loaded_info);
			return;
		}
	}
	/* Overwrite previous array. */
	kfree(node->loaded_info);
	node->loaded_info = loaded_info;
	node->num_loaded = num + 1;
}

/*
 * Return the index of a profiling data set associated with a node, or
 * -ENOENT if it is not associated.
 */
static int get_info_index(struct gcov_node *node, struct gcov_info *info)
{
	int i;

	for (i = 0; i < node->num_loaded; i++) {
		if (node->loaded_info[i] == info)
			return i;
	}
	return -ENOENT;
}

/*
 * Save the data of a profiling data set which is being unloaded. Counts are
 * accumulated into an existing saved copy when there is one.
 */
static void save_info(struct gcov_node *node, struct gcov_info *info)
{
	if (node->unloaded_info)
		gcov_info_add(node->unloaded_info, info);
	else {
		node->unloaded_info = gcov_info_dup(info);
		if (!node->unloaded_info) {
			pr_warning("could not save data for '%s' "
				   "(out of memory)\n", info->filename);
		}
	}
}

/*
 * Disassociate a profiling data set from a node. Needs to be called with
 * node_lock held.
*/ static void remove_info(struct gcov_node *node, struct gcov_info *info) { int i; i = get_info_index(node, info); if (i < 0) { pr_warning("could not remove '%s' (not found)\n", info->filename); return; } if (gcov_persist) save_info(node, info); /* Shrink array. */ node->loaded_info[i] = node->loaded_info[node->num_loaded - 1]; node->num_loaded--; if (node->num_loaded > 0) return; /* Last loaded data set was removed. */ kfree(node->loaded_info); node->loaded_info = NULL; node->num_loaded = 0; if (!node->unloaded_info) remove_node(node); } /* * Callback to create/remove profiling files when code compiled with * -fprofile-arcs is loaded/unloaded. */ void gcov_event(enum gcov_action action, struct gcov_info *info) { struct gcov_node *node; mutex_lock(&node_lock); node = get_node_by_name(info->filename); switch (action) { case GCOV_ADD: if (node) add_info(node, info); else add_node(info); break; case GCOV_REMOVE: if (node) remove_info(node, info); else { pr_warning("could not remove '%s' (not found)\n", info->filename); } break; } mutex_unlock(&node_lock); } /* Create debugfs entries. */ static __init int gcov_fs_init(void) { int rc = -EIO; init_node(&root_node, NULL, NULL, NULL); /* * /sys/kernel/debug/gcov will be parent for the reset control file * and all profiling files. */ root_node.dentry = debugfs_create_dir("gcov", NULL); if (!root_node.dentry) goto err_remove; /* * Create reset file which resets all profiling counts when written * to. */ reset_dentry = debugfs_create_file("reset", 0600, root_node.dentry, NULL, &gcov_reset_fops); if (!reset_dentry) goto err_remove; /* Replay previous events to get our fs hierarchy up-to-date. */ gcov_enable_events(); return 0; err_remove: pr_err("init failed\n"); if (root_node.dentry) debugfs_remove(root_node.dentry); return rc; } device_initcall(gcov_fs_init);
gpl-2.0
iamroot11c/kernel_source
sound/soc/fsl/imx-wm8962.c
89
8291
/* * Copyright 2013 Freescale Semiconductor, Inc. * * Based on imx-sgtl5000.c * Copyright 2012 Freescale Semiconductor, Inc. * Copyright 2012 Linaro Ltd. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/module.h> #include <linux/of_platform.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/clk.h> #include <sound/soc.h> #include <sound/pcm_params.h> #include <sound/soc-dapm.h> #include <linux/pinctrl/consumer.h> #include "../codecs/wm8962.h" #include "imx-audmux.h" #define DAI_NAME_SIZE 32 struct imx_wm8962_data { struct snd_soc_dai_link dai; struct snd_soc_card card; char codec_dai_name[DAI_NAME_SIZE]; char platform_name[DAI_NAME_SIZE]; struct clk *codec_clk; unsigned int clk_frequency; }; struct imx_priv { struct platform_device *pdev; }; static struct imx_priv card_priv; static const struct snd_soc_dapm_widget imx_wm8962_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_SPK("Ext Spk", NULL), SND_SOC_DAPM_MIC("AMIC", NULL), SND_SOC_DAPM_MIC("DMIC", NULL), }; static int sample_rate = 44100; static snd_pcm_format_t sample_format = SNDRV_PCM_FORMAT_S16_LE; static int imx_hifi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { sample_rate = params_rate(params); sample_format = params_format(params); return 0; } static struct snd_soc_ops imx_hifi_ops = { .hw_params = imx_hifi_hw_params, }; static int imx_wm8962_set_bias_level(struct snd_soc_card *card, struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level) { struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai; struct imx_priv *priv = &card_priv; struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev); struct device *dev = &priv->pdev->dev; unsigned int pll_out; int ret; if 
(dapm->dev != codec_dai->dev) return 0; switch (level) { case SND_SOC_BIAS_PREPARE: if (dapm->bias_level == SND_SOC_BIAS_STANDBY) { if (sample_format == SNDRV_PCM_FORMAT_S24_LE) pll_out = sample_rate * 384; else pll_out = sample_rate * 256; ret = snd_soc_dai_set_pll(codec_dai, WM8962_FLL, WM8962_FLL_MCLK, data->clk_frequency, pll_out); if (ret < 0) { dev_err(dev, "failed to start FLL: %d\n", ret); return ret; } ret = snd_soc_dai_set_sysclk(codec_dai, WM8962_SYSCLK_FLL, pll_out, SND_SOC_CLOCK_IN); if (ret < 0) { dev_err(dev, "failed to set SYSCLK: %d\n", ret); return ret; } } break; case SND_SOC_BIAS_STANDBY: if (dapm->bias_level == SND_SOC_BIAS_PREPARE) { ret = snd_soc_dai_set_sysclk(codec_dai, WM8962_SYSCLK_MCLK, data->clk_frequency, SND_SOC_CLOCK_IN); if (ret < 0) { dev_err(dev, "failed to switch away from FLL: %d\n", ret); return ret; } ret = snd_soc_dai_set_pll(codec_dai, WM8962_FLL, 0, 0, 0); if (ret < 0) { dev_err(dev, "failed to stop FLL: %d\n", ret); return ret; } } break; default: break; } dapm->bias_level = level; return 0; } static int imx_wm8962_late_probe(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai; struct imx_priv *priv = &card_priv; struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev); struct device *dev = &priv->pdev->dev; int ret; ret = snd_soc_dai_set_sysclk(codec_dai, WM8962_SYSCLK_MCLK, data->clk_frequency, SND_SOC_CLOCK_IN); if (ret < 0) dev_err(dev, "failed to set sysclk in %s\n", __func__); return ret; } static int imx_wm8962_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct device_node *ssi_np, *codec_np; struct platform_device *ssi_pdev; struct imx_priv *priv = &card_priv; struct i2c_client *codec_dev; struct imx_wm8962_data *data; int int_port, ext_port; int ret; priv->pdev = pdev; ret = of_property_read_u32(np, "mux-int-port", &int_port); if (ret) { dev_err(&pdev->dev, "mux-int-port missing or invalid\n"); return ret; } ret = of_property_read_u32(np, 
"mux-ext-port", &ext_port); if (ret) { dev_err(&pdev->dev, "mux-ext-port missing or invalid\n"); return ret; } /* * The port numbering in the hardware manual starts at 1, while * the audmux API expects it starts at 0. */ int_port--; ext_port--; ret = imx_audmux_v2_configure_port(int_port, IMX_AUDMUX_V2_PTCR_SYN | IMX_AUDMUX_V2_PTCR_TFSEL(ext_port) | IMX_AUDMUX_V2_PTCR_TCSEL(ext_port) | IMX_AUDMUX_V2_PTCR_TFSDIR | IMX_AUDMUX_V2_PTCR_TCLKDIR, IMX_AUDMUX_V2_PDCR_RXDSEL(ext_port)); if (ret) { dev_err(&pdev->dev, "audmux internal port setup failed\n"); return ret; } imx_audmux_v2_configure_port(ext_port, IMX_AUDMUX_V2_PTCR_SYN, IMX_AUDMUX_V2_PDCR_RXDSEL(int_port)); if (ret) { dev_err(&pdev->dev, "audmux external port setup failed\n"); return ret; } ssi_np = of_parse_phandle(pdev->dev.of_node, "ssi-controller", 0); codec_np = of_parse_phandle(pdev->dev.of_node, "audio-codec", 0); if (!ssi_np || !codec_np) { dev_err(&pdev->dev, "phandle missing or invalid\n"); ret = -EINVAL; goto fail; } ssi_pdev = of_find_device_by_node(ssi_np); if (!ssi_pdev) { dev_err(&pdev->dev, "failed to find SSI platform device\n"); ret = -EINVAL; goto fail; } codec_dev = of_find_i2c_device_by_node(codec_np); if (!codec_dev || !codec_dev->driver) { dev_err(&pdev->dev, "failed to find codec platform device\n"); ret = -EINVAL; goto fail; } data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) { ret = -ENOMEM; goto fail; } data->codec_clk = devm_clk_get(&codec_dev->dev, NULL); if (IS_ERR(data->codec_clk)) { ret = PTR_ERR(data->codec_clk); dev_err(&codec_dev->dev, "failed to get codec clk: %d\n", ret); goto fail; } data->clk_frequency = clk_get_rate(data->codec_clk); ret = clk_prepare_enable(data->codec_clk); if (ret) { dev_err(&codec_dev->dev, "failed to enable codec clk: %d\n", ret); goto fail; } data->dai.name = "HiFi"; data->dai.stream_name = "HiFi"; data->dai.codec_dai_name = "wm8962"; data->dai.codec_of_node = codec_np; data->dai.cpu_dai_name = dev_name(&ssi_pdev->dev); 
data->dai.platform_of_node = ssi_np; data->dai.ops = &imx_hifi_ops; data->dai.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM; data->card.dev = &pdev->dev; ret = snd_soc_of_parse_card_name(&data->card, "model"); if (ret) goto clk_fail; ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing"); if (ret) goto clk_fail; data->card.num_links = 1; data->card.dai_link = &data->dai; data->card.dapm_widgets = imx_wm8962_dapm_widgets; data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets); data->card.late_probe = imx_wm8962_late_probe; data->card.set_bias_level = imx_wm8962_set_bias_level; ret = snd_soc_register_card(&data->card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); goto clk_fail; } platform_set_drvdata(pdev, data); of_node_put(ssi_np); of_node_put(codec_np); return 0; clk_fail: if (!IS_ERR(data->codec_clk)) clk_disable_unprepare(data->codec_clk); fail: if (ssi_np) of_node_put(ssi_np); if (codec_np) of_node_put(codec_np); return ret; } static int imx_wm8962_remove(struct platform_device *pdev) { struct imx_wm8962_data *data = platform_get_drvdata(pdev); if (!IS_ERR(data->codec_clk)) clk_disable_unprepare(data->codec_clk); snd_soc_unregister_card(&data->card); return 0; } static const struct of_device_id imx_wm8962_dt_ids[] = { { .compatible = "fsl,imx-audio-wm8962", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_wm8962_dt_ids); static struct platform_driver imx_wm8962_driver = { .driver = { .name = "imx-wm8962", .owner = THIS_MODULE, .of_match_table = imx_wm8962_dt_ids, }, .probe = imx_wm8962_probe, .remove = imx_wm8962_remove, }; module_platform_driver(imx_wm8962_driver); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("Freescale i.MX WM8962 ASoC machine driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:imx-wm8962");
gpl-2.0
timshen91/gcc
gcc/testsuite/gfortran.dg/enum_10.f90
89
1162
! { dg-do run }
! { dg-options "-fshort-enums -w" }
! { dg-options "-fshort-enums -w -Wl,--no-enum-size-warning" { target arm_eabi } }
! { dg-additional-sources enum_10.c }

! Make sure short enums are indeed interoperable with the
! corresponding C type.

module enum_10

  ! With -fshort-enums, each BIND(C) enum should take the smallest
  ! integer kind that can hold its largest enumerator; the huge(1_N)
  ! enumerators below force kinds 1, 2 and 4 respectively.
  enum, bind( c )
    enumerator :: one1 = 1, two1, max1 = huge(1_1)
  end enum

  enum, bind( c )
    enumerator :: one2 = 1, two2, max2 = huge(1_2)
  end enum

  enum, bind( c )
    enumerator :: one4 = 1, two4, max4 = huge(1_4)
  end enum

end module enum_10

use enum_10

! f1/f2/f4 are defined in the companion C source (enum_10.c); each
! takes an enum-kinded value plus a default integer — presumably the
! C side verifies the two agree (see enum_10.c for the exact check).
interface f1
  subroutine f1(i,j)
    use enum_10
    integer (kind(max1)) :: i
    integer :: j
  end subroutine f1
end interface

interface f2
  subroutine f2(i,j)
    use enum_10
    integer (kind(max2)) :: i
    integer :: j
  end subroutine f2
end interface

interface f4
  subroutine f4(i,j)
    use enum_10
    integer (kind(max4)) :: i
    integer :: j
  end subroutine f4
end interface

call f1 (one1, 1)
call f1 (two1, 2)
call f1 (max1, huge(1_1)+0) ! Adding 0 to get default integer

call f2 (one2, 1)
call f2 (two2, 2)
call f2 (max2, huge(1_2)+0)

call f4 (one4, 1)
call f4 (two4, 2)
call f4 (max4, huge(1_4)+0)

end
gpl-2.0
OpenInkpot-archive/iplinux-binutils
bfd/elf32-i960.c
89
5374
/* Intel 960 specific support for 32-bit ELF
   Copyright 1999, 2000, 2001, 2002, 2003, 2005, 2007
   Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#include "sysdep.h"
#include "bfd.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/i960.h"

/* The i960 ELF target uses REL relocations (addend stored in the
   section contents), not RELA.  */
#define USE_REL	1

#define bfd_elf32_bfd_reloc_type_lookup elf32_i960_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup \
  elf32_i960_reloc_name_lookup
#define elf_info_to_howto elf32_i960_info_to_howto
#define elf_info_to_howto_rel elf32_i960_info_to_howto_rel

/* ELF relocs are against symbols.  If we are producing relocatable
   output, and the reloc is against an external symbol, and nothing
   has given us any additional addend, the resulting reloc will also
   be against the same symbol.  In such a case, we don't want to
   change anything about the way the reloc is handled, since it will
   all be done at final link time.  Rather than put special case code
   into bfd_perform_relocation, all the reloc types use this howto
   function.  It just short circuits the reloc if producing
   relocatable output against an external symbol.  */

static bfd_reloc_status_type
elf32_i960_relocate (bfd *abfd ATTRIBUTE_UNUSED,
		     arelent *reloc_entry,
		     asymbol *symbol,
		     PTR data ATTRIBUTE_UNUSED,
		     asection *input_section,
		     bfd *output_bfd,
		     char **error_message ATTRIBUTE_UNUSED)
{
  /* HACK: I think this first condition is necessary when producing
     relocatable output.  After the end of HACK, the code is identical
     to bfd_elf_generic_reloc().  I would _guess_ the first change
     belongs there rather than here.  martindo 1998-10-23.  */
  if (output_bfd != (bfd *) NULL
      && reloc_entry->howto->pc_relative
      && !reloc_entry->howto->pcrel_offset)
    reloc_entry->addend -= symbol->value;

  /* This is more dubious.  */
  else if (output_bfd != (bfd *) NULL
	   && (symbol->flags & BSF_SECTION_SYM) != 0)
    reloc_entry->addend -= symbol->section->output_section->vma;

  else
    {
      /* ...end of HACK.  */
      if (output_bfd != (bfd *) NULL
	  && (symbol->flags & BSF_SECTION_SYM) == 0
	  && (! reloc_entry->howto->partial_inplace
	      || reloc_entry->addend == 0))
	{
	  reloc_entry->address += input_section->output_offset;
	  return bfd_reloc_ok;
	}
    }

  return bfd_reloc_continue;
}

/* Relocation howto table, indexed directly by the ELF relocation
   number (R_960_*).  Note the trailing space in "R_960_IP24 " is
   preserved from the original sources.  */
static reloc_howto_type elf_howto_table[]=
{
  HOWTO (R_960_NONE, 0, 0, 0, FALSE, 0, complain_overflow_bitfield,
	 elf32_i960_relocate, "R_960_NONE", TRUE,
	 0x00000000, 0x00000000, FALSE),
  EMPTY_HOWTO (1),
  HOWTO (R_960_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
	 elf32_i960_relocate, "R_960_32", TRUE,
	 0xffffffff, 0xffffffff, FALSE),
  HOWTO (R_960_IP24, 0, 2, 24, TRUE, 0, complain_overflow_signed,
	 elf32_i960_relocate, "R_960_IP24 ", TRUE,
	 0x00ffffff, 0x00ffffff, FALSE),
  EMPTY_HOWTO (4),
  EMPTY_HOWTO (5),
  EMPTY_HOWTO (6),
  EMPTY_HOWTO (7)
};

/* Map a BFD reloc code to the i960 ELF reloc number; unknown codes
   fall back to R_960_NONE.
   NOTE(review): BFD_RELOC_I960_CALLJ maps to R_960_OPTCALL, which has
   no populated row in elf_howto_table above — confirm the numbering
   in elf/i960.h keeps reloc_type_lookup in bounds for it.  */
static enum elf_i960_reloc_type
elf32_i960_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_960_NONE;
    case BFD_RELOC_I960_CALLJ:
      return R_960_OPTCALL;
    case BFD_RELOC_32:
    case BFD_RELOC_CTOR:
      return R_960_32;
    case BFD_RELOC_24_PCREL:
      return R_960_IP24;
    }
}

/* RELA form is unsupported on this REL-only target; reaching this is
   a bug, hence the abort.  */
static void
elf32_i960_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED,
			  arelent * cache_ptr ATTRIBUTE_UNUSED,
			  Elf_Internal_Rela * dst ATTRIBUTE_UNUSED)
{
  abort ();
}

/* Fill in CACHE_PTR->howto from the reloc number in DST->r_info.  */
static void
elf32_i960_info_to_howto_rel (bfd *abfd ATTRIBUTE_UNUSED,
			      arelent *cache_ptr,
			      Elf_Internal_Rela *dst)
{
  enum elf_i960_reloc_type type;

  type = (enum elf_i960_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (type < R_960_max);
  cache_ptr->howto = &elf_howto_table[(int) type];
}

/* Look up a howto entry by BFD reloc code.  */
static reloc_howto_type *
elf32_i960_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			      bfd_reloc_code_real_type code)
{
  return elf_howto_table + elf32_i960_bfd_to_reloc_type (code);
}

/* Look up a howto entry by its name (case-insensitive).  */
static reloc_howto_type *
elf32_i960_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			      const char *r_name)
{
  unsigned int i;

  for (i = 0;
       i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]);
       i++)
    if (elf_howto_table[i].name != NULL
	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

#define TARGET_LITTLE_SYM	bfd_elf32_i960_vec
#define TARGET_LITTLE_NAME	"elf32-i960"
#define ELF_ARCH		bfd_arch_i960
#define ELF_MACHINE_CODE	EM_960
#define ELF_MAXPAGESIZE		1 /* FIXME: This number is wrong,
				     It should be the page size in
				     bytes.  */

#include "elf32-target.h"
gpl-2.0
Wenzel/kvm
drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
89
6472
/*
 * Copyright (C) 2015 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>

#include "nfp_net.h"

/* Top-level "nfp_net" debugfs directory shared by all adapters. */
static struct dentry *nfp_dir;

/*
 * Dump one RX ring: host/freelist/RX queue pointers followed by every
 * descriptor, annotating the slots the various read/write pointers
 * currently address.  Holds rtnl_lock to keep the ring stable while
 * reading; bails out silently if the ring is torn down or the netdev
 * is not running.
 */
static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
{
	struct nfp_net_rx_ring *rx_ring = file->private;
	int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt;
	struct nfp_net_rx_desc *rxd;
	struct sk_buff *skb;
	struct nfp_net *nn;
	int i;

	rtnl_lock();

	if (!rx_ring->r_vec || !rx_ring->r_vec->nfp_net)
		goto out;
	nn = rx_ring->r_vec->nfp_net;
	if (!netif_running(nn->netdev))
		goto out;

	rxd_cnt = rx_ring->cnt;

	fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl);
	fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl);
	rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx);
	rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);

	seq_printf(file, "RX[%02d]: H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n",
		   rx_ring->idx, rx_ring->rd_p, rx_ring->wr_p,
		   fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p);

	for (i = 0; i < rxd_cnt; i++) {
		rxd = &rx_ring->rxds[i];
		seq_printf(file, "%04d: 0x%08x 0x%08x", i,
			   rxd->vals[0], rxd->vals[1]);

		skb = READ_ONCE(rx_ring->rxbufs[i].skb);
		if (skb)
			seq_printf(file, " skb->head=%p skb->data=%p",
				   skb->head, skb->data);

		if (rx_ring->rxbufs[i].dma_addr)
			seq_printf(file, " dma_addr=%pad",
				   &rx_ring->rxbufs[i].dma_addr);

		if (i == rx_ring->rd_p % rxd_cnt)
			seq_puts(file, " H_RD ");
		if (i == rx_ring->wr_p % rxd_cnt)
			seq_puts(file, " H_WR ");
		if (i == fl_rd_p % rxd_cnt)
			seq_puts(file, " FL_RD");
		if (i == fl_wr_p % rxd_cnt)
			seq_puts(file, " FL_WR");
		if (i == rx_rd_p % rxd_cnt)
			seq_puts(file, " RX_RD");
		if (i == rx_wr_p % rxd_cnt)
			seq_puts(file, " RX_WR");

		seq_putc(file, '\n');
	}
out:
	rtnl_unlock();
	return 0;
}

static int nfp_net_debugfs_rx_q_open(struct inode *inode, struct file *f)
{
	return single_open(f, nfp_net_debugfs_rx_q_read, inode->i_private);
}

static const struct file_operations nfp_rx_q_fops = {
	.owner = THIS_MODULE,
	.open = nfp_net_debugfs_rx_q_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};

/*
 * Dump one TX ring in the same style as the RX dump above: queue
 * pointers, then every descriptor with pointer-position annotations.
 */
static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
{
	struct nfp_net_tx_ring *tx_ring = file->private;
	struct nfp_net_tx_desc *txd;
	int d_rd_p, d_wr_p, txd_cnt;
	struct sk_buff *skb;
	struct nfp_net *nn;
	int i;

	rtnl_lock();

	if (!tx_ring->r_vec || !tx_ring->r_vec->nfp_net)
		goto out;
	nn = tx_ring->r_vec->nfp_net;
	if (!netif_running(nn->netdev))
		goto out;

	txd_cnt = tx_ring->cnt;

	d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
	d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);

	seq_printf(file, "TX[%02d]: H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
		   tx_ring->idx, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);

	for (i = 0; i < txd_cnt; i++) {
		txd = &tx_ring->txds[i];
		seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
			   txd->vals[0], txd->vals[1],
			   txd->vals[2], txd->vals[3]);

		skb = READ_ONCE(tx_ring->txbufs[i].skb);
		if (skb)
			seq_printf(file, " skb->head=%p skb->data=%p",
				   skb->head, skb->data);

		if (tx_ring->txbufs[i].dma_addr)
			seq_printf(file, " dma_addr=%pad",
				   &tx_ring->txbufs[i].dma_addr);

		if (i == tx_ring->rd_p % txd_cnt)
			seq_puts(file, " H_RD");
		if (i == tx_ring->wr_p % txd_cnt)
			seq_puts(file, " H_WR");
		if (i == d_rd_p % txd_cnt)
			seq_puts(file, " D_RD");
		if (i == d_wr_p % txd_cnt)
			seq_puts(file, " D_WR");

		seq_putc(file, '\n');
	}
out:
	rtnl_unlock();
	return 0;
}

static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f)
{
	return single_open(f, nfp_net_debugfs_tx_q_read, inode->i_private);
}

static const struct file_operations nfp_tx_q_fops = {
	.owner = THIS_MODULE,
	.open = nfp_net_debugfs_tx_q_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};

/*
 * Create per-adapter debugfs hierarchy:
 *   nfp_net/<pci name>/queue/{rx,tx}/<ring index>
 * Failures are non-fatal; the function simply returns, leaving
 * whatever part of the tree was created (removed wholesale later by
 * nfp_net_debugfs_adapter_del()).
 */
void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
{
	/*
	 * FIX: these were erroneously declared 'static', sharing the
	 * dentry pointers between all adapters; they are plain locals.
	 */
	struct dentry *queues, *tx, *rx;
	char int_name[16];
	int i;

	if (IS_ERR_OR_NULL(nfp_dir))
		return;

	nn->debugfs_dir = debugfs_create_dir(pci_name(nn->pdev), nfp_dir);
	if (IS_ERR_OR_NULL(nn->debugfs_dir))
		return;

	/* Create queue debugging sub-tree */
	queues = debugfs_create_dir("queue", nn->debugfs_dir);
	/*
	 * FIX: previously re-tested nn->debugfs_dir (already known to
	 * be valid), so a failure to create "queue" was never caught.
	 */
	if (IS_ERR_OR_NULL(queues))
		return;

	rx = debugfs_create_dir("rx", queues);
	tx = debugfs_create_dir("tx", queues);
	if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx))
		return;

	for (i = 0; i < nn->num_rx_rings; i++) {
		sprintf(int_name, "%d", i);
		debugfs_create_file(int_name, S_IRUSR, rx,
				    &nn->rx_rings[i], &nfp_rx_q_fops);
	}

	for (i = 0; i < nn->num_tx_rings; i++) {
		sprintf(int_name, "%d", i);
		debugfs_create_file(int_name, S_IRUSR, tx,
				    &nn->tx_rings[i], &nfp_tx_q_fops);
	}
}

/* Tear down the adapter's entire debugfs sub-tree. */
void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
{
	debugfs_remove_recursive(nn->debugfs_dir);
	nn->debugfs_dir = NULL;
}

/* Module init: create the shared top-level "nfp_net" directory. */
void nfp_net_debugfs_create(void)
{
	nfp_dir = debugfs_create_dir("nfp_net", NULL);
}

/* Module exit: remove the shared directory and everything under it. */
void nfp_net_debugfs_destroy(void)
{
	debugfs_remove_recursive(nfp_dir);
	nfp_dir = NULL;
}
gpl-2.0
hexiaolong2008/linux-arm
drivers/platform/x86/sony-laptop.c
345
122689
/* * ACPI Sony Notebook Control Driver (SNC and SPIC) * * Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net> * Copyright (C) 2007-2009 Mattia Dongili <malattia@linux.it> * * Parts of this driver inspired from asus_acpi.c and ibm_acpi.c * which are copyrighted by their respective authors. * * The SNY6001 driver part is based on the sonypi driver which includes * material from: * * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net> * * Copyright (C) 2005 Narayanan R S <nars@kadamba.org> * * Copyright (C) 2001-2002 Alcôve <www.alcove.com> * * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au> * * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp> * * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp> * * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com> * * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/types.h> #include <linux/backlight.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/dmi.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/kfifo.h> #include <linux/workqueue.h> #include <linux/acpi.h> #include <linux/slab.h> #include <linux/sonypi.h> #include <linux/sony-laptop.h> #include <linux/rfkill.h> #ifdef CONFIG_SONYPI_COMPAT #include <linux/poll.h> #include <linux/miscdevice.h> #endif #include <asm/uaccess.h> #define dprintk(fmt, ...) \ do { \ if (debug) \ pr_warn(fmt, ##__VA_ARGS__); \ } while (0) #define SONY_NC_CLASS "sony-nc" #define SONY_NC_HID "SNY5001" #define SONY_NC_DRIVER_NAME "Sony Notebook Control Driver" #define SONY_PIC_CLASS "sony-pic" #define SONY_PIC_HID "SNY6001" #define SONY_PIC_DRIVER_NAME "Sony Programmable IO Control Driver" MODULE_AUTHOR("Stelian Pop, Mattia Dongili"); MODULE_DESCRIPTION("Sony laptop extras driver (SPIC and SNC ACPI device)"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "set this to 1 (and RTFM) if you want to help " "the development of this driver"); static int no_spic; /* = 0 */ module_param(no_spic, int, 0444); MODULE_PARM_DESC(no_spic, "set this if you don't want to enable the SPIC device"); static int compat; /* = 0 */ module_param(compat, int, 0444); MODULE_PARM_DESC(compat, "set this if you want to enable backward compatibility mode"); static unsigned long mask = 0xffffffff; module_param(mask, ulong, 0644); MODULE_PARM_DESC(mask, "set this to the mask of event you want to enable (see doc)"); static int camera; /* = 0 */ module_param(camera, int, 0444); MODULE_PARM_DESC(camera, "set this to 1 to enable Motion Eye camera controls " "(only use it if you have a C1VE or C1VN model)"); #ifdef 
CONFIG_SONYPI_COMPAT static int minor = -1; module_param(minor, int, 0); MODULE_PARM_DESC(minor, "minor number of the misc device for the SPIC compatibility code, " "default is -1 (automatic)"); #endif static int kbd_backlight = -1; module_param(kbd_backlight, int, 0444); MODULE_PARM_DESC(kbd_backlight, "set this to 0 to disable keyboard backlight, " "1 to enable it with automatic control and 2 to have it always " "on (default: no change from current value)"); static int kbd_backlight_timeout = -1; module_param(kbd_backlight_timeout, int, 0444); MODULE_PARM_DESC(kbd_backlight_timeout, "meaningful values vary from 0 to 3 and their meaning depends " "on the model (default: no change from current value)"); #ifdef CONFIG_PM_SLEEP static void sony_nc_thermal_resume(void); #endif static int sony_nc_kbd_backlight_setup(struct platform_device *pd, unsigned int handle); static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd, unsigned int handle); static int sony_nc_battery_care_setup(struct platform_device *pd, unsigned int handle); static void sony_nc_battery_care_cleanup(struct platform_device *pd); static int sony_nc_thermal_setup(struct platform_device *pd); static void sony_nc_thermal_cleanup(struct platform_device *pd); static int sony_nc_lid_resume_setup(struct platform_device *pd, unsigned int handle); static void sony_nc_lid_resume_cleanup(struct platform_device *pd); static int sony_nc_gfx_switch_setup(struct platform_device *pd, unsigned int handle); static void sony_nc_gfx_switch_cleanup(struct platform_device *pd); static int __sony_nc_gfx_switch_status_get(void); static int sony_nc_highspeed_charging_setup(struct platform_device *pd); static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd); static int sony_nc_lowbatt_setup(struct platform_device *pd); static void sony_nc_lowbatt_cleanup(struct platform_device *pd); static int sony_nc_fanspeed_setup(struct platform_device *pd); static void sony_nc_fanspeed_cleanup(struct 
platform_device *pd); static int sony_nc_usb_charge_setup(struct platform_device *pd); static void sony_nc_usb_charge_cleanup(struct platform_device *pd); static int sony_nc_panelid_setup(struct platform_device *pd); static void sony_nc_panelid_cleanup(struct platform_device *pd); static int sony_nc_smart_conn_setup(struct platform_device *pd); static void sony_nc_smart_conn_cleanup(struct platform_device *pd); static int sony_nc_touchpad_setup(struct platform_device *pd, unsigned int handle); static void sony_nc_touchpad_cleanup(struct platform_device *pd); enum sony_nc_rfkill { SONY_WIFI, SONY_BLUETOOTH, SONY_WWAN, SONY_WIMAX, N_SONY_RFKILL, }; static int sony_rfkill_handle; static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL]; static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900}; static int sony_nc_rfkill_setup(struct acpi_device *device, unsigned int handle); static void sony_nc_rfkill_cleanup(void); static void sony_nc_rfkill_update(void); /*********** Input Devices ***********/ #define SONY_LAPTOP_BUF_SIZE 128 struct sony_laptop_input_s { atomic_t users; struct input_dev *jog_dev; struct input_dev *key_dev; struct kfifo fifo; spinlock_t fifo_lock; struct timer_list release_key_timer; }; static struct sony_laptop_input_s sony_laptop_input = { .users = ATOMIC_INIT(0), }; struct sony_laptop_keypress { struct input_dev *dev; int key; }; /* Correspondance table between sonypi events * and input layer indexes in the keymap */ static int sony_laptop_input_index[] = { -1, /* 0 no event */ -1, /* 1 SONYPI_EVENT_JOGDIAL_DOWN */ -1, /* 2 SONYPI_EVENT_JOGDIAL_UP */ -1, /* 3 SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */ -1, /* 4 SONYPI_EVENT_JOGDIAL_UP_PRESSED */ -1, /* 5 SONYPI_EVENT_JOGDIAL_PRESSED */ -1, /* 6 SONYPI_EVENT_JOGDIAL_RELEASED */ 0, /* 7 SONYPI_EVENT_CAPTURE_PRESSED */ 1, /* 8 SONYPI_EVENT_CAPTURE_RELEASED */ 2, /* 9 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */ 3, /* 10 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */ 4, /* 11 SONYPI_EVENT_FNKEY_ESC */ 
5, /* 12 SONYPI_EVENT_FNKEY_F1 */ 6, /* 13 SONYPI_EVENT_FNKEY_F2 */ 7, /* 14 SONYPI_EVENT_FNKEY_F3 */ 8, /* 15 SONYPI_EVENT_FNKEY_F4 */ 9, /* 16 SONYPI_EVENT_FNKEY_F5 */ 10, /* 17 SONYPI_EVENT_FNKEY_F6 */ 11, /* 18 SONYPI_EVENT_FNKEY_F7 */ 12, /* 19 SONYPI_EVENT_FNKEY_F8 */ 13, /* 20 SONYPI_EVENT_FNKEY_F9 */ 14, /* 21 SONYPI_EVENT_FNKEY_F10 */ 15, /* 22 SONYPI_EVENT_FNKEY_F11 */ 16, /* 23 SONYPI_EVENT_FNKEY_F12 */ 17, /* 24 SONYPI_EVENT_FNKEY_1 */ 18, /* 25 SONYPI_EVENT_FNKEY_2 */ 19, /* 26 SONYPI_EVENT_FNKEY_D */ 20, /* 27 SONYPI_EVENT_FNKEY_E */ 21, /* 28 SONYPI_EVENT_FNKEY_F */ 22, /* 29 SONYPI_EVENT_FNKEY_S */ 23, /* 30 SONYPI_EVENT_FNKEY_B */ 24, /* 31 SONYPI_EVENT_BLUETOOTH_PRESSED */ 25, /* 32 SONYPI_EVENT_PKEY_P1 */ 26, /* 33 SONYPI_EVENT_PKEY_P2 */ 27, /* 34 SONYPI_EVENT_PKEY_P3 */ 28, /* 35 SONYPI_EVENT_BACK_PRESSED */ -1, /* 36 SONYPI_EVENT_LID_CLOSED */ -1, /* 37 SONYPI_EVENT_LID_OPENED */ 29, /* 38 SONYPI_EVENT_BLUETOOTH_ON */ 30, /* 39 SONYPI_EVENT_BLUETOOTH_OFF */ 31, /* 40 SONYPI_EVENT_HELP_PRESSED */ 32, /* 41 SONYPI_EVENT_FNKEY_ONLY */ 33, /* 42 SONYPI_EVENT_JOGDIAL_FAST_DOWN */ 34, /* 43 SONYPI_EVENT_JOGDIAL_FAST_UP */ 35, /* 44 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */ 36, /* 45 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */ 37, /* 46 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */ 38, /* 47 SONYPI_EVENT_JOGDIAL_VFAST_UP */ 39, /* 48 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */ 40, /* 49 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */ 41, /* 50 SONYPI_EVENT_ZOOM_PRESSED */ 42, /* 51 SONYPI_EVENT_THUMBPHRASE_PRESSED */ 43, /* 52 SONYPI_EVENT_MEYE_FACE */ 44, /* 53 SONYPI_EVENT_MEYE_OPPOSITE */ 45, /* 54 SONYPI_EVENT_MEMORYSTICK_INSERT */ 46, /* 55 SONYPI_EVENT_MEMORYSTICK_EJECT */ -1, /* 56 SONYPI_EVENT_ANYBUTTON_RELEASED */ -1, /* 57 SONYPI_EVENT_BATTERY_INSERT */ -1, /* 58 SONYPI_EVENT_BATTERY_REMOVE */ -1, /* 59 SONYPI_EVENT_FNKEY_RELEASED */ 47, /* 60 SONYPI_EVENT_WIRELESS_ON */ 48, /* 61 SONYPI_EVENT_WIRELESS_OFF */ 49, /* 62 SONYPI_EVENT_ZOOM_IN_PRESSED */ 50, /* 63 
SONYPI_EVENT_ZOOM_OUT_PRESSED */ 51, /* 64 SONYPI_EVENT_CD_EJECT_PRESSED */ 52, /* 65 SONYPI_EVENT_MODEKEY_PRESSED */ 53, /* 66 SONYPI_EVENT_PKEY_P4 */ 54, /* 67 SONYPI_EVENT_PKEY_P5 */ 55, /* 68 SONYPI_EVENT_SETTINGKEY_PRESSED */ 56, /* 69 SONYPI_EVENT_VOLUME_INC_PRESSED */ 57, /* 70 SONYPI_EVENT_VOLUME_DEC_PRESSED */ -1, /* 71 SONYPI_EVENT_BRIGHTNESS_PRESSED */ 58, /* 72 SONYPI_EVENT_MEDIA_PRESSED */ 59, /* 72 SONYPI_EVENT_VENDOR_PRESSED */ }; static int sony_laptop_input_keycode_map[] = { KEY_CAMERA, /* 0 SONYPI_EVENT_CAPTURE_PRESSED */ KEY_RESERVED, /* 1 SONYPI_EVENT_CAPTURE_RELEASED */ KEY_RESERVED, /* 2 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */ KEY_RESERVED, /* 3 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */ KEY_FN_ESC, /* 4 SONYPI_EVENT_FNKEY_ESC */ KEY_FN_F1, /* 5 SONYPI_EVENT_FNKEY_F1 */ KEY_FN_F2, /* 6 SONYPI_EVENT_FNKEY_F2 */ KEY_FN_F3, /* 7 SONYPI_EVENT_FNKEY_F3 */ KEY_FN_F4, /* 8 SONYPI_EVENT_FNKEY_F4 */ KEY_FN_F5, /* 9 SONYPI_EVENT_FNKEY_F5 */ KEY_FN_F6, /* 10 SONYPI_EVENT_FNKEY_F6 */ KEY_FN_F7, /* 11 SONYPI_EVENT_FNKEY_F7 */ KEY_FN_F8, /* 12 SONYPI_EVENT_FNKEY_F8 */ KEY_FN_F9, /* 13 SONYPI_EVENT_FNKEY_F9 */ KEY_FN_F10, /* 14 SONYPI_EVENT_FNKEY_F10 */ KEY_FN_F11, /* 15 SONYPI_EVENT_FNKEY_F11 */ KEY_FN_F12, /* 16 SONYPI_EVENT_FNKEY_F12 */ KEY_FN_1, /* 17 SONYPI_EVENT_FNKEY_1 */ KEY_FN_2, /* 18 SONYPI_EVENT_FNKEY_2 */ KEY_FN_D, /* 19 SONYPI_EVENT_FNKEY_D */ KEY_FN_E, /* 20 SONYPI_EVENT_FNKEY_E */ KEY_FN_F, /* 21 SONYPI_EVENT_FNKEY_F */ KEY_FN_S, /* 22 SONYPI_EVENT_FNKEY_S */ KEY_FN_B, /* 23 SONYPI_EVENT_FNKEY_B */ KEY_BLUETOOTH, /* 24 SONYPI_EVENT_BLUETOOTH_PRESSED */ KEY_PROG1, /* 25 SONYPI_EVENT_PKEY_P1 */ KEY_PROG2, /* 26 SONYPI_EVENT_PKEY_P2 */ KEY_PROG3, /* 27 SONYPI_EVENT_PKEY_P3 */ KEY_BACK, /* 28 SONYPI_EVENT_BACK_PRESSED */ KEY_BLUETOOTH, /* 29 SONYPI_EVENT_BLUETOOTH_ON */ KEY_BLUETOOTH, /* 30 SONYPI_EVENT_BLUETOOTH_OFF */ KEY_HELP, /* 31 SONYPI_EVENT_HELP_PRESSED */ KEY_FN, /* 32 SONYPI_EVENT_FNKEY_ONLY */ KEY_RESERVED, /* 33 
SONYPI_EVENT_JOGDIAL_FAST_DOWN */ KEY_RESERVED, /* 34 SONYPI_EVENT_JOGDIAL_FAST_UP */ KEY_RESERVED, /* 35 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */ KEY_RESERVED, /* 36 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */ KEY_RESERVED, /* 37 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */ KEY_RESERVED, /* 38 SONYPI_EVENT_JOGDIAL_VFAST_UP */ KEY_RESERVED, /* 39 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */ KEY_RESERVED, /* 40 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */ KEY_ZOOM, /* 41 SONYPI_EVENT_ZOOM_PRESSED */ BTN_THUMB, /* 42 SONYPI_EVENT_THUMBPHRASE_PRESSED */ KEY_RESERVED, /* 43 SONYPI_EVENT_MEYE_FACE */ KEY_RESERVED, /* 44 SONYPI_EVENT_MEYE_OPPOSITE */ KEY_RESERVED, /* 45 SONYPI_EVENT_MEMORYSTICK_INSERT */ KEY_RESERVED, /* 46 SONYPI_EVENT_MEMORYSTICK_EJECT */ KEY_WLAN, /* 47 SONYPI_EVENT_WIRELESS_ON */ KEY_WLAN, /* 48 SONYPI_EVENT_WIRELESS_OFF */ KEY_ZOOMIN, /* 49 SONYPI_EVENT_ZOOM_IN_PRESSED */ KEY_ZOOMOUT, /* 50 SONYPI_EVENT_ZOOM_OUT_PRESSED */ KEY_EJECTCD, /* 51 SONYPI_EVENT_CD_EJECT_PRESSED */ KEY_F13, /* 52 SONYPI_EVENT_MODEKEY_PRESSED */ KEY_PROG4, /* 53 SONYPI_EVENT_PKEY_P4 */ KEY_F14, /* 54 SONYPI_EVENT_PKEY_P5 */ KEY_F15, /* 55 SONYPI_EVENT_SETTINGKEY_PRESSED */ KEY_VOLUMEUP, /* 56 SONYPI_EVENT_VOLUME_INC_PRESSED */ KEY_VOLUMEDOWN, /* 57 SONYPI_EVENT_VOLUME_DEC_PRESSED */ KEY_MEDIA, /* 58 SONYPI_EVENT_MEDIA_PRESSED */ KEY_VENDOR, /* 59 SONYPI_EVENT_VENDOR_PRESSED */ }; /* release buttons after a short delay if pressed */ static void do_sony_laptop_release_key(unsigned long unused) { struct sony_laptop_keypress kp; unsigned long flags; spin_lock_irqsave(&sony_laptop_input.fifo_lock, flags); if (kfifo_out(&sony_laptop_input.fifo, (unsigned char *)&kp, sizeof(kp)) == sizeof(kp)) { input_report_key(kp.dev, kp.key, 0); input_sync(kp.dev); } /* If there is something in the fifo schedule next release. 
*/ if (kfifo_len(&sony_laptop_input.fifo) != 0) mod_timer(&sony_laptop_input.release_key_timer, jiffies + msecs_to_jiffies(10)); spin_unlock_irqrestore(&sony_laptop_input.fifo_lock, flags); } /* forward event to the input subsystem */ static void sony_laptop_report_input_event(u8 event) { struct input_dev *jog_dev = sony_laptop_input.jog_dev; struct input_dev *key_dev = sony_laptop_input.key_dev; struct sony_laptop_keypress kp = { NULL }; int scancode = -1; if (event == SONYPI_EVENT_FNKEY_RELEASED || event == SONYPI_EVENT_ANYBUTTON_RELEASED) { /* Nothing, not all VAIOs generate this event */ return; } /* report events */ switch (event) { /* jog_dev events */ case SONYPI_EVENT_JOGDIAL_UP: case SONYPI_EVENT_JOGDIAL_UP_PRESSED: input_report_rel(jog_dev, REL_WHEEL, 1); input_sync(jog_dev); return; case SONYPI_EVENT_JOGDIAL_DOWN: case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED: input_report_rel(jog_dev, REL_WHEEL, -1); input_sync(jog_dev); return; /* key_dev events */ case SONYPI_EVENT_JOGDIAL_PRESSED: kp.key = BTN_MIDDLE; kp.dev = jog_dev; break; default: if (event >= ARRAY_SIZE(sony_laptop_input_index)) { dprintk("sony_laptop_report_input_event, event not known: %d\n", event); break; } if ((scancode = sony_laptop_input_index[event]) != -1) { kp.key = sony_laptop_input_keycode_map[scancode]; if (kp.key != KEY_UNKNOWN) kp.dev = key_dev; } break; } if (kp.dev) { /* if we have a scancode we emit it so we can always remap the key */ if (scancode != -1) input_event(kp.dev, EV_MSC, MSC_SCAN, scancode); input_report_key(kp.dev, kp.key, 1); input_sync(kp.dev); /* schedule key release */ kfifo_in_locked(&sony_laptop_input.fifo, (unsigned char *)&kp, sizeof(kp), &sony_laptop_input.fifo_lock); mod_timer(&sony_laptop_input.release_key_timer, jiffies + msecs_to_jiffies(10)); } else dprintk("unknown input event %.2x\n", event); } static int sony_laptop_setup_input(struct acpi_device *acpi_device) { struct input_dev *jog_dev; struct input_dev *key_dev; int i; int error; /* don't run again if 
already initialized */ if (atomic_add_return(1, &sony_laptop_input.users) > 1) return 0; /* kfifo */ spin_lock_init(&sony_laptop_input.fifo_lock); error = kfifo_alloc(&sony_laptop_input.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); if (error) { pr_err("kfifo_alloc failed\n"); goto err_dec_users; } setup_timer(&sony_laptop_input.release_key_timer, do_sony_laptop_release_key, 0); /* input keys */ key_dev = input_allocate_device(); if (!key_dev) { error = -ENOMEM; goto err_free_kfifo; } key_dev->name = "Sony Vaio Keys"; key_dev->id.bustype = BUS_ISA; key_dev->id.vendor = PCI_VENDOR_ID_SONY; key_dev->dev.parent = &acpi_device->dev; /* Initialize the Input Drivers: special keys */ input_set_capability(key_dev, EV_MSC, MSC_SCAN); __set_bit(EV_KEY, key_dev->evbit); key_dev->keycodesize = sizeof(sony_laptop_input_keycode_map[0]); key_dev->keycodemax = ARRAY_SIZE(sony_laptop_input_keycode_map); key_dev->keycode = &sony_laptop_input_keycode_map; for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++) __set_bit(sony_laptop_input_keycode_map[i], key_dev->keybit); __clear_bit(KEY_RESERVED, key_dev->keybit); error = input_register_device(key_dev); if (error) goto err_free_keydev; sony_laptop_input.key_dev = key_dev; /* jogdial */ jog_dev = input_allocate_device(); if (!jog_dev) { error = -ENOMEM; goto err_unregister_keydev; } jog_dev->name = "Sony Vaio Jogdial"; jog_dev->id.bustype = BUS_ISA; jog_dev->id.vendor = PCI_VENDOR_ID_SONY; jog_dev->dev.parent = &acpi_device->dev; input_set_capability(jog_dev, EV_KEY, BTN_MIDDLE); input_set_capability(jog_dev, EV_REL, REL_WHEEL); error = input_register_device(jog_dev); if (error) goto err_free_jogdev; sony_laptop_input.jog_dev = jog_dev; return 0; err_free_jogdev: input_free_device(jog_dev); err_unregister_keydev: input_unregister_device(key_dev); /* to avoid kref underflow below at input_free_device */ key_dev = NULL; err_free_keydev: input_free_device(key_dev); err_free_kfifo: kfifo_free(&sony_laptop_input.fifo); err_dec_users: 
atomic_dec(&sony_laptop_input.users); return error; } static void sony_laptop_remove_input(void) { struct sony_laptop_keypress kp = { NULL }; /* Cleanup only after the last user has gone */ if (!atomic_dec_and_test(&sony_laptop_input.users)) return; del_timer_sync(&sony_laptop_input.release_key_timer); /* * Generate key-up events for remaining keys. Note that we don't * need locking since nobody is adding new events to the kfifo. */ while (kfifo_out(&sony_laptop_input.fifo, (unsigned char *)&kp, sizeof(kp)) == sizeof(kp)) { input_report_key(kp.dev, kp.key, 0); input_sync(kp.dev); } /* destroy input devs */ input_unregister_device(sony_laptop_input.key_dev); sony_laptop_input.key_dev = NULL; if (sony_laptop_input.jog_dev) { input_unregister_device(sony_laptop_input.jog_dev); sony_laptop_input.jog_dev = NULL; } kfifo_free(&sony_laptop_input.fifo); } /*********** Platform Device ***********/ static atomic_t sony_pf_users = ATOMIC_INIT(0); static struct platform_driver sony_pf_driver = { .driver = { .name = "sony-laptop", } }; static struct platform_device *sony_pf_device; static int sony_pf_add(void) { int ret = 0; /* don't run again if already initialized */ if (atomic_add_return(1, &sony_pf_users) > 1) return 0; ret = platform_driver_register(&sony_pf_driver); if (ret) goto out; sony_pf_device = platform_device_alloc("sony-laptop", -1); if (!sony_pf_device) { ret = -ENOMEM; goto out_platform_registered; } ret = platform_device_add(sony_pf_device); if (ret) goto out_platform_alloced; return 0; out_platform_alloced: platform_device_put(sony_pf_device); sony_pf_device = NULL; out_platform_registered: platform_driver_unregister(&sony_pf_driver); out: atomic_dec(&sony_pf_users); return ret; } static void sony_pf_remove(void) { /* deregister only after the last user has gone */ if (!atomic_dec_and_test(&sony_pf_users)) return; platform_device_unregister(sony_pf_device); platform_driver_unregister(&sony_pf_driver); } /*********** SNC (SNY5001) Device ***********/ /* the 
device uses 1-based values, while the backlight subsystem uses 0-based values */
#define SONY_MAX_BRIGHTNESS	8

/* direction codes passed to the sony_nc_value validate() callbacks */
#define SNC_VALIDATE_IN		0
#define SNC_VALIDATE_OUT	1

static ssize_t sony_nc_sysfs_show(struct device *, struct device_attribute *,
		char *);
static ssize_t sony_nc_sysfs_store(struct device *, struct device_attribute *,
		const char *, size_t);
static int boolean_validate(const int, const int);
static int brightness_default_validate(const int, const int);

/*
 * One sysfs-exposed SNC setting: each entry names the ACPI get/set methods
 * implementing it and an optional translation/validation hook.
 */
struct sony_nc_value {
	char *name;		/* name of the entry */
	char **acpiget;		/* names of the ACPI get function */
	char **acpiset;		/* names of the ACPI set function */
	int (*validate)(const int, const int);	/* input/output validation */
	int value;		/* current setting */
	int valid;		/* Has ever been set */
	int debug;		/* active only in debug mode ? */
	struct device_attribute devattr;	/* sysfs attribute */
};

/* declare the NULL-terminated list of ACPI method names for one entry */
#define SNC_HANDLE_NAMES(_name, _values...) \
		static char *snc_##_name[] = { _values, NULL }

/* build one sony_nc_value initializer wired to the common show/store */
#define SNC_HANDLE(_name, _getters, _setters, _validate, _debug) \
	{ \
		.name = __stringify(_name), \
		.acpiget = _getters, \
		.acpiset = _setters, \
		.validate = _validate, \
		.debug = _debug, \
		.devattr = __ATTR(_name, 0, sony_nc_sysfs_show, sony_nc_sysfs_store), \
	}

/* sentinel terminating the sony_nc_values table */
#define SNC_HANDLE_NULL	{ .name = NULL }

SNC_HANDLE_NAMES(fnkey_get, "GHKE");

SNC_HANDLE_NAMES(brightness_def_get, "GPBR");
SNC_HANDLE_NAMES(brightness_def_set, "SPBR");

SNC_HANDLE_NAMES(cdpower_get, "GCDP");
SNC_HANDLE_NAMES(cdpower_set, "SCDP", "CDPW");

SNC_HANDLE_NAMES(audiopower_get, "GAZP");
SNC_HANDLE_NAMES(audiopower_set, "AZPW");

SNC_HANDLE_NAMES(lanpower_get, "GLNP");
SNC_HANDLE_NAMES(lanpower_set, "LNPW");

SNC_HANDLE_NAMES(lidstate_get, "GLID");

SNC_HANDLE_NAMES(indicatorlamp_get, "GILS");
SNC_HANDLE_NAMES(indicatorlamp_set, "SILS");

SNC_HANDLE_NAMES(gainbass_get, "GMGB");
SNC_HANDLE_NAMES(gainbass_set, "CMGB");

SNC_HANDLE_NAMES(PID_get, "GPID");

SNC_HANDLE_NAMES(CTR_get, "GCTR");
SNC_HANDLE_NAMES(CTR_set, "SCTR");

SNC_HANDLE_NAMES(PCR_get, "GPCR");
SNC_HANDLE_NAMES(PCR_set, "SPCR"); SNC_HANDLE_NAMES(CMI_get, "GCMI"); SNC_HANDLE_NAMES(CMI_set, "SCMI"); static struct sony_nc_value sony_nc_values[] = { SNC_HANDLE(brightness_default, snc_brightness_def_get, snc_brightness_def_set, brightness_default_validate, 0), SNC_HANDLE(fnkey, snc_fnkey_get, NULL, NULL, 0), SNC_HANDLE(cdpower, snc_cdpower_get, snc_cdpower_set, boolean_validate, 0), SNC_HANDLE(audiopower, snc_audiopower_get, snc_audiopower_set, boolean_validate, 0), SNC_HANDLE(lanpower, snc_lanpower_get, snc_lanpower_set, boolean_validate, 1), SNC_HANDLE(lidstate, snc_lidstate_get, NULL, boolean_validate, 0), SNC_HANDLE(indicatorlamp, snc_indicatorlamp_get, snc_indicatorlamp_set, boolean_validate, 0), SNC_HANDLE(gainbass, snc_gainbass_get, snc_gainbass_set, boolean_validate, 0), /* unknown methods */ SNC_HANDLE(PID, snc_PID_get, NULL, NULL, 1), SNC_HANDLE(CTR, snc_CTR_get, snc_CTR_set, NULL, 1), SNC_HANDLE(PCR, snc_PCR_get, snc_PCR_set, NULL, 1), SNC_HANDLE(CMI, snc_CMI_get, snc_CMI_set, NULL, 1), SNC_HANDLE_NULL }; static acpi_handle sony_nc_acpi_handle; static struct acpi_device *sony_nc_acpi_device = NULL; /* * acpi_evaluate_object wrappers * all useful calls into SNC methods take one or zero parameters and return * integers or arrays. 
*/ static union acpi_object *__call_snc_method(acpi_handle handle, char *method, u64 *value) { union acpi_object *result = NULL; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_status status; if (value) { struct acpi_object_list params; union acpi_object in; in.type = ACPI_TYPE_INTEGER; in.integer.value = *value; params.count = 1; params.pointer = &in; status = acpi_evaluate_object(handle, method, &params, &output); dprintk("__call_snc_method: [%s:0x%.8x%.8x]\n", method, (unsigned int)(*value >> 32), (unsigned int)*value & 0xffffffff); } else { status = acpi_evaluate_object(handle, method, NULL, &output); dprintk("__call_snc_method: [%s]\n", method); } if (ACPI_FAILURE(status)) { pr_err("Failed to evaluate [%s]\n", method); return NULL; } result = (union acpi_object *) output.pointer; if (!result) dprintk("No return object [%s]\n", method); return result; } static int sony_nc_int_call(acpi_handle handle, char *name, int *value, int *result) { union acpi_object *object = NULL; if (value) { u64 v = *value; object = __call_snc_method(handle, name, &v); } else object = __call_snc_method(handle, name, NULL); if (!object) return -EINVAL; if (object->type != ACPI_TYPE_INTEGER) { pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n", ACPI_TYPE_INTEGER, object->type); kfree(object); return -EINVAL; } if (result) *result = object->integer.value; kfree(object); return 0; } #define MIN(a, b) (a > b ? 
b : a) static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value, void *buffer, size_t buflen) { int ret = 0; size_t len; union acpi_object *object = __call_snc_method(handle, name, value); if (!object) return -EINVAL; if (object->type == ACPI_TYPE_BUFFER) { len = MIN(buflen, object->buffer.length); memcpy(buffer, object->buffer.pointer, len); } else if (object->type == ACPI_TYPE_INTEGER) { len = MIN(buflen, sizeof(object->integer.value)); memcpy(buffer, &object->integer.value, len); } else { pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n", ACPI_TYPE_BUFFER, object->type); ret = -EINVAL; } kfree(object); return ret; } struct sony_nc_handles { u16 cap[0x10]; struct device_attribute devattr; }; static struct sony_nc_handles *handles; static ssize_t sony_nc_handles_show(struct device *dev, struct device_attribute *attr, char *buffer) { ssize_t len = 0; int i; for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { len += snprintf(buffer + len, PAGE_SIZE - len, "0x%.4x ", handles->cap[i]); } len += snprintf(buffer + len, PAGE_SIZE - len, "\n"); return len; } static int sony_nc_handles_setup(struct platform_device *pd) { int i, r, result, arg; handles = kzalloc(sizeof(*handles), GFP_KERNEL); if (!handles) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { arg = i + 0x20; r = sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &result); if (!r) { dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n", result, i); handles->cap[i] = result; } } if (debug) { sysfs_attr_init(&handles->devattr.attr); handles->devattr.attr.name = "handles"; handles->devattr.attr.mode = S_IRUGO; handles->devattr.show = sony_nc_handles_show; /* allow reading capabilities via sysfs */ if (device_create_file(&pd->dev, &handles->devattr)) { kfree(handles); handles = NULL; return -1; } } return 0; } static int sony_nc_handles_cleanup(struct platform_device *pd) { if (handles) { if (debug) device_remove_file(&pd->dev, &handles->devattr); kfree(handles); handles = NULL; } 
	return 0;
}

/*
 * sony_find_snc_handle - map a capability handle to its cache offset
 *
 * Returns the 0..15 offset at which @handle was cached by
 * sony_nc_handles_setup(), or -EINVAL when the handle is unknown or the
 * cache has not been initialized yet.
 */
static int sony_find_snc_handle(int handle)
{
	int i;

	/* not initialized yet, return early */
	if (!handles || !handle)
		return -EINVAL;

	for (i = 0; i < 0x10; i++) {
		if (handles->cap[i] == handle) {
			dprintk("found handle 0x%.4x (offset: 0x%.2x)\n",
					handle, i);
			return i;
		}
	}
	dprintk("handle 0x%.4x not found\n", handle);
	return -EINVAL;
}

/*
 * sony_call_snc_handle - invoke the SN07 dispatcher for a cached handle
 * @handle:   capability handle (e.g. 0x0100)
 * @argument: argument bits OR-ed on top of the handle's cache offset
 * @result:   output value written by SN07
 *
 * NOTE(review): the dprintk below reads *result even when SN07 failed and
 * may have left it unwritten; debug-builds only, but worth confirming.
 */
static int sony_call_snc_handle(int handle, int argument, int *result)
{
	int arg, ret = 0;
	int offset = sony_find_snc_handle(handle);

	if (offset < 0)
		return offset;

	arg = offset | argument;
	ret = sony_nc_int_call(sony_nc_acpi_handle, "SN07", &arg, result);
	dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", arg, *result);

	return ret;
}

/*
 * sony_nc_values input/output validate functions
 */

/* brightness_default_validate:
 *
 * manipulate input output values to keep consistency with the
 * backlight framework for which brightness values are 0-based.
 */
static int brightness_default_validate(const int direction, const int value)
{
	switch (direction) {
	case SNC_VALIDATE_OUT:
		/* firmware 1..8 -> sysfs 0..7 */
		return value - 1;
	case SNC_VALIDATE_IN:
		/* sysfs 0..7 -> firmware 1..8, anything else is rejected */
		if (value >= 0 && value < SONY_MAX_BRIGHTNESS)
			return value + 1;
	}
	return -EINVAL;
}

/* boolean_validate:
 *
 * on input validate boolean values 0/1, on output just pass the
 * received value.
 */
static int boolean_validate(const int direction, const int value)
{
	if (direction == SNC_VALIDATE_IN) {
		if (value != 0 && value != 1)
			return -EINVAL;
	}
	return value;
}

/*
 * Sysfs show/store common to all sony_nc_values
 */
static ssize_t sony_nc_sysfs_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	int value, ret = 0;
	struct sony_nc_value *item = container_of(attr, struct sony_nc_value,
			devattr);

	/* entries whose getter list starts with NULL are write-only */
	if (!*item->acpiget)
		return -EIO;

	ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiget, NULL,
			&value);
	if (ret < 0)
		return -EIO;

	/* translate the firmware value for userspace (e.g. 1-based -> 0-based) */
	if (item->validate)
		value = item->validate(SNC_VALIDATE_OUT, value);

	return snprintf(buffer, PAGE_SIZE, "%d\n", value);
}

static ssize_t sony_nc_sysfs_store(struct device *dev,
		struct device_attribute *attr, const char *buffer, size_t count)
{
	int value;
	int ret = 0;
	struct sony_nc_value *item = container_of(attr, struct sony_nc_value,
			devattr);

	if (!item->acpiset)
		return -EIO;

	if (count > 31)
		return -EINVAL;

	if (kstrtoint(buffer, 10, &value))
		return -EINVAL;

	/* validate may also translate the value; negative means rejected */
	if (item->validate)
		value = item->validate(SNC_VALIDATE_IN, value);
	if (value < 0)
		return value;

	ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset, &value,
			NULL);
	if (ret < 0)
		return -EIO;

	/* remember the last accepted value so it can be replayed on resume */
	item->value = value;
	item->valid = 1;
	return count;
}

/*
 * Backlight device
 */
struct sony_backlight_props {
	struct backlight_device *dev;
	int handle;		/* SNC capability handle driving this device */
	int cmd_base;
	u8 offset;		/* firmware brightness levels start at this offset */
	u8 maxlvl;
};
static struct sony_backlight_props sony_bl_props;

static int sony_backlight_update_status(struct backlight_device *bd)
{
	/* SBRT takes 1-based levels while the backlight core is 0-based */
	int arg = bd->props.brightness + 1;

	return sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &arg, NULL);
}

static int sony_backlight_get_brightness(struct backlight_device *bd)
{
	int value;

	if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, &value))
		return 0;
	/* brightness levels are 1-based, while backlight ones are 0-based */
	return value - 1;
}

static int sony_nc_get_brightness_ng(struct backlight_device *bd)
{
	int result;
	struct sony_backlight_props *sdev
= (struct sony_backlight_props *)bl_get_data(bd); sony_call_snc_handle(sdev->handle, sdev->cmd_base + 0x100, &result); return (result & 0xff) - sdev->offset; } static int sony_nc_update_status_ng(struct backlight_device *bd) { int value, result; struct sony_backlight_props *sdev = (struct sony_backlight_props *)bl_get_data(bd); value = bd->props.brightness + sdev->offset; if (sony_call_snc_handle(sdev->handle, sdev->cmd_base | (value << 0x10), &result)) return -EIO; return value; } static const struct backlight_ops sony_backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = sony_backlight_update_status, .get_brightness = sony_backlight_get_brightness, }; static const struct backlight_ops sony_backlight_ng_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = sony_nc_update_status_ng, .get_brightness = sony_nc_get_brightness_ng, }; /* * New SNC-only Vaios event mapping to driver known keys */ struct sony_nc_event { u8 data; u8 event; }; static struct sony_nc_event sony_100_events[] = { { 0x90, SONYPI_EVENT_PKEY_P1 }, { 0x10, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x91, SONYPI_EVENT_PKEY_P2 }, { 0x11, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x81, SONYPI_EVENT_FNKEY_F1 }, { 0x01, SONYPI_EVENT_FNKEY_RELEASED }, { 0x82, SONYPI_EVENT_FNKEY_F2 }, { 0x02, SONYPI_EVENT_FNKEY_RELEASED }, { 0x83, SONYPI_EVENT_FNKEY_F3 }, { 0x03, SONYPI_EVENT_FNKEY_RELEASED }, { 0x84, SONYPI_EVENT_FNKEY_F4 }, { 0x04, SONYPI_EVENT_FNKEY_RELEASED }, { 0x85, SONYPI_EVENT_FNKEY_F5 }, { 0x05, SONYPI_EVENT_FNKEY_RELEASED }, { 0x86, SONYPI_EVENT_FNKEY_F6 }, { 0x06, SONYPI_EVENT_FNKEY_RELEASED }, { 0x87, SONYPI_EVENT_FNKEY_F7 }, { 0x07, SONYPI_EVENT_FNKEY_RELEASED }, { 0x88, SONYPI_EVENT_FNKEY_F8 }, { 0x08, SONYPI_EVENT_FNKEY_RELEASED }, { 0x89, SONYPI_EVENT_FNKEY_F9 }, { 0x09, SONYPI_EVENT_FNKEY_RELEASED }, { 0x8A, SONYPI_EVENT_FNKEY_F10 }, { 0x0A, SONYPI_EVENT_FNKEY_RELEASED }, { 0x8B, SONYPI_EVENT_FNKEY_F11 }, { 0x0B, SONYPI_EVENT_FNKEY_RELEASED }, { 0x8C, SONYPI_EVENT_FNKEY_F12 }, { 
0x0C, SONYPI_EVENT_FNKEY_RELEASED }, { 0x9d, SONYPI_EVENT_ZOOM_PRESSED }, { 0x1d, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x9f, SONYPI_EVENT_CD_EJECT_PRESSED }, { 0x1f, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0xa1, SONYPI_EVENT_MEDIA_PRESSED }, { 0x21, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0xa4, SONYPI_EVENT_CD_EJECT_PRESSED }, { 0x24, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0xa5, SONYPI_EVENT_VENDOR_PRESSED }, { 0x25, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0xa6, SONYPI_EVENT_HELP_PRESSED }, { 0x26, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0xa8, SONYPI_EVENT_FNKEY_1 }, { 0x28, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0, 0 }, }; static struct sony_nc_event sony_127_events[] = { { 0x81, SONYPI_EVENT_MODEKEY_PRESSED }, { 0x01, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x82, SONYPI_EVENT_PKEY_P1 }, { 0x02, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x83, SONYPI_EVENT_PKEY_P2 }, { 0x03, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x84, SONYPI_EVENT_PKEY_P3 }, { 0x04, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x85, SONYPI_EVENT_PKEY_P4 }, { 0x05, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x86, SONYPI_EVENT_PKEY_P5 }, { 0x06, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x87, SONYPI_EVENT_SETTINGKEY_PRESSED }, { 0x07, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0, 0 }, }; static int sony_nc_hotkeys_decode(u32 event, unsigned int handle) { int ret = -EINVAL; unsigned int result = 0; struct sony_nc_event *key_event; if (sony_call_snc_handle(handle, 0x200, &result)) { dprintk("Unable to decode event 0x%.2x 0x%.2x\n", handle, event); return -EINVAL; } result &= 0xFF; if (handle == 0x0100) key_event = sony_100_events; else key_event = sony_127_events; for (; key_event->data; key_event++) { if (key_event->data == result) { ret = key_event->event; break; } } if (!key_event->data) pr_info("Unknown hotkey 0x%.2x/0x%.2x (handle 0x%.2x)\n", event, result, handle); return ret; } /* * ACPI callbacks */ enum event_types { HOTKEY = 1, KILLSWITCH, GFX_SWITCH }; static void sony_nc_notify(struct acpi_device *device, u32 event) { u32 real_ev = event; 
u8 ev_type = 0; dprintk("sony_nc_notify, event: 0x%.2x\n", event); if (event >= 0x90) { unsigned int result = 0; unsigned int arg = 0; unsigned int handle = 0; unsigned int offset = event - 0x90; if (offset >= ARRAY_SIZE(handles->cap)) { pr_err("Event 0x%x outside of capabilities list\n", event); return; } handle = handles->cap[offset]; /* list of handles known for generating events */ switch (handle) { /* hotkey event */ case 0x0100: case 0x0127: ev_type = HOTKEY; real_ev = sony_nc_hotkeys_decode(event, handle); if (real_ev > 0) sony_laptop_report_input_event(real_ev); else /* restore the original event for reporting */ real_ev = event; break; /* wlan switch */ case 0x0124: case 0x0135: /* events on this handle are reported when the * switch changes position or for battery * events. We'll notify both of them but only * update the rfkill device status when the * switch is moved. */ ev_type = KILLSWITCH; sony_call_snc_handle(handle, 0x0100, &result); real_ev = result & 0x03; /* hw switch event */ if (real_ev == 1) sony_nc_rfkill_update(); break; case 0x0128: case 0x0146: /* Hybrid GFX switching */ sony_call_snc_handle(handle, 0x0000, &result); dprintk("GFX switch event received (reason: %s)\n", (result == 0x1) ? "switch change" : (result == 0x2) ? "output switch" : (result == 0x3) ? 
"output switch" : ""); ev_type = GFX_SWITCH; real_ev = __sony_nc_gfx_switch_status_get(); break; case 0x015B: /* Hybrid GFX switching SVS151290S */ ev_type = GFX_SWITCH; real_ev = __sony_nc_gfx_switch_status_get(); break; default: dprintk("Unknown event 0x%x for handle 0x%x\n", event, handle); break; } /* clear the event (and the event reason when present) */ arg = 1 << offset; sony_nc_int_call(sony_nc_acpi_handle, "SN05", &arg, &result); } else { /* old style event */ ev_type = HOTKEY; sony_laptop_report_input_event(real_ev); } acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class, dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev); } static acpi_status sony_walk_callback(acpi_handle handle, u32 level, void *context, void **return_value) { struct acpi_device_info *info; if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) { pr_warn("method: name: %4.4s, args %X\n", (char *)&info->name, info->param_count); kfree(info); } return AE_OK; } /* * ACPI device */ static void sony_nc_function_setup(struct acpi_device *device, struct platform_device *pf_device) { unsigned int i, result, bitmask, arg; if (!handles) return; /* setup found handles here */ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { unsigned int handle = handles->cap[i]; if (!handle) continue; dprintk("setting up handle 0x%.4x\n", handle); switch (handle) { case 0x0100: case 0x0101: case 0x0127: /* setup hotkeys */ sony_call_snc_handle(handle, 0, &result); break; case 0x0102: /* setup hotkeys */ sony_call_snc_handle(handle, 0x100, &result); break; case 0x0105: case 0x0148: /* touchpad enable/disable */ result = sony_nc_touchpad_setup(pf_device, handle); if (result) pr_err("couldn't set up touchpad control function (%d)\n", result); break; case 0x0115: case 0x0136: case 0x013f: result = sony_nc_battery_care_setup(pf_device, handle); if (result) pr_err("couldn't set up battery care function (%d)\n", result); break; case 0x0119: case 0x015D: result = sony_nc_lid_resume_setup(pf_device, 
handle); if (result) pr_err("couldn't set up lid resume function (%d)\n", result); break; case 0x0122: result = sony_nc_thermal_setup(pf_device); if (result) pr_err("couldn't set up thermal profile function (%d)\n", result); break; case 0x0128: case 0x0146: case 0x015B: result = sony_nc_gfx_switch_setup(pf_device, handle); if (result) pr_err("couldn't set up GFX Switch status (%d)\n", result); break; case 0x0131: result = sony_nc_highspeed_charging_setup(pf_device); if (result) pr_err("couldn't set up high speed charging function (%d)\n", result); break; case 0x0124: case 0x0135: result = sony_nc_rfkill_setup(device, handle); if (result) pr_err("couldn't set up rfkill support (%d)\n", result); break; case 0x0137: case 0x0143: case 0x014b: case 0x014c: case 0x0163: result = sony_nc_kbd_backlight_setup(pf_device, handle); if (result) pr_err("couldn't set up keyboard backlight function (%d)\n", result); break; case 0x0121: result = sony_nc_lowbatt_setup(pf_device); if (result) pr_err("couldn't set up low battery function (%d)\n", result); break; case 0x0149: result = sony_nc_fanspeed_setup(pf_device); if (result) pr_err("couldn't set up fan speed function (%d)\n", result); break; case 0x0155: result = sony_nc_usb_charge_setup(pf_device); if (result) pr_err("couldn't set up USB charge support (%d)\n", result); break; case 0x011D: result = sony_nc_panelid_setup(pf_device); if (result) pr_err("couldn't set up panel ID function (%d)\n", result); break; case 0x0168: result = sony_nc_smart_conn_setup(pf_device); if (result) pr_err("couldn't set up smart connect support (%d)\n", result); break; default: continue; } } /* Enable all events */ arg = 0x10; if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask)) sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask, &result); } static void sony_nc_function_cleanup(struct platform_device *pd) { unsigned int i, result, bitmask, handle; /* get enabled events and disable them */ sony_nc_int_call(sony_nc_acpi_handle, 
"SN01", NULL, &bitmask); sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result); /* cleanup handles here */ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { handle = handles->cap[i]; if (!handle) continue; switch (handle) { case 0x0105: case 0x0148: sony_nc_touchpad_cleanup(pd); break; case 0x0115: case 0x0136: case 0x013f: sony_nc_battery_care_cleanup(pd); break; case 0x0119: case 0x015D: sony_nc_lid_resume_cleanup(pd); break; case 0x0122: sony_nc_thermal_cleanup(pd); break; case 0x0128: case 0x0146: case 0x015B: sony_nc_gfx_switch_cleanup(pd); break; case 0x0131: sony_nc_highspeed_charging_cleanup(pd); break; case 0x0124: case 0x0135: sony_nc_rfkill_cleanup(); break; case 0x0137: case 0x0143: case 0x014b: case 0x014c: case 0x0163: sony_nc_kbd_backlight_cleanup(pd, handle); break; case 0x0121: sony_nc_lowbatt_cleanup(pd); break; case 0x0149: sony_nc_fanspeed_cleanup(pd); break; case 0x0155: sony_nc_usb_charge_cleanup(pd); break; case 0x011D: sony_nc_panelid_cleanup(pd); break; case 0x0168: sony_nc_smart_conn_cleanup(pd); break; default: continue; } } /* finally cleanup the handles list */ sony_nc_handles_cleanup(pd); } #ifdef CONFIG_PM_SLEEP static void sony_nc_function_resume(void) { unsigned int i, result, bitmask, arg; dprintk("Resuming SNC device\n"); for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { unsigned int handle = handles->cap[i]; if (!handle) continue; switch (handle) { case 0x0100: case 0x0101: case 0x0127: /* re-enable hotkeys */ sony_call_snc_handle(handle, 0, &result); break; case 0x0102: /* re-enable hotkeys */ sony_call_snc_handle(handle, 0x100, &result); break; case 0x0122: sony_nc_thermal_resume(); break; case 0x0124: case 0x0135: sony_nc_rfkill_update(); break; default: continue; } } /* Enable all events */ arg = 0x10; if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask)) sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask, &result); } static int sony_nc_resume(struct device *dev) { struct sony_nc_value *item; for 
(item = sony_nc_values; item->name; item++) { int ret; if (!item->valid) continue; ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset, &item->value, NULL); if (ret < 0) { pr_err("%s: %d\n", __func__, ret); break; } } if (acpi_has_method(sony_nc_acpi_handle, "ECON")) { int arg = 1; if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL)) dprintk("ECON Method failed\n"); } if (acpi_has_method(sony_nc_acpi_handle, "SN00")) sony_nc_function_resume(); return 0; } #endif static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume); static void sony_nc_rfkill_cleanup(void) { int i; for (i = 0; i < N_SONY_RFKILL; i++) { if (sony_rfkill_devices[i]) { rfkill_unregister(sony_rfkill_devices[i]); rfkill_destroy(sony_rfkill_devices[i]); } } } static int sony_nc_rfkill_set(void *data, bool blocked) { int result; int argument = sony_rfkill_address[(long) data] + 0x100; if (!blocked) argument |= 0x070000; return sony_call_snc_handle(sony_rfkill_handle, argument, &result); } static const struct rfkill_ops sony_rfkill_ops = { .set_block = sony_nc_rfkill_set, }; static int sony_nc_setup_rfkill(struct acpi_device *device, enum sony_nc_rfkill nc_type) { int err = 0; struct rfkill *rfk; enum rfkill_type type; const char *name; int result; bool hwblock, swblock; switch (nc_type) { case SONY_WIFI: type = RFKILL_TYPE_WLAN; name = "sony-wifi"; break; case SONY_BLUETOOTH: type = RFKILL_TYPE_BLUETOOTH; name = "sony-bluetooth"; break; case SONY_WWAN: type = RFKILL_TYPE_WWAN; name = "sony-wwan"; break; case SONY_WIMAX: type = RFKILL_TYPE_WIMAX; name = "sony-wimax"; break; default: return -EINVAL; } rfk = rfkill_alloc(name, &device->dev, type, &sony_rfkill_ops, (void *)nc_type); if (!rfk) return -ENOMEM; if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) { rfkill_destroy(rfk); return -1; } hwblock = !(result & 0x1); if (sony_call_snc_handle(sony_rfkill_handle, sony_rfkill_address[nc_type], &result) < 0) { rfkill_destroy(rfk); return -1; } swblock = !(result & 0x2); 
rfkill_init_sw_state(rfk, swblock);
rfkill_set_hw_state(rfk, hwblock);

err = rfkill_register(rfk);
if (err) {
	rfkill_destroy(rfk);
	return err;
}
sony_rfkill_devices[nc_type] = rfk;
return err;
}

/* Refresh hard/soft block state of every registered rfkill switch. */
static void sony_nc_rfkill_update(void)
{
	enum sony_nc_rfkill i;
	int result;
	bool hwblock;

	sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
	hwblock = !(result & 0x1);

	for (i = 0; i < N_SONY_RFKILL; i++) {
		int argument = sony_rfkill_address[i];

		if (!sony_rfkill_devices[i])
			continue;

		/* when hardware-blocked there is no point querying the
		 * per-device soft state
		 */
		if (hwblock) {
			if (rfkill_set_hw_state(sony_rfkill_devices[i], true)) {
				/* we already know we're blocked */
			}
			continue;
		}

		sony_call_snc_handle(sony_rfkill_handle, argument, &result);
		rfkill_set_states(sony_rfkill_devices[i],
				!(result & 0x2), false);
	}
}

/* Enumerate the radio devices advertised by the SNC handle (via SN06)
 * and create one rfkill switch per recognized device type.
 */
static int sony_nc_rfkill_setup(struct acpi_device *device,
		unsigned int handle)
{
	u64 offset;
	int i;
	unsigned char buffer[32] = { 0 };

	offset = sony_find_snc_handle(handle);
	sony_rfkill_handle = handle;

	i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
			32);
	if (i < 0)
		return i;

	/* The buffer is filled with magic numbers describing the devices
	 * available, 0xff terminates the enumeration.
	 * Known codes:
	 * 0x00	WLAN
	 * 0x10	BLUETOOTH
	 * 0x20	WWAN GPRS-EDGE
	 * 0x21	WWAN HSDPA
	 * 0x22	WWAN EV-DO
	 * 0x23	WWAN GPS
	 * 0x25	Gobi WWAN no GPS
	 * 0x26	Gobi WWAN + GPS
	 * 0x28	Gobi WWAN no GPS
	 * 0x29	Gobi WWAN + GPS
	 * 0x30	WIMAX
	 * 0x50	Gobi WWAN no GPS
	 * 0x51	Gobi WWAN + GPS
	 * 0x70	no SIM card slot
	 * 0x71	SIM card slot
	 */
	for (i = 0; i < ARRAY_SIZE(buffer); i++) {
		/* 0xff marks the end of the list */
		if (buffer[i] == 0xff)
			break;

		dprintk("Radio devices, found 0x%.2x\n", buffer[i]);

		if (buffer[i] == 0 && !sony_rfkill_devices[SONY_WIFI])
			sony_nc_setup_rfkill(device, SONY_WIFI);

		if (buffer[i] == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
			sony_nc_setup_rfkill(device, SONY_BLUETOOTH);

		/* 0x2x and 0x5x families are all WWAN variants */
		if (((0xf0 & buffer[i]) == 0x20 ||
					(0xf0 & buffer[i]) == 0x50) &&
				!sony_rfkill_devices[SONY_WWAN])
			sony_nc_setup_rfkill(device, SONY_WWAN);

		if (buffer[i] == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
			sony_nc_setup_rfkill(device, SONY_WIMAX);
	}
	return 0;
}

/* Keyboard backlight feature */
struct kbd_backlight {
	unsigned int handle;	/* SNC handle implementing the feature */
	unsigned int base;	/* base command offset for this handle */
	unsigned int mode;	/* cached mode value, 0-2 */
	unsigned int timeout;	/* cached timeout selector, 0-3 */
	struct device_attribute mode_attr;
	struct device_attribute timeout_attr;
};
static struct kbd_backlight *kbdbl_ctl;

/* Write a new backlight mode to the EC and cache it. */
static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
{
	int result;

	if (value > 2)
		return -EINVAL;

	if (sony_call_snc_handle(kbdbl_ctl->handle,
				(value << 0x10) | (kbdbl_ctl->base), &result))
		return -EIO;

	/* Try to turn the light on/off immediately */
	if (value != 1)
		sony_call_snc_handle(kbdbl_ctl->handle,
				(value << 0x0f) | (kbdbl_ctl->base + 0x100),
				&result);

	kbdbl_ctl->mode = value;

	return 0;
}

static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	int ret = 0;
	unsigned long value;

	if (count > 31)
		return -EINVAL;

	if (kstrtoul(buffer, 10, &value))
		return -EINVAL;

	ret = __sony_nc_kbd_backlight_mode_set(value);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev,
		struct device_attribute *attr,
char *buffer)
{
	ssize_t count = 0;

	count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->mode);

	return count;
}

/* Write a new auto-off timeout selector to the EC and cache it. */
static int __sony_nc_kbd_backlight_timeout_set(u8 value)
{
	int result;

	if (value > 3)
		return -EINVAL;

	if (sony_call_snc_handle(kbdbl_ctl->handle, (value << 0x10) |
				(kbdbl_ctl->base + 0x200), &result))
		return -EIO;

	kbdbl_ctl->timeout = value;

	return 0;
}

static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	int ret = 0;
	unsigned long value;

	if (count > 31)
		return -EINVAL;

	if (kstrtoul(buffer, 10, &value))
		return -EINVAL;

	ret = __sony_nc_kbd_backlight_timeout_set(value);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	ssize_t count = 0;

	count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->timeout);

	return count;
}

/* Probe the keyboard backlight and create its two sysfs attributes.
 * Only one handle may own the feature at a time (kbdbl_ctl singleton).
 */
static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
				unsigned int handle)
{
	int result;
	int ret = 0;

	if (kbdbl_ctl) {
		pr_warn("handle 0x%.4x: keyboard backlight setup already done for 0x%.4x\n",
				handle, kbdbl_ctl->handle);
		return -EBUSY;
	}

	/* verify the kbd backlight presence, these handles are not used for
	 * keyboard backlight only
	 */
	ret = sony_call_snc_handle(handle, handle == 0x0137 ?
			0x0B00 : 0x0100, &result);
	if (ret)
		return ret;

	if ((handle == 0x0137 && !(result & 0x02)) ||
			!(result & 0x01)) {
		dprintk("no backlight keyboard found\n");
		return 0;
	}

	kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
	if (!kbdbl_ctl)
		return -ENOMEM;

	/* initial values come from the module parameters */
	kbdbl_ctl->mode = kbd_backlight;
	kbdbl_ctl->timeout = kbd_backlight_timeout;
	kbdbl_ctl->handle = handle;
	if (handle == 0x0137)
		kbdbl_ctl->base = 0x0C00;
	else
		kbdbl_ctl->base = 0x4000;

	sysfs_attr_init(&kbdbl_ctl->mode_attr.attr);
	kbdbl_ctl->mode_attr.attr.name = "kbd_backlight";
	kbdbl_ctl->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
	kbdbl_ctl->mode_attr.show = sony_nc_kbd_backlight_mode_show;
	kbdbl_ctl->mode_attr.store = sony_nc_kbd_backlight_mode_store;

	sysfs_attr_init(&kbdbl_ctl->timeout_attr.attr);
	kbdbl_ctl->timeout_attr.attr.name = "kbd_backlight_timeout";
	kbdbl_ctl->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
	kbdbl_ctl->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
	kbdbl_ctl->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;

	ret = device_create_file(&pd->dev, &kbdbl_ctl->mode_attr);
	if (ret)
		goto outkzalloc;

	ret = device_create_file(&pd->dev, &kbdbl_ctl->timeout_attr);
	if (ret)
		goto outmode;

	/* push the initial values to the hardware */
	__sony_nc_kbd_backlight_mode_set(kbdbl_ctl->mode);
	__sony_nc_kbd_backlight_timeout_set(kbdbl_ctl->timeout);

	return 0;

outmode:
	device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
outkzalloc:
	kfree(kbdbl_ctl);
	kbdbl_ctl = NULL;
	return ret;
}

static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd,
		unsigned int handle)
{
	/* only tear down if this handle owns the feature */
	if (kbdbl_ctl && handle == kbdbl_ctl->handle) {
		device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
		device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
		kfree(kbdbl_ctl);
		kbdbl_ctl = NULL;
	}
}

struct battery_care_control {
	struct device_attribute attrs[2];	/* [0] limiter, [1] health */
	unsigned int handle;
};
static struct battery_care_control *bcare_ctl;

static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned int
result, cmd;
unsigned long value;

if (count > 31)
	return -EINVAL;

if (kstrtoul(buffer, 10, &value))
	return -EINVAL;

/* limit values (2 bits):
 * 00 - none
 * 01 - 80%
 * 10 - 50%
 * 11 - 100%
 *
 * bit 0: 0 disable BCL, 1 enable BCL
 * bit 1: 1 tell to store the battery limit (see bits 6,7) too
 * bits 2,3: reserved
 * bits 4,5: store the limit into the EC
 * bits 6,7: store the limit into the battery
 */
cmd = 0;

if (value > 0) {
	/* round the requested percentage up to the next supported limit */
	if (value <= 50)
		cmd = 0x20;

	else if (value <= 80)
		cmd = 0x10;

	else if (value <= 100)
		cmd = 0x30;

	else
		return -EINVAL;

	/*
	 * handle 0x0115 should allow storing on battery too;
	 * handle 0x0136 same as 0x0115 + health status;
	 * handle 0x013f, same as 0x0136 but no storing on the battery
	 */
	if (bcare_ctl->handle != 0x013f)
		cmd = cmd | (cmd << 2);

	cmd = (cmd | 0x1) << 0x10;
}

if (sony_call_snc_handle(bcare_ctl->handle, cmd | 0x0100, &result))
	return -EIO;

return count;
}

static ssize_t sony_nc_battery_care_limit_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	unsigned int result, status;

	if (sony_call_snc_handle(bcare_ctl->handle, 0x0000, &result))
		return -EIO;

	/* when BCL is enabled (bit 0) bits 4,5 hold the limit code */
	status = (result & 0x01) ?
			((result & 0x30) >> 0x04) : 0;
	switch (status) {
	case 1:
		status = 80;
		break;
	case 2:
		status = 50;
		break;
	case 3:
		status = 100;
		break;
	default:
		status = 0;
		break;
	}

	return snprintf(buffer, PAGE_SIZE, "%d\n", status);
}

static ssize_t sony_nc_battery_care_health_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	ssize_t count = 0;
	unsigned int health;

	if (sony_call_snc_handle(bcare_ctl->handle, 0x0200, &health))
		return -EIO;

	count = snprintf(buffer, PAGE_SIZE, "%d\n", health & 0xff);

	return count;
}

/* Create the battery care sysfs attributes; health reporting is skipped
 * for handle 0x0115 which does not implement it.
 */
static int sony_nc_battery_care_setup(struct platform_device *pd,
		unsigned int handle)
{
	int ret = 0;

	bcare_ctl = kzalloc(sizeof(struct battery_care_control), GFP_KERNEL);
	if (!bcare_ctl)
		return -ENOMEM;

	bcare_ctl->handle = handle;

	sysfs_attr_init(&bcare_ctl->attrs[0].attr);
	bcare_ctl->attrs[0].attr.name = "battery_care_limiter";
	bcare_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
	bcare_ctl->attrs[0].show = sony_nc_battery_care_limit_show;
	bcare_ctl->attrs[0].store = sony_nc_battery_care_limit_store;

	ret = device_create_file(&pd->dev, &bcare_ctl->attrs[0]);
	if (ret)
		goto outkzalloc;

	/* 0x0115 is for models with no health reporting capability */
	if (handle == 0x0115)
		return 0;

	sysfs_attr_init(&bcare_ctl->attrs[1].attr);
	bcare_ctl->attrs[1].attr.name = "battery_care_health";
	bcare_ctl->attrs[1].attr.mode = S_IRUGO;
	bcare_ctl->attrs[1].show = sony_nc_battery_care_health_show;

	ret = device_create_file(&pd->dev, &bcare_ctl->attrs[1]);
	if (ret)
		goto outlimiter;

	return 0;

outlimiter:
	device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);

outkzalloc:
	kfree(bcare_ctl);
	bcare_ctl = NULL;

	return ret;
}

static void sony_nc_battery_care_cleanup(struct platform_device *pd)
{
	if (bcare_ctl) {
		device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
		if (bcare_ctl->handle != 0x0115)
			device_remove_file(&pd->dev, &bcare_ctl->attrs[1]);

		kfree(bcare_ctl);
		bcare_ctl = NULL;
	}
}

struct snc_thermal_ctrl {
	unsigned int mode;
	unsigned int profiles;
	struct device_attribute mode_attr;
struct device_attribute profiles_attr; }; static struct snc_thermal_ctrl *th_handle; #define THM_PROFILE_MAX 3 static const char * const snc_thermal_profiles[] = { "balanced", "silent", "performance" }; static int sony_nc_thermal_mode_set(unsigned short mode) { unsigned int result; /* the thermal profile seems to be a two bit bitmask: * lsb -> silent * msb -> performance * no bit set is the normal operation and is always valid * Some vaio models only have "balanced" and "performance" */ if ((mode && !(th_handle->profiles & mode)) || mode >= THM_PROFILE_MAX) return -EINVAL; if (sony_call_snc_handle(0x0122, mode << 0x10 | 0x0200, &result)) return -EIO; th_handle->mode = mode; return 0; } static int sony_nc_thermal_mode_get(void) { unsigned int result; if (sony_call_snc_handle(0x0122, 0x0100, &result)) return -EIO; return result & 0xff; } static ssize_t sony_nc_thermal_profiles_show(struct device *dev, struct device_attribute *attr, char *buffer) { short cnt; size_t idx = 0; for (cnt = 0; cnt < THM_PROFILE_MAX; cnt++) { if (!cnt || (th_handle->profiles & cnt)) idx += snprintf(buffer + idx, PAGE_SIZE - idx, "%s ", snc_thermal_profiles[cnt]); } idx += snprintf(buffer + idx, PAGE_SIZE - idx, "\n"); return idx; } static ssize_t sony_nc_thermal_mode_store(struct device *dev, struct device_attribute *attr, const char *buffer, size_t count) { unsigned short cmd; size_t len = count; if (count == 0) return -EINVAL; /* skip the newline if present */ if (buffer[len - 1] == '\n') len--; for (cmd = 0; cmd < THM_PROFILE_MAX; cmd++) if (strncmp(buffer, snc_thermal_profiles[cmd], len) == 0) break; if (sony_nc_thermal_mode_set(cmd)) return -EIO; return count; } static ssize_t sony_nc_thermal_mode_show(struct device *dev, struct device_attribute *attr, char *buffer) { ssize_t count = 0; int mode = sony_nc_thermal_mode_get(); if (mode < 0) return mode; count = snprintf(buffer, PAGE_SIZE, "%s\n", snc_thermal_profiles[mode]); return count; } static int sony_nc_thermal_setup(struct 
platform_device *pd) { int ret = 0; th_handle = kzalloc(sizeof(struct snc_thermal_ctrl), GFP_KERNEL); if (!th_handle) return -ENOMEM; ret = sony_call_snc_handle(0x0122, 0x0000, &th_handle->profiles); if (ret) { pr_warn("couldn't to read the thermal profiles\n"); goto outkzalloc; } ret = sony_nc_thermal_mode_get(); if (ret < 0) { pr_warn("couldn't to read the current thermal profile"); goto outkzalloc; } th_handle->mode = ret; sysfs_attr_init(&th_handle->profiles_attr.attr); th_handle->profiles_attr.attr.name = "thermal_profiles"; th_handle->profiles_attr.attr.mode = S_IRUGO; th_handle->profiles_attr.show = sony_nc_thermal_profiles_show; sysfs_attr_init(&th_handle->mode_attr.attr); th_handle->mode_attr.attr.name = "thermal_control"; th_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR; th_handle->mode_attr.show = sony_nc_thermal_mode_show; th_handle->mode_attr.store = sony_nc_thermal_mode_store; ret = device_create_file(&pd->dev, &th_handle->profiles_attr); if (ret) goto outkzalloc; ret = device_create_file(&pd->dev, &th_handle->mode_attr); if (ret) goto outprofiles; return 0; outprofiles: device_remove_file(&pd->dev, &th_handle->profiles_attr); outkzalloc: kfree(th_handle); th_handle = NULL; return ret; } static void sony_nc_thermal_cleanup(struct platform_device *pd) { if (th_handle) { device_remove_file(&pd->dev, &th_handle->profiles_attr); device_remove_file(&pd->dev, &th_handle->mode_attr); kfree(th_handle); th_handle = NULL; } } #ifdef CONFIG_PM_SLEEP static void sony_nc_thermal_resume(void) { unsigned int status = sony_nc_thermal_mode_get(); if (status != th_handle->mode) sony_nc_thermal_mode_set(th_handle->mode); } #endif /* resume on LID open */ #define LID_RESUME_S5 0 #define LID_RESUME_S4 1 #define LID_RESUME_S3 2 #define LID_RESUME_MAX 3 struct snc_lid_resume_control { struct device_attribute attrs[LID_RESUME_MAX]; unsigned int status; int handle; }; static struct snc_lid_resume_control *lid_ctl; static ssize_t sony_nc_lid_resume_store(struct device *dev, 
struct device_attribute *attr,
const char *buffer, size_t count)
{
	unsigned int result;
	unsigned long value;
	unsigned int pos = LID_RESUME_S5;

	if (count > 31)
		return -EINVAL;

	if (kstrtoul(buffer, 10, &value) || value > 1)
		return -EINVAL;

	/* the value we have to write to SNC is a bitmask:
	 * +--------------+
	 * | S3 | S4 | S5 |
	 * +--------------+
	 * 2 1 0
	 */

	/* identify which attribute (and thus which bit) was written */
	while (pos < LID_RESUME_MAX) {
		if (&lid_ctl->attrs[pos].attr == &attr->attr)
			break;
		pos++;
	}
	if (pos == LID_RESUME_MAX)
		return -EINVAL;

	if (value)
		value = lid_ctl->status | (1 << pos);
	else
		value = lid_ctl->status & ~(1 << pos);

	if (sony_call_snc_handle(lid_ctl->handle, value << 0x10 | 0x0100,
				&result))
		return -EIO;

	lid_ctl->status = value;

	return count;
}

static ssize_t sony_nc_lid_resume_show(struct device *dev,
		struct device_attribute *attr,
		char *buffer)
{
	unsigned int pos = LID_RESUME_S5;

	/* report the status bit matching the attribute being read */
	while (pos < LID_RESUME_MAX) {
		if (&lid_ctl->attrs[pos].attr == &attr->attr)
			return snprintf(buffer, PAGE_SIZE, "%d\n",
					(lid_ctl->status >> pos) & 0x01);
		pos++;
	}
	return -EINVAL;
}

/* Create the lid_resume_S* sysfs attributes; handle 0x0119 additionally
 * supports resume from S4 and S3.
 */
static int sony_nc_lid_resume_setup(struct platform_device *pd,
					unsigned int handle)
{
	unsigned int result;
	int i;

	if (sony_call_snc_handle(handle, 0x0000, &result))
		return -EIO;

	lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
	if (!lid_ctl)
		return -ENOMEM;

	lid_ctl->status = result & 0x7;
	lid_ctl->handle = handle;

	sysfs_attr_init(&lid_ctl->attrs[0].attr);
	lid_ctl->attrs[LID_RESUME_S5].attr.name = "lid_resume_S5";
	lid_ctl->attrs[LID_RESUME_S5].attr.mode = S_IRUGO | S_IWUSR;
	lid_ctl->attrs[LID_RESUME_S5].show = sony_nc_lid_resume_show;
	lid_ctl->attrs[LID_RESUME_S5].store = sony_nc_lid_resume_store;
	if (handle == 0x0119) {
		sysfs_attr_init(&lid_ctl->attrs[1].attr);
		lid_ctl->attrs[LID_RESUME_S4].attr.name = "lid_resume_S4";
		lid_ctl->attrs[LID_RESUME_S4].attr.mode = S_IRUGO | S_IWUSR;
		lid_ctl->attrs[LID_RESUME_S4].show = sony_nc_lid_resume_show;
		lid_ctl->attrs[LID_RESUME_S4].store = sony_nc_lid_resume_store;

		sysfs_attr_init(&lid_ctl->attrs[2].attr);
		lid_ctl->attrs[LID_RESUME_S3].attr.name = "lid_resume_S3";
		lid_ctl->attrs[LID_RESUME_S3].attr.mode = S_IRUGO | S_IWUSR;
		lid_ctl->attrs[LID_RESUME_S3].show = sony_nc_lid_resume_show;
		lid_ctl->attrs[LID_RESUME_S3].store = sony_nc_lid_resume_store;
	}
	/* attrs without a name were not initialized above */
	for (i = 0; i < LID_RESUME_MAX &&
			lid_ctl->attrs[i].attr.name; i++) {
		result = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
		if (result)
			goto liderror;
	}

	return 0;

liderror:
	for (i--; i >= 0; i--)
		device_remove_file(&pd->dev, &lid_ctl->attrs[i]);

	kfree(lid_ctl);
	lid_ctl = NULL;

	return result;
}

static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
{
	int i;

	if (lid_ctl) {
		for (i = 0; i < LID_RESUME_MAX; i++) {
			if (!lid_ctl->attrs[i].attr.name)
				break;

			device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
		}

		kfree(lid_ctl);
		lid_ctl = NULL;
	}
}

/* GFX Switch position */
enum gfx_switch {
	SPEED,
	STAMINA,
	AUTO
};
struct snc_gfx_switch_control {
	struct device_attribute attr;
	unsigned int handle;
};
static struct snc_gfx_switch_control *gfxs_ctl;

/* returns 0 for speed, 1 for stamina */
static int __sony_nc_gfx_switch_status_get(void)
{
	unsigned int result;

	if (sony_call_snc_handle(gfxs_ctl->handle,
				gfxs_ctl->handle == 0x015B ? 0x0000 : 0x0100,
				&result))
		return -EIO;

	switch (gfxs_ctl->handle) {
	case 0x0146:
		/* 1: discrete GFX (speed)
		 * 0: integrated GFX (stamina)
		 */
		return result & 0x1 ? SPEED : STAMINA;
		break;
	case 0x015B:
		/* 0: discrete GFX (speed)
		 * 1: integrated GFX (stamina)
		 */
		return result & 0x1 ? STAMINA : SPEED;
		break;
	case 0x0128:
		/* it's a more elaborated bitmask, for now:
		 * 2: integrated GFX (stamina)
		 * 0: discrete GFX (speed)
		 */
		dprintk("GFX Status: 0x%x\n", result);
		return result & 0x80 ? AUTO :
			result & 0x02 ?
STAMINA : SPEED;
		break;
	}
	return -EINVAL;
}

static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	int pos = __sony_nc_gfx_switch_status_get();

	if (pos < 0)
		return pos;

	return snprintf(buffer, PAGE_SIZE, "%s\n",
			pos == SPEED ? "speed" :
			pos == STAMINA ? "stamina" :
			pos == AUTO ? "auto" : "unknown");
}

/* Create the read-only gfx_switch_status sysfs attribute. */
static int sony_nc_gfx_switch_setup(struct platform_device *pd,
		unsigned int handle)
{
	unsigned int result;

	gfxs_ctl = kzalloc(sizeof(struct snc_gfx_switch_control), GFP_KERNEL);
	if (!gfxs_ctl)
		return -ENOMEM;

	gfxs_ctl->handle = handle;

	sysfs_attr_init(&gfxs_ctl->attr.attr);
	gfxs_ctl->attr.attr.name = "gfx_switch_status";
	gfxs_ctl->attr.attr.mode = S_IRUGO;
	gfxs_ctl->attr.show = sony_nc_gfx_switch_status_show;

	result = device_create_file(&pd->dev, &gfxs_ctl->attr);
	if (result)
		goto gfxerror;

	return 0;

gfxerror:
	kfree(gfxs_ctl);
	gfxs_ctl = NULL;
	return result;
}

static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
{
	if (gfxs_ctl) {
		device_remove_file(&pd->dev, &gfxs_ctl->attr);
		kfree(gfxs_ctl);
		gfxs_ctl = NULL;
	}
}

/* High speed charging function */
static struct device_attribute *hsc_handle;

static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned int result;
	unsigned long value;

	if (count > 31)
		return -EINVAL;

	if (kstrtoul(buffer, 10, &value) || value > 1)
		return -EINVAL;

	if (sony_call_snc_handle(0x0131, value << 0x10 | 0x0200, &result))
		return -EIO;

	return count;
}

static ssize_t sony_nc_highspeed_charging_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	unsigned int result;

	if (sony_call_snc_handle(0x0131, 0x0100, &result))
		return -EIO;

	return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
}

/* Probe for high speed charging support and create its attribute. */
static int sony_nc_highspeed_charging_setup(struct platform_device *pd)
{
	unsigned int result;

	if (sony_call_snc_handle(0x0131, 0x0000, &result) || !(result & 0x01)) {
		/* some models advertise the handle but have no implementation
		 * for it
		 */
		pr_info("No High Speed Charging capability found\n");
		return 0;
	}

	hsc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
	if (!hsc_handle)
		return -ENOMEM;

	sysfs_attr_init(&hsc_handle->attr);
	hsc_handle->attr.name = "battery_highspeed_charging";
	hsc_handle->attr.mode = S_IRUGO | S_IWUSR;
	hsc_handle->show = sony_nc_highspeed_charging_show;
	hsc_handle->store = sony_nc_highspeed_charging_store;

	result = device_create_file(&pd->dev, hsc_handle);
	if (result) {
		kfree(hsc_handle);
		hsc_handle = NULL;
		return result;
	}

	return 0;
}

static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
{
	if (hsc_handle) {
		device_remove_file(&pd->dev, hsc_handle);
		kfree(hsc_handle);
		hsc_handle = NULL;
	}
}

/* low battery function */
static struct device_attribute *lowbatt_handle;

static ssize_t sony_nc_lowbatt_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned int result;
	unsigned long value;

	if (count > 31)
		return -EINVAL;

	if (kstrtoul(buffer, 10, &value) || value > 1)
		return -EINVAL;

	if (sony_call_snc_handle(0x0121, value << 8, &result))
		return -EIO;

	return count;
}

static ssize_t sony_nc_lowbatt_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	unsigned int result;

	if (sony_call_snc_handle(0x0121, 0x0200, &result))
		return -EIO;

	return snprintf(buffer, PAGE_SIZE, "%d\n", result & 1);
}

/* Create the lowbatt_hibernate sysfs attribute. */
static int sony_nc_lowbatt_setup(struct platform_device *pd)
{
	unsigned int result;

	lowbatt_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
	if (!lowbatt_handle)
		return -ENOMEM;

	sysfs_attr_init(&lowbatt_handle->attr);
	lowbatt_handle->attr.name = "lowbatt_hibernate";
	lowbatt_handle->attr.mode = S_IRUGO | S_IWUSR;
	lowbatt_handle->show = sony_nc_lowbatt_show;
	lowbatt_handle->store = sony_nc_lowbatt_store;

	result = device_create_file(&pd->dev, lowbatt_handle);
	if (result) {
		kfree(lowbatt_handle);
		lowbatt_handle = NULL;
		return result;
	}

	return 0;
}
static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
{
	if (lowbatt_handle) {
		device_remove_file(&pd->dev, lowbatt_handle);
		kfree(lowbatt_handle);
		lowbatt_handle = NULL;
	}
}

/* fan speed function */
static struct device_attribute *fan_handle, *hsf_handle;

static ssize_t sony_nc_hsfan_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned int result;
	unsigned long value;

	if (count > 31)
		return -EINVAL;

	if (kstrtoul(buffer, 10, &value) || value > 1)
		return -EINVAL;

	if (sony_call_snc_handle(0x0149, value << 0x10 | 0x0200, &result))
		return -EIO;

	return count;
}

static ssize_t sony_nc_hsfan_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	unsigned int result;

	if (sony_call_snc_handle(0x0149, 0x0100, &result))
		return -EIO;

	return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
}

static ssize_t sony_nc_fanspeed_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	unsigned int result;

	if (sony_call_snc_handle(0x0149, 0x0300, &result))
		return -EIO;

	return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0xff);
}

/* Create the read-only fanspeed and read-write fan_forced attributes. */
static int sony_nc_fanspeed_setup(struct platform_device *pd)
{
	unsigned int result;

	fan_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
	if (!fan_handle)
		return -ENOMEM;

	hsf_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
	if (!hsf_handle) {
		result = -ENOMEM;
		goto out_hsf_handle_alloc;
	}

	sysfs_attr_init(&fan_handle->attr);
	fan_handle->attr.name = "fanspeed";
	fan_handle->attr.mode = S_IRUGO;
	fan_handle->show = sony_nc_fanspeed_show;
	fan_handle->store = NULL;

	sysfs_attr_init(&hsf_handle->attr);
	hsf_handle->attr.name = "fan_forced";
	hsf_handle->attr.mode = S_IRUGO | S_IWUSR;
	hsf_handle->show = sony_nc_hsfan_show;
	hsf_handle->store = sony_nc_hsfan_store;

	result = device_create_file(&pd->dev, fan_handle);
	if (result)
		goto out_fan_handle;

	result = device_create_file(&pd->dev, hsf_handle);
	if (result)
		goto out_hsf_handle;

	return 0;

out_hsf_handle:
	device_remove_file(&pd->dev, fan_handle);

out_fan_handle:
	kfree(hsf_handle);
	hsf_handle = NULL;

out_hsf_handle_alloc:
	kfree(fan_handle);
	fan_handle = NULL;
	return result;
}

static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
{
	if (fan_handle) {
		device_remove_file(&pd->dev, fan_handle);
		kfree(fan_handle);
		fan_handle = NULL;
	}
	if (hsf_handle) {
		device_remove_file(&pd->dev, hsf_handle);
		kfree(hsf_handle);
		hsf_handle = NULL;
	}
}

/* USB charge function */
static struct device_attribute *uc_handle;

static ssize_t sony_nc_usb_charge_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned int result;
	unsigned long value;

	if (count > 31)
		return -EINVAL;

	if (kstrtoul(buffer, 10, &value) || value > 1)
		return -EINVAL;

	if (sony_call_snc_handle(0x0155, value << 0x10 | 0x0100, &result))
		return -EIO;

	return count;
}

static ssize_t sony_nc_usb_charge_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	unsigned int result;

	if (sony_call_snc_handle(0x0155, 0x0000, &result))
		return -EIO;

	return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
}

/* Probe for USB charge support and create its attribute. */
static int sony_nc_usb_charge_setup(struct platform_device *pd)
{
	unsigned int result;

	if (sony_call_snc_handle(0x0155, 0x0000, &result) || !(result & 0x01)) {
		/* some models advertise the handle but have no implementation
		 * for it
		 */
		pr_info("No USB Charge capability found\n");
		return 0;
	}

	uc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
	if (!uc_handle)
		return -ENOMEM;

	sysfs_attr_init(&uc_handle->attr);
	uc_handle->attr.name = "usb_charge";
	uc_handle->attr.mode = S_IRUGO | S_IWUSR;
	uc_handle->show = sony_nc_usb_charge_show;
	uc_handle->store = sony_nc_usb_charge_store;

	result = device_create_file(&pd->dev, uc_handle);
	if (result) {
		kfree(uc_handle);
		uc_handle = NULL;
		return result;
	}

	return 0;
}

static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
{
	if (uc_handle) {
		device_remove_file(&pd->dev, uc_handle);
		kfree(uc_handle);
		uc_handle
= NULL;
	}
}

/* Panel ID function */
static struct device_attribute *panel_handle;

static ssize_t sony_nc_panelid_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	unsigned int result;

	if (sony_call_snc_handle(0x011D, 0x0000, &result))
		return -EIO;

	return snprintf(buffer, PAGE_SIZE, "%d\n", result);
}

/* Create the read-only panel_id sysfs attribute. */
static int sony_nc_panelid_setup(struct platform_device *pd)
{
	unsigned int result;

	panel_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
	if (!panel_handle)
		return -ENOMEM;

	sysfs_attr_init(&panel_handle->attr);
	panel_handle->attr.name = "panel_id";
	panel_handle->attr.mode = S_IRUGO;
	panel_handle->show = sony_nc_panelid_show;
	panel_handle->store = NULL;

	result = device_create_file(&pd->dev, panel_handle);
	if (result) {
		kfree(panel_handle);
		panel_handle = NULL;
		return result;
	}

	return 0;
}

static void sony_nc_panelid_cleanup(struct platform_device *pd)
{
	if (panel_handle) {
		device_remove_file(&pd->dev, panel_handle);
		kfree(panel_handle);
		panel_handle = NULL;
	}
}

/* smart connect function */
static struct device_attribute *sc_handle;

static ssize_t sony_nc_smart_conn_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned int result;
	unsigned long value;

	if (count > 31)
		return -EINVAL;

	if (kstrtoul(buffer, 10, &value) || value > 1)
		return -EINVAL;

	if (sony_call_snc_handle(0x0168, value << 0x10, &result))
		return -EIO;

	return count;
}

/* Create the write-only smart_connect sysfs attribute. */
static int sony_nc_smart_conn_setup(struct platform_device *pd)
{
	unsigned int result;

	sc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
	if (!sc_handle)
		return -ENOMEM;

	sysfs_attr_init(&sc_handle->attr);
	sc_handle->attr.name = "smart_connect";
	sc_handle->attr.mode = S_IWUSR;
	sc_handle->show = NULL;
	sc_handle->store = sony_nc_smart_conn_store;

	result = device_create_file(&pd->dev, sc_handle);
	if (result) {
		kfree(sc_handle);
		sc_handle = NULL;
		return result;
	}

	return 0;
}

static void sony_nc_smart_conn_cleanup(struct platform_device *pd)
{
	if (sc_handle) {
		device_remove_file(&pd->dev, sc_handle);
		kfree(sc_handle);
		sc_handle = NULL;
	}
}

/* Touchpad enable/disable */
struct touchpad_control {
	struct device_attribute attr;
	int handle;
};
static struct touchpad_control *tp_ctl;

static ssize_t sony_nc_touchpad_store(struct device *dev,
		struct device_attribute *attr, const char *buffer,
		size_t count)
{
	unsigned int result;
	unsigned long value;

	if (count > 31)
		return -EINVAL;

	if (kstrtoul(buffer, 10, &value) || value > 1)
		return -EINVAL;

	/* sysfs: 0 disabled, 1 enabled
	 * EC: 0 enabled, 1 disabled
	 */
	if (sony_call_snc_handle(tp_ctl->handle,
				(!value << 0x10) | 0x100, &result))
		return -EIO;

	return count;
}

static ssize_t sony_nc_touchpad_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	unsigned int result;

	if (sony_call_snc_handle(tp_ctl->handle, 0x000, &result))
		return -EINVAL;

	/* EC reports the state inverted with respect to sysfs */
	return snprintf(buffer, PAGE_SIZE, "%d\n", !(result & 0x01));
}

/* Create the touchpad sysfs attribute bound to the given handle. */
static int sony_nc_touchpad_setup(struct platform_device *pd,
				  unsigned int handle)
{
	int ret = 0;

	tp_ctl = kzalloc(sizeof(struct touchpad_control), GFP_KERNEL);
	if (!tp_ctl)
		return -ENOMEM;

	tp_ctl->handle = handle;

	sysfs_attr_init(&tp_ctl->attr.attr);
	tp_ctl->attr.attr.name = "touchpad";
	tp_ctl->attr.attr.mode = S_IRUGO | S_IWUSR;
	tp_ctl->attr.show = sony_nc_touchpad_show;
	tp_ctl->attr.store = sony_nc_touchpad_store;

	ret = device_create_file(&pd->dev, &tp_ctl->attr);
	if (ret) {
		kfree(tp_ctl);
		tp_ctl = NULL;
	}

	return ret;
}

static void sony_nc_touchpad_cleanup(struct platform_device *pd)
{
	if (tp_ctl) {
		device_remove_file(&pd->dev, &tp_ctl->attr);
		kfree(tp_ctl);
		tp_ctl = NULL;
	}
}

/* Read the min/max brightness levels for a "new generation" backlight
 * handle into props, falling back to 0/0xff defaults.
 */
static void sony_nc_backlight_ng_read_limits(int handle,
		struct sony_backlight_props *props)
{
	u64 offset;
	int i;
	int lvl_table_len = 0;
	u8 min = 0xff, max = 0x00;
	unsigned char buffer[32] = { 0 };

	props->handle = handle;
	props->offset = 0;
	props->maxlvl = 0xff;

	offset = sony_find_snc_handle(handle);

	/* try to read the boundaries from ACPI tables, if we fail the above
	 *
defaults should be reasonable */
i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer, 32);
if (i < 0)
	return;

/* the per-handle level table length is known in advance */
switch (handle) {
case 0x012f:
case 0x0137:
	lvl_table_len = 9;
	break;
case 0x143:
case 0x14b:
case 0x14c:
	lvl_table_len = 16;
	break;
}

/* the buffer lists brightness levels available, brightness levels are
 * from position 0 to 8 in the array, other values are used by ALS
 * control.
 */
for (i = 0; i < lvl_table_len && i < ARRAY_SIZE(buffer); i++) {

	dprintk("Brightness level: %d\n", buffer[i]);

	if (!buffer[i])
		break;

	if (buffer[i] > max)
		max = buffer[i];
	if (buffer[i] < min)
		min = buffer[i];
}
props->offset = min;
props->maxlvl = max;
dprintk("Brightness levels: min=%d max=%d\n", props->offset,
		props->maxlvl);
}

/* Pick the backlight control method: one of the new-generation SNC
 * handles if present, else the legacy GBRT ACPI method, and register
 * the backlight device.
 */
static void sony_nc_backlight_setup(void)
{
	int max_brightness = 0;
	const struct backlight_ops *ops = NULL;
	struct backlight_properties props;

	if (sony_find_snc_handle(0x12f) >= 0) {
		ops = &sony_backlight_ng_ops;
		sony_bl_props.cmd_base = 0x0100;
		sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
		max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;

	} else if (sony_find_snc_handle(0x137) >= 0) {
		ops = &sony_backlight_ng_ops;
		sony_bl_props.cmd_base = 0x0100;
		sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
		max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;

	} else if (sony_find_snc_handle(0x143) >= 0) {
		ops = &sony_backlight_ng_ops;
		sony_bl_props.cmd_base = 0x3000;
		sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
		max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;

	} else if (sony_find_snc_handle(0x14b) >= 0) {
		ops = &sony_backlight_ng_ops;
		sony_bl_props.cmd_base = 0x3000;
		sony_nc_backlight_ng_read_limits(0x14b, &sony_bl_props);
		max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;

	} else if (sony_find_snc_handle(0x14c) >= 0) {
		ops = &sony_backlight_ng_ops;
		sony_bl_props.cmd_base = 0x3000;
		sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props);
		max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;

	} else if (acpi_has_method(sony_nc_acpi_handle, "GBRT")) {
		ops = &sony_backlight_ops;
		max_brightness = SONY_MAX_BRIGHTNESS - 1;

	} else
		return;

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_PLATFORM;
	props.max_brightness = max_brightness;
	sony_bl_props.dev = backlight_device_register("sony", NULL,
			&sony_bl_props, ops, &props);

	if (IS_ERR(sony_bl_props.dev)) {
		pr_warn("unable to register backlight device\n");
		sony_bl_props.dev = NULL;
	} else
		sony_bl_props.dev->props.brightness =
			ops->get_brightness(sony_bl_props.dev);
}

static void sony_nc_backlight_cleanup(void)
{
	backlight_device_unregister(sony_bl_props.dev);
}

/* ACPI add callback: probe the device, register the platform device,
 * input devices, SNC handles and backlight.
 */
static int sony_nc_add(struct acpi_device *device)
{
	acpi_status status;
	int result = 0;
	struct sony_nc_value *item;

	sony_nc_acpi_device = device;
	strcpy(acpi_device_class(device), "sony/hotkey");

	sony_nc_acpi_handle = device->handle;

	/* read device status */
	result = acpi_bus_get_status(device);
	/* bail IFF the above call was successful and the device is not
	 * present
	 */
	if (!result && !device->status.present) {
		dprintk("Device not present\n");
		result = -ENODEV;
		goto outwalk;
	}

	result = sony_pf_add();
	if (result)
		goto outpresent;

	if (debug) {
		status = acpi_walk_namespace(ACPI_TYPE_METHOD,
				sony_nc_acpi_handle, 1, sony_walk_callback,
				NULL, NULL, NULL);
		if (ACPI_FAILURE(status)) {
			pr_warn("unable to walk acpi resources\n");
			result = -ENODEV;
			goto outpresent;
		}
	}

	result = sony_laptop_setup_input(device);
	if (result) {
		pr_err("Unable to create input devices\n");
		goto outplatform;
	}

	if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
		int arg = 1;
		if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
			dprintk("ECON Method failed\n");
	}

	if (acpi_has_method(sony_nc_acpi_handle, "SN00")) {
		dprintk("Doing SNC setup\n");
		/* retrieve the available handles */
		result = sony_nc_handles_setup(sony_pf_device);
		if (!result)
			sony_nc_function_setup(device, sony_pf_device);
	}

	/* setup input
devices and helper fifo */ if (acpi_video_backlight_support()) { pr_info("brightness ignored, must be controlled by ACPI video driver\n"); } else { sony_nc_backlight_setup(); } /* create sony_pf sysfs attributes related to the SNC device */ for (item = sony_nc_values; item->name; ++item) { if (!debug && item->debug) continue; /* find the available acpiget as described in the DSDT */ for (; item->acpiget && *item->acpiget; ++item->acpiget) { if (acpi_has_method(sony_nc_acpi_handle, *item->acpiget)) { dprintk("Found %s getter: %s\n", item->name, *item->acpiget); item->devattr.attr.mode |= S_IRUGO; break; } } /* find the available acpiset as described in the DSDT */ for (; item->acpiset && *item->acpiset; ++item->acpiset) { if (acpi_has_method(sony_nc_acpi_handle, *item->acpiset)) { dprintk("Found %s setter: %s\n", item->name, *item->acpiset); item->devattr.attr.mode |= S_IWUSR; break; } } if (item->devattr.attr.mode != 0) { result = device_create_file(&sony_pf_device->dev, &item->devattr); if (result) goto out_sysfs; } } pr_info("SNC setup done.\n"); return 0; out_sysfs: for (item = sony_nc_values; item->name; ++item) { device_remove_file(&sony_pf_device->dev, &item->devattr); } sony_nc_backlight_cleanup(); sony_nc_function_cleanup(sony_pf_device); sony_nc_handles_cleanup(sony_pf_device); outplatform: sony_laptop_remove_input(); outpresent: sony_pf_remove(); outwalk: sony_nc_rfkill_cleanup(); return result; } static int sony_nc_remove(struct acpi_device *device) { struct sony_nc_value *item; sony_nc_backlight_cleanup(); sony_nc_acpi_device = NULL; for (item = sony_nc_values; item->name; ++item) { device_remove_file(&sony_pf_device->dev, &item->devattr); } sony_nc_function_cleanup(sony_pf_device); sony_nc_handles_cleanup(sony_pf_device); sony_pf_remove(); sony_laptop_remove_input(); dprintk(SONY_NC_DRIVER_NAME " removed.\n"); return 0; } static const struct acpi_device_id sony_device_ids[] = { {SONY_NC_HID, 0}, {SONY_PIC_HID, 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, 
sony_device_ids); static const struct acpi_device_id sony_nc_device_ids[] = { {SONY_NC_HID, 0}, {"", 0}, }; static struct acpi_driver sony_nc_driver = { .name = SONY_NC_DRIVER_NAME, .class = SONY_NC_CLASS, .ids = sony_nc_device_ids, .owner = THIS_MODULE, .ops = { .add = sony_nc_add, .remove = sony_nc_remove, .notify = sony_nc_notify, }, .drv.pm = &sony_nc_pm, }; /*********** SPIC (SNY6001) Device ***********/ #define SONYPI_DEVICE_TYPE1 0x00000001 #define SONYPI_DEVICE_TYPE2 0x00000002 #define SONYPI_DEVICE_TYPE3 0x00000004 #define SONYPI_TYPE1_OFFSET 0x04 #define SONYPI_TYPE2_OFFSET 0x12 #define SONYPI_TYPE3_OFFSET 0x12 struct sony_pic_ioport { struct acpi_resource_io io1; struct acpi_resource_io io2; struct list_head list; }; struct sony_pic_irq { struct acpi_resource_irq irq; struct list_head list; }; struct sonypi_eventtypes { u8 data; unsigned long mask; struct sonypi_event *events; }; struct sony_pic_dev { struct acpi_device *acpi_dev; struct sony_pic_irq *cur_irq; struct sony_pic_ioport *cur_ioport; struct list_head interrupts; struct list_head ioports; struct mutex lock; struct sonypi_eventtypes *event_types; int (*handle_irq)(const u8, const u8); int model; u16 evport_offset; u8 camera_power; u8 bluetooth_power; u8 wwan_power; }; static struct sony_pic_dev spic_dev = { .interrupts = LIST_HEAD_INIT(spic_dev.interrupts), .ioports = LIST_HEAD_INIT(spic_dev.ioports), }; static int spic_drv_registered; /* Event masks */ #define SONYPI_JOGGER_MASK 0x00000001 #define SONYPI_CAPTURE_MASK 0x00000002 #define SONYPI_FNKEY_MASK 0x00000004 #define SONYPI_BLUETOOTH_MASK 0x00000008 #define SONYPI_PKEY_MASK 0x00000010 #define SONYPI_BACK_MASK 0x00000020 #define SONYPI_HELP_MASK 0x00000040 #define SONYPI_LID_MASK 0x00000080 #define SONYPI_ZOOM_MASK 0x00000100 #define SONYPI_THUMBPHRASE_MASK 0x00000200 #define SONYPI_MEYE_MASK 0x00000400 #define SONYPI_MEMORYSTICK_MASK 0x00000800 #define SONYPI_BATTERY_MASK 0x00001000 #define SONYPI_WIRELESS_MASK 0x00002000 struct 
sonypi_event { u8 data; u8 event; }; /* The set of possible button release events */ static struct sonypi_event sonypi_releaseev[] = { { 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0, 0 } }; /* The set of possible jogger events */ static struct sonypi_event sonypi_joggerev[] = { { 0x1f, SONYPI_EVENT_JOGDIAL_UP }, { 0x01, SONYPI_EVENT_JOGDIAL_DOWN }, { 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED }, { 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED }, { 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP }, { 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN }, { 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED }, { 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED }, { 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP }, { 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN }, { 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED }, { 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED }, { 0x40, SONYPI_EVENT_JOGDIAL_PRESSED }, { 0, 0 } }; /* The set of possible capture button events */ static struct sonypi_event sonypi_captureev[] = { { 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED }, { 0x07, SONYPI_EVENT_CAPTURE_PRESSED }, { 0x40, SONYPI_EVENT_CAPTURE_PRESSED }, { 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED }, { 0, 0 } }; /* The set of possible fnkeys events */ static struct sonypi_event sonypi_fnkeyev[] = { { 0x10, SONYPI_EVENT_FNKEY_ESC }, { 0x11, SONYPI_EVENT_FNKEY_F1 }, { 0x12, SONYPI_EVENT_FNKEY_F2 }, { 0x13, SONYPI_EVENT_FNKEY_F3 }, { 0x14, SONYPI_EVENT_FNKEY_F4 }, { 0x15, SONYPI_EVENT_FNKEY_F5 }, { 0x16, SONYPI_EVENT_FNKEY_F6 }, { 0x17, SONYPI_EVENT_FNKEY_F7 }, { 0x18, SONYPI_EVENT_FNKEY_F8 }, { 0x19, SONYPI_EVENT_FNKEY_F9 }, { 0x1a, SONYPI_EVENT_FNKEY_F10 }, { 0x1b, SONYPI_EVENT_FNKEY_F11 }, { 0x1c, SONYPI_EVENT_FNKEY_F12 }, { 0x1f, SONYPI_EVENT_FNKEY_RELEASED }, { 0x21, SONYPI_EVENT_FNKEY_1 }, { 0x22, SONYPI_EVENT_FNKEY_2 }, { 0x31, SONYPI_EVENT_FNKEY_D }, { 0x32, SONYPI_EVENT_FNKEY_E }, { 0x33, SONYPI_EVENT_FNKEY_F }, { 0x34, SONYPI_EVENT_FNKEY_S }, { 0x35, SONYPI_EVENT_FNKEY_B }, { 0x36, SONYPI_EVENT_FNKEY_ONLY }, { 0, 0 } }; /* The set of possible 
program key events */ static struct sonypi_event sonypi_pkeyev[] = { { 0x01, SONYPI_EVENT_PKEY_P1 }, { 0x02, SONYPI_EVENT_PKEY_P2 }, { 0x04, SONYPI_EVENT_PKEY_P3 }, { 0x20, SONYPI_EVENT_PKEY_P1 }, { 0, 0 } }; /* The set of possible bluetooth events */ static struct sonypi_event sonypi_blueev[] = { { 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED }, { 0x59, SONYPI_EVENT_BLUETOOTH_ON }, { 0x5a, SONYPI_EVENT_BLUETOOTH_OFF }, { 0, 0 } }; /* The set of possible wireless events */ static struct sonypi_event sonypi_wlessev[] = { { 0x59, SONYPI_EVENT_IGNORE }, { 0x5a, SONYPI_EVENT_IGNORE }, { 0, 0 } }; /* The set of possible back button events */ static struct sonypi_event sonypi_backev[] = { { 0x20, SONYPI_EVENT_BACK_PRESSED }, { 0, 0 } }; /* The set of possible help button events */ static struct sonypi_event sonypi_helpev[] = { { 0x3b, SONYPI_EVENT_HELP_PRESSED }, { 0, 0 } }; /* The set of possible lid events */ static struct sonypi_event sonypi_lidev[] = { { 0x51, SONYPI_EVENT_LID_CLOSED }, { 0x50, SONYPI_EVENT_LID_OPENED }, { 0, 0 } }; /* The set of possible zoom events */ static struct sonypi_event sonypi_zoomev[] = { { 0x39, SONYPI_EVENT_ZOOM_PRESSED }, { 0x10, SONYPI_EVENT_ZOOM_IN_PRESSED }, { 0x20, SONYPI_EVENT_ZOOM_OUT_PRESSED }, { 0x04, SONYPI_EVENT_ZOOM_PRESSED }, { 0, 0 } }; /* The set of possible thumbphrase events */ static struct sonypi_event sonypi_thumbphraseev[] = { { 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED }, { 0, 0 } }; /* The set of possible motioneye camera events */ static struct sonypi_event sonypi_meyeev[] = { { 0x00, SONYPI_EVENT_MEYE_FACE }, { 0x01, SONYPI_EVENT_MEYE_OPPOSITE }, { 0, 0 } }; /* The set of possible memorystick events */ static struct sonypi_event sonypi_memorystickev[] = { { 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT }, { 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT }, { 0, 0 } }; /* The set of possible battery events */ static struct sonypi_event sonypi_batteryev[] = { { 0x20, SONYPI_EVENT_BATTERY_INSERT }, { 0x30, SONYPI_EVENT_BATTERY_REMOVE }, { 0, 0 
} }; /* The set of possible volume events */ static struct sonypi_event sonypi_volumeev[] = { { 0x01, SONYPI_EVENT_VOLUME_INC_PRESSED }, { 0x02, SONYPI_EVENT_VOLUME_DEC_PRESSED }, { 0, 0 } }; /* The set of possible brightness events */ static struct sonypi_event sonypi_brightnessev[] = { { 0x80, SONYPI_EVENT_BRIGHTNESS_PRESSED }, { 0, 0 } }; static struct sonypi_eventtypes type1_events[] = { { 0, 0xffffffff, sonypi_releaseev }, { 0x70, SONYPI_MEYE_MASK, sonypi_meyeev }, { 0x30, SONYPI_LID_MASK, sonypi_lidev }, { 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev }, { 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev }, { 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, { 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, { 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev }, { 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, { 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev }, { 0 }, }; static struct sonypi_eventtypes type2_events[] = { { 0, 0xffffffff, sonypi_releaseev }, { 0x38, SONYPI_LID_MASK, sonypi_lidev }, { 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev }, { 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev }, { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, { 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, { 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev }, { 0x11, SONYPI_BACK_MASK, sonypi_backev }, { 0x21, SONYPI_HELP_MASK, sonypi_helpev }, { 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev }, { 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev }, { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, { 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, { 0 }, }; static struct sonypi_eventtypes type3_events[] = { { 0, 0xffffffff, sonypi_releaseev }, { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, { 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev }, { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, { 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, { 0x05, SONYPI_PKEY_MASK, sonypi_pkeyev }, { 0x05, SONYPI_ZOOM_MASK, sonypi_zoomev }, { 0x05, 
SONYPI_CAPTURE_MASK, sonypi_captureev }, { 0x05, SONYPI_PKEY_MASK, sonypi_volumeev }, { 0x05, SONYPI_PKEY_MASK, sonypi_brightnessev }, { 0 }, }; /* low level spic calls */ #define ITERATIONS_LONG 10000 #define ITERATIONS_SHORT 10 #define wait_on_command(command, iterations) { \ unsigned int n = iterations; \ while (--n && (command)) \ udelay(1); \ if (!n) \ dprintk("command failed at %s : %s (line %d)\n", \ __FILE__, __func__, __LINE__); \ } static u8 sony_pic_call1(u8 dev) { u8 v1, v2; wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); outb(dev, spic_dev.cur_ioport->io1.minimum + 4); v1 = inb_p(spic_dev.cur_ioport->io1.minimum + 4); v2 = inb_p(spic_dev.cur_ioport->io1.minimum); dprintk("sony_pic_call1(0x%.2x): 0x%.4x\n", dev, (v2 << 8) | v1); return v2; } static u8 sony_pic_call2(u8 dev, u8 fn) { u8 v1; wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); outb(dev, spic_dev.cur_ioport->io1.minimum + 4); wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); outb(fn, spic_dev.cur_ioport->io1.minimum); v1 = inb_p(spic_dev.cur_ioport->io1.minimum); dprintk("sony_pic_call2(0x%.2x - 0x%.2x): 0x%.4x\n", dev, fn, v1); return v1; } static u8 sony_pic_call3(u8 dev, u8 fn, u8 v) { u8 v1; wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); outb(dev, spic_dev.cur_ioport->io1.minimum + 4); wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); outb(fn, spic_dev.cur_ioport->io1.minimum); wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); outb(v, spic_dev.cur_ioport->io1.minimum); v1 = inb_p(spic_dev.cur_ioport->io1.minimum); dprintk("sony_pic_call3(0x%.2x - 0x%.2x - 0x%.2x): 0x%.4x\n", dev, fn, v, v1); return v1; } /* * minidrivers for SPIC models */ static int type3_handle_irq(const u8 data_mask, const u8 ev) { /* * 0x31 could mean we have to take some extra action and wait for * the next irq for 
some Type3 models, it will generate a new * irq and we can read new data from the device: * - 0x5c and 0x5f requires 0xA0 * - 0x61 requires 0xB3 */ if (data_mask == 0x31) { if (ev == 0x5c || ev == 0x5f) sony_pic_call1(0xA0); else if (ev == 0x61) sony_pic_call1(0xB3); return 0; } return 1; } static void sony_pic_detect_device_type(struct sony_pic_dev *dev) { struct pci_dev *pcidev; pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL); if (pcidev) { dev->model = SONYPI_DEVICE_TYPE1; dev->evport_offset = SONYPI_TYPE1_OFFSET; dev->event_types = type1_events; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, NULL); if (pcidev) { dev->model = SONYPI_DEVICE_TYPE2; dev->evport_offset = SONYPI_TYPE2_OFFSET; dev->event_types = type2_events; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, NULL); if (pcidev) { dev->model = SONYPI_DEVICE_TYPE3; dev->handle_irq = type3_handle_irq; dev->evport_offset = SONYPI_TYPE3_OFFSET; dev->event_types = type3_events; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, NULL); if (pcidev) { dev->model = SONYPI_DEVICE_TYPE3; dev->handle_irq = type3_handle_irq; dev->evport_offset = SONYPI_TYPE3_OFFSET; dev->event_types = type3_events; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_1, NULL); if (pcidev) { dev->model = SONYPI_DEVICE_TYPE3; dev->handle_irq = type3_handle_irq; dev->evport_offset = SONYPI_TYPE3_OFFSET; dev->event_types = type3_events; goto out; } /* default */ dev->model = SONYPI_DEVICE_TYPE2; dev->evport_offset = SONYPI_TYPE2_OFFSET; dev->event_types = type2_events; out: pci_dev_put(pcidev); pr_info("detected Type%d model\n", dev->model == SONYPI_DEVICE_TYPE1 ? 1 : dev->model == SONYPI_DEVICE_TYPE2 ? 
2 : 3);
}

/* camera tests and poweron/poweroff */
#define SONYPI_CAMERA_PICTURE		5
#define SONYPI_CAMERA_CONTROL		0x10

#define SONYPI_CAMERA_BRIGHTNESS		0
#define SONYPI_CAMERA_CONTRAST			1
#define SONYPI_CAMERA_HUE			2
#define SONYPI_CAMERA_COLOR			3
#define SONYPI_CAMERA_SHARPNESS			4

#define SONYPI_CAMERA_EXPOSURE_MASK		0xC
#define SONYPI_CAMERA_WHITE_BALANCE_MASK	0x3
#define SONYPI_CAMERA_PICTURE_MODE_MASK		0x30
#define SONYPI_CAMERA_MUTE_MASK			0x40

/* the rest don't need a loop until not 0xff */
#define SONYPI_CAMERA_AGC			6
#define SONYPI_CAMERA_AGC_MASK			0x30
#define SONYPI_CAMERA_SHUTTER_MASK		0x7

#define SONYPI_CAMERA_SHUTDOWN_REQUEST		7
/* NOTE(review): duplicate of the SONYPI_CAMERA_CONTROL define above (same value) */
#define SONYPI_CAMERA_CONTROL			0x10

#define SONYPI_CAMERA_STATUS			7
#define SONYPI_CAMERA_STATUS_READY		0x2
#define SONYPI_CAMERA_STATUS_POSITION		0x4

#define SONYPI_DIRECTION_BACKWARDS		0x4

#define SONYPI_CAMERA_REVISION			8
#define SONYPI_CAMERA_ROMVERSION		9

/*
 * Query the camera status register; returns non-zero when the device
 * answered (not 0xff) and the READY bit is set.
 */
static int __sony_pic_camera_ready(void)
{
	u8 v;

	v = sony_pic_call2(0x8f, SONYPI_CAMERA_STATUS);
	return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY));
}

/*
 * Mute the camera picture output and cut its power if it was on.
 * Returns 0 on success, -ENODEV when the `camera` module parameter
 * did not enable camera control.
 */
static int __sony_pic_camera_off(void)
{
	if (!camera) {
		pr_warn("camera control not enabled\n");
		return -ENODEV;
	}

	wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE,
				SONYPI_CAMERA_MUTE_MASK),
			ITERATIONS_SHORT);

	if (spic_dev.camera_power) {
		sony_pic_call2(0x91, 0);
		spic_dev.camera_power = 0;
	}
	return 0;
}

/*
 * Power on the MotionEye camera.  Retries the power-up handshake up to
 * 5 times, each time waiting (up to 100 * 10 ms) for the power call to
 * settle and then (up to 400 * 10 ms) for the READY status bit.
 * Returns 0 on success, -ENODEV if control is disabled or power-on
 * never succeeded.
 */
static int __sony_pic_camera_on(void)
{
	int i, j, x;

	if (!camera) {
		pr_warn("camera control not enabled\n");
		return -ENODEV;
	}

	if (spic_dev.camera_power)
		return 0;

	for (j = 5; j > 0; j--) {

		for (x = 0; x < 100 && sony_pic_call2(0x91, 0x1); x++)
			msleep(10);
		sony_pic_call1(0x93);

		for (i = 400; i > 0; i--) {
			if (__sony_pic_camera_ready())
				break;
			msleep(10);
		}
		if (i)
			break;
	}

	if (j == 0) {
		pr_warn("failed to power on camera\n");
		return -ENODEV;
	}

	wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTROL,
				0x5a),
			ITERATIONS_SHORT);

	spic_dev.camera_power = 1;
	return 0;
}

/* External camera command (exported to the motion eye v4l driver)
*/ int sony_pic_camera_command(int command, u8 value) { if (!camera) return -EIO; mutex_lock(&spic_dev.lock); switch (command) { case SONY_PIC_COMMAND_SETCAMERA: if (value) __sony_pic_camera_on(); else __sony_pic_camera_off(); break; case SONY_PIC_COMMAND_SETCAMERABRIGHTNESS: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_BRIGHTNESS, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERACONTRAST: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTRAST, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERAHUE: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_HUE, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERACOLOR: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_COLOR, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERASHARPNESS: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_SHARPNESS, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERAPICTURE: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERAAGC: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_AGC, value), ITERATIONS_SHORT); break; default: pr_err("sony_pic_camera_command invalid: %d\n", command); break; } mutex_unlock(&spic_dev.lock); return 0; } EXPORT_SYMBOL(sony_pic_camera_command); /* gprs/edge modem (SZ460N and SZ210P), thanks to Joshua Wise */ static void __sony_pic_set_wwanpower(u8 state) { state = !!state; if (spic_dev.wwan_power == state) return; sony_pic_call2(0xB0, state); sony_pic_call1(0x82); spic_dev.wwan_power = state; } static ssize_t sony_pic_wwanpower_store(struct device *dev, struct device_attribute *attr, const char *buffer, size_t count) { unsigned long value; if (count > 31) return -EINVAL; if (kstrtoul(buffer, 10, &value)) return -EINVAL; mutex_lock(&spic_dev.lock); __sony_pic_set_wwanpower(value); mutex_unlock(&spic_dev.lock); return count; } static ssize_t sony_pic_wwanpower_show(struct device *dev, struct device_attribute 
				      *attr, char *buffer)
{
	ssize_t count;
	/* lock serializes access to the cached spic_dev.wwan_power state */
	mutex_lock(&spic_dev.lock);
	count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.wwan_power);
	mutex_unlock(&spic_dev.lock);
	return count;
}

/* bluetooth subsystem power state */
/*
 * Toggle bluetooth power via SPIC calls; no-op when the cached state
 * already matches.  Caller must hold spic_dev.lock.
 */
static void __sony_pic_set_bluetoothpower(u8 state)
{
	state = !!state;
	if (spic_dev.bluetooth_power == state)
		return;
	sony_pic_call2(0x96, state);
	sony_pic_call1(0x82);
	spic_dev.bluetooth_power = state;
}

/*
 * sysfs store: parse a decimal value and set bluetooth power.
 * Returns `count` on success, -EINVAL on malformed/oversized input.
 */
static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned long value;
	/* reject absurdly long writes before parsing */
	if (count > 31)
		return -EINVAL;

	if (kstrtoul(buffer, 10, &value))
		return -EINVAL;

	mutex_lock(&spic_dev.lock);
	__sony_pic_set_bluetoothpower(value);
	mutex_unlock(&spic_dev.lock);

	return count;
}

/* sysfs show: report the cached bluetooth power state */
static ssize_t sony_pic_bluetoothpower_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	ssize_t count = 0;

	mutex_lock(&spic_dev.lock);
	count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.bluetooth_power);
	mutex_unlock(&spic_dev.lock);

	return count;
}

/* fan speed */
/* FAN0 information (reverse engineered from ACPI tables) */
#define SONY_PIC_FAN0_STATUS	0x93
/* write the fan speed directly to the embedded controller register */
static int sony_pic_set_fanspeed(unsigned long value)
{
	return ec_write(SONY_PIC_FAN0_STATUS, value);
}

/* read the current fan speed from the embedded controller register */
static int sony_pic_get_fanspeed(u8 *value)
{
	return ec_read(SONY_PIC_FAN0_STATUS, value);
}

/*
 * sysfs store: parse a decimal fan speed and write it to the EC.
 * Returns `count` on success, -EINVAL on bad input, -EIO on EC failure.
 */
static ssize_t sony_pic_fanspeed_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned long value;
	if (count > 31)
		return -EINVAL;

	if (kstrtoul(buffer, 10, &value))
		return -EINVAL;

	if (sony_pic_set_fanspeed(value))
		return -EIO;

	return count;
}

/* sysfs show: report the fan speed read back from the EC */
static ssize_t sony_pic_fanspeed_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	u8 value = 0;
	if (sony_pic_get_fanspeed(&value))
		return -EIO;
	return snprintf(buffer, PAGE_SIZE, "%d\n", value);
}

#define SPIC_ATTR(_name, _mode)					\
struct device_attribute spic_attr_##_name = __ATTR(_name,	\
		_mode, sony_pic_## _name ##_show,		\
sony_pic_## _name ##_store) static SPIC_ATTR(bluetoothpower, 0644); static SPIC_ATTR(wwanpower, 0644); static SPIC_ATTR(fanspeed, 0644); static struct attribute *spic_attributes[] = { &spic_attr_bluetoothpower.attr, &spic_attr_wwanpower.attr, &spic_attr_fanspeed.attr, NULL }; static struct attribute_group spic_attribute_group = { .attrs = spic_attributes }; /******** SONYPI compatibility **********/ #ifdef CONFIG_SONYPI_COMPAT /* battery / brightness / temperature addresses */ #define SONYPI_BAT_FLAGS 0x81 #define SONYPI_LCD_LIGHT 0x96 #define SONYPI_BAT1_PCTRM 0xa0 #define SONYPI_BAT1_LEFT 0xa2 #define SONYPI_BAT1_MAXRT 0xa4 #define SONYPI_BAT2_PCTRM 0xa8 #define SONYPI_BAT2_LEFT 0xaa #define SONYPI_BAT2_MAXRT 0xac #define SONYPI_BAT1_MAXTK 0xb0 #define SONYPI_BAT1_FULL 0xb2 #define SONYPI_BAT2_MAXTK 0xb8 #define SONYPI_BAT2_FULL 0xba #define SONYPI_TEMP_STATUS 0xC1 struct sonypi_compat_s { struct fasync_struct *fifo_async; struct kfifo fifo; spinlock_t fifo_lock; wait_queue_head_t fifo_proc_list; atomic_t open_count; }; static struct sonypi_compat_s sonypi_compat = { .open_count = ATOMIC_INIT(0), }; static int sonypi_misc_fasync(int fd, struct file *filp, int on) { return fasync_helper(fd, filp, on, &sonypi_compat.fifo_async); } static int sonypi_misc_release(struct inode *inode, struct file *file) { atomic_dec(&sonypi_compat.open_count); return 0; } static int sonypi_misc_open(struct inode *inode, struct file *file) { /* Flush input queue on first open */ unsigned long flags; spin_lock_irqsave(&sonypi_compat.fifo_lock, flags); if (atomic_inc_return(&sonypi_compat.open_count) == 1) kfifo_reset(&sonypi_compat.fifo); spin_unlock_irqrestore(&sonypi_compat.fifo_lock, flags); return 0; } static ssize_t sonypi_misc_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { ssize_t ret; unsigned char c; if ((kfifo_len(&sonypi_compat.fifo) == 0) && (file->f_flags & O_NONBLOCK)) return -EAGAIN; ret = wait_event_interruptible(sonypi_compat.fifo_proc_list, 
kfifo_len(&sonypi_compat.fifo) != 0); if (ret) return ret; while (ret < count && (kfifo_out_locked(&sonypi_compat.fifo, &c, sizeof(c), &sonypi_compat.fifo_lock) == sizeof(c))) { if (put_user(c, buf++)) return -EFAULT; ret++; } if (ret > 0) { struct inode *inode = file_inode(file); inode->i_atime = current_fs_time(inode->i_sb); } return ret; } static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) { poll_wait(file, &sonypi_compat.fifo_proc_list, wait); if (kfifo_len(&sonypi_compat.fifo)) return POLLIN | POLLRDNORM; return 0; } static int ec_read16(u8 addr, u16 *value) { u8 val_lb, val_hb; if (ec_read(addr, &val_lb)) return -1; if (ec_read(addr + 1, &val_hb)) return -1; *value = val_lb | (val_hb << 8); return 0; } static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { int ret = 0; void __user *argp = (void __user *)arg; u8 val8; u16 val16; int value; mutex_lock(&spic_dev.lock); switch (cmd) { case SONYPI_IOCGBRT: if (sony_bl_props.dev == NULL) { ret = -EIO; break; } if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, &value)) { ret = -EIO; break; } val8 = ((value & 0xff) - 1) << 5; if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCSBRT: if (sony_bl_props.dev == NULL) { ret = -EIO; break; } if (copy_from_user(&val8, argp, sizeof(val8))) { ret = -EFAULT; break; } value = (val8 >> 5) + 1; if (sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &value, NULL)) { ret = -EIO; break; } /* sync the backlight device status */ sony_bl_props.dev->props.brightness = sony_backlight_get_brightness(sony_bl_props.dev); break; case SONYPI_IOCGBAT1CAP: if (ec_read16(SONYPI_BAT1_FULL, &val16)) { ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBAT1REM: if (ec_read16(SONYPI_BAT1_LEFT, &val16)) { ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBAT2CAP: if (ec_read16(SONYPI_BAT2_FULL, &val16)) { 
ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBAT2REM: if (ec_read16(SONYPI_BAT2_LEFT, &val16)) { ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBATFLAGS: if (ec_read(SONYPI_BAT_FLAGS, &val8)) { ret = -EIO; break; } val8 &= 0x07; if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCGBLUE: val8 = spic_dev.bluetooth_power; if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCSBLUE: if (copy_from_user(&val8, argp, sizeof(val8))) { ret = -EFAULT; break; } __sony_pic_set_bluetoothpower(val8); break; /* FAN Controls */ case SONYPI_IOCGFAN: if (sony_pic_get_fanspeed(&val8)) { ret = -EIO; break; } if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCSFAN: if (copy_from_user(&val8, argp, sizeof(val8))) { ret = -EFAULT; break; } if (sony_pic_set_fanspeed(val8)) ret = -EIO; break; /* GET Temperature (useful under APM) */ case SONYPI_IOCGTEMP: if (ec_read(SONYPI_TEMP_STATUS, &val8)) { ret = -EIO; break; } if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; default: ret = -EINVAL; } mutex_unlock(&spic_dev.lock); return ret; } static const struct file_operations sonypi_misc_fops = { .owner = THIS_MODULE, .read = sonypi_misc_read, .poll = sonypi_misc_poll, .open = sonypi_misc_open, .release = sonypi_misc_release, .fasync = sonypi_misc_fasync, .unlocked_ioctl = sonypi_misc_ioctl, .llseek = noop_llseek, }; static struct miscdevice sonypi_misc_device = { .minor = MISC_DYNAMIC_MINOR, .name = "sonypi", .fops = &sonypi_misc_fops, }; static void sonypi_compat_report_event(u8 event) { kfifo_in_locked(&sonypi_compat.fifo, (unsigned char *)&event, sizeof(event), &sonypi_compat.fifo_lock); kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN); wake_up_interruptible(&sonypi_compat.fifo_proc_list); } static int sonypi_compat_init(void) { int error; 
	spin_lock_init(&sonypi_compat.fifo_lock);
	error =
	 kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
	if (error) {
		pr_err("kfifo_alloc failed\n");
		return error;
	}

	init_waitqueue_head(&sonypi_compat.fifo_proc_list);

	/* honor the `minor` module parameter; -1 means dynamic allocation */
	if (minor != -1)
		sonypi_misc_device.minor = minor;
	error = misc_register(&sonypi_misc_device);
	if (error) {
		pr_err("misc_register failed\n");
		goto err_free_kfifo;
	}
	if (minor == -1)
		pr_info("device allocated minor is %d\n",
			sonypi_misc_device.minor);
	return 0;

err_free_kfifo:
	kfifo_free(&sonypi_compat.fifo);
	return error;
}

/* tear down the sonypi compatibility misc device and its event fifo */
static void sonypi_compat_exit(void)
{
	misc_deregister(&sonypi_misc_device);
	kfifo_free(&sonypi_compat.fifo);
}
#else
/* stubs used when CONFIG_SONYPI_COMPAT is disabled */
static int sonypi_compat_init(void) { return 0; }
static void sonypi_compat_exit(void) { }
static void sonypi_compat_report_event(u8 event) { }
#endif /* CONFIG_SONYPI_COMPAT */

/*
 * ACPI callbacks
 */
/*
 * acpi_walk_resources() callback for the SPIC _PRS buffer: collects the
 * possible IO-port pairs and IRQ lines into the sony_pic_dev lists
 * passed through `context`.
 */
static acpi_status
sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
{
	u32 i;
	struct sony_pic_dev *dev = (struct sony_pic_dev *)context;

	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_START_DEPENDENT:
		{
			/* start IO enumeration */
			struct sony_pic_ioport *ioport = kzalloc(sizeof(*ioport), GFP_KERNEL);
			if (!ioport)
				return AE_ERROR;

			list_add(&ioport->list, &dev->ioports);
			return AE_OK;
		}

	case ACPI_RESOURCE_TYPE_END_DEPENDENT:
		/* end IO enumeration */
		return AE_OK;

	case ACPI_RESOURCE_TYPE_IRQ:
		{
			struct acpi_resource_irq *p = &resource->data.irq;
			struct sony_pic_irq *interrupt = NULL;
			if (!p || !p->interrupt_count) {
				/*
				 * IRQ descriptors may have no IRQ# bits set,
				 * particularly those w/ _STA disabled
				 */
				dprintk("Blank IRQ resource\n");
				return AE_OK;
			}
			for (i = 0; i < p->interrupt_count; i++) {
				if (!p->interrupts[i]) {
					pr_warn("Invalid IRQ %d\n",
						p->interrupts[i]);
					continue;
				}
				interrupt = kzalloc(sizeof(*interrupt),
						GFP_KERNEL);
				if (!interrupt)
					return AE_ERROR;

				list_add(&interrupt->list, &dev->interrupts);
				interrupt->irq.triggering = p->triggering;
				interrupt->irq.polarity =
p->polarity; interrupt->irq.sharable = p->sharable; interrupt->irq.interrupt_count = 1; interrupt->irq.interrupts[0] = p->interrupts[i]; } return AE_OK; } case ACPI_RESOURCE_TYPE_IO: { struct acpi_resource_io *io = &resource->data.io; struct sony_pic_ioport *ioport = list_first_entry(&dev->ioports, struct sony_pic_ioport, list); if (!io) { dprintk("Blank IO resource\n"); return AE_OK; } if (!ioport->io1.minimum) { memcpy(&ioport->io1, io, sizeof(*io)); dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum, ioport->io1.address_length); } else if (!ioport->io2.minimum) { memcpy(&ioport->io2, io, sizeof(*io)); dprintk("IO2 at 0x%.4x (0x%.2x)\n", ioport->io2.minimum, ioport->io2.address_length); } else { pr_err("Unknown SPIC Type, more than 2 IO Ports\n"); return AE_ERROR; } return AE_OK; } default: dprintk("Resource %d isn't an IRQ nor an IO port\n", resource->type); case ACPI_RESOURCE_TYPE_END_TAG: return AE_OK; } return AE_CTRL_TERMINATE; } static int sony_pic_possible_resources(struct acpi_device *device) { int result = 0; acpi_status status = AE_OK; if (!device) return -EINVAL; /* get device status */ /* see acpi_pci_link_get_current acpi_pci_link_get_possible */ dprintk("Evaluating _STA\n"); result = acpi_bus_get_status(device); if (result) { pr_warn("Unable to read status\n"); goto end; } if (!device->status.enabled) dprintk("Device disabled\n"); else dprintk("Device enabled\n"); /* * Query and parse 'method' */ dprintk("Evaluating %s\n", METHOD_NAME__PRS); status = acpi_walk_resources(device->handle, METHOD_NAME__PRS, sony_pic_read_possible_resource, &spic_dev); if (ACPI_FAILURE(status)) { pr_warn("Failure evaluating %s\n", METHOD_NAME__PRS); result = -ENODEV; } end: return result; } /* * Disable the spic device by calling its _DIS method */ static int sony_pic_disable(struct acpi_device *device) { acpi_status ret = acpi_evaluate_object(device->handle, "_DIS", NULL, NULL); if (ACPI_FAILURE(ret) && ret != AE_NOT_FOUND) return -ENXIO; dprintk("Device 
disabled\n"); return 0; } /* * Based on drivers/acpi/pci_link.c:acpi_pci_link_set * * Call _SRS to set current resources */ static int sony_pic_enable(struct acpi_device *device, struct sony_pic_ioport *ioport, struct sony_pic_irq *irq) { acpi_status status; int result = 0; /* Type 1 resource layout is: * IO * IO * IRQNoFlags * End * * Type 2 and 3 resource layout is: * IO * IRQNoFlags * End */ struct { struct acpi_resource res1; struct acpi_resource res2; struct acpi_resource res3; struct acpi_resource res4; } *resource; struct acpi_buffer buffer = { 0, NULL }; if (!ioport || !irq) return -EINVAL; /* init acpi_buffer */ resource = kzalloc(sizeof(*resource) + 1, GFP_KERNEL); if (!resource) return -ENOMEM; buffer.length = sizeof(*resource) + 1; buffer.pointer = resource; /* setup Type 1 resources */ if (spic_dev.model == SONYPI_DEVICE_TYPE1) { /* setup io resources */ resource->res1.type = ACPI_RESOURCE_TYPE_IO; resource->res1.length = sizeof(struct acpi_resource); memcpy(&resource->res1.data.io, &ioport->io1, sizeof(struct acpi_resource_io)); resource->res2.type = ACPI_RESOURCE_TYPE_IO; resource->res2.length = sizeof(struct acpi_resource); memcpy(&resource->res2.data.io, &ioport->io2, sizeof(struct acpi_resource_io)); /* setup irq resource */ resource->res3.type = ACPI_RESOURCE_TYPE_IRQ; resource->res3.length = sizeof(struct acpi_resource); memcpy(&resource->res3.data.irq, &irq->irq, sizeof(struct acpi_resource_irq)); /* we requested a shared irq */ resource->res3.data.irq.sharable = ACPI_SHARED; resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG; resource->res4.length = sizeof(struct acpi_resource); } /* setup Type 2/3 resources */ else { /* setup io resource */ resource->res1.type = ACPI_RESOURCE_TYPE_IO; resource->res1.length = sizeof(struct acpi_resource); memcpy(&resource->res1.data.io, &ioport->io1, sizeof(struct acpi_resource_io)); /* setup irq resource */ resource->res2.type = ACPI_RESOURCE_TYPE_IRQ; resource->res2.length = sizeof(struct acpi_resource); 
memcpy(&resource->res2.data.irq, &irq->irq, sizeof(struct acpi_resource_irq)); /* we requested a shared irq */ resource->res2.data.irq.sharable = ACPI_SHARED; resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG; resource->res3.length = sizeof(struct acpi_resource); } /* Attempt to set the resource */ dprintk("Evaluating _SRS\n"); status = acpi_set_current_resources(device->handle, &buffer); /* check for total failure */ if (ACPI_FAILURE(status)) { pr_err("Error evaluating _SRS\n"); result = -ENODEV; goto end; } /* Necessary device initializations calls (from sonypi) */ sony_pic_call1(0x82); sony_pic_call2(0x81, 0xff); sony_pic_call1(compat ? 0x92 : 0x82); end: kfree(resource); return result; } /***************** * * ISR: some event is available * *****************/ static irqreturn_t sony_pic_irq(int irq, void *dev_id) { int i, j; u8 ev = 0; u8 data_mask = 0; u8 device_event = 0; struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id; ev = inb_p(dev->cur_ioport->io1.minimum); if (dev->cur_ioport->io2.minimum) data_mask = inb_p(dev->cur_ioport->io2.minimum); else data_mask = inb_p(dev->cur_ioport->io1.minimum + dev->evport_offset); dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", ev, data_mask, dev->cur_ioport->io1.minimum, dev->evport_offset); if (ev == 0x00 || ev == 0xff) return IRQ_HANDLED; for (i = 0; dev->event_types[i].mask; i++) { if ((data_mask & dev->event_types[i].data) != dev->event_types[i].data) continue; if (!(mask & dev->event_types[i].mask)) continue; for (j = 0; dev->event_types[i].events[j].event; j++) { if (ev == dev->event_types[i].events[j].data) { device_event = dev->event_types[i].events[j].event; /* some events may require ignoring */ if (!device_event) return IRQ_HANDLED; goto found; } } } /* Still not able to decode the event try to pass * it over to the minidriver */ if (dev->handle_irq && dev->handle_irq(data_mask, ev) == 0) return IRQ_HANDLED; dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", ev, data_mask, 
dev->cur_ioport->io1.minimum, dev->evport_offset); return IRQ_HANDLED; found: sony_laptop_report_input_event(device_event); sonypi_compat_report_event(device_event); return IRQ_HANDLED; } /***************** * * ACPI driver * *****************/ static int sony_pic_remove(struct acpi_device *device) { struct sony_pic_ioport *io, *tmp_io; struct sony_pic_irq *irq, *tmp_irq; if (sony_pic_disable(device)) { pr_err("Couldn't disable device\n"); return -ENXIO; } free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev); release_region(spic_dev.cur_ioport->io1.minimum, spic_dev.cur_ioport->io1.address_length); if (spic_dev.cur_ioport->io2.minimum) release_region(spic_dev.cur_ioport->io2.minimum, spic_dev.cur_ioport->io2.address_length); sonypi_compat_exit(); sony_laptop_remove_input(); /* pf attrs */ sysfs_remove_group(&sony_pf_device->dev.kobj, &spic_attribute_group); sony_pf_remove(); list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) { list_del(&io->list); kfree(io); } list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) { list_del(&irq->list); kfree(irq); } spic_dev.cur_ioport = NULL; spic_dev.cur_irq = NULL; dprintk(SONY_PIC_DRIVER_NAME " removed.\n"); return 0; } static int sony_pic_add(struct acpi_device *device) { int result; struct sony_pic_ioport *io, *tmp_io; struct sony_pic_irq *irq, *tmp_irq; spic_dev.acpi_dev = device; strcpy(acpi_device_class(device), "sony/hotkey"); sony_pic_detect_device_type(&spic_dev); mutex_init(&spic_dev.lock); /* read _PRS resources */ result = sony_pic_possible_resources(device); if (result) { pr_err("Unable to read possible resources\n"); goto err_free_resources; } /* setup input devices and helper fifo */ result = sony_laptop_setup_input(device); if (result) { pr_err("Unable to create input devices\n"); goto err_free_resources; } result = sonypi_compat_init(); if (result) goto err_remove_input; /* request io port */ list_for_each_entry_reverse(io, &spic_dev.ioports, list) { if 
(request_region(io->io1.minimum, io->io1.address_length, "Sony Programmable I/O Device")) { dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n", io->io1.minimum, io->io1.maximum, io->io1.address_length); /* Type 1 have 2 ioports */ if (io->io2.minimum) { if (request_region(io->io2.minimum, io->io2.address_length, "Sony Programmable I/O Device")) { dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n", io->io2.minimum, io->io2.maximum, io->io2.address_length); spic_dev.cur_ioport = io; break; } else { dprintk("Unable to get I/O port2: " "0x%.4x (0x%.4x) + 0x%.2x\n", io->io2.minimum, io->io2.maximum, io->io2.address_length); release_region(io->io1.minimum, io->io1.address_length); } } else { spic_dev.cur_ioport = io; break; } } } if (!spic_dev.cur_ioport) { pr_err("Failed to request_region\n"); result = -ENODEV; goto err_remove_compat; } /* request IRQ */ list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) { if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, 0, "sony-laptop", &spic_dev)) { dprintk("IRQ: %d - triggering: %d - " "polarity: %d - shr: %d\n", irq->irq.interrupts[0], irq->irq.triggering, irq->irq.polarity, irq->irq.sharable); spic_dev.cur_irq = irq; break; } } if (!spic_dev.cur_irq) { pr_err("Failed to request_irq\n"); result = -ENODEV; goto err_release_region; } /* set resource status _SRS */ result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); if (result) { pr_err("Couldn't enable device\n"); goto err_free_irq; } spic_dev.bluetooth_power = -1; /* create device attributes */ result = sony_pf_add(); if (result) goto err_disable_device; result = sysfs_create_group(&sony_pf_device->dev.kobj, &spic_attribute_group); if (result) goto err_remove_pf; pr_info("SPIC setup done.\n"); return 0; err_remove_pf: sony_pf_remove(); err_disable_device: sony_pic_disable(device); err_free_irq: free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev); err_release_region: release_region(spic_dev.cur_ioport->io1.minimum, 
spic_dev.cur_ioport->io1.address_length); if (spic_dev.cur_ioport->io2.minimum) release_region(spic_dev.cur_ioport->io2.minimum, spic_dev.cur_ioport->io2.address_length); err_remove_compat: sonypi_compat_exit(); err_remove_input: sony_laptop_remove_input(); err_free_resources: list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) { list_del(&io->list); kfree(io); } list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) { list_del(&irq->list); kfree(irq); } spic_dev.cur_ioport = NULL; spic_dev.cur_irq = NULL; return result; } #ifdef CONFIG_PM_SLEEP static int sony_pic_suspend(struct device *dev) { if (sony_pic_disable(to_acpi_device(dev))) return -ENXIO; return 0; } static int sony_pic_resume(struct device *dev) { sony_pic_enable(to_acpi_device(dev), spic_dev.cur_ioport, spic_dev.cur_irq); return 0; } #endif static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume); static const struct acpi_device_id sony_pic_device_ids[] = { {SONY_PIC_HID, 0}, {"", 0}, }; static struct acpi_driver sony_pic_driver = { .name = SONY_PIC_DRIVER_NAME, .class = SONY_PIC_CLASS, .ids = sony_pic_device_ids, .owner = THIS_MODULE, .ops = { .add = sony_pic_add, .remove = sony_pic_remove, }, .drv.pm = &sony_pic_pm, }; static struct dmi_system_id __initdata sonypi_dmi_table[] = { { .ident = "Sony Vaio", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"), }, }, { .ident = "Sony Vaio", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"), }, }, { } }; static int __init sony_laptop_init(void) { int result; if (!no_spic && dmi_check_system(sonypi_dmi_table)) { result = acpi_bus_register_driver(&sony_pic_driver); if (result) { pr_err("Unable to register SPIC driver\n"); goto out; } spic_drv_registered = 1; } result = acpi_bus_register_driver(&sony_nc_driver); if (result) { pr_err("Unable to register SNC driver\n"); goto out_unregister_pic; } return 0; out_unregister_pic: if 
(spic_drv_registered) acpi_bus_unregister_driver(&sony_pic_driver); out: return result; } static void __exit sony_laptop_exit(void) { acpi_bus_unregister_driver(&sony_nc_driver); if (spic_drv_registered) acpi_bus_unregister_driver(&sony_pic_driver); } module_init(sony_laptop_init); module_exit(sony_laptop_exit);
gpl-2.0
jpoimboe/linux
sound/isa/gus/gus_mixer.c
1113
6116
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Routines for control of ICS 2101 chip and "mixer" in GF1 chip * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <linux/wait.h> #include <sound/core.h> #include <sound/control.h> #include <sound/gus.h> /* * */ #define GF1_SINGLE(xname, xindex, shift, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_gf1_info_single, \ .get = snd_gf1_get_single, .put = snd_gf1_put_single, \ .private_value = shift | (invert << 8) } #define snd_gf1_info_single snd_ctl_boolean_mono_info static int snd_gf1_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol); int shift = kcontrol->private_value & 0xff; int invert = (kcontrol->private_value >> 8) & 1; ucontrol->value.integer.value[0] = (gus->mix_cntrl_reg >> shift) & 1; if (invert) ucontrol->value.integer.value[0] ^= 1; return 0; } static int snd_gf1_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol); unsigned long flags; int shift = kcontrol->private_value & 0xff; int invert = (kcontrol->private_value >> 8) & 1; int change; unsigned char oval, nval; nval = ucontrol->value.integer.value[0] & 1; if 
(invert) nval ^= 1; nval <<= shift; spin_lock_irqsave(&gus->reg_lock, flags); oval = gus->mix_cntrl_reg; nval = (oval & ~(1 << shift)) | nval; change = nval != oval; outb(gus->mix_cntrl_reg = nval, GUSP(gus, MIXCNTRLREG)); outb(gus->gf1.active_voice = 0, GUSP(gus, GF1PAGE)); spin_unlock_irqrestore(&gus->reg_lock, flags); return change; } #define ICS_DOUBLE(xname, xindex, addr) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_ics_info_double, \ .get = snd_ics_get_double, .put = snd_ics_put_double, \ .private_value = addr } static int snd_ics_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 127; return 0; } static int snd_ics_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol); unsigned long flags; int addr = kcontrol->private_value & 0xff; unsigned char left, right; spin_lock_irqsave(&gus->reg_lock, flags); left = gus->gf1.ics_regs[addr][0]; right = gus->gf1.ics_regs[addr][1]; spin_unlock_irqrestore(&gus->reg_lock, flags); ucontrol->value.integer.value[0] = left & 127; ucontrol->value.integer.value[1] = right & 127; return 0; } static int snd_ics_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol); unsigned long flags; int addr = kcontrol->private_value & 0xff; int change; unsigned char val1, val2, oval1, oval2; val1 = ucontrol->value.integer.value[0] & 127; val2 = ucontrol->value.integer.value[1] & 127; spin_lock_irqsave(&gus->reg_lock, flags); oval1 = gus->gf1.ics_regs[addr][0]; oval2 = gus->gf1.ics_regs[addr][1]; change = val1 != oval1 || val2 != oval2; gus->gf1.ics_regs[addr][0] = val1; gus->gf1.ics_regs[addr][1] = val2; if (gus->ics_flag && gus->ics_flipped && (addr == SNDRV_ICS_GF1_DEV || addr == 
SNDRV_ICS_MASTER_DEV)) swap(val1, val2); addr <<= 3; outb(addr | 0, GUSP(gus, MIXCNTRLPORT)); outb(1, GUSP(gus, MIXDATAPORT)); outb(addr | 2, GUSP(gus, MIXCNTRLPORT)); outb((unsigned char) val1, GUSP(gus, MIXDATAPORT)); outb(addr | 1, GUSP(gus, MIXCNTRLPORT)); outb(2, GUSP(gus, MIXDATAPORT)); outb(addr | 3, GUSP(gus, MIXCNTRLPORT)); outb((unsigned char) val2, GUSP(gus, MIXDATAPORT)); spin_unlock_irqrestore(&gus->reg_lock, flags); return change; } static struct snd_kcontrol_new snd_gf1_controls[] = { GF1_SINGLE("Master Playback Switch", 0, 1, 1), GF1_SINGLE("Line Switch", 0, 0, 1), GF1_SINGLE("Mic Switch", 0, 2, 0) }; static struct snd_kcontrol_new snd_ics_controls[] = { GF1_SINGLE("Master Playback Switch", 0, 1, 1), ICS_DOUBLE("Master Playback Volume", 0, SNDRV_ICS_MASTER_DEV), ICS_DOUBLE("Synth Playback Volume", 0, SNDRV_ICS_GF1_DEV), GF1_SINGLE("Line Switch", 0, 0, 1), ICS_DOUBLE("Line Playback Volume", 0, SNDRV_ICS_LINE_DEV), GF1_SINGLE("Mic Switch", 0, 2, 0), ICS_DOUBLE("Mic Playback Volume", 0, SNDRV_ICS_MIC_DEV), ICS_DOUBLE("CD Playback Volume", 0, SNDRV_ICS_CD_DEV) }; int snd_gf1_new_mixer(struct snd_gus_card * gus) { struct snd_card *card; unsigned int idx, max; int err; if (snd_BUG_ON(!gus)) return -EINVAL; card = gus->card; if (snd_BUG_ON(!card)) return -EINVAL; if (gus->ics_flag) snd_component_add(card, "ICS2101"); if (card->mixername[0] == '\0') { strcpy(card->mixername, gus->ics_flag ? "GF1,ICS2101" : "GF1"); } else { if (gus->ics_flag) strcat(card->mixername, ",ICS2101"); strcat(card->mixername, ",GF1"); } if (!gus->ics_flag) { max = gus->ess_flag ? 1 : ARRAY_SIZE(snd_gf1_controls); for (idx = 0; idx < max; idx++) { if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_gf1_controls[idx], gus))) < 0) return err; } } else { for (idx = 0; idx < ARRAY_SIZE(snd_ics_controls); idx++) { if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_ics_controls[idx], gus))) < 0) return err; } } return 0; }
gpl-2.0
SteveLinCH/linux
drivers/media/radio/wl128x/fmdrv_tx.c
1625
9322
/*
 * FM Driver for Connectivity chip of Texas Instruments.
 * This sub-module of FM driver implements FM TX functionality.
 *
 * Copyright (C) 2011 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/delay.h>
#include "fmdrv.h"
#include "fmdrv_common.h"
#include "fmdrv_tx.h"

/* Set TX stereo/mono audio mode; no-op if the mode is already current. */
int fm_tx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
{
	u16 payload;
	int ret;

	if (fmdev->tx_data.aud_mode == mode)
		return 0;

	fmdbg("stereo mode: %d\n", mode);

	/* Set Stereo/Mono mode (register is 1 for mono, 0 for stereo) */
	payload = (1 - mode);
	ret = fmc_send_cmd(fmdev, MONO_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	fmdev->tx_data.aud_mode = mode;

	return ret;
}

/* Program the RDS text buffer and select scroll display mode. */
static int set_rds_text(struct fmdev *fmdev, u8 *rds_text)
{
	u16 payload;
	int ret;

	ret = fmc_send_cmd(fmdev, RDS_DATA_SET, REG_WR, rds_text,
			strlen(rds_text), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Scroll mode */
	payload = (u16)0x1;
	ret = fmc_send_cmd(fmdev, DISPLAY_MODE, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return 0;
}

/* Set the RDS PI code and decoder id. */
static int set_rds_data_mode(struct fmdev *fmdev, u8 mode)
{
	u16 payload;
	int ret;

	/* Setting unique PI TODO: how unique? */
	payload = (u16)0xcafe;
	ret = fmc_send_cmd(fmdev, PI_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Set decoder id */
	payload = (u16)0xa;
	ret = fmc_send_cmd(fmdev, DI_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* TODO: RDS_MODE_GET? */
	return 0;
}

/* Configure the RDS data length and type (type goes in the high byte). */
static int set_rds_len(struct fmdev *fmdev, u8 type, u16 len)
{
	u16 payload;
	int ret;

	len |= type << 8;
	payload = len;
	ret = fmc_send_cmd(fmdev, RDS_CONFIG_DATA_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* TODO: LENGTH_GET? */
	return 0;
}

/* Enable or disable RDS transmission, seeding a default text. */
int fm_tx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
{
	u16 payload;
	int ret;
	u8 rds_text[] = "Zoom2\n";

	fmdbg("rds_en_dis:%d(E:%d, D:%d)\n", rds_en_dis,
		   FM_RDS_ENABLE, FM_RDS_DISABLE);

	if (rds_en_dis == FM_RDS_ENABLE) {
		/* Set RDS length */
		set_rds_len(fmdev, 0, strlen(rds_text));
		/* Set RDS text */
		set_rds_text(fmdev, rds_text);
		/* Set RDS mode */
		set_rds_data_mode(fmdev, 0x0);
	}

	/* Send command to enable RDS */
	if (rds_en_dis == FM_RDS_ENABLE)
		payload = 0x01;
	else
		payload = 0x00;

	ret = fmc_send_cmd(fmdev, RDS_DATA_ENB, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	if (rds_en_dis == FM_RDS_ENABLE) {
		/* Set RDS length */
		set_rds_len(fmdev, 0, strlen(rds_text));

		/* Set RDS text */
		set_rds_text(fmdev, rds_text);
	}
	fmdev->tx_data.rds.flag = rds_en_dis;

	return 0;
}

/* Set the RDS radio text; only valid in TX mode (-EPERM otherwise). */
int fm_tx_set_radio_text(struct fmdev *fmdev, u8 *rds_text, u8 rds_type)
{
	u16 payload;
	int ret;

	if (fmdev->curr_fmmode != FM_MODE_TX)
		return -EPERM;

	fm_tx_set_rds_mode(fmdev, 0);

	/* Set RDS length */
	set_rds_len(fmdev, rds_type, strlen(rds_text));

	/* Set RDS text */
	set_rds_text(fmdev, rds_text);

	/* Set RDS mode */
	set_rds_data_mode(fmdev, 0x0);

	payload = 1;
	ret = fmc_send_cmd(fmdev, RDS_DATA_ENB, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Set the alternate frequency, given in Hz-scaled units; converted to the
 * chip's channel encoding ((af - 87500) / 100).
 * NOTE(review): this writes TA_SET, not an AF-specific opcode — looks
 * intentional in this driver version but worth confirming against the
 * firmware command list.
 */
int fm_tx_set_af(struct fmdev *fmdev, u32 af)
{
	u16 payload;
	int ret;

	if (fmdev->curr_fmmode != FM_MODE_TX)
		return -EPERM;

	fmdbg("AF: %d\n", af);

	af = (af - 87500) / 100;
	payload = (u16)af;
	ret = fmc_send_cmd(fmdev, TA_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return 0;
}

/* Select the TX band; only Europe/US and Japan are valid. */
int fm_tx_set_region(struct fmdev *fmdev, u8 region)
{
	u16 payload;
	int ret;

	if (region != FM_BAND_EUROPE_US && region != FM_BAND_JAPAN) {
		fmerr("Invalid band\n");
		return -EINVAL;
	}

	/* Send command to set the band */
	payload = (u16)region;
	ret = fmc_send_cmd(fmdev, TX_BAND_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return 0;
}

/* Set the TX mute mode. */
int fm_tx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
{
	u16 payload;
	int ret;

	fmdbg("tx: mute mode %d\n", mute_mode_toset);

	payload = mute_mode_toset;
	ret = fmc_send_cmd(fmdev, MUTE, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return 0;
}

/* Set TX Audio I/O */
static int set_audio_io(struct fmdev *fmdev)
{
	struct fmtx_data *tx = &fmdev->tx_data;
	u16 payload;
	int ret;

	/* Set Audio I/O Enable */
	payload = tx->audio_io;
	ret = fmc_send_cmd(fmdev, AUDIO_IO_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* TODO: is audio set? */
	return 0;
}

/*
 * Start TX Transmission: enable the power interrupt, request the new
 * power state and wait (with timeout) for the chip to confirm.
 */
static int enable_xmit(struct fmdev *fmdev, u8 new_xmit_state)
{
	struct fmtx_data *tx = &fmdev->tx_data;
	unsigned long timeleft;
	u16 payload;
	int ret;

	/* Enable POWER_ENB interrupts */
	payload = FM_POW_ENB_EVENT;
	ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Set Power Enable */
	payload = new_xmit_state;
	ret = fmc_send_cmd(fmdev, POWER_ENB_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Wait for Power Enabled */
	init_completion(&fmdev->maintask_comp);
	timeleft = wait_for_completion_timeout(&fmdev->maintask_comp,
			FM_DRV_TX_TIMEOUT);
	if (!timeleft) {
		fmerr("Timeout(%d sec),didn't get tune ended interrupt\n",
			   jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
		return -ETIMEDOUT;
	}

	set_bit(FM_CORE_TX_XMITING, &fmdev->flag);
	tx->xmit_state = new_xmit_state;

	return 0;
}

/* Set TX power level */
int fm_tx_set_pwr_lvl(struct fmdev *fmdev, u8 new_pwr_lvl)
{
	u16 payload;
	struct fmtx_data *tx = &fmdev->tx_data;
	int ret;

	if (fmdev->curr_fmmode != FM_MODE_TX)
		return -EPERM;
	fmdbg("tx: pwr_level_to_set %ld\n", (long int)new_pwr_lvl);

	/* If the core isn't ready update global variable */
	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		tx->pwr_lvl = new_pwr_lvl;
		return 0;
	}

	/* Set power level: Application will specify power level value in
	 * units of dB/uV, whereas range and step are specific to FM chip.
	 * For TI's WL chips, convert application specified power level value
	 * to chip specific value by subtracting 122 from it. Refer to TI FM
	 * data sheet for details.
	 * */
	payload = (FM_PWR_LVL_HIGH - new_pwr_lvl);
	ret = fmc_send_cmd(fmdev, POWER_LEV_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* TODO: is the power level set? */
	tx->pwr_lvl = new_pwr_lvl;

	return 0;
}

/*
 * Sets FM TX pre-emphasis filter value (OFF, 50us, or 75us)
 * Convert V4L2 specified filter values to chip specific filter values.
 *
 * Fix: the switch previously had no default case, so an out-of-range
 * @preemphasis left @payload uninitialized (undefined behavior) and sent
 * garbage to the chip.  Reject unknown values with -EINVAL instead.
 */
int fm_tx_set_preemph_filter(struct fmdev *fmdev, u32 preemphasis)
{
	struct fmtx_data *tx = &fmdev->tx_data;
	u16 payload;
	int ret;

	if (fmdev->curr_fmmode != FM_MODE_TX)
		return -EPERM;

	switch (preemphasis) {
	case V4L2_PREEMPHASIS_DISABLED:
		payload = FM_TX_PREEMPH_OFF;
		break;
	case V4L2_PREEMPHASIS_50_uS:
		payload = FM_TX_PREEMPH_50US;
		break;
	case V4L2_PREEMPHASIS_75_uS:
		payload = FM_TX_PREEMPH_75US;
		break;
	default:
		/* Unknown pre-emphasis value: don't touch the hardware. */
		return -EINVAL;
	}

	ret = fmc_send_cmd(fmdev, PREMPH_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	tx->preemph = payload;

	return ret;
}

/* Get the TX tuning capacitor value.*/
int fm_tx_get_tune_cap_val(struct fmdev *fmdev)
{
	u16 curr_val;
	u32 resp_len;
	int ret;

	if (fmdev->curr_fmmode != FM_MODE_TX)
		return -EPERM;

	ret = fmc_send_cmd(fmdev, READ_FMANT_TUNE_VALUE, REG_RD,
			NULL, sizeof(curr_val), &curr_val, &resp_len);
	if (ret < 0)
		return ret;

	curr_val = be16_to_cpu((__force __be16)curr_val);

	return curr_val;
}

/* Set TX Frequency */
int fm_tx_set_freq(struct fmdev *fmdev, u32 freq_to_set)
{
	struct fmtx_data *tx = &fmdev->tx_data;
	u16 payload, chanl_index;
	int ret;

	/* Stop an ongoing transmission before retuning. */
	if (test_bit(FM_CORE_TX_XMITING, &fmdev->flag)) {
		enable_xmit(fmdev, 0);
		clear_bit(FM_CORE_TX_XMITING, &fmdev->flag);
	}

	/* Enable FR, BL interrupts */
	payload = (FM_FR_EVENT | FM_BL_EVENT);
	ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	tx->tx_frq = (unsigned long)freq_to_set;
	fmdbg("tx: freq_to_set %ld\n", (long int)tx->tx_frq);

	chanl_index = freq_to_set / 10;

	/* Set current tuner channel */
	payload = chanl_index;
	ret = fmc_send_cmd(fmdev, CHANL_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	fm_tx_set_pwr_lvl(fmdev, tx->pwr_lvl);
	fm_tx_set_preemph_filter(fmdev, tx->preemph);

	tx->audio_io = 0x01;	/* I2S */
	set_audio_io(fmdev);

	enable_xmit(fmdev, 0x01);	/* Enable transmission */

	tx->aud_mode = FM_STEREO_MODE;
	tx->rds.flag = FM_RDS_DISABLE;

	return 0;
}
gpl-2.0
BuzzBumbleBee/linux-htc-acevivo
fs/ntfs/runlist.c
1881
60881
/** * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project. * * Copyright (c) 2001-2007 Anton Altaparmakov * Copyright (c) 2002-2005 Richard Russon * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "debug.h" #include "dir.h" #include "endian.h" #include "malloc.h" #include "ntfs.h" /** * ntfs_rl_mm - runlist memmove * * It is up to the caller to serialize access to the runlist @base. */ static inline void ntfs_rl_mm(runlist_element *base, int dst, int src, int size) { if (likely((dst != src) && (size > 0))) memmove(base + dst, base + src, size * sizeof(*base)); } /** * ntfs_rl_mc - runlist memory copy * * It is up to the caller to serialize access to the runlists @dstbase and * @srcbase. */ static inline void ntfs_rl_mc(runlist_element *dstbase, int dst, runlist_element *srcbase, int src, int size) { if (likely(size > 0)) memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase)); } /** * ntfs_rl_realloc - Reallocate memory for runlists * @rl: original runlist * @old_size: number of runlist elements in the original runlist @rl * @new_size: number of runlist elements we need space for * * As the runlists grow, more memory will be required. 
To prevent the * kernel having to allocate and reallocate large numbers of small bits of * memory, this function returns an entire page of memory. * * It is up to the caller to serialize access to the runlist @rl. * * N.B. If the new allocation doesn't require a different number of pages in * memory, the function will return the original pointer. * * On success, return a pointer to the newly allocated, or recycled, memory. * On error, return -errno. The following error codes are defined: * -ENOMEM - Not enough memory to allocate runlist array. * -EINVAL - Invalid parameters were passed in. */ static inline runlist_element *ntfs_rl_realloc(runlist_element *rl, int old_size, int new_size) { runlist_element *new_rl; old_size = PAGE_ALIGN(old_size * sizeof(*rl)); new_size = PAGE_ALIGN(new_size * sizeof(*rl)); if (old_size == new_size) return rl; new_rl = ntfs_malloc_nofs(new_size); if (unlikely(!new_rl)) return ERR_PTR(-ENOMEM); if (likely(rl != NULL)) { if (unlikely(old_size > new_size)) old_size = new_size; memcpy(new_rl, rl, old_size); ntfs_free(rl); } return new_rl; } /** * ntfs_rl_realloc_nofail - Reallocate memory for runlists * @rl: original runlist * @old_size: number of runlist elements in the original runlist @rl * @new_size: number of runlist elements we need space for * * As the runlists grow, more memory will be required. To prevent the * kernel having to allocate and reallocate large numbers of small bits of * memory, this function returns an entire page of memory. * * This function guarantees that the allocation will succeed. It will sleep * for as long as it takes to complete the allocation. * * It is up to the caller to serialize access to the runlist @rl. * * N.B. If the new allocation doesn't require a different number of pages in * memory, the function will return the original pointer. * * On success, return a pointer to the newly allocated, or recycled, memory. * On error, return -errno. 
The following error codes are defined: * -ENOMEM - Not enough memory to allocate runlist array. * -EINVAL - Invalid parameters were passed in. */ static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl, int old_size, int new_size) { runlist_element *new_rl; old_size = PAGE_ALIGN(old_size * sizeof(*rl)); new_size = PAGE_ALIGN(new_size * sizeof(*rl)); if (old_size == new_size) return rl; new_rl = ntfs_malloc_nofs_nofail(new_size); BUG_ON(!new_rl); if (likely(rl != NULL)) { if (unlikely(old_size > new_size)) old_size = new_size; memcpy(new_rl, rl, old_size); ntfs_free(rl); } return new_rl; } /** * ntfs_are_rl_mergeable - test if two runlists can be joined together * @dst: original runlist * @src: new runlist to test for mergeability with @dst * * Test if two runlists can be joined together. For this, their VCNs and LCNs * must be adjacent. * * It is up to the caller to serialize access to the runlists @dst and @src. * * Return: true Success, the runlists can be merged. * false Failure, the runlists cannot be merged. */ static inline bool ntfs_are_rl_mergeable(runlist_element *dst, runlist_element *src) { BUG_ON(!dst); BUG_ON(!src); /* We can merge unmapped regions even if they are misaligned. */ if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED)) return true; /* If the runs are misaligned, we cannot merge them. */ if ((dst->vcn + dst->length) != src->vcn) return false; /* If both runs are non-sparse and contiguous, we can merge them. */ if ((dst->lcn >= 0) && (src->lcn >= 0) && ((dst->lcn + dst->length) == src->lcn)) return true; /* If we are merging two holes, we can merge them. */ if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE)) return true; /* Cannot merge. */ return false; } /** * __ntfs_rl_merge - merge two runlists without testing if they can be merged * @dst: original, destination runlist * @src: new runlist to merge with @dst * * Merge the two runlists, writing into the destination runlist @dst. 
The * caller must make sure the runlists can be merged or this will corrupt the * destination runlist. * * It is up to the caller to serialize access to the runlists @dst and @src. */ static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src) { dst->length += src->length; } /** * ntfs_rl_append - append a runlist after a given element * @dst: original runlist to be worked on * @dsize: number of elements in @dst (including end marker) * @src: runlist to be inserted into @dst * @ssize: number of elements in @src (excluding end marker) * @loc: append the new runlist @src after this element in @dst * * Append the runlist @src after element @loc in @dst. Merge the right end of * the new runlist, if necessary. Adjust the size of the hole before the * appended runlist. * * It is up to the caller to serialize access to the runlists @dst and @src. * * On success, return a pointer to the new, combined, runlist. Note, both * runlists @dst and @src are deallocated before returning so you cannot use * the pointers for anything any more. (Strictly speaking the returned runlist * may be the same as @dst but this is irrelevant.) * * On error, return -errno. Both runlists are left unmodified. The following * error codes are defined: * -ENOMEM - Not enough memory to allocate runlist array. * -EINVAL - Invalid parameters were passed in. */ static inline runlist_element *ntfs_rl_append(runlist_element *dst, int dsize, runlist_element *src, int ssize, int loc) { bool right = false; /* Right end of @src needs merging. */ int marker; /* End of the inserted runs. */ BUG_ON(!dst); BUG_ON(!src); /* First, check if the right hand end needs merging. */ if ((loc + 1) < dsize) right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); /* Space required: @dst size + @src size, less one if we merged. 
*/ dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right); if (IS_ERR(dst)) return dst; /* * We are guaranteed to succeed from here so can start modifying the * original runlists. */ /* First, merge the right hand end, if necessary. */ if (right) __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); /* First run after the @src runs that have been inserted. */ marker = loc + ssize + 1; /* Move the tail of @dst out of the way, then copy in @src. */ ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right)); ntfs_rl_mc(dst, loc + 1, src, 0, ssize); /* Adjust the size of the preceding hole. */ dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; /* We may have changed the length of the file, so fix the end marker */ if (dst[marker].lcn == LCN_ENOENT) dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; return dst; } /** * ntfs_rl_insert - insert a runlist into another * @dst: original runlist to be worked on * @dsize: number of elements in @dst (including end marker) * @src: new runlist to be inserted * @ssize: number of elements in @src (excluding end marker) * @loc: insert the new runlist @src before this element in @dst * * Insert the runlist @src before element @loc in the runlist @dst. Merge the * left end of the new runlist, if necessary. Adjust the size of the hole * after the inserted runlist. * * It is up to the caller to serialize access to the runlists @dst and @src. * * On success, return a pointer to the new, combined, runlist. Note, both * runlists @dst and @src are deallocated before returning so you cannot use * the pointers for anything any more. (Strictly speaking the returned runlist * may be the same as @dst but this is irrelevant.) * * On error, return -errno. Both runlists are left unmodified. The following * error codes are defined: * -ENOMEM - Not enough memory to allocate runlist array. * -EINVAL - Invalid parameters were passed in. 
*/
static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
		int dsize, runlist_element *src, int ssize, int loc)
{
	bool left = false;	/* Left end of @src needs merging. */
	bool disc = false;	/* Discontinuity between @dst and @src. */
	int marker;		/* End of the inserted runs. */

	BUG_ON(!dst);
	BUG_ON(!src);

	/*
	 * disc => Discontinuity between the end of @dst and the start of @src.
	 * This means we might need to insert a "not mapped" run.
	 */
	if (loc == 0)
		disc = (src[0].vcn > 0);
	else {
		s64 merged_length;

		left = ntfs_are_rl_mergeable(dst + loc - 1, src);

		merged_length = dst[loc - 1].length;
		if (left)
			merged_length += src->length;

		disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
	}
	/*
	 * Space required: @dst size + @src size, less one if we merged, plus
	 * one if there was a discontinuity.
	 */
	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc);
	if (IS_ERR(dst))
		return dst;
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlist.
	 */
	if (left)
		__ntfs_rl_merge(dst + loc - 1, src);
	/*
	 * First run after the @src runs that have been inserted.
	 * Nominally, @marker equals @loc + @ssize, i.e. location + number of
	 * runs in @src.  However, if @left, then the first run in @src has
	 * been merged with one in @dst.  And if @disc, then @dst and @src do
	 * not meet and we need an extra run to fill the gap.
	 */
	marker = loc + ssize - left + disc;

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, marker, loc, dsize - loc);
	ntfs_rl_mc(dst, loc + disc, src, left, ssize - left);

	/* Adjust the VCN of the first run after the insertion... */
	dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
	/* ... and the length. */
	if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED)
		dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn;

	/*
	 * Writing beyond the end of the file and there is a discontinuity:
	 * insert an explicit "not mapped" run at @loc to fill the gap in
	 * front of the copied-in @src runs.
	 */
	if (disc) {
		if (loc > 0) {
			/* The gap starts where the preceding run ends. */
			dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length;
			dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
		} else {
			/* No preceding run, so the gap starts at vcn 0. */
			dst[loc].vcn = 0;
			dst[loc].length = dst[loc + 1].vcn;
		}
		dst[loc].lcn = LCN_RL_NOT_MAPPED;
	}
	return dst;
}

/**
 * ntfs_rl_replace - overwrite a runlist element with another runlist
 * @dst:	original runlist to be worked on
 * @dsize:	number of elements in @dst (including end marker)
 * @src:	new runlist to be inserted
 * @ssize:	number of elements in @src (excluding end marker)
 * @loc:	index in runlist @dst to overwrite with @src
 *
 * Replace the runlist element @dst at @loc with @src.  Merge the left and
 * right ends of the inserted runlist, if necessary.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 *
 * On success, return a pointer to the new, combined, runlist.  Note, both
 * runlists @dst and @src are deallocated before returning so you cannot use
 * the pointers for anything any more.  (Strictly speaking the returned
 * runlist may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno.  Both runlists are left unmodified.  The following
 * error codes are defined:
 *	-ENOMEM	- Not enough memory to allocate runlist array.
 *	-EINVAL	- Invalid parameters were passed in.
 */
static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
		int dsize, runlist_element *src, int ssize, int loc)
{
	signed delta;
	bool left = false;	/* Left end of @src needs merging. */
	bool right = false;	/* Right end of @src needs merging. */
	int tail;		/* Start of tail of @dst. */
	int marker;		/* End of the inserted runs. */

	BUG_ON(!dst);
	BUG_ON(!src);

	/* First, see if the left and right ends need merging. */
	if ((loc + 1) < dsize)
		right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
	if (loc > 0)
		left = ntfs_are_rl_mergeable(dst + loc - 1, src);
	/*
	 * Allocate some space.  We will need less if the left, right, or both
	 * ends get merged.  The -1 accounts for the run being replaced.
 */
	delta = ssize - 1 - left - right;
	if (delta > 0) {
		dst = ntfs_rl_realloc(dst, dsize, dsize + delta);
		if (IS_ERR(dst))
			return dst;
	}
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlists.
	 */

	/* First, merge the left and right ends, if necessary. */
	if (right)
		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
	if (left)
		__ntfs_rl_merge(dst + loc - 1, src);
	/*
	 * Offset of the tail of @dst.  This needs to be moved out of the way
	 * to make space for the runs to be copied from @src, i.e. the first
	 * run of the tail of @dst.
	 * Nominally, @tail equals @loc + 1, i.e. location, skipping the
	 * replaced run.  However, if @right, then one of @dst's runs is
	 * already merged into @src.
	 */
	tail = loc + right + 1;
	/*
	 * First run after the @src runs that have been inserted, i.e. where
	 * the tail of @dst needs to be moved to.
	 * Nominally, @marker equals @loc + @ssize, i.e. location + number of
	 * runs in @src.  However, if @left, then the first run in @src has
	 * been merged with one in @dst.
	 */
	marker = loc + ssize - left;

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, marker, tail, dsize - tail);
	ntfs_rl_mc(dst, loc, src, left, ssize - left);

	/* We may have changed the length of the file, so fix the end marker. */
	if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT)
		dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
	return dst;
}

/**
 * ntfs_rl_split - insert a runlist into the centre of a hole
 * @dst:	original runlist to be worked on
 * @dsize:	number of elements in @dst (including end marker)
 * @src:	new runlist to be inserted
 * @ssize:	number of elements in @src (excluding end marker)
 * @loc:	index in runlist @dst at which to split and insert @src
 *
 * Split the runlist @dst at @loc into two and insert @new in between the two
 * fragments.  No merging of runlists is necessary.  Adjust the size of the
 * holes either side.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 *
 * On success, return a pointer to the new, combined, runlist.  Note, both
 * runlists @dst and @src are deallocated before returning so you cannot use
 * the pointers for anything any more.  (Strictly speaking the returned
 * runlist may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno.  Both runlists are left unmodified.  The following
 * error codes are defined:
 *	-ENOMEM	- Not enough memory to allocate runlist array.
 *	-EINVAL	- Invalid parameters were passed in.
 */
static inline runlist_element *ntfs_rl_split(runlist_element *dst, int dsize,
		runlist_element *src, int ssize, int loc)
{
	BUG_ON(!dst);
	BUG_ON(!src);

	/* Space required: @dst size + @src size + one new hole. */
	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1);
	if (IS_ERR(dst))
		return dst;
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlists.
	 */

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc);
	ntfs_rl_mc(dst, loc + 1, src, 0, ssize);

	/* Adjust the size of the holes either side of @src. */
	dst[loc].length		= dst[loc+1].vcn       - dst[loc].vcn;
	dst[loc+ssize+1].vcn	= dst[loc+ssize].vcn   + dst[loc+ssize].length;
	dst[loc+ssize+1].length	= dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn;

	return dst;
}

/**
 * ntfs_runlists_merge - merge two runlists into one
 * @drl:	original runlist to be worked on
 * @srl:	new runlist to be merged into @drl
 *
 * First we sanity check the two runlists @srl and @drl to make sure that
 * they are sensible and can be merged.  The runlist @srl must be either
 * after the runlist @drl or completely within a hole (or unmapped region)
 * in @drl.
 *
 * It is up to the caller to serialize access to the runlists @drl and @srl.
 *
 * Merging of runlists is necessary in two cases:
 *   1. When attribute lists are used and a further extent is being mapped.
 *   2. When new clusters are allocated to fill a hole or extend a file.
 *
 * There are four possible ways @srl can be merged.
 * It can:
 *	- be inserted at the beginning of a hole,
 *	- split the hole in two and be inserted between the two fragments,
 *	- be appended at the end of a hole, or it can
 *	- replace the whole hole.
 * It can also be appended to the end of the runlist, which is just a variant
 * of the insert case.
 *
 * On success, return a pointer to the new, combined, runlist.  Note, both
 * runlists @drl and @srl are deallocated before returning so you cannot use
 * the pointers for anything any more.  (Strictly speaking the returned
 * runlist may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno.  Both runlists are left unmodified.  The following
 * error codes are defined:
 *	-ENOMEM	- Not enough memory to allocate runlist array.
 *	-EINVAL	- Invalid parameters were passed in.
 *	-ERANGE	- The runlists overlap and cannot be merged.
 */
runlist_element *ntfs_runlists_merge(runlist_element *drl,
		runlist_element *srl)
{
	int di, si;	/* Current index into @[ds]rl. */
	int sstart;	/* First index with lcn > LCN_RL_NOT_MAPPED. */
	int dins;	/* Index into @drl at which to insert @srl. */
	int dend, send;	/* Last index into @[ds]rl. */
	int dfinal, sfinal;	/* The last index into @[ds]rl with
				   lcn >= LCN_HOLE. */
	int marker = 0;
	VCN marker_vcn = 0;

#ifdef DEBUG
	ntfs_debug("dst:");
	ntfs_debug_dump_runlist(drl);
	ntfs_debug("src:");
	ntfs_debug_dump_runlist(srl);
#endif

	/* Check for silly calling... */
	if (unlikely(!srl))
		return drl;
	if (IS_ERR(srl) || IS_ERR(drl))
		return ERR_PTR(-EINVAL);

	/* Check for the case where the first mapping is being done now. */
	if (unlikely(!drl)) {
		drl = srl;
		/* Complete the source runlist if necessary. */
		if (unlikely(drl[0].vcn)) {
			/* Scan to the end of the source runlist. */
			for (dend = 0; likely(drl[dend].length); dend++)
				;
			dend++;
			drl = ntfs_rl_realloc(drl, dend, dend + 1);
			if (IS_ERR(drl))
				return drl;
			/* Insert start element at the front of the runlist. */
			ntfs_rl_mm(drl, 1, 0, dend);
			drl[0].vcn = 0;
			drl[0].lcn = LCN_RL_NOT_MAPPED;
			drl[0].length = drl[1].vcn;
		}
		goto finished;
	}

	si = di = 0;

	/* Skip any unmapped start element(s) in the source runlist. */
	while (srl[si].length && srl[si].lcn < LCN_HOLE)
		si++;

	/* Can't have an entirely unmapped source runlist. */
	BUG_ON(!srl[si].length);

	/* Record the starting points. */
	sstart = si;

	/*
	 * Skip forward in @drl until we reach the position where @srl needs
	 * to be inserted.  If we reach the end of @drl, @srl just needs to be
	 * appended to @drl.
	 */
	for (; drl[di].length; di++) {
		if (drl[di].vcn + drl[di].length > srl[sstart].vcn)
			break;
	}
	dins = di;

	/* Sanity check for illegal overlaps. */
	if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
			(srl[si].lcn >= 0)) {
		ntfs_error(NULL, "Run lists overlap. Cannot merge!");
		return ERR_PTR(-ERANGE);
	}

	/* Scan to the end of both runlists in order to know their sizes. */
	for (send = si; srl[send].length; send++)
		;
	for (dend = di; drl[dend].length; dend++)
		;

	if (srl[send].lcn == LCN_ENOENT)
		marker_vcn = srl[marker = send].vcn;

	/* Scan to the last element with lcn >= LCN_HOLE. */
	for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE;
			sfinal--)
		;
	for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE;
			dfinal--)
		;

	{
	bool start;
	bool finish;
	int ds = dend + 1;		/* Number of elements in drl & srl */
	int ss = sfinal - sstart + 1;

	start  = ((drl[dins].lcn <  LCN_RL_NOT_MAPPED) ||   /* End of file */
		  (drl[dins].vcn == srl[sstart].vcn));	    /* Start of hole */
	finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) &&   /* End of file */
		 ((drl[dins].vcn + drl[dins].length) <=	    /* End of hole */
		  (srl[send - 1].vcn + srl[send - 1].length)));

	/* Or we will lose an end marker. */
	if (finish && !drl[dins].length)
		ss++;
	if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
		finish = false;
#if 0
	ntfs_debug("dfinal = %i, dend = %i", dfinal, dend);
	ntfs_debug("sstart = %i, sfinal = %i, send = %i", sstart, sfinal, send);
	ntfs_debug("start = %i, finish = %i", start, finish);
	ntfs_debug("ds = %i, ss = %i, dins = %i", ds, ss, dins);
#endif
	/* Pick the merge strategy from the (start, finish) combination. */
	if (start) {
		if (finish)
			drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins);
		else
			drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins);
	} else {
		if (finish)
			drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins);
		else
			drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins);
	}
	if (IS_ERR(drl)) {
		ntfs_error(NULL, "Merge failed.");
		return drl;
	}
	ntfs_free(srl);
	if (marker) {
		ntfs_debug("Triggering marker code.");
		for (ds = dend; drl[ds].length; ds++)
			;
		/* We only need to care if @srl ended after @drl. */
		if (drl[ds].vcn <= marker_vcn) {
			int slots = 0;

			if (drl[ds].vcn == marker_vcn) {
				ntfs_debug("Old marker = 0x%llx, replacing "
						"with LCN_ENOENT.",
						(unsigned long long)
						drl[ds].lcn);
				drl[ds].lcn = LCN_ENOENT;
				goto finished;
			}
			/*
			 * We need to create an unmapped runlist element in
			 * @drl or extend an existing one before adding the
			 * ENOENT terminator.
			 */
			if (drl[ds].lcn == LCN_ENOENT) {
				ds--;
				slots = 1;
			}
			if (drl[ds].lcn != LCN_RL_NOT_MAPPED) {
				/* Add an unmapped runlist element. */
				if (!slots) {
					drl = ntfs_rl_realloc_nofail(drl, ds,
							ds + 2);
					slots = 2;
				}
				ds++;
				/* Need to set vcn if it isn't set already. */
				if (slots != 1)
					drl[ds].vcn = drl[ds - 1].vcn +
							drl[ds - 1].length;
				drl[ds].lcn = LCN_RL_NOT_MAPPED;
				/* We now used up a slot. */
				slots--;
			}
			drl[ds].length = marker_vcn - drl[ds].vcn;
			/* Finally add the ENOENT terminator. */
			ds++;
			if (!slots)
				drl = ntfs_rl_realloc_nofail(drl, ds, ds + 1);
			drl[ds].vcn = marker_vcn;
			drl[ds].lcn = LCN_ENOENT;
			drl[ds].length = (s64)0;
		}
	}
	}

finished:
	/* The merge was completed successfully.
 */
	ntfs_debug("Merged runlist:");
	ntfs_debug_dump_runlist(drl);
	return drl;
}

/**
 * ntfs_mapping_pairs_decompress - convert mapping pairs array to runlist
 * @vol:	ntfs volume on which the attribute resides
 * @attr:	attribute record whose mapping pairs array to decompress
 * @old_rl:	optional runlist in which to insert @attr's runlist
 *
 * It is up to the caller to serialize access to the runlist @old_rl.
 *
 * Decompress the attribute @attr's mapping pairs array into a runlist.  On
 * success, return the decompressed runlist.
 *
 * If @old_rl is not NULL, decompressed runlist is inserted into the
 * appropriate place in @old_rl and the resultant, combined runlist is
 * returned.  The original @old_rl is deallocated.
 *
 * On error, return -errno.  @old_rl is left unmodified in that case.
 *
 * The following error codes are defined:
 *	-ENOMEM	- Not enough memory to allocate runlist array.
 *	-EIO	- Corrupt runlist.
 *	-EINVAL	- Invalid parameters were passed in.
 *	-ERANGE	- The two runlists overlap.
 *
 * FIXME: For now we take the conceptually simplest approach of creating the
 * new runlist disregarding the already existing one and then splicing the
 * two into one, if that is possible (we check for overlap and discard the new
 * runlist if overlap present before returning ERR_PTR(-ERANGE)).
 */
runlist_element *ntfs_mapping_pairs_decompress(const ntfs_volume *vol,
		const ATTR_RECORD *attr, runlist_element *old_rl)
{
	VCN vcn;		/* Current vcn. */
	LCN lcn;		/* Current lcn. */
	s64 deltaxcn;		/* Change in [vl]cn. */
	runlist_element *rl;	/* The output runlist. */
	u8 *buf;		/* Current position in mapping pairs array. */
	u8 *attr_end;		/* End of attribute. */
	int rlsize;		/* Size of runlist buffer. */
	u16 rlpos;		/* Current runlist position in units of
				   runlist_elements. */
	u8 b;			/* Current byte offset in buf. */

#ifdef DEBUG
	/* Make sure attr exists and is non-resident. */
	if (!attr || !attr->non_resident || sle64_to_cpu(
			attr->data.non_resident.lowest_vcn) < (VCN)0) {
		ntfs_error(vol->sb, "Invalid arguments.");
		return ERR_PTR(-EINVAL);
	}
#endif
	/* Start at vcn = lowest_vcn and lcn 0. */
	vcn = sle64_to_cpu(attr->data.non_resident.lowest_vcn);
	lcn = 0;
	/* Get start of the mapping pairs array. */
	buf = (u8*)attr + le16_to_cpu(
			attr->data.non_resident.mapping_pairs_offset);
	attr_end = (u8*)attr + le32_to_cpu(attr->length);
	if (unlikely(buf < (u8*)attr || buf > attr_end)) {
		ntfs_error(vol->sb, "Corrupt attribute.");
		return ERR_PTR(-EIO);
	}
	/* If the mapping pairs array is valid but empty, nothing to do. */
	if (!vcn && !*buf)
		return old_rl;
	/* Current position in runlist array. */
	rlpos = 0;
	/* Allocate first page and set current runlist size to one page. */
	rl = ntfs_malloc_nofs(rlsize = PAGE_SIZE);
	if (unlikely(!rl))
		return ERR_PTR(-ENOMEM);
	/* Insert unmapped starting element if necessary. */
	if (vcn) {
		rl->vcn = 0;
		rl->lcn = LCN_RL_NOT_MAPPED;
		rl->length = vcn;
		rlpos++;
	}
	while (buf < attr_end && *buf) {
		/*
		 * Allocate more memory if needed, including space for the
		 * not-mapped and terminator elements.  ntfs_malloc_nofs()
		 * operates on whole pages only.
		 */
		if (((rlpos + 3) * sizeof(*old_rl)) > rlsize) {
			runlist_element *rl2;

			rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
			if (unlikely(!rl2)) {
				ntfs_free(rl);
				return ERR_PTR(-ENOMEM);
			}
			memcpy(rl2, rl, rlsize);
			ntfs_free(rl);
			rl = rl2;
			rlsize += PAGE_SIZE;
		}
		/* Enter the current vcn into the current runlist element. */
		rl[rlpos].vcn = vcn;
		/*
		 * Get the change in vcn, i.e. the run length in clusters.
		 * Doing it this way ensures that we signextend negative
		 * values.  A negative run length doesn't make any sense, but
		 * hey, I didn't make up the NTFS specs and Windows NT4 treats
		 * the run length as a signed value so that's how it is...
		 */
		b = *buf & 0xf;
		if (b) {
			if (unlikely(buf + b > attr_end))
				goto io_error;
			/* Sign-extend from the most significant byte down. */
			for (deltaxcn = (s8)buf[b--]; b; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
		} else { /* The length entry is compulsory. */
			ntfs_error(vol->sb, "Missing length entry in mapping "
					"pairs array.");
			deltaxcn = (s64)-1;
		}
		/*
		 * Assume a negative length to indicate data corruption and
		 * hence clean-up and return NULL.
		 */
		if (unlikely(deltaxcn < 0)) {
			ntfs_error(vol->sb, "Invalid length in mapping pairs "
					"array.");
			goto err_out;
		}
		/*
		 * Enter the current run length into the current runlist
		 * element.
		 */
		rl[rlpos].length = deltaxcn;
		/* Increment the current vcn by the current run length. */
		vcn += deltaxcn;
		/*
		 * There might be no lcn change at all, as is the case for
		 * sparse clusters on NTFS 3.0+, in which case we set the lcn
		 * to LCN_HOLE.
		 */
		if (!(*buf & 0xf0))
			rl[rlpos].lcn = LCN_HOLE;
		else {
			/* Get the lcn change which really can be negative. */
			u8 b2 = *buf & 0xf;
			b = b2 + ((*buf >> 4) & 0xf);
			if (buf + b > attr_end)
				goto io_error;
			for (deltaxcn = (s8)buf[b--]; b > b2; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
			/* Change the current lcn to its new value. */
			lcn += deltaxcn;
#ifdef DEBUG
			/*
			 * On NTFS 1.2-, apparently can have lcn == -1 to
			 * indicate a hole.  But we haven't verified ourselves
			 * whether it is really the lcn or the deltaxcn that is
			 * -1.  So if either is found give us a message so we
			 * can investigate it further!
			 */
			if (vol->major_ver < 3) {
				if (unlikely(deltaxcn == (LCN)-1))
					ntfs_error(vol->sb, "lcn delta == -1");
				if (unlikely(lcn == (LCN)-1))
					ntfs_error(vol->sb, "lcn == -1");
			}
#endif
			/* Check lcn is not below -1. */
			if (unlikely(lcn < (LCN)-1)) {
				ntfs_error(vol->sb, "Invalid LCN < -1 in "
						"mapping pairs array.");
				goto err_out;
			}
			/* Enter the current lcn into the runlist element. */
			rl[rlpos].lcn = lcn;
		}
		/* Get to the next runlist element. */
		rlpos++;
		/* Increment the buffer position to the next mapping pair. */
		buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1;
	}
	if (unlikely(buf >= attr_end))
		goto io_error;
	/*
	 * If there is a highest_vcn specified, it must be equal to the final
	 * vcn in the runlist - 1, or something has gone badly wrong.
	 */
	deltaxcn = sle64_to_cpu(attr->data.non_resident.highest_vcn);
	if (unlikely(deltaxcn && vcn - 1 != deltaxcn)) {
mpa_err:
		ntfs_error(vol->sb, "Corrupt mapping pairs array in "
				"non-resident attribute.");
		goto err_out;
	}
	/* Setup not mapped runlist element if this is the base extent. */
	if (!attr->data.non_resident.lowest_vcn) {
		VCN max_cluster;

		max_cluster = ((sle64_to_cpu(
				attr->data.non_resident.allocated_size) +
				vol->cluster_size - 1) >>
				vol->cluster_size_bits) - 1;
		/*
		 * A highest_vcn of zero means this is a single extent
		 * attribute so simply terminate the runlist with LCN_ENOENT).
		 */
		if (deltaxcn) {
			/*
			 * If there is a difference between the highest_vcn and
			 * the highest cluster, the runlist is either corrupt
			 * or, more likely, there are more extents following
			 * this one.
			 */
			if (deltaxcn < max_cluster) {
				ntfs_debug("More extents to follow; deltaxcn "
						"= 0x%llx, max_cluster = "
						"0x%llx",
						(unsigned long long)deltaxcn,
						(unsigned long long)
						max_cluster);
				rl[rlpos].vcn = vcn;
				vcn += rl[rlpos].length = max_cluster -
						deltaxcn;
				rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
				rlpos++;
			} else if (unlikely(deltaxcn > max_cluster)) {
				ntfs_error(vol->sb, "Corrupt attribute. "
						"deltaxcn = 0x%llx, "
						"max_cluster = 0x%llx",
						(unsigned long long)deltaxcn,
						(unsigned long long)
						max_cluster);
				goto mpa_err;
			}
		}
		rl[rlpos].lcn = LCN_ENOENT;
	} else /* Not the base extent. There may be more extents to follow. */
		rl[rlpos].lcn = LCN_RL_NOT_MAPPED;

	/* Setup terminating runlist element. */
	rl[rlpos].vcn = vcn;
	rl[rlpos].length = (s64)0;
	/* If no existing runlist was specified, we are done. */
	if (!old_rl) {
		ntfs_debug("Mapping pairs array successfully decompressed:");
		ntfs_debug_dump_runlist(rl);
		return rl;
	}
	/* Now combine the new and old runlists checking for overlaps.
 */
	old_rl = ntfs_runlists_merge(old_rl, rl);
	if (likely(!IS_ERR(old_rl)))
		return old_rl;
	ntfs_free(rl);
	ntfs_error(vol->sb, "Failed to merge runlists.");
	return old_rl;
io_error:
	ntfs_error(vol->sb, "Corrupt attribute.");
err_out:
	ntfs_free(rl);
	return ERR_PTR(-EIO);
}

/**
 * ntfs_rl_vcn_to_lcn - convert a vcn into a lcn given a runlist
 * @rl:		runlist to use for conversion
 * @vcn:	vcn to convert
 *
 * Convert the virtual cluster number @vcn of an attribute into a logical
 * cluster number (lcn) of a device using the runlist @rl to map vcns to their
 * corresponding lcns.
 *
 * It is up to the caller to serialize access to the runlist @rl.
 *
 * Since lcns must be >= 0, we use negative return codes with special meaning:
 *
 * Return code		Meaning / Description
 * ==================================================
 *  LCN_HOLE		Hole / not allocated on disk.
 *  LCN_RL_NOT_MAPPED	This is part of the runlist which has not been
 *			inserted into the runlist yet.
 *  LCN_ENOENT		There is no such vcn in the attribute.
 *
 * Locking: - The caller must have locked the runlist (for reading or
 *	      writing).
 *	    - This function does not touch the lock, nor does it modify the
 *	      runlist.
 */
LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn)
{
	int i;

	BUG_ON(vcn < 0);
	/*
	 * If rl is NULL, assume that we have found an unmapped runlist. The
	 * caller can then attempt to map it and fail appropriately if
	 * necessary.
	 */
	if (unlikely(!rl))
		return LCN_RL_NOT_MAPPED;

	/* Catch out of lower bounds vcn. */
	if (unlikely(vcn < rl[0].vcn))
		return LCN_ENOENT;

	for (i = 0; likely(rl[i].length); i++) {
		if (unlikely(vcn < rl[i+1].vcn)) {
			if (likely(rl[i].lcn >= (LCN)0))
				return rl[i].lcn + (vcn - rl[i].vcn);
			return rl[i].lcn;
		}
	}
	/*
	 * The terminator element is setup to the correct value, i.e. one of
	 * LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT.
	 */
	if (likely(rl[i].lcn < (LCN)0))
		return rl[i].lcn;
	/* Just in case... We could replace this with BUG() some day. */
	return LCN_ENOENT;
}

#ifdef NTFS_RW

/**
 * ntfs_rl_find_vcn_nolock - find a vcn in a runlist
 * @rl:		runlist to search
 * @vcn:	vcn to find
 *
 * Find the virtual cluster number @vcn in the runlist @rl and return the
 * address of the runlist element containing the @vcn on success.
 *
 * Return NULL if @rl is NULL or @vcn is in an unmapped part/out of bounds of
 * the runlist.
 *
 * Locking: The runlist must be locked on entry.
 */
runlist_element *ntfs_rl_find_vcn_nolock(runlist_element *rl, const VCN vcn)
{
	BUG_ON(vcn < 0);
	if (unlikely(!rl || vcn < rl[0].vcn))
		return NULL;
	while (likely(rl->length)) {
		if (unlikely(vcn < rl[1].vcn)) {
			/* Mapped element (hole or real lcn) found. */
			if (likely(rl->lcn >= LCN_HOLE))
				return rl;
			return NULL;
		}
		rl++;
	}
	/* Only the LCN_ENOENT terminator itself is a valid hit here. */
	if (likely(rl->lcn == LCN_ENOENT))
		return rl;
	return NULL;
}

/**
 * ntfs_get_nr_significant_bytes - get number of bytes needed to store a number
 * @n:		number for which to get the number of bytes for
 *
 * Return the number of bytes required to store @n unambiguously as
 * a signed number.
 *
 * This is used in the context of the mapping pairs array to determine how
 * many bytes will be needed in the array to store a given logical cluster
 * number (lcn) or a specific run length.
 *
 * Return the number of bytes written.  This function cannot fail.
 */
static inline int ntfs_get_nr_significant_bytes(const s64 n)
{
	s64 l = n;
	int i;
	s8 j;

	i = 0;
	/* Count bytes until only the sign extension remains. */
	do {
		l >>= 8;
		i++;
	} while (l != 0 && l != -1);
	/* Most significant byte that would be stored. */
	j = (n >> 8 * (i - 1)) & 0xff;
	/* If the sign bit is wrong, we need an extra byte.
 */
	if ((n < 0 && j >= 0) || (n > 0 && j < 0))
		i++;
	return i;
}

/**
 * ntfs_get_size_for_mapping_pairs - get bytes needed for mapping pairs array
 * @vol:	ntfs volume (needed for the ntfs version)
 * @rl:		locked runlist to determine the size of the mapping pairs of
 * @first_vcn:	first vcn which to include in the mapping pairs array
 * @last_vcn:	last vcn which to include in the mapping pairs array
 *
 * Walk the locked runlist @rl and calculate the size in bytes of the mapping
 * pairs array corresponding to the runlist @rl, starting at vcn @first_vcn
 * and finishing with vcn @last_vcn.
 *
 * A @last_vcn of -1 means end of runlist and in that case the size of the
 * mapping pairs array corresponding to the runlist starting at vcn
 * @first_vcn and finishing at the end of the runlist is determined.
 *
 * This for example allows us to allocate a buffer of the right size when
 * building the mapping pairs array.
 *
 * If @rl is NULL, just return 1 (for the single terminator byte).
 *
 * Return the calculated size in bytes on success.  On error, return -errno.
 * The following error codes are defined:
 *	-EINVAL	- Run list contains unmapped elements.  Make sure to only
 *		  pass fully mapped runlists to this function.
 *	-EIO	- The runlist is corrupt.
 *
 * Locking: @rl must be locked on entry (either for reading or writing), it
 *	    remains locked throughout, and is left locked upon return.
 */
int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
		const runlist_element *rl, const VCN first_vcn,
		const VCN last_vcn)
{
	LCN prev_lcn;
	int rls;
	bool the_end = false;

	BUG_ON(first_vcn < 0);
	BUG_ON(last_vcn < -1);
	BUG_ON(last_vcn >= 0 && first_vcn > last_vcn);
	if (!rl) {
		BUG_ON(first_vcn);
		BUG_ON(last_vcn > 0);
		return 1;
	}
	/* Skip to runlist element containing @first_vcn. */
	while (rl->length && first_vcn >= rl[1].vcn)
		rl++;
	if (unlikely((!rl->length && first_vcn > rl->vcn) ||
			first_vcn < rl->vcn))
		return -EINVAL;
	prev_lcn = 0;
	/* Always need the terminating zero byte. */
	rls = 1;
	/* Do the first partial run if present. */
	if (first_vcn > rl->vcn) {
		s64 delta, length = rl->length;

		/* We know rl->length != 0 already. */
		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @stop_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;
			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		delta = first_vcn - rl->vcn;
		/* Header byte + length. */
		rls += 1 + ntfs_get_nr_significant_bytes(length - delta);
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just store the
		 * lcn.  Note: this assumes that on NTFS 1.2-, holes are
		 * stored with an lcn of -1 and not a delta_lcn of -1 (unless
		 * both are -1).
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			prev_lcn = rl->lcn;
			if (likely(rl->lcn >= 0))
				prev_lcn += delta;
			/* Change in lcn. */
			rls += ntfs_get_nr_significant_bytes(prev_lcn);
		}
		/* Go to next runlist element. */
		rl++;
	}
	/* Do the full runs. */
	for (; rl->length && !the_end; rl++) {
		s64 length = rl->length;

		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @stop_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;
			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		/* Header byte + length. */
		rls += 1 + ntfs_get_nr_significant_bytes(length);
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just store the
		 * lcn.  Note: this assumes that on NTFS 1.2-, holes are
		 * stored with an lcn of -1 and not a delta_lcn of -1 (unless
		 * both are -1).
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			/* Change in lcn.
 */
			rls += ntfs_get_nr_significant_bytes(rl->lcn -
					prev_lcn);
			prev_lcn = rl->lcn;
		}
	}
	return rls;
err_out:
	if (rl->lcn == LCN_RL_NOT_MAPPED)
		rls = -EINVAL;
	else
		rls = -EIO;
	return rls;
}

/**
 * ntfs_write_significant_bytes - write the significant bytes of a number
 * @dst:	destination buffer to write to
 * @dst_max:	pointer to last byte of destination buffer for bounds checking
 * @n:		number whose significant bytes to write
 *
 * Store in @dst, the minimum bytes of the number @n which are required to
 * identify @n unambiguously as a signed number, taking care not to exceed
 * @dest_max, the maximum position within @dst to which we are allowed to
 * write.
 *
 * This is used when building the mapping pairs array of a runlist to compress
 * a given logical cluster number (lcn) or a specific run length to the
 * minimum size possible.
 *
 * Return the number of bytes written on success.  On error, i.e. the
 * destination buffer @dst is too small, return -ENOSPC.
 */
static inline int ntfs_write_significant_bytes(s8 *dst, const s8 *dst_max,
		const s64 n)
{
	s64 l = n;
	int i;
	s8 j;

	i = 0;
	/* Emit bytes, least significant first, until only sign bits remain. */
	do {
		if (unlikely(dst > dst_max))
			goto err_out;
		*dst++ = l & 0xffll;
		l >>= 8;
		i++;
	} while (l != 0 && l != -1);
	/* Most significant byte that was stored. */
	j = (n >> 8 * (i - 1)) & 0xff;
	/* If the sign bit is wrong, we need an extra byte. */
	if (n < 0 && j >= 0) {
		if (unlikely(dst > dst_max))
			goto err_out;
		i++;
		*dst = (s8)-1;
	} else if (n > 0 && j < 0) {
		if (unlikely(dst > dst_max))
			goto err_out;
		i++;
		*dst = (s8)0;
	}
	return i;
err_out:
	return -ENOSPC;
}

/**
 * ntfs_mapping_pairs_build - build the mapping pairs array from a runlist
 * @vol:	ntfs volume (needed for the ntfs version)
 * @dst:	destination buffer to which to write the mapping pairs array
 * @dst_len:	size of destination buffer @dst in bytes
 * @rl:		locked runlist for which to build the mapping pairs array
 * @first_vcn:	first vcn which to include in the mapping pairs array
 * @last_vcn:	last vcn which to include in the mapping pairs array
 * @stop_vcn:	first vcn outside destination buffer on success or -ENOSPC
 *
 * Create the mapping pairs array from the locked runlist @rl, starting at vcn
 * @first_vcn and finishing with vcn @last_vcn and save the array in @dst.
 * @dst_len is the size of @dst in bytes and it should be at least equal to
 * the value obtained by calling ntfs_get_size_for_mapping_pairs().
 *
 * A @last_vcn of -1 means end of runlist and in that case the mapping pairs
 * array corresponding to the runlist starting at vcn @first_vcn and finishing
 * at the end of the runlist is created.
 *
 * If @rl is NULL, just write a single terminator byte to @dst.
 *
 * On success or -ENOSPC error, if @stop_vcn is not NULL, *@stop_vcn is set to
 * the first vcn outside the destination buffer.  Note that on error, @dst has
 * been filled with all the mapping pairs that will fit, thus it can be
 * treated as partial success, in that a new attribute extent needs to be
 * created or the next extent has to be used and the mapping pairs build has
 * to be continued with @first_vcn set to *@stop_vcn.
 *
 * Return 0 on success and -errno on error.  The following error codes are
 * defined:
 *	-EINVAL	- Run list contains unmapped elements.  Make sure to only
 *		  pass fully mapped runlists to this function.
 *	-EIO	- The runlist is corrupt.
 *	-ENOSPC	- The destination buffer is too small.
 *
 * Locking: @rl must be locked on entry (either for reading or writing), it
 *	    remains locked throughout, and is left locked upon return.
 */
int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
		const int dst_len, const runlist_element *rl,
		const VCN first_vcn, const VCN last_vcn, VCN *const stop_vcn)
{
	LCN prev_lcn;
	s8 *dst_max, *dst_next;
	int err = -ENOSPC;
	bool the_end = false;
	s8 len_len, lcn_len;

	BUG_ON(first_vcn < 0);
	BUG_ON(last_vcn < -1);
	BUG_ON(last_vcn >= 0 && first_vcn > last_vcn);
	BUG_ON(dst_len < 1);
	if (!rl) {
		BUG_ON(first_vcn);
		BUG_ON(last_vcn > 0);
		if (stop_vcn)
			*stop_vcn = 0;
		/* Terminator byte. */
		*dst = 0;
		return 0;
	}
	/* Skip to runlist element containing @first_vcn. */
	while (rl->length && first_vcn >= rl[1].vcn)
		rl++;
	if (unlikely((!rl->length && first_vcn > rl->vcn) ||
			first_vcn < rl->vcn))
		return -EINVAL;
	/*
	 * @dst_max is used for bounds checking in
	 * ntfs_write_significant_bytes().
	 */
	dst_max = dst + dst_len - 1;
	prev_lcn = 0;
	/* Do the first partial run if present. */
	if (first_vcn > rl->vcn) {
		s64 delta, length = rl->length;

		/* We know rl->length != 0 already. */
		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @stop_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;
			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		delta = first_vcn - rl->vcn;
		/* Write length. */
		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
				length - delta);
		if (unlikely(len_len < 0))
			goto size_err;
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just write the lcn
		 * change.  FIXME: Do we need to write the lcn change or just
		 * the lcn in that case?  Not sure as I have never seen this
		 * case on NT4. - We assume that we just need to write the lcn
		 * change until someone tells us otherwise... (AIA)
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			prev_lcn = rl->lcn;
			if (likely(rl->lcn >= 0))
				prev_lcn += delta;
			/* Write change in lcn. */
			lcn_len = ntfs_write_significant_bytes(dst + 1 +
					len_len, dst_max, prev_lcn);
			if (unlikely(lcn_len < 0))
				goto size_err;
		} else
			lcn_len = 0;
		dst_next = dst + len_len + lcn_len + 1;
		if (unlikely(dst_next > dst_max))
			goto size_err;
		/* Update header byte. */
		*dst = lcn_len << 4 | len_len;
		/* Position at next mapping pairs array element. */
		dst = dst_next;
		/* Go to next runlist element. */
		rl++;
	}
	/* Do the full runs. */
	for (; rl->length && !the_end; rl++) {
		s64 length = rl->length;

		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @stop_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;
			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		/* Write length. */
		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
				length);
		if (unlikely(len_len < 0))
			goto size_err;
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just write the lcn
		 * change.  FIXME: Do we need to write the lcn change or just
		 * the lcn in that case?  Not sure as I have never seen this
		 * case on NT4. - We assume that we just need to write the lcn
		 * change until someone tells us otherwise... (AIA)
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			/* Write change in lcn. */
			lcn_len = ntfs_write_significant_bytes(dst + 1 +
					len_len, dst_max, rl->lcn - prev_lcn);
			if (unlikely(lcn_len < 0))
				goto size_err;
			prev_lcn = rl->lcn;
		} else
			lcn_len = 0;
		dst_next = dst + len_len + lcn_len + 1;
		if (unlikely(dst_next > dst_max))
			goto size_err;
		/* Update header byte. */
		*dst = lcn_len << 4 | len_len;
		/* Position at next mapping pairs array element. */
		dst = dst_next;
	}
	/* Success. */
	err = 0;
size_err:
	/* Set stop vcn. */
	if (stop_vcn)
		*stop_vcn = rl->vcn;
	/* Add terminator byte. */
	*dst = 0;
	return err;
err_out:
	if (rl->lcn == LCN_RL_NOT_MAPPED)
		err = -EINVAL;
	else
		err = -EIO;
	return err;
}

/**
 * ntfs_rl_truncate_nolock - truncate a runlist starting at a specified vcn
 * @vol:	ntfs volume (needed for error output)
 * @runlist:	runlist to truncate
 * @new_length:	the new length of the runlist in VCNs
 *
 * Truncate the runlist described by @runlist as well as the memory buffer
 * holding the runlist elements to a length of @new_length VCNs.
 *
 * If @new_length lies within the runlist, the runlist elements with VCNs of
 * @new_length and above are discarded.  As a special case if @new_length is
 * zero, the runlist is discarded and set to NULL.
 *
 * If @new_length lies beyond the runlist, a sparse runlist element is added
 * to the end of the runlist @runlist or if the last runlist element is a
 * sparse one already, this is extended.
 *
 * Note, no checking is done for unmapped runlist elements.  It is assumed
 * that the caller has mapped any elements that need to be mapped already.
 *
 * Return 0 on success and -errno on error.
 *
 * Locking: The caller must hold @runlist->lock for writing.
 */
int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist,
		const s64 new_length)
{
	runlist_element *rl;
	int old_size;

	ntfs_debug("Entering for new_length 0x%llx.", (long long)new_length);
	BUG_ON(!runlist);
	BUG_ON(new_length < 0);
	rl = runlist->rl;
	if (!new_length) {
		ntfs_debug("Freeing runlist.");
		runlist->rl = NULL;
		if (rl)
			ntfs_free(rl);
		return 0;
	}
	if (unlikely(!rl)) {
		/*
		 * Create a runlist consisting of a sparse runlist element of
		 * length @new_length followed by a terminator runlist element.
*/ rl = ntfs_malloc_nofs(PAGE_SIZE); if (unlikely(!rl)) { ntfs_error(vol->sb, "Not enough memory to allocate " "runlist element buffer."); return -ENOMEM; } runlist->rl = rl; rl[1].length = rl->vcn = 0; rl->lcn = LCN_HOLE; rl[1].vcn = rl->length = new_length; rl[1].lcn = LCN_ENOENT; return 0; } BUG_ON(new_length < rl->vcn); /* Find @new_length in the runlist. */ while (likely(rl->length && new_length >= rl[1].vcn)) rl++; /* * If not at the end of the runlist we need to shrink it. * If at the end of the runlist we need to expand it. */ if (rl->length) { runlist_element *trl; bool is_end; ntfs_debug("Shrinking runlist."); /* Determine the runlist size. */ trl = rl + 1; while (likely(trl->length)) trl++; old_size = trl - runlist->rl + 1; /* Truncate the run. */ rl->length = new_length - rl->vcn; /* * If a run was partially truncated, make the following runlist * element a terminator. */ is_end = false; if (rl->length) { rl++; if (!rl->length) is_end = true; rl->vcn = new_length; rl->length = 0; } rl->lcn = LCN_ENOENT; /* Reallocate memory if necessary. */ if (!is_end) { int new_size = rl - runlist->rl + 1; rl = ntfs_rl_realloc(runlist->rl, old_size, new_size); if (IS_ERR(rl)) ntfs_warning(vol->sb, "Failed to shrink " "runlist buffer. This just " "wastes a bit of memory " "temporarily so we ignore it " "and return success."); else runlist->rl = rl; } } else if (likely(/* !rl->length && */ new_length > rl->vcn)) { ntfs_debug("Expanding runlist."); /* * If there is a previous runlist element and it is a sparse * one, extend it. Otherwise need to add a new, sparse runlist * element. */ if ((rl > runlist->rl) && ((rl - 1)->lcn == LCN_HOLE)) (rl - 1)->length = new_length - (rl - 1)->vcn; else { /* Determine the runlist size. */ old_size = rl - runlist->rl + 1; /* Reallocate memory if necessary. 
*/ rl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1); if (IS_ERR(rl)) { ntfs_error(vol->sb, "Failed to expand runlist " "buffer, aborting."); return PTR_ERR(rl); } runlist->rl = rl; /* * Set @rl to the same runlist element in the new * runlist as before in the old runlist. */ rl += old_size - 1; /* Add a new, sparse runlist element. */ rl->lcn = LCN_HOLE; rl->length = new_length - rl->vcn; /* Add a new terminator runlist element. */ rl++; rl->length = 0; } rl->vcn = new_length; rl->lcn = LCN_ENOENT; } else /* if (unlikely(!rl->length && new_length == rl->vcn)) */ { /* Runlist already has same size as requested. */ rl->lcn = LCN_ENOENT; } ntfs_debug("Done."); return 0; } /** * ntfs_rl_punch_nolock - punch a hole into a runlist * @vol: ntfs volume (needed for error output) * @runlist: runlist to punch a hole into * @start: starting VCN of the hole to be created * @length: size of the hole to be created in units of clusters * * Punch a hole into the runlist @runlist starting at VCN @start and of size * @length clusters. * * Return 0 on success and -errno on error, in which case @runlist has not been * modified. * * If @start and/or @start + @length are outside the runlist return error code * -ENOENT. * * If the runlist contains unmapped or error elements between @start and @start * + @length return error code -EINVAL. * * Locking: The caller must hold @runlist->lock for writing. */ int ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist, const VCN start, const s64 length) { const VCN end = start + length; s64 delta; runlist_element *rl, *rl_end, *rl_real_end, *trl; int old_size; bool lcn_fixup = false; ntfs_debug("Entering for start 0x%llx, length 0x%llx.", (long long)start, (long long)length); BUG_ON(!runlist); BUG_ON(start < 0); BUG_ON(length < 0); BUG_ON(end < 0); rl = runlist->rl; if (unlikely(!rl)) { if (likely(!start && !length)) return 0; return -ENOENT; } /* Find @start in the runlist. 
*/ while (likely(rl->length && start >= rl[1].vcn)) rl++; rl_end = rl; /* Find @end in the runlist. */ while (likely(rl_end->length && end >= rl_end[1].vcn)) { /* Verify there are no unmapped or error elements. */ if (unlikely(rl_end->lcn < LCN_HOLE)) return -EINVAL; rl_end++; } /* Check the last element. */ if (unlikely(rl_end->length && rl_end->lcn < LCN_HOLE)) return -EINVAL; /* This covers @start being out of bounds, too. */ if (!rl_end->length && end > rl_end->vcn) return -ENOENT; if (!length) return 0; if (!rl->length) return -ENOENT; rl_real_end = rl_end; /* Determine the runlist size. */ while (likely(rl_real_end->length)) rl_real_end++; old_size = rl_real_end - runlist->rl + 1; /* If @start is in a hole simply extend the hole. */ if (rl->lcn == LCN_HOLE) { /* * If both @start and @end are in the same sparse run, we are * done. */ if (end <= rl[1].vcn) { ntfs_debug("Done (requested hole is already sparse)."); return 0; } extend_hole: /* Extend the hole. */ rl->length = end - rl->vcn; /* If @end is in a hole, merge it with the current one. */ if (rl_end->lcn == LCN_HOLE) { rl_end++; rl->length = rl_end->vcn - rl->vcn; } /* We have done the hole. Now deal with the remaining tail. */ rl++; /* Cut out all runlist elements up to @end. */ if (rl < rl_end) memmove(rl, rl_end, (rl_real_end - rl_end + 1) * sizeof(*rl)); /* Adjust the beginning of the tail if necessary. */ if (end > rl->vcn) { delta = end - rl->vcn; rl->vcn = end; rl->length -= delta; /* Only adjust the lcn if it is real. */ if (rl->lcn >= 0) rl->lcn += delta; } shrink_allocation: /* Reallocate memory if the allocation changed. */ if (rl < rl_end) { rl = ntfs_rl_realloc(runlist->rl, old_size, old_size - (rl_end - rl)); if (IS_ERR(rl)) ntfs_warning(vol->sb, "Failed to shrink " "runlist buffer. 
This just " "wastes a bit of memory " "temporarily so we ignore it " "and return success."); else runlist->rl = rl; } ntfs_debug("Done (extend hole)."); return 0; } /* * If @start is at the beginning of a run things are easier as there is * no need to split the first run. */ if (start == rl->vcn) { /* * @start is at the beginning of a run. * * If the previous run is sparse, extend its hole. * * If @end is not in the same run, switch the run to be sparse * and extend the newly created hole. * * Thus both of these cases reduce the problem to the above * case of "@start is in a hole". */ if (rl > runlist->rl && (rl - 1)->lcn == LCN_HOLE) { rl--; goto extend_hole; } if (end >= rl[1].vcn) { rl->lcn = LCN_HOLE; goto extend_hole; } /* * The final case is when @end is in the same run as @start. * For this need to split the run into two. One run for the * sparse region between the beginning of the old run, i.e. * @start, and @end and one for the remaining non-sparse * region, i.e. between @end and the end of the old run. */ trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1); if (IS_ERR(trl)) goto enomem_out; old_size++; if (runlist->rl != trl) { rl = trl + (rl - runlist->rl); rl_end = trl + (rl_end - runlist->rl); rl_real_end = trl + (rl_real_end - runlist->rl); runlist->rl = trl; } split_end: /* Shift all the runs up by one. */ memmove(rl + 1, rl, (rl_real_end - rl + 1) * sizeof(*rl)); /* Finally, setup the two split runs. */ rl->lcn = LCN_HOLE; rl->length = length; rl++; rl->vcn += length; /* Only adjust the lcn if it is real. */ if (rl->lcn >= 0 || lcn_fixup) rl->lcn += length; rl->length -= length; ntfs_debug("Done (split one)."); return 0; } /* * @start is neither in a hole nor at the beginning of a run. * * If @end is in a hole, things are easier as simply truncating the run * @start is in to end at @start - 1, deleting all runs after that up * to @end, and finally extending the beginning of the run @end is in * to be @start is all that is needed. 
*/ if (rl_end->lcn == LCN_HOLE) { /* Truncate the run containing @start. */ rl->length = start - rl->vcn; rl++; /* Cut out all runlist elements up to @end. */ if (rl < rl_end) memmove(rl, rl_end, (rl_real_end - rl_end + 1) * sizeof(*rl)); /* Extend the beginning of the run @end is in to be @start. */ rl->vcn = start; rl->length = rl[1].vcn - start; goto shrink_allocation; } /* * If @end is not in a hole there are still two cases to distinguish. * Either @end is or is not in the same run as @start. * * The second case is easier as it can be reduced to an already solved * problem by truncating the run @start is in to end at @start - 1. * Then, if @end is in the next run need to split the run into a sparse * run followed by a non-sparse run (already covered above) and if @end * is not in the next run switching it to be sparse, again reduces the * problem to the already covered case of "@start is in a hole". */ if (end >= rl[1].vcn) { /* * If @end is not in the next run, reduce the problem to the * case of "@start is in a hole". */ if (rl[1].length && end >= rl[2].vcn) { /* Truncate the run containing @start. */ rl->length = start - rl->vcn; rl++; rl->vcn = start; rl->lcn = LCN_HOLE; goto extend_hole; } trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1); if (IS_ERR(trl)) goto enomem_out; old_size++; if (runlist->rl != trl) { rl = trl + (rl - runlist->rl); rl_end = trl + (rl_end - runlist->rl); rl_real_end = trl + (rl_real_end - runlist->rl); runlist->rl = trl; } /* Truncate the run containing @start. */ rl->length = start - rl->vcn; rl++; /* * @end is in the next run, reduce the problem to the case * where "@start is at the beginning of a run and @end is in * the same run as @start". */ delta = rl->vcn - start; rl->vcn = start; if (rl->lcn >= 0) { rl->lcn -= delta; /* Need this in case the lcn just became negative. */ lcn_fixup = true; } rl->length += delta; goto split_end; } /* * The first case from above, i.e. @end is in the same run as @start. 
* We need to split the run into three. One run for the non-sparse * region between the beginning of the old run and @start, one for the * sparse region between @start and @end, and one for the remaining * non-sparse region, i.e. between @end and the end of the old run. */ trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 2); if (IS_ERR(trl)) goto enomem_out; old_size += 2; if (runlist->rl != trl) { rl = trl + (rl - runlist->rl); rl_end = trl + (rl_end - runlist->rl); rl_real_end = trl + (rl_real_end - runlist->rl); runlist->rl = trl; } /* Shift all the runs up by two. */ memmove(rl + 2, rl, (rl_real_end - rl + 1) * sizeof(*rl)); /* Finally, setup the three split runs. */ rl->length = start - rl->vcn; rl++; rl->vcn = start; rl->lcn = LCN_HOLE; rl->length = length; rl++; delta = end - rl->vcn; rl->vcn = end; rl->lcn += delta; rl->length -= delta; ntfs_debug("Done (split both)."); return 0; enomem_out: ntfs_error(vol->sb, "Not enough memory to extend runlist buffer."); return -ENOMEM; } #endif /* NTFS_RW */
gpl-2.0
lilinj2000/linux-4.3.3
net/wimax/op-state-get.c
1881
2135
/*
 * Linux WiMAX
 * Implement and export a method for getting a WiMAX device current state
 *
 * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
 *
 * Based on previous WiMAX core work by:
 *  Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
 *  Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <net/wimax.h>
#include <net/genetlink.h>
#include <linux/wimax.h>
#include <linux/security.h>
#include "wimax-internal.h"

#define D_SUBMODULE op_state_get
#include "debug-levels.h"


/*
 * Exporting to user space over generic netlink
 *
 * Parse the state-get command arriving from user space and return a
 * value that describes the device's current state.
 *
 * No attributes beyond the interface index that selects the device.
 */

/**
 * wimax_gnl_doit_state_get - handler for the WIMAX_GNL_OP_STATE_GET command
 * @skb: netlink request message (only used for debug tracing here)
 * @info: parsed generic netlink request
 *
 * Looks up the wimax device named by the mandatory
 * WIMAX_GNL_STGET_IFIDX attribute and queries its state with
 * wimax_state_get(); the generic netlink layer propagates the return
 * value back to user space.
 *
 * Returns: the state value reported by wimax_state_get(), or -ENODEV
 * when the IFIDX attribute is missing or no wimax device matches the
 * given interface index.
 */
int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
{
	int result, ifindex;
	struct wimax_dev *wimax_dev;

	d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
	result = -ENODEV;
	if (info->attrs[WIMAX_GNL_STGET_IFIDX] == NULL) {
		pr_err("WIMAX_GNL_OP_STATE_GET: can't find IFIDX attribute\n");
		goto error_no_wimax_dev;
	}
	ifindex = nla_get_u32(info->attrs[WIMAX_GNL_STGET_IFIDX]);
	/* Holds a reference on the backing net_device; dropped below. */
	wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
	if (wimax_dev == NULL)
		goto error_no_wimax_dev;
	/* Execute the operation and send the result back to user space */
	result = wimax_state_get(wimax_dev);
	dev_put(wimax_dev->net_dev);
error_no_wimax_dev:
	d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
	return result;
}
gpl-2.0
dremaker/imx6ul_linux
drivers/infiniband/hw/cxgb4/resource.c
2905
12208
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

/* Crude resource management */
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/ratelimit.h>
#include "iw_cxgb4.h"

/*
 * Seed the qid allocator with the device's QP id range.  Only ids
 * aligned to the db/gts page (i.e. !(i & qpmask)) are made available:
 * each allocation hands out a whole page worth of ids at once.
 */
static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
	u32 i;

	if (c4iw_id_table_alloc(&rdev->resource.qid_table,
				rdev->lldi.vr->qp.start,
				rdev->lldi.vr->qp.size,
				rdev->lldi.vr->qp.size, 0))
		return -ENOMEM;

	for (i = rdev->lldi.vr->qp.start;
		i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
		if (!(i & rdev->qpmask))
			c4iw_id_free(&rdev->resource.qid_table, i);
	return 0;
}

/* nr_* must be power of 2 */
/*
 * Set up the three id allocators (TPT, QID, PDID), unwinding the ones
 * already created if a later one fails.  Returns 0 or -ENOMEM.
 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
	int err = 0;
	err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
					C4IW_ID_TABLE_F_RANDOM);
	if (err)
		goto tpt_err;
	err = c4iw_init_qid_table(rdev);
	if (err)
		goto qid_err;
	err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
					nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	return 0;
 pdid_err:
	c4iw_id_table_free(&rdev->resource.qid_table);
 qid_err:
	c4iw_id_table_free(&rdev->resource.tpt_table);
 tpt_err:
	return -ENOMEM;
}

/*
 * returns 0 if no resource available
 * (c4iw_id_alloc() signals exhaustion with (u32)-1; 0 is reserved as
 * the "no resource" sentinel for callers.)
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
	u32 entry;
	entry = c4iw_id_alloc(id_table);
	if (entry == (u32)(-1))
		return 0;
	return entry;
}

/* Return a previously allocated id to its table. */
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
	PDBG("%s entry 0x%x\n", __func__, entry);
	c4iw_id_free(id_table, entry);
}

/*
 * Allocate a CQ id for a user context.
 *
 * A fresh allocation from the qid table really yields qpmask + 1 ids
 * (one db/gts page).  The first id satisfies this request; the sibling
 * ids are queued on the per-context cqids free list, and the same page
 * of ids is also queued on the qpids list for future QP allocations.
 * Returns the qid, or 0 if the table is exhausted.
 *
 * NOTE(review): if a kmalloc of a list entry fails mid-loop we bail
 * out and still return the (valid) qid, but the sibling ids that were
 * not queued are lost to this context even though the stats counter
 * already accounts for the whole page -- presumably an accepted
 * best-effort-under-OOM trade-off; confirm.
 */
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		/* Fast path: reuse a cached id from this context. */
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

/*
 * Return a CQ id to the per-context cache (not to the global table).
 * On list-entry allocation failure the id is silently dropped and is
 * never returned to any pool.
 */
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}

/*
 * Allocate a QP id for a user context; mirror image of
 * c4iw_get_cqid() with the roles of the qpids/cqids caches swapped.
 * Returns the qid, or 0 if the table is exhausted.
 */
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		/* Fast path: reuse a cached id from this context. */
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		/*
		 * NOTE(review): qids handed out by the table are aligned
		 * (qid & qpmask == 0), so starting this loop at i = qid
		 * makes its body unreachable, and unlike the mirror loop
		 * in c4iw_get_cqid() (which starts at qid + 1) the
		 * sibling ids never reach the cqids list.  Confirm
		 * whether qid + 1 was intended here.
		 */
		for (i = qid; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

/*
 * Return a QP id to the per-context cache (not to the global table).
 * On list-entry allocation failure the id is silently dropped and is
 * never returned to any pool.
 */
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

/* Tear down the three id allocators created by c4iw_init_resource(). */
void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	c4iw_id_table_free(&rscp->tpt_table);
	c4iw_id_table_free(&rscp->qid_table);
	c4iw_id_table_free(&rscp->pdid_table);
}

/*
 * PBL Memory Manager.  Uses Linux generic allocator.
*/ #define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); mutex_lock(&rdev->stats.lock); if (addr) { rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); if (rdev->stats.pbl.cur > rdev->stats.pbl.max) rdev->stats.pbl.max = rdev->stats.pbl.cur; } else rdev->stats.pbl.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { PDBG("%s addr 0x%x size %d\n", __func__, addr, size); mutex_lock(&rdev->stats.lock); rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); } int c4iw_pblpool_create(struct c4iw_rdev *rdev) { unsigned pbl_start, pbl_chunk, pbl_top; rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1); if (!rdev->pbl_pool) return -ENOMEM; pbl_start = rdev->lldi.vr->pbl.start; pbl_chunk = rdev->lldi.vr->pbl.size; pbl_top = pbl_start + pbl_chunk; while (pbl_start < pbl_top) { pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk); if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) { PDBG("%s failed to add PBL chunk (%x/%x)\n", __func__, pbl_start, pbl_chunk); if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) { printk(KERN_WARNING MOD "Failed to add all PBL chunks (%x/%x)\n", pbl_start, pbl_top - pbl_start); return 0; } pbl_chunk >>= 1; } else { PDBG("%s added PBL chunk (%x/%x)\n", __func__, pbl_start, pbl_chunk); pbl_start += pbl_chunk; } } return 0; } void c4iw_pblpool_destroy(struct c4iw_rdev *rdev) { gen_pool_destroy(rdev->pbl_pool); } /* * RQT Memory Manager. Uses Linux generic allocator. 
*/ #define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); if (!addr) printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n", pci_name(rdev->lldi.pdev)); mutex_lock(&rdev->stats.lock); if (addr) { rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); if (rdev->stats.rqt.cur > rdev->stats.rqt.max) rdev->stats.rqt.max = rdev->stats.rqt.cur; } else rdev->stats.rqt.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6); mutex_lock(&rdev->stats.lock); rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); } int c4iw_rqtpool_create(struct c4iw_rdev *rdev) { unsigned rqt_start, rqt_chunk, rqt_top; rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1); if (!rdev->rqt_pool) return -ENOMEM; rqt_start = rdev->lldi.vr->rq.start; rqt_chunk = rdev->lldi.vr->rq.size; rqt_top = rqt_start + rqt_chunk; while (rqt_start < rqt_top) { rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk); if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) { PDBG("%s failed to add RQT chunk (%x/%x)\n", __func__, rqt_start, rqt_chunk); if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) { printk(KERN_WARNING MOD "Failed to add all RQT chunks (%x/%x)\n", rqt_start, rqt_top - rqt_start); return 0; } rqt_chunk >>= 1; } else { PDBG("%s added RQT chunk (%x/%x)\n", __func__, rqt_start, rqt_chunk); rqt_start += rqt_chunk; } } return 0; } void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) { gen_pool_destroy(rdev->rqt_pool); } /* * On-Chip QP Memory. 
*/ #define MIN_OCQP_SHIFT 12 /* 4KB == min ocqp size */ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); if (addr) { mutex_lock(&rdev->stats.lock); rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT); if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max) rdev->stats.ocqp.max = rdev->stats.ocqp.cur; mutex_unlock(&rdev->stats.lock); } return (u32)addr; } void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size) { PDBG("%s addr 0x%x size %d\n", __func__, addr, size); mutex_lock(&rdev->stats.lock); rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size); } int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev) { unsigned start, chunk, top; rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1); if (!rdev->ocqp_pool) return -ENOMEM; start = rdev->lldi.vr->ocq.start; chunk = rdev->lldi.vr->ocq.size; top = start + chunk; while (start < top) { chunk = min(top - start + 1, chunk); if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) { PDBG("%s failed to add OCQP chunk (%x/%x)\n", __func__, start, chunk); if (chunk <= 1024 << MIN_OCQP_SHIFT) { printk(KERN_WARNING MOD "Failed to add all OCQP chunks (%x/%x)\n", start, top - start); return 0; } chunk >>= 1; } else { PDBG("%s added OCQP chunk (%x/%x)\n", __func__, start, chunk); start += chunk; } } return 0; } void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev) { gen_pool_destroy(rdev->ocqp_pool); }
gpl-2.0
SebastianFM/HTC-Rezound-overclocked-kernel
net/ipv4/fib_rules.c
2905
6843
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: policy rules.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Thomas Graf <tgraf@suug.ch>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Rani Assaf	:	local_rule cannot be deleted
 *		Marc Boucher	:	routing by fwmark
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip_fib.h>
#include <net/fib_rules.h>

/*
 * IPv4 specialization of the generic fib_rule: adds the source /
 * destination prefixes (with precomputed masks), the TOS selector and,
 * optionally, the traffic-class id used for route classification.
 */
struct fib4_rule {
	struct fib_rule		common;
	u8			dst_len;
	u8			src_len;
	u8			tos;
	__be32			src;
	__be32			srcmask;
	__be32			dst;
	__be32			dstmask;
#ifdef CONFIG_IP_ROUTE_CLASSID
	u32			tclassid;
#endif
};

#ifdef CONFIG_IP_ROUTE_CLASSID
/* Traffic-class id of the rule that produced @res, or 0 if none. */
u32 fib_rules_tclass(const struct fib_result *res)
{
	return res->r ? ((struct fib4_rule *) res->r)->tclassid : 0;
}
#endif

/*
 * Policy-routing aware FIB lookup: walk the rule list, performing a
 * table lookup for each matching FR_ACT_TO_TBL rule, and record the
 * matching rule in @res->r.
 */
int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
{
	struct fib_lookup_arg arg = { .result = res, .flags = FIB_LOOKUP_NOREF, };
	int err;

	err = fib_rules_lookup(net->ipv4.rules_ops, flowi4_to_flowi(flp), 0, &arg);
	res->r = arg.rule;

	return err;
}

/*
 * Carry out a matched rule's action.  FR_ACT_TO_TBL triggers the table
 * lookup; the other actions map directly to error codes (blackhole is
 * treated like an unknown action here and yields -EINVAL).  -EAGAIN
 * tells the rules core to continue with the next rule.
 */
static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	int err = -EAGAIN;
	struct fib_table *tbl;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;

	case FR_ACT_UNREACHABLE:
		err = -ENETUNREACH;
		goto errout;

	case FR_ACT_PROHIBIT:
		err = -EACCES;
		goto errout;

	case FR_ACT_BLACKHOLE:
	default:
		err = -EINVAL;
		goto errout;
	}

	tbl = fib_get_table(rule->fr_net, rule->table);
	if (!tbl)
		goto errout;

	err = fib_table_lookup(tbl, &flp->u.ip4, (struct fib_result *) arg->result, arg->flags);
	if (err > 0)
		err = -EAGAIN;
errout:
	return err;
}

/*
 * Does the flow match this rule's IPv4 selectors?  Prefixes are
 * compared under their masks; tos == 0 in the rule acts as a wildcard.
 * Returns 1 on match, 0 otherwise.
 */
static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	struct fib4_rule *r = (struct fib4_rule *) rule;
	struct flowi4 *fl4 = &fl->u.ip4;
	__be32 daddr = fl4->daddr;
	__be32 saddr = fl4->saddr;

	if (((saddr ^ r->src) & r->srcmask) ||
	    ((daddr ^ r->dst) & r->dstmask))
		return 0;

	if (r->tos && (r->tos != fl4->flowi4_tos))
		return 0;

	return 1;
}

/* Find (or create) the lowest-numbered unused routing table. */
static struct fib_table *fib_empty_table(struct net *net)
{
	u32 id;

	for (id = 1; id <= RT_TABLE_MAX; id++)
		if (fib_get_table(net, id) == NULL)
			return fib_new_table(net, id);
	return NULL;
}

static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = {
	FRA_GENERIC_POLICY,
	[FRA_FLOW]	= { .type = NLA_U32 },
};

/*
 * Fill in the IPv4 part of a new rule from the netlink request.  When
 * no table was specified for a TO_TBL rule, auto-select an empty one.
 * The generic rules core has already validated attribute presence
 * against frh->src_len/dst_len via the policy above.
 */
static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh,
			       struct nlattr **tb)
{
	struct net *net = sock_net(skb->sk);
	int err = -EINVAL;
	struct fib4_rule *rule4 = (struct fib4_rule *) rule;

	if (frh->tos & ~IPTOS_TOS_MASK)
		goto errout;

	if (rule->table == RT_TABLE_UNSPEC) {
		if (rule->action == FR_ACT_TO_TBL) {
			struct fib_table *table;

			table = fib_empty_table(net);
			if (table == NULL) {
				err = -ENOBUFS;
				goto errout;
			}

			rule->table = table->tb_id;
		}
	}

	if (frh->src_len)
		rule4->src = nla_get_be32(tb[FRA_SRC]);

	if (frh->dst_len)
		rule4->dst = nla_get_be32(tb[FRA_DST]);

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (tb[FRA_FLOW])
		rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
#endif

	rule4->src_len = frh->src_len;
	rule4->srcmask = inet_make_mask(rule4->src_len);
	rule4->dst_len = frh->dst_len;
	rule4->dstmask = inet_make_mask(rule4->dst_len);
	rule4->tos = frh->tos;

	err = 0;
errout:
	return err;
}

/*
 * Compare an existing rule against a netlink delete/lookup request.
 * Only fields actually given in the request are compared.  Returns 1
 * if the rule matches the request, 0 otherwise.
 */
static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	struct fib4_rule *rule4 = (struct fib4_rule *) rule;

	if (frh->src_len && (rule4->src_len != frh->src_len))
		return 0;

	if (frh->dst_len && (rule4->dst_len != frh->dst_len))
		return 0;

	if (frh->tos && (rule4->tos != frh->tos))
		return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
		return 0;
#endif

	if (frh->src_len && (rule4->src != nla_get_be32(tb[FRA_SRC])))
		return 0;

	if (frh->dst_len && (rule4->dst != nla_get_be32(tb[FRA_DST])))
		return 0;

	return 1;
}

/*
 * Dump the IPv4-specific part of a rule into a netlink message.
 * Returns 0 on success or -ENOBUFS if the skb ran out of room (via the
 * NLA_PUT_* jump to nla_put_failure).
 */
static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	struct fib4_rule *rule4 = (struct fib4_rule *) rule;

	frh->dst_len = rule4->dst_len;
	frh->src_len = rule4->src_len;
	frh->tos = rule4->tos;

	if (rule4->dst_len)
		NLA_PUT_BE32(skb, FRA_DST, rule4->dst);

	if (rule4->src_len)
		NLA_PUT_BE32(skb, FRA_SRC, rule4->src);

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rule4->tclassid)
		NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
#endif
	return 0;

nla_put_failure:
	return -ENOBUFS;
}

/* Worst-case netlink attribute payload for one rule's IPv4 fields. */
static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
{
	return nla_total_size(4) /* dst */
	       + nla_total_size(4) /* src */
	       + nla_total_size(4); /* flow */
}

/* Rule set changed: invalidate the IPv4 routing cache. */
static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
{
	rt_cache_flush(ops->fro_net, -1);
}

static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
	.family		= AF_INET,
	.rule_size	= sizeof(struct fib4_rule),
	.addr_size	= sizeof(u32),
	.action		= fib4_rule_action,
	.match		= fib4_rule_match,
	.configure	= fib4_rule_configure,
	.compare	= fib4_rule_compare,
	.fill		= fib4_rule_fill,
	.default_pref	= fib_default_rule_pref,
	.nlmsg_payload	= fib4_rule_nlmsg_payload,
	.flush_cache	= fib4_rule_flush_cache,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= fib4_rule_policy,
	.owner		= THIS_MODULE,
};

/*
 * Install the three default rules: local (pref 0), main (pref 0x7FFE)
 * and default (pref 0x7FFF).
 */
static int fib_default_rules_init(struct fib_rules_ops *ops)
{
	int err;

	err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
	if (err < 0)
		return err;
	err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
	if (err < 0)
		return err;
	err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		return err;
	return 0;
}

/* Per-network-namespace setup: register ops and add the default rules. */
int __net_init fib4_rules_init(struct net *net)
{
	int err;
	struct fib_rules_ops *ops;

	ops = fib_rules_register(&fib4_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	err = fib_default_rules_init(ops);
	if (err < 0)
		goto fail;
	net->ipv4.rules_ops = ops;
	return 0;

fail:
	/* also cleans all rules already added */
	fib_rules_unregister(ops);
	return err;
}

/* Per-network-namespace teardown. */
void __net_exit fib4_rules_exit(struct net *net)
{
	fib_rules_unregister(net->ipv4.rules_ops);
}
gpl-2.0
MyAOSP/kernel_asus_tf300t
arch/x86/kernel/cpu/mshyperv.c
3161
1462
/* * HyperV Detection code. * * Copyright (C) 2010, Novell, Inc. * Author : K. Y. Srinivasan <ksrinivasan@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * */ #include <linux/types.h> #include <linux/module.h> #include <asm/processor.h> #include <asm/hypervisor.h> #include <asm/hyperv.h> #include <asm/mshyperv.h> struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); static bool __init ms_hyperv_platform(void) { u32 eax; u32 hyp_signature[3]; if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) return false; cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); return eax >= HYPERV_CPUID_MIN && eax <= HYPERV_CPUID_MAX && !memcmp("Microsoft Hv", hyp_signature, 12); } static void __init ms_hyperv_init_platform(void) { /* * Extract the features and hints */ ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES); ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", ms_hyperv.features, ms_hyperv.hints); } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { .name = "Microsoft HyperV", .detect = ms_hyperv_platform, .init_platform = ms_hyperv_init_platform, }; EXPORT_SYMBOL(x86_hyper_ms_hyperv);
gpl-2.0
mdeejay/shooteru-ics-caf
drivers/acpi/pci_slot.c
4185
10346
/* * pci_slot.c - ACPI PCI Slot Driver * * The code here is heavily leveraged from the acpiphp module. * Thanks to Matthew Wilcox <matthew@wil.cx> for much guidance. * Thanks to Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com> for code * review and fixes. * * Copyright (C) 2007-2008 Hewlett-Packard Development Company, L.P. * Alex Chiang <achiang@hp.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/acpi.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <linux/dmi.h> static int debug; static int check_sta_before_sun; #define DRIVER_VERSION "0.1" #define DRIVER_AUTHOR "Alex Chiang <achiang@hp.com>" #define DRIVER_DESC "ACPI PCI Slot Detection Driver" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); module_param(debug, bool, 0644); #define _COMPONENT ACPI_PCI_COMPONENT ACPI_MODULE_NAME("pci_slot"); #define MY_NAME "pci_slot" #define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg) #define dbg(format, arg...) 
\ do { \ if (debug) \ printk(KERN_DEBUG "%s: " format, \ MY_NAME , ## arg); \ } while (0) #define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */ struct acpi_pci_slot { acpi_handle root_handle; /* handle of the root bridge */ struct pci_slot *pci_slot; /* corresponding pci_slot */ struct list_head list; /* node in the list of slots */ }; static int acpi_pci_slot_add(acpi_handle handle); static void acpi_pci_slot_remove(acpi_handle handle); static LIST_HEAD(slot_list); static DEFINE_MUTEX(slot_list_lock); static struct acpi_pci_driver acpi_pci_slot_driver = { .add = acpi_pci_slot_add, .remove = acpi_pci_slot_remove, }; static int check_slot(acpi_handle handle, unsigned long long *sun) { int device = -1; unsigned long long adr, sta; acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); dbg("Checking slot on path: %s\n", (char *)buffer.pointer); if (check_sta_before_sun) { /* If SxFy doesn't have _STA, we just assume it's there */ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); if (ACPI_SUCCESS(status) && !(sta & ACPI_STA_DEVICE_PRESENT)) goto out; } status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); if (ACPI_FAILURE(status)) { dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer); goto out; } /* No _SUN == not a slot == bail */ status = acpi_evaluate_integer(handle, "_SUN", NULL, sun); if (ACPI_FAILURE(status)) { dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer); goto out; } device = (adr >> 16) & 0xffff; out: kfree(buffer.pointer); return device; } struct callback_args { acpi_walk_callback user_function; /* only for walk_p2p_bridge */ struct pci_bus *pci_bus; acpi_handle root_handle; }; /* * register_slot * * Called once for each SxFy object in the namespace. Don't worry about * calling pci_create_slot multiple times for the same pci_bus:device, * since each subsequent call simply bumps the refcount on the pci_slot. 
* * The number of calls to pci_destroy_slot from unregister_slot is * symmetrical. */ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) { int device; unsigned long long sun; char name[SLOT_NAME_SIZE]; struct acpi_pci_slot *slot; struct pci_slot *pci_slot; struct callback_args *parent_context = context; struct pci_bus *pci_bus = parent_context->pci_bus; device = check_slot(handle, &sun); if (device < 0) return AE_OK; slot = kmalloc(sizeof(*slot), GFP_KERNEL); if (!slot) { err("%s: cannot allocate memory\n", __func__); return AE_OK; } snprintf(name, sizeof(name), "%llu", sun); pci_slot = pci_create_slot(pci_bus, device, name, NULL); if (IS_ERR(pci_slot)) { err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot)); kfree(slot); return AE_OK; } slot->root_handle = parent_context->root_handle; slot->pci_slot = pci_slot; INIT_LIST_HEAD(&slot->list); mutex_lock(&slot_list_lock); list_add(&slot->list, &slot_list); mutex_unlock(&slot_list_lock); get_device(&pci_bus->dev); dbg("pci_slot: %p, pci_bus: %x, device: %d, name: %s\n", pci_slot, pci_bus->number, device, name); return AE_OK; } /* * walk_p2p_bridge - discover and walk p2p bridges * @handle: points to an acpi_pci_root * @context: p2p_bridge_context pointer * * Note that when we call ourselves recursively, we pass a different * value of pci_bus in the child_context. 
*/ static acpi_status walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) { int device, function; unsigned long long adr; acpi_status status; acpi_handle dummy_handle; acpi_walk_callback user_function; struct pci_dev *dev; struct pci_bus *pci_bus; struct callback_args child_context; struct callback_args *parent_context = context; pci_bus = parent_context->pci_bus; user_function = parent_context->user_function; status = acpi_get_handle(handle, "_ADR", &dummy_handle); if (ACPI_FAILURE(status)) return AE_OK; status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); if (ACPI_FAILURE(status)) return AE_OK; device = (adr >> 16) & 0xffff; function = adr & 0xffff; dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function)); if (!dev || !dev->subordinate) goto out; child_context.pci_bus = dev->subordinate; child_context.user_function = user_function; child_context.root_handle = parent_context->root_handle; dbg("p2p bridge walk, pci_bus = %x\n", dev->subordinate->number); status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, user_function, NULL, &child_context, NULL); if (ACPI_FAILURE(status)) goto out; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, walk_p2p_bridge, NULL, &child_context, NULL); out: pci_dev_put(dev); return AE_OK; } /* * walk_root_bridge - generic root bridge walker * @handle: points to an acpi_pci_root * @user_function: user callback for slot objects * * Call user_function for all objects underneath this root bridge. * Walk p2p bridges underneath us and call user_function on those too. 
*/ static int walk_root_bridge(acpi_handle handle, acpi_walk_callback user_function) { int seg, bus; unsigned long long tmp; acpi_status status; acpi_handle dummy_handle; struct pci_bus *pci_bus; struct callback_args context; /* If the bridge doesn't have _STA, we assume it is always there */ status = acpi_get_handle(handle, "_STA", &dummy_handle); if (ACPI_SUCCESS(status)) { status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp); if (ACPI_FAILURE(status)) { info("%s: _STA evaluation failure\n", __func__); return 0; } if ((tmp & ACPI_STA_DEVICE_FUNCTIONING) == 0) /* don't register this object */ return 0; } status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp); seg = ACPI_SUCCESS(status) ? tmp : 0; status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp); bus = ACPI_SUCCESS(status) ? tmp : 0; pci_bus = pci_find_bus(seg, bus); if (!pci_bus) return 0; context.pci_bus = pci_bus; context.user_function = user_function; context.root_handle = handle; dbg("root bridge walk, pci_bus = %x\n", pci_bus->number); status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, user_function, NULL, &context, NULL); if (ACPI_FAILURE(status)) return status; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, walk_p2p_bridge, NULL, &context, NULL); if (ACPI_FAILURE(status)) err("%s: walk_p2p_bridge failure - %d\n", __func__, status); return status; } /* * acpi_pci_slot_add * @handle: points to an acpi_pci_root */ static int acpi_pci_slot_add(acpi_handle handle) { acpi_status status; status = walk_root_bridge(handle, register_slot); if (ACPI_FAILURE(status)) err("%s: register_slot failure - %d\n", __func__, status); return status; } /* * acpi_pci_slot_remove * @handle: points to an acpi_pci_root */ static void acpi_pci_slot_remove(acpi_handle handle) { struct acpi_pci_slot *slot, *tmp; struct pci_bus *pbus; mutex_lock(&slot_list_lock); list_for_each_entry_safe(slot, tmp, &slot_list, list) { if (slot->root_handle == handle) { list_del(&slot->list); pbus = 
slot->pci_slot->bus; pci_destroy_slot(slot->pci_slot); put_device(&pbus->dev); kfree(slot); } } mutex_unlock(&slot_list_lock); } static int do_sta_before_sun(const struct dmi_system_id *d) { info("%s detected: will evaluate _STA before calling _SUN\n", d->ident); check_sta_before_sun = 1; return 0; } static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = { /* * Fujitsu Primequest machines will return 1023 to indicate an * error if the _SUN method is evaluated on SxFy objects that * are not present (as indicated by _STA), so for those machines, * we want to check _STA before evaluating _SUN. */ { .callback = do_sta_before_sun, .ident = "Fujitsu PRIMEQUEST", .matches = { DMI_MATCH(DMI_BIOS_VENDOR, "FUJITSU LIMITED"), DMI_MATCH(DMI_BIOS_VERSION, "PRIMEQUEST"), }, }, {} }; static int __init acpi_pci_slot_init(void) { dmi_check_system(acpi_pci_slot_dmi_table); acpi_pci_register_driver(&acpi_pci_slot_driver); return 0; } static void __exit acpi_pci_slot_exit(void) { acpi_pci_unregister_driver(&acpi_pci_slot_driver); } module_init(acpi_pci_slot_init); module_exit(acpi_pci_slot_exit);
gpl-2.0
chli/tripndroid-endeavoru-3.0
drivers/edac/cell_edac.c
5465
7388
/* * Cell MIC driver for ECC counting * * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * This file may be distributed under the terms of the * GNU General Public License. */ #undef DEBUG #include <linux/edac.h> #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/stop_machine.h> #include <linux/io.h> #include <asm/machdep.h> #include <asm/cell-regs.h> #include "edac_core.h" struct cell_edac_priv { struct cbe_mic_tm_regs __iomem *regs; int node; int chanmask; #ifdef DEBUG u64 prev_fir; #endif }; static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) { struct cell_edac_priv *priv = mci->pvt_info; struct csrow_info *csrow = &mci->csrows[0]; unsigned long address, pfn, offset, syndrome; dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n", priv->node, chan, ar); /* Address decoding is likely a bit bogus, to dbl check */ address = (ar & 0xffffffffe0000000ul) >> 29; if (priv->chanmask == 0x3) address = (address << 1) | chan; pfn = address >> PAGE_SHIFT; offset = address & ~PAGE_MASK; syndrome = (ar & 0x000000001fe00000ul) >> 21; /* TODO: Decoding of the error address */ edac_mc_handle_ce(mci, csrow->first_page + pfn, offset, syndrome, 0, chan, ""); } static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) { struct cell_edac_priv *priv = mci->pvt_info; struct csrow_info *csrow = &mci->csrows[0]; unsigned long address, pfn, offset; dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n", priv->node, chan, ar); /* Address decoding is likely a bit bogus, to dbl check */ address = (ar & 0xffffffffe0000000ul) >> 29; if (priv->chanmask == 0x3) address = (address << 1) | chan; pfn = address >> PAGE_SHIFT; offset = address & ~PAGE_MASK; /* TODO: Decoding of the error address */ edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, ""); } static void cell_edac_check(struct mem_ctl_info *mci) { struct cell_edac_priv *priv 
= mci->pvt_info; u64 fir, addreg, clear = 0; fir = in_be64(&priv->regs->mic_fir); #ifdef DEBUG if (fir != priv->prev_fir) { dev_dbg(mci->dev, "fir change : 0x%016lx\n", fir); priv->prev_fir = fir; } #endif if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) { addreg = in_be64(&priv->regs->mic_df_ecc_address_0); clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET; cell_edac_count_ce(mci, 0, addreg); } if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) { addreg = in_be64(&priv->regs->mic_df_ecc_address_1); clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET; cell_edac_count_ce(mci, 1, addreg); } if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) { addreg = in_be64(&priv->regs->mic_df_ecc_address_0); clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET; cell_edac_count_ue(mci, 0, addreg); } if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) { addreg = in_be64(&priv->regs->mic_df_ecc_address_1); clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET; cell_edac_count_ue(mci, 1, addreg); } /* The procedure for clearing FIR bits is a bit ... weird */ if (clear) { fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK); fir |= CBE_MIC_FIR_ECC_RESET_MASK; fir &= ~clear; out_be64(&priv->regs->mic_fir, fir); (void)in_be64(&priv->regs->mic_fir); mb(); /* sync up */ #ifdef DEBUG fir = in_be64(&priv->regs->mic_fir); dev_dbg(mci->dev, "fir clear : 0x%016lx\n", fir); #endif } } static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci) { struct csrow_info *csrow = &mci->csrows[0]; struct cell_edac_priv *priv = mci->pvt_info; struct device_node *np; for (np = NULL; (np = of_find_node_by_name(np, "memory")) != NULL;) { struct resource r; /* We "know" that the Cell firmware only creates one entry * in the "memory" nodes. If that changes, this code will * need to be adapted. 
*/ if (of_address_to_resource(np, 0, &r)) continue; if (of_node_to_nid(np) != priv->node) continue; csrow->first_page = r.start >> PAGE_SHIFT; csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT; csrow->last_page = csrow->first_page + csrow->nr_pages - 1; csrow->mtype = MEM_XDR; csrow->edac_mode = EDAC_SECDED; dev_dbg(mci->dev, "Initialized on node %d, chanmask=0x%x," " first_page=0x%lx, nr_pages=0x%x\n", priv->node, priv->chanmask, csrow->first_page, csrow->nr_pages); break; } } static int __devinit cell_edac_probe(struct platform_device *pdev) { struct cbe_mic_tm_regs __iomem *regs; struct mem_ctl_info *mci; struct cell_edac_priv *priv; u64 reg; int rc, chanmask; regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id)); if (regs == NULL) return -ENODEV; edac_op_state = EDAC_OPSTATE_POLL; /* Get channel population */ reg = in_be64(&regs->mic_mnt_cfg); dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg); chanmask = 0; if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP) chanmask |= 0x1; if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP) chanmask |= 0x2; if (chanmask == 0) { dev_warn(&pdev->dev, "Yuck ! No channel populated ? Aborting !\n"); return -ENODEV; } dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n", in_be64(&regs->mic_fir)); /* Allocate & init EDAC MC data structure */ mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1, chanmask == 3 ? 
2 : 1, pdev->id); if (mci == NULL) return -ENOMEM; priv = mci->pvt_info; priv->regs = regs; priv->node = pdev->id; priv->chanmask = chanmask; mci->dev = &pdev->dev; mci->mtype_cap = MEM_FLAG_XDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED; mci->mod_name = "cell_edac"; mci->ctl_name = "MIC"; mci->dev_name = dev_name(&pdev->dev); mci->edac_check = cell_edac_check; cell_edac_init_csrows(mci); /* Register with EDAC core */ rc = edac_mc_add_mc(mci); if (rc) { dev_err(&pdev->dev, "failed to register with EDAC core\n"); edac_mc_free(mci); return rc; } return 0; } static int __devexit cell_edac_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev); if (mci) edac_mc_free(mci); return 0; } static struct platform_driver cell_edac_driver = { .driver = { .name = "cbe-mic", .owner = THIS_MODULE, }, .probe = cell_edac_probe, .remove = __devexit_p(cell_edac_remove), }; static int __init cell_edac_init(void) { /* Sanity check registers data structure */ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, mic_df_ecc_address_0) != 0xf8); BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, mic_df_ecc_address_1) != 0x1b8); BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, mic_df_config) != 0x218); BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, mic_fir) != 0x230); BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, mic_mnt_cfg) != 0x210); BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, mic_exc) != 0x208); return platform_driver_register(&cell_edac_driver); } static void __exit cell_edac_exit(void) { platform_driver_unregister(&cell_edac_driver); } module_init(cell_edac_init); module_exit(cell_edac_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("ECC counting for Cell MIC");
gpl-2.0
jiangjiali66/linux-xlnx
arch/mips/sgi-ip22/ip22-setup.c
9049
2178
/* * ip22-setup.c: SGI specific setup, including init of the feature struct. * * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org) */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/kdev_t.h> #include <linux/types.h> #include <linux/module.h> #include <linux/console.h> #include <linux/sched.h> #include <linux/tty.h> #include <asm/addrspace.h> #include <asm/bcache.h> #include <asm/bootinfo.h> #include <asm/irq.h> #include <asm/reboot.h> #include <asm/time.h> #include <asm/io.h> #include <asm/traps.h> #include <asm/sgialib.h> #include <asm/sgi/mc.h> #include <asm/sgi/hpc3.h> #include <asm/sgi/ip22.h> extern void ip22_be_init(void) __init; void __init plat_mem_setup(void) { char *ctype; char *cserial; board_be_init = ip22_be_init; /* Init the INDY HPC I/O controller. Need to call this before * fucking with the memory controller because it needs to know the * boardID and whether this is a Guiness or a FullHouse machine. */ sgihpc_init(); /* Init INDY memory controller. */ sgimc_init(); #ifdef CONFIG_BOARD_SCACHE /* Now enable boardcaches, if any. */ indy_sc_init(); #endif /* Set EISA IO port base for Indigo2 * ioremap cannot fail */ set_io_port_base((unsigned long)ioremap(0x00080000, 0x1fffffff - 0x00080000)); /* ARCS console environment variable is set to "g?" for * graphics console, it is set to "d" for the first serial * line and "d2" for the second serial line. * * Need to check if the case is 'g' but no keyboard: * (ConsoleIn/Out = serial) */ ctype = ArcGetEnvironmentVariable("console"); cserial = ArcGetEnvironmentVariable("ConsoleOut"); if ((ctype && *ctype == 'd') || (cserial && *cserial == 's')) { static char options[8] __initdata; char *baud = ArcGetEnvironmentVariable("dbaud"); if (baud) strcpy(options, baud); add_preferred_console("ttyS", *(ctype + 1) == '2' ? 1 : 0, baud ? 
options : NULL); } else if (!ctype || *ctype != 'g') { /* Use ARC if we don't want serial ('d') or graphics ('g'). */ prom_flags |= PROM_FLAG_USE_AS_CONSOLE; add_preferred_console("arc", 0, NULL); } }
gpl-2.0
DESHONOR/android_kernel_huawei_msm8916
arch/mips/alchemy/common/vss.c
9049
2204
/* * Au1300 media block power gating (VSS) * * This is a stop-gap solution until I have the clock framework integration * ready. This stuff here really must be handled transparently when clocks * for various media blocks are enabled/disabled. */ #include <linux/module.h> #include <linux/spinlock.h> #include <asm/mach-au1x00/au1000.h> #define VSS_GATE 0x00 /* gate wait timers */ #define VSS_CLKRST 0x04 /* clock/block control */ #define VSS_FTR 0x08 /* footers */ #define VSS_ADDR(blk) (KSEG1ADDR(AU1300_VSS_PHYS_ADDR) + (blk * 0x0c)) static DEFINE_SPINLOCK(au1300_vss_lock); /* enable a block as outlined in the databook */ static inline void __enable_block(int block) { void __iomem *base = (void __iomem *)VSS_ADDR(block); __raw_writel(3, base + VSS_CLKRST); /* enable clock, assert reset */ wmb(); __raw_writel(0x01fffffe, base + VSS_GATE); /* maximum setup time */ wmb(); /* enable footers in sequence */ __raw_writel(0x01, base + VSS_FTR); wmb(); __raw_writel(0x03, base + VSS_FTR); wmb(); __raw_writel(0x07, base + VSS_FTR); wmb(); __raw_writel(0x0f, base + VSS_FTR); wmb(); __raw_writel(0x01ffffff, base + VSS_GATE); /* start FSM too */ wmb(); __raw_writel(2, base + VSS_CLKRST); /* deassert reset */ wmb(); __raw_writel(0x1f, base + VSS_FTR); /* enable isolation cells */ wmb(); } /* disable a block as outlined in the databook */ static inline void __disable_block(int block) { void __iomem *base = (void __iomem *)VSS_ADDR(block); __raw_writel(0x0f, base + VSS_FTR); /* disable isolation cells */ wmb(); __raw_writel(0, base + VSS_GATE); /* disable FSM */ wmb(); __raw_writel(3, base + VSS_CLKRST); /* assert reset */ wmb(); __raw_writel(1, base + VSS_CLKRST); /* disable clock */ wmb(); __raw_writel(0, base + VSS_FTR); /* disable all footers */ wmb(); } void au1300_vss_block_control(int block, int enable) { unsigned long flags; if (alchemy_get_cputype() != ALCHEMY_CPU_AU1300) return; /* only one block at a time */ spin_lock_irqsave(&au1300_vss_lock, flags); if (enable) 
__enable_block(block); else __disable_block(block); spin_unlock_irqrestore(&au1300_vss_lock, flags); } EXPORT_SYMBOL_GPL(au1300_vss_block_control);
gpl-2.0
nekromant/linux
sound/isa/gus/gus_instr.c
13401
4974
/* * Routines for Gravis UltraSound soundcards - Synthesizer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <sound/core.h> #include <sound/gus.h> /* * */ int snd_gus_iwffff_put_sample(void *private_data, struct iwffff_wave *wave, char __user *data, long len, int atomic) { struct snd_gus_card *gus = private_data; struct snd_gf1_mem_block *block; int err; if (wave->format & IWFFFF_WAVE_ROM) return 0; /* it's probably ok - verify the address? */ if (wave->format & IWFFFF_WAVE_STEREO) return -EINVAL; /* not supported */ block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc, SNDRV_GF1_MEM_OWNER_WAVE_IWFFFF, NULL, wave->size, wave->format & IWFFFF_WAVE_16BIT, 1, wave->share_id); if (block == NULL) return -ENOMEM; err = snd_gus_dram_write(gus, data, block->ptr, wave->size); if (err < 0) { snd_gf1_mem_lock(&gus->gf1.mem_alloc, 0); snd_gf1_mem_xfree(&gus->gf1.mem_alloc, block); snd_gf1_mem_lock(&gus->gf1.mem_alloc, 1); return err; } wave->address.memory = block->ptr; return 0; } int snd_gus_iwffff_get_sample(void *private_data, struct iwffff_wave *wave, char __user *data, long len, int atomic) { struct snd_gus_card *gus = private_data; return snd_gus_dram_read(gus, data, wave->address.memory, wave->size, wave->format & IWFFFF_WAVE_ROM ? 
1 : 0); } int snd_gus_iwffff_remove_sample(void *private_data, struct iwffff_wave *wave, int atomic) { struct snd_gus_card *gus = private_data; if (wave->format & IWFFFF_WAVE_ROM) return 0; /* it's probably ok - verify the address? */ return snd_gf1_mem_free(&gus->gf1.mem_alloc, wave->address.memory); } /* * */ int snd_gus_gf1_put_sample(void *private_data, struct gf1_wave *wave, char __user *data, long len, int atomic) { struct snd_gus_card *gus = private_data; struct snd_gf1_mem_block *block; int err; if (wave->format & GF1_WAVE_STEREO) return -EINVAL; /* not supported */ block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc, SNDRV_GF1_MEM_OWNER_WAVE_GF1, NULL, wave->size, wave->format & GF1_WAVE_16BIT, 1, wave->share_id); if (block == NULL) return -ENOMEM; err = snd_gus_dram_write(gus, data, block->ptr, wave->size); if (err < 0) { snd_gf1_mem_lock(&gus->gf1.mem_alloc, 0); snd_gf1_mem_xfree(&gus->gf1.mem_alloc, block); snd_gf1_mem_lock(&gus->gf1.mem_alloc, 1); return err; } wave->address.memory = block->ptr; return 0; } int snd_gus_gf1_get_sample(void *private_data, struct gf1_wave *wave, char __user *data, long len, int atomic) { struct snd_gus_card *gus = private_data; return snd_gus_dram_read(gus, data, wave->address.memory, wave->size, 0); } int snd_gus_gf1_remove_sample(void *private_data, struct gf1_wave *wave, int atomic) { struct snd_gus_card *gus = private_data; return snd_gf1_mem_free(&gus->gf1.mem_alloc, wave->address.memory); } /* * */ int snd_gus_simple_put_sample(void *private_data, struct simple_instrument *instr, char __user *data, long len, int atomic) { struct snd_gus_card *gus = private_data; struct snd_gf1_mem_block *block; int err; if (instr->format & SIMPLE_WAVE_STEREO) return -EINVAL; /* not supported */ block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc, SNDRV_GF1_MEM_OWNER_WAVE_SIMPLE, NULL, instr->size, instr->format & SIMPLE_WAVE_16BIT, 1, instr->share_id); if (block == NULL) return -ENOMEM; err = snd_gus_dram_write(gus, data, block->ptr, 
instr->size); if (err < 0) { snd_gf1_mem_lock(&gus->gf1.mem_alloc, 0); snd_gf1_mem_xfree(&gus->gf1.mem_alloc, block); snd_gf1_mem_lock(&gus->gf1.mem_alloc, 1); return err; } instr->address.memory = block->ptr; return 0; } int snd_gus_simple_get_sample(void *private_data, struct simple_instrument *instr, char __user *data, long len, int atomic) { struct snd_gus_card *gus = private_data; return snd_gus_dram_read(gus, data, instr->address.memory, instr->size, 0); } int snd_gus_simple_remove_sample(void *private_data, struct simple_instrument *instr, int atomic) { struct snd_gus_card *gus = private_data; return snd_gf1_mem_free(&gus->gf1.mem_alloc, instr->address.memory); }
gpl-2.0
Supervenom/linux-mod_sys_call
fs/ufs/cylinder.c
14681
5848
/* * linux/fs/ufs/cylinder.c * * Copyright (C) 1998 * Daniel Pirkl <daniel.pirkl@email.cz> * Charles University, Faculty of Mathematics and Physics * * ext2 - inode (block) bitmap caching inspired */ #include <linux/fs.h> #include <linux/time.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/bitops.h> #include <asm/byteorder.h> #include "ufs_fs.h" #include "ufs.h" #include "swab.h" #include "util.h" /* * Read cylinder group into cache. The memory space for ufs_cg_private_info * structure is already allocated during ufs_read_super. */ static void ufs_read_cylinder (struct super_block * sb, unsigned cgno, unsigned bitmap_nr) { struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_cg_private_info * ucpi; struct ufs_cylinder_group * ucg; unsigned i, j; UFSD("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr); uspi = sbi->s_uspi; ucpi = sbi->s_ucpi[bitmap_nr]; ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data; UCPI_UBH(ucpi)->fragment = ufs_cgcmin(cgno); UCPI_UBH(ucpi)->count = uspi->s_cgsize >> sb->s_blocksize_bits; /* * We have already the first fragment of cylinder group block in buffer */ UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno]; for (i = 1; i < UCPI_UBH(ucpi)->count; i++) if (!(UCPI_UBH(ucpi)->bh[i] = sb_bread(sb, UCPI_UBH(ucpi)->fragment + i))) goto failed; sbi->s_cgno[bitmap_nr] = cgno; ucpi->c_cgx = fs32_to_cpu(sb, ucg->cg_cgx); ucpi->c_ncyl = fs16_to_cpu(sb, ucg->cg_ncyl); ucpi->c_niblk = fs16_to_cpu(sb, ucg->cg_niblk); ucpi->c_ndblk = fs32_to_cpu(sb, ucg->cg_ndblk); ucpi->c_rotor = fs32_to_cpu(sb, ucg->cg_rotor); ucpi->c_frotor = fs32_to_cpu(sb, ucg->cg_frotor); ucpi->c_irotor = fs32_to_cpu(sb, ucg->cg_irotor); ucpi->c_btotoff = fs32_to_cpu(sb, ucg->cg_btotoff); ucpi->c_boff = fs32_to_cpu(sb, ucg->cg_boff); ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff); ucpi->c_freeoff = fs32_to_cpu(sb, ucg->cg_freeoff); ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff); ucpi->c_clustersumoff = 
fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff); ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff); ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks); UFSD("EXIT\n"); return; failed: for (j = 1; j < i; j++) brelse (sbi->s_ucg[j]); sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno); } /* * Remove cylinder group from cache, doesn't release memory * allocated for cylinder group (this is done at ufs_put_super only). */ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr) { struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_cg_private_info * ucpi; struct ufs_cylinder_group * ucg; unsigned i; UFSD("ENTER, bitmap_nr %u\n", bitmap_nr); uspi = sbi->s_uspi; if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) { UFSD("EXIT\n"); return; } ucpi = sbi->s_ucpi[bitmap_nr]; ucg = ubh_get_ucg(UCPI_UBH(ucpi)); if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) { ufs_panic (sb, "ufs_put_cylinder", "internal error"); return; } /* * rotor is not so important data, so we put it to disk * at the end of working with cylinder */ ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor); ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor); ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor); ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); for (i = 1; i < UCPI_UBH(ucpi)->count; i++) { brelse (UCPI_UBH(ucpi)->bh[i]); } sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; UFSD("EXIT\n"); } /* * Find cylinder group in cache and return it as pointer. * If cylinder group is not in cache, we will load it from disk. * * The cache is managed by LRU algorithm. 
*/ struct ufs_cg_private_info * ufs_load_cylinder ( struct super_block * sb, unsigned cgno) { struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_cg_private_info * ucpi; unsigned cg, i, j; UFSD("ENTER, cgno %u\n", cgno); uspi = sbi->s_uspi; if (cgno >= uspi->s_ncg) { ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg"); return NULL; } /* * Cylinder group number cg it in cache and it was last used */ if (sbi->s_cgno[0] == cgno) { UFSD("EXIT\n"); return sbi->s_ucpi[0]; } /* * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED */ if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) { if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) { if (sbi->s_cgno[cgno] != cgno) { ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache"); UFSD("EXIT (FAILED)\n"); return NULL; } else { UFSD("EXIT\n"); return sbi->s_ucpi[cgno]; } } else { ufs_read_cylinder (sb, cgno, cgno); UFSD("EXIT\n"); return sbi->s_ucpi[cgno]; } } /* * Cylinder group number cg is in cache but it was not last used, * we will move to the first position */ for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++); if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) { cg = sbi->s_cgno[i]; ucpi = sbi->s_ucpi[i]; for (j = i; j > 0; j--) { sbi->s_cgno[j] = sbi->s_cgno[j-1]; sbi->s_ucpi[j] = sbi->s_ucpi[j-1]; } sbi->s_cgno[0] = cg; sbi->s_ucpi[0] = ucpi; /* * Cylinder group number cg is not in cache, we will read it from disk * and put it to the first position */ } else { if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED) sbi->s_cg_loaded++; else ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1); ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1]; for (j = sbi->s_cg_loaded - 1; j > 0; j--) { sbi->s_cgno[j] = sbi->s_cgno[j-1]; sbi->s_ucpi[j] = sbi->s_ucpi[j-1]; } sbi->s_ucpi[0] = ucpi; ufs_read_cylinder (sb, cgno, 0); } UFSD("EXIT\n"); return sbi->s_ucpi[0]; }
gpl-2.0
uoaerg/linux-dccp
arch/alpha/mm/fault.c
90
6398
/* * linux/arch/alpha/mm/fault.c * * Copyright (C) 1995 Linus Torvalds */ #include <linux/sched/signal.h> #include <linux/kernel.h> #include <linux/mm.h> #include <asm/io.h> #define __EXTERN_INLINE inline #include <asm/mmu_context.h> #include <asm/tlbflush.h> #undef __EXTERN_INLINE #include <linux/signal.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/extable.h> #include <linux/uaccess.h> extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *); /* * Force a new ASN for a task. */ #ifndef CONFIG_SMP unsigned long last_asn = ASN_FIRST_VERSION; #endif void __load_new_mm_context(struct mm_struct *next_mm) { unsigned long mmc; struct pcb_struct *pcb; mmc = __get_new_mm_context(next_mm, smp_processor_id()); next_mm->context[smp_processor_id()] = mmc; pcb = &current_thread_info()->pcb; pcb->asn = mmc & HARDWARE_ASN_MASK; pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; __reload_thread(pcb); } /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to handle_mm_fault(). * * mmcsr: * 0 = translation not valid * 1 = access violation * 2 = fault-on-read * 3 = fault-on-execute * 4 = fault-on-write * * cause: * -1 = instruction fetch * 0 = load * 1 = store * * Registers $9 through $15 are saved in a block just prior to `regs' and * are saved and restored around the call to allow exception code to * modify them. */ /* Macro for exception fixup code to access integer registers. */ #define dpf_reg(r) \ (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ (r) <= 18 ? 
(r)+8 : (r)-10]) asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) { struct vm_area_struct * vma; struct mm_struct *mm = current->mm; const struct exception_table_entry *fixup; int fault, si_code = SEGV_MAPERR; siginfo_t info; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; /* As of EV6, a load into $31/$f31 is a prefetch, and never faults (or is suppressed by the PALcode). Support that for older CPUs by ignoring such an instruction. */ if (cause == 0) { unsigned int insn; __get_user(insn, (unsigned int __user *)regs->pc); if ((insn >> 21 & 0x1f) == 0x1f && /* ldq ldl ldt lds ldg ldf ldwu ldbu */ (1ul << (insn >> 26) & 0x30f00001400ul)) { regs->pc += 4; return; } } /* If we're in an interrupt context, or have no user context, we must not take the fault. */ if (!mm || faulthandler_disabled()) goto no_context; #ifdef CONFIG_ALPHA_LARGE_VMALLOC if (address >= TASK_SIZE) goto vmalloc_fault; #endif if (user_mode(regs)) flags |= FAULT_FLAG_USER; retry: down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* Ok, we have a good vm_area for this memory access, so we can handle it. */ good_area: si_code = SEGV_ACCERR; if (cause < 0) { if (!(vma->vm_flags & VM_EXEC)) goto bad_area; } else if (!cause) { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) goto bad_area; } else { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; flags |= FAULT_FLAG_WRITE; } /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. 
*/ fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGSEGV) goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) current->maj_flt++; else current->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; /* No need to up_read(&mm->mmap_sem) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c. */ goto retry; } } up_read(&mm->mmap_sem); return; /* Something tried to access memory that isn't in our memory map. Fix it, but check if it's kernel or user first. */ bad_area: up_read(&mm->mmap_sem); if (user_mode(regs)) goto do_sigsegv; no_context: /* Are we prepared to handle this fault as an exception? */ if ((fixup = search_exception_tables(regs->pc)) != 0) { unsigned long newpc; newpc = fixup_exception(dpf_reg, fixup, regs->pc); regs->pc = newpc; return; } /* Oops. The kernel tried to access some bad page. We'll have to terminate things with extreme prejudice. */ printk(KERN_ALERT "Unable to handle kernel paging request at " "virtual address %016lx\n", address); die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16); do_exit(SIGKILL); /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. */ out_of_memory: up_read(&mm->mmap_sem); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: up_read(&mm->mmap_sem); /* Send a sigbus, regardless of whether we were in kernel or user mode. 
*/ info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; info.si_addr = (void __user *) address; force_sig_info(SIGBUS, &info, current); if (!user_mode(regs)) goto no_context; return; do_sigsegv: info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = si_code; info.si_addr = (void __user *) address; force_sig_info(SIGSEGV, &info, current); return; #ifdef CONFIG_ALPHA_LARGE_VMALLOC vmalloc_fault: if (user_mode(regs)) goto do_sigsegv; else { /* Synchronize this task's top level page-table with the "reference" page table from init. */ long index = pgd_index(address); pgd_t *pgd, *pgd_k; pgd = current->active_mm->pgd + index; pgd_k = swapper_pg_dir + index; if (!pgd_present(*pgd) && pgd_present(*pgd_k)) { pgd_val(*pgd) = pgd_val(*pgd_k); return; } goto no_context; } #endif }
gpl-2.0
sarwarbhuiyan/linux
drivers/media/i2c/saa7115.c
346
56341
/* saa711x - Philips SAA711x video decoder driver * This driver can work with saa7111, saa7111a, saa7113, saa7114, * saa7115 and saa7118. * * Based on saa7114 driver by Maxim Yevtyushkin, which is based on * the saa7111 driver by Dave Perks. * * Copyright (C) 1998 Dave Perks <dperks@ibm.net> * Copyright (C) 2002 Maxim Yevtyushkin <max@linuxmedialabs.com> * * Slight changes for video timing and attachment output by * Wolfgang Scherr <scherr@net4you.net> * * Moved over to the linux >= 2.4.x i2c protocol (1/1/2003) * by Ronald Bultje <rbultje@ronald.bitfreak.net> * * Added saa7115 support by Kevin Thayer <nufan_wfk at yahoo.com> * (2/17/2003) * * VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl> * * Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> * SAA7111, SAA7113 and SAA7118 support * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #include "saa711x_regs.h" #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <media/saa7115.h> #include <asm/div64.h> #define VRES_60HZ (480+16) MODULE_DESCRIPTION("Philips SAA7111/SAA7113/SAA7114/SAA7115/SAA7118 video decoder driver"); MODULE_AUTHOR( "Maxim Yevtyushkin, Kevin Thayer, Chris Kennedy, " "Hans Verkuil, Mauro Carvalho Chehab"); MODULE_LICENSE("GPL"); static bool debug; module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); enum saa711x_model { SAA7111A, SAA7111, SAA7113, GM7113C, SAA7114, SAA7115, SAA7118, }; struct saa711x_state { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; struct { /* chroma gain control cluster */ struct v4l2_ctrl *agc; struct v4l2_ctrl *gain; }; v4l2_std_id std; int input; int output; int enable; int radio; int width; int height; enum saa711x_model ident; u32 audclk_freq; u32 crystal_freq; bool ucgc; u8 cgcdiv; bool apll; bool double_asclk; }; static inline struct saa711x_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct saa711x_state, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct saa711x_state, hdl)->sd; } /* ----------------------------------------------------------------------- */ static inline int saa711x_write(struct v4l2_subdev *sd, u8 reg, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_write_byte_data(client, reg, value); } /* Sanity routine to check if a register is present */ static int saa711x_has_reg(const int id, const u8 reg) { if (id == SAA7111) return reg < 0x20 && reg != 0x01 && reg != 0x0f && (reg < 0x13 || reg > 0x19) && reg != 0x1d && reg != 0x1e; if (id == SAA7111A) return reg < 0x20 && reg != 0x01 && reg != 0x0f && reg != 0x14 && reg != 0x18 && reg != 0x19 && reg != 0x1d && reg != 0x1e; /* common for 
saa7113/4/5/8 */ if (unlikely((reg >= 0x3b && reg <= 0x3f) || reg == 0x5c || reg == 0x5f || reg == 0xa3 || reg == 0xa7 || reg == 0xab || reg == 0xaf || (reg >= 0xb5 && reg <= 0xb7) || reg == 0xd3 || reg == 0xd7 || reg == 0xdb || reg == 0xdf || (reg >= 0xe5 && reg <= 0xe7) || reg == 0x82 || (reg >= 0x89 && reg <= 0x8e))) return 0; switch (id) { case GM7113C: return reg != 0x14 && (reg < 0x18 || reg > 0x1e) && reg < 0x20; case SAA7113: return reg != 0x14 && (reg < 0x18 || reg > 0x1e) && (reg < 0x20 || reg > 0x3f) && reg != 0x5d && reg < 0x63; case SAA7114: return (reg < 0x1a || reg > 0x1e) && (reg < 0x20 || reg > 0x2f) && (reg < 0x63 || reg > 0x7f) && reg != 0x33 && reg != 0x37 && reg != 0x81 && reg < 0xf0; case SAA7115: return (reg < 0x20 || reg > 0x2f) && reg != 0x65 && (reg < 0xfc || reg > 0xfe); case SAA7118: return (reg < 0x1a || reg > 0x1d) && (reg < 0x20 || reg > 0x22) && (reg < 0x26 || reg > 0x28) && reg != 0x33 && reg != 0x37 && (reg < 0x63 || reg > 0x7f) && reg != 0x81 && reg < 0xf0; } return 1; } static int saa711x_writeregs(struct v4l2_subdev *sd, const unsigned char *regs) { struct saa711x_state *state = to_state(sd); unsigned char reg, data; while (*regs != 0x00) { reg = *(regs++); data = *(regs++); /* According with datasheets, reserved regs should be filled with 0 - seems better not to touch on they */ if (saa711x_has_reg(state->ident, reg)) { if (saa711x_write(sd, reg, data) < 0) return -1; } else { v4l2_dbg(1, debug, sd, "tried to access reserved reg 0x%02x\n", reg); } } return 0; } static inline int saa711x_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_read_byte_data(client, reg); } /* ----------------------------------------------------------------------- */ /* SAA7111 initialization table */ static const unsigned char saa7111_init[] = { R_01_INC_DELAY, 0x00, /* reserved */ /*front end */ R_02_INPUT_CNTL_1, 0xd0, /* FUSE=3, GUDL=2, MODE=0 */ R_03_INPUT_CNTL_2, 0x23, /* HLNRS=0, VBSL=1, 
WPOFF=0, HOLDG=0, * GAFIX=0, GAI1=256, GAI2=256 */ R_04_INPUT_CNTL_3, 0x00, /* GAI1=256 */ R_05_INPUT_CNTL_4, 0x00, /* GAI2=256 */ /* decoder */ R_06_H_SYNC_START, 0xf3, /* HSB at 13(50Hz) / 17(60Hz) * pixels after end of last line */ R_07_H_SYNC_STOP, 0xe8, /* HSS seems to be needed to * work with NTSC, too */ R_08_SYNC_CNTL, 0xc8, /* AUFD=1, FSEL=1, EXFIL=0, * VTRC=1, HPLL=0, VNOI=0 */ R_09_LUMA_CNTL, 0x01, /* BYPS=0, PREF=0, BPSS=0, * VBLB=0, UPTCV=0, APER=1 */ R_0A_LUMA_BRIGHT_CNTL, 0x80, R_0B_LUMA_CONTRAST_CNTL, 0x47, /* 0b - CONT=1.109 */ R_0C_CHROMA_SAT_CNTL, 0x40, R_0D_CHROMA_HUE_CNTL, 0x00, R_0E_CHROMA_CNTL_1, 0x01, /* 0e - CDTO=0, CSTD=0, DCCF=0, * FCTC=0, CHBW=1 */ R_0F_CHROMA_GAIN_CNTL, 0x00, /* reserved */ R_10_CHROMA_CNTL_2, 0x48, /* 10 - OFTS=1, HDEL=0, VRLN=1, YDEL=0 */ R_11_MODE_DELAY_CNTL, 0x1c, /* 11 - GPSW=0, CM99=0, FECO=0, COMPO=1, * OEYC=1, OEHV=1, VIPB=0, COLO=0 */ R_12_RT_SIGNAL_CNTL, 0x00, /* 12 - output control 2 */ R_13_RT_X_PORT_OUT_CNTL, 0x00, /* 13 - output control 3 */ R_14_ANAL_ADC_COMPAT_CNTL, 0x00, R_15_VGATE_START_FID_CHG, 0x00, R_16_VGATE_STOP, 0x00, R_17_MISC_VGATE_CONF_AND_MSB, 0x00, 0x00, 0x00 }; /* * This table has one illegal value, and some values that are not * correct according to the datasheet initialization table. * * If you need a table with legal/default values tell the driver in * i2c_board_info.platform_data, and you will get the gm7113c_init * table instead. */ /* SAA7113 Init codes */ static const unsigned char saa7113_init[] = { R_01_INC_DELAY, 0x08, R_02_INPUT_CNTL_1, 0xc2, R_03_INPUT_CNTL_2, 0x30, R_04_INPUT_CNTL_3, 0x00, R_05_INPUT_CNTL_4, 0x00, R_06_H_SYNC_START, 0x89, /* Illegal value -119, * min. value = -108 (0x94) */ R_07_H_SYNC_STOP, 0x0d, R_08_SYNC_CNTL, 0x88, /* Not datasheet default. 
* HTC = VTR mode, should be 0x98 */ R_09_LUMA_CNTL, 0x01, R_0A_LUMA_BRIGHT_CNTL, 0x80, R_0B_LUMA_CONTRAST_CNTL, 0x47, R_0C_CHROMA_SAT_CNTL, 0x40, R_0D_CHROMA_HUE_CNTL, 0x00, R_0E_CHROMA_CNTL_1, 0x01, R_0F_CHROMA_GAIN_CNTL, 0x2a, R_10_CHROMA_CNTL_2, 0x08, /* Not datsheet default. * VRLN enabled, should be 0x00 */ R_11_MODE_DELAY_CNTL, 0x0c, R_12_RT_SIGNAL_CNTL, 0x07, /* Not datasheet default, * should be 0x01 */ R_13_RT_X_PORT_OUT_CNTL, 0x00, R_14_ANAL_ADC_COMPAT_CNTL, 0x00, R_15_VGATE_START_FID_CHG, 0x00, R_16_VGATE_STOP, 0x00, R_17_MISC_VGATE_CONF_AND_MSB, 0x00, 0x00, 0x00 }; /* * GM7113C is a clone of the SAA7113 chip * This init table is copied out of the saa7113 datasheet. * In R_08 we enable "Automatic Field Detection" [AUFD], * this is disabled when saa711x_set_v4lstd is called. */ static const unsigned char gm7113c_init[] = { R_01_INC_DELAY, 0x08, R_02_INPUT_CNTL_1, 0xc0, R_03_INPUT_CNTL_2, 0x33, R_04_INPUT_CNTL_3, 0x00, R_05_INPUT_CNTL_4, 0x00, R_06_H_SYNC_START, 0xe9, R_07_H_SYNC_STOP, 0x0d, R_08_SYNC_CNTL, 0x98, R_09_LUMA_CNTL, 0x01, R_0A_LUMA_BRIGHT_CNTL, 0x80, R_0B_LUMA_CONTRAST_CNTL, 0x47, R_0C_CHROMA_SAT_CNTL, 0x40, R_0D_CHROMA_HUE_CNTL, 0x00, R_0E_CHROMA_CNTL_1, 0x01, R_0F_CHROMA_GAIN_CNTL, 0x2a, R_10_CHROMA_CNTL_2, 0x00, R_11_MODE_DELAY_CNTL, 0x0c, R_12_RT_SIGNAL_CNTL, 0x01, R_13_RT_X_PORT_OUT_CNTL, 0x00, R_14_ANAL_ADC_COMPAT_CNTL, 0x00, R_15_VGATE_START_FID_CHG, 0x00, R_16_VGATE_STOP, 0x00, R_17_MISC_VGATE_CONF_AND_MSB, 0x00, 0x00, 0x00 }; /* If a value differs from the Hauppauge driver values, then the comment starts with 'was 0xXX' to denote the Hauppauge value. Otherwise the value is identical to what the Hauppauge driver sets. */ /* SAA7114 and SAA7115 initialization table */ static const unsigned char saa7115_init_auto_input[] = { /* Front-End Part */ R_01_INC_DELAY, 0x48, /* white peak control disabled */ R_03_INPUT_CNTL_2, 0x20, /* was 0x30. 
0x20: long vertical blanking */ R_04_INPUT_CNTL_3, 0x90, /* analog gain set to 0 */ R_05_INPUT_CNTL_4, 0x90, /* analog gain set to 0 */ /* Decoder Part */ R_06_H_SYNC_START, 0xeb, /* horiz sync begin = -21 */ R_07_H_SYNC_STOP, 0xe0, /* horiz sync stop = -17 */ R_09_LUMA_CNTL, 0x53, /* 0x53, was 0x56 for 60hz. luminance control */ R_0A_LUMA_BRIGHT_CNTL, 0x80, /* was 0x88. decoder brightness, 0x80 is itu standard */ R_0B_LUMA_CONTRAST_CNTL, 0x44, /* was 0x48. decoder contrast, 0x44 is itu standard */ R_0C_CHROMA_SAT_CNTL, 0x40, /* was 0x47. decoder saturation, 0x40 is itu standard */ R_0D_CHROMA_HUE_CNTL, 0x00, R_0F_CHROMA_GAIN_CNTL, 0x00, /* use automatic gain */ R_10_CHROMA_CNTL_2, 0x06, /* chroma: active adaptive combfilter */ R_11_MODE_DELAY_CNTL, 0x00, R_12_RT_SIGNAL_CNTL, 0x9d, /* RTS0 output control: VGATE */ R_13_RT_X_PORT_OUT_CNTL, 0x80, /* ITU656 standard mode, RTCO output enable RTCE */ R_14_ANAL_ADC_COMPAT_CNTL, 0x00, R_18_RAW_DATA_GAIN_CNTL, 0x40, /* gain 0x00 = nominal */ R_19_RAW_DATA_OFF_CNTL, 0x80, R_1A_COLOR_KILL_LVL_CNTL, 0x77, /* recommended value */ R_1B_MISC_TVVCRDET, 0x42, /* recommended value */ R_1C_ENHAN_COMB_CTRL1, 0xa9, /* recommended value */ R_1D_ENHAN_COMB_CTRL2, 0x01, /* recommended value */ R_80_GLOBAL_CNTL_1, 0x0, /* No tasks enabled at init */ /* Power Device Control */ R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset device */ R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0, /* set device programmed, all in operational mode */ 0x00, 0x00 }; /* Used to reset saa7113, saa7114 and saa7115 */ static const unsigned char saa7115_cfg_reset_scaler[] = { R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x00, /* disable I-port output */ R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset scaler */ R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0, /* activate scaler */ R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01, /* enable I-port output */ 0x00, 0x00 }; /* ============== SAA7715 VIDEO templates ============= */ static const unsigned char saa7115_cfg_60hz_video[] = { R_80_GLOBAL_CNTL_1, 
0x00, /* reset tasks */ R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset scaler */ R_15_VGATE_START_FID_CHG, 0x03, R_16_VGATE_STOP, 0x11, R_17_MISC_VGATE_CONF_AND_MSB, 0x9c, R_08_SYNC_CNTL, 0x68, /* 0xBO: auto detection, 0x68 = NTSC */ R_0E_CHROMA_CNTL_1, 0x07, /* video autodetection is on */ R_5A_V_OFF_FOR_SLICER, 0x06, /* standard 60hz value for ITU656 line counting */ /* Task A */ R_90_A_TASK_HANDLING_CNTL, 0x80, R_91_A_X_PORT_FORMATS_AND_CONF, 0x48, R_92_A_X_PORT_INPUT_REFERENCE_SIGNAL, 0x40, R_93_A_I_PORT_OUTPUT_FORMATS_AND_CONF, 0x84, /* hoffset low (input), 0x0002 is minimum */ R_94_A_HORIZ_INPUT_WINDOW_START, 0x01, R_95_A_HORIZ_INPUT_WINDOW_START_MSB, 0x00, /* hsize low (input), 0x02d0 = 720 */ R_96_A_HORIZ_INPUT_WINDOW_LENGTH, 0xd0, R_97_A_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02, R_98_A_VERT_INPUT_WINDOW_START, 0x05, R_99_A_VERT_INPUT_WINDOW_START_MSB, 0x00, R_9A_A_VERT_INPUT_WINDOW_LENGTH, 0x0c, R_9B_A_VERT_INPUT_WINDOW_LENGTH_MSB, 0x00, R_9C_A_HORIZ_OUTPUT_WINDOW_LENGTH, 0xa0, R_9D_A_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x05, R_9E_A_VERT_OUTPUT_WINDOW_LENGTH, 0x0c, R_9F_A_VERT_OUTPUT_WINDOW_LENGTH_MSB, 0x00, /* Task B */ R_C0_B_TASK_HANDLING_CNTL, 0x00, R_C1_B_X_PORT_FORMATS_AND_CONF, 0x08, R_C2_B_INPUT_REFERENCE_SIGNAL_DEFINITION, 0x00, R_C3_B_I_PORT_FORMATS_AND_CONF, 0x80, /* 0x0002 is minimum */ R_C4_B_HORIZ_INPUT_WINDOW_START, 0x02, R_C5_B_HORIZ_INPUT_WINDOW_START_MSB, 0x00, /* 0x02d0 = 720 */ R_C6_B_HORIZ_INPUT_WINDOW_LENGTH, 0xd0, R_C7_B_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02, /* vwindow start 0x12 = 18 */ R_C8_B_VERT_INPUT_WINDOW_START, 0x12, R_C9_B_VERT_INPUT_WINDOW_START_MSB, 0x00, /* vwindow length 0xf8 = 248 */ R_CA_B_VERT_INPUT_WINDOW_LENGTH, VRES_60HZ>>1, R_CB_B_VERT_INPUT_WINDOW_LENGTH_MSB, VRES_60HZ>>9, /* hwindow 0x02d0 = 720 */ R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH, 0xd0, R_CD_B_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x02, R_F0_LFCO_PER_LINE, 0xad, /* Set PLL Register. 
60hz 525 lines per frame, 27 MHz */ R_F1_P_I_PARAM_SELECT, 0x05, /* low bit with 0xF0 */ R_F5_PULSGEN_LINE_LENGTH, 0xad, R_F6_PULSE_A_POS_LSB_AND_PULSEGEN_CONFIG, 0x01, 0x00, 0x00 }; static const unsigned char saa7115_cfg_50hz_video[] = { R_80_GLOBAL_CNTL_1, 0x00, R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset scaler */ R_15_VGATE_START_FID_CHG, 0x37, /* VGATE start */ R_16_VGATE_STOP, 0x16, R_17_MISC_VGATE_CONF_AND_MSB, 0x99, R_08_SYNC_CNTL, 0x28, /* 0x28 = PAL */ R_0E_CHROMA_CNTL_1, 0x07, R_5A_V_OFF_FOR_SLICER, 0x03, /* standard 50hz value */ /* Task A */ R_90_A_TASK_HANDLING_CNTL, 0x81, R_91_A_X_PORT_FORMATS_AND_CONF, 0x48, R_92_A_X_PORT_INPUT_REFERENCE_SIGNAL, 0x40, R_93_A_I_PORT_OUTPUT_FORMATS_AND_CONF, 0x84, /* This is weird: the datasheet says that you should use 2 as the minimum value, */ /* but Hauppauge uses 0, and changing that to 2 causes indeed problems (for 50hz) */ /* hoffset low (input), 0x0002 is minimum */ R_94_A_HORIZ_INPUT_WINDOW_START, 0x00, R_95_A_HORIZ_INPUT_WINDOW_START_MSB, 0x00, /* hsize low (input), 0x02d0 = 720 */ R_96_A_HORIZ_INPUT_WINDOW_LENGTH, 0xd0, R_97_A_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02, R_98_A_VERT_INPUT_WINDOW_START, 0x03, R_99_A_VERT_INPUT_WINDOW_START_MSB, 0x00, /* vsize 0x12 = 18 */ R_9A_A_VERT_INPUT_WINDOW_LENGTH, 0x12, R_9B_A_VERT_INPUT_WINDOW_LENGTH_MSB, 0x00, /* hsize 0x05a0 = 1440 */ R_9C_A_HORIZ_OUTPUT_WINDOW_LENGTH, 0xa0, R_9D_A_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x05, /* hsize hi (output) */ R_9E_A_VERT_OUTPUT_WINDOW_LENGTH, 0x12, /* vsize low (output), 0x12 = 18 */ R_9F_A_VERT_OUTPUT_WINDOW_LENGTH_MSB, 0x00, /* vsize hi (output) */ /* Task B */ R_C0_B_TASK_HANDLING_CNTL, 0x00, R_C1_B_X_PORT_FORMATS_AND_CONF, 0x08, R_C2_B_INPUT_REFERENCE_SIGNAL_DEFINITION, 0x00, R_C3_B_I_PORT_FORMATS_AND_CONF, 0x80, /* This is weird: the datasheet says that you should use 2 as the minimum value, */ /* but Hauppauge uses 0, and changing that to 2 causes indeed problems (for 50hz) */ /* hoffset low (input), 0x0002 is minimum. 
See comment above. */ R_C4_B_HORIZ_INPUT_WINDOW_START, 0x00, R_C5_B_HORIZ_INPUT_WINDOW_START_MSB, 0x00, /* hsize 0x02d0 = 720 */ R_C6_B_HORIZ_INPUT_WINDOW_LENGTH, 0xd0, R_C7_B_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02, /* voffset 0x16 = 22 */ R_C8_B_VERT_INPUT_WINDOW_START, 0x16, R_C9_B_VERT_INPUT_WINDOW_START_MSB, 0x00, /* vsize 0x0120 = 288 */ R_CA_B_VERT_INPUT_WINDOW_LENGTH, 0x20, R_CB_B_VERT_INPUT_WINDOW_LENGTH_MSB, 0x01, /* hsize 0x02d0 = 720 */ R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH, 0xd0, R_CD_B_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x02, R_F0_LFCO_PER_LINE, 0xb0, /* Set PLL Register. 50hz 625 lines per frame, 27 MHz */ R_F1_P_I_PARAM_SELECT, 0x05, /* low bit with 0xF0, (was 0x05) */ R_F5_PULSGEN_LINE_LENGTH, 0xb0, R_F6_PULSE_A_POS_LSB_AND_PULSEGEN_CONFIG, 0x01, 0x00, 0x00 }; /* ============== SAA7715 VIDEO templates (end) ======= */ static const unsigned char saa7115_cfg_vbi_on[] = { R_80_GLOBAL_CNTL_1, 0x00, /* reset tasks */ R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset scaler */ R_80_GLOBAL_CNTL_1, 0x30, /* Activate both tasks */ R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0, /* activate scaler */ R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01, /* Enable I-port output */ 0x00, 0x00 }; static const unsigned char saa7115_cfg_vbi_off[] = { R_80_GLOBAL_CNTL_1, 0x00, /* reset tasks */ R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset scaler */ R_80_GLOBAL_CNTL_1, 0x20, /* Activate only task "B" */ R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0, /* activate scaler */ R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01, /* Enable I-port output */ 0x00, 0x00 }; static const unsigned char saa7115_init_misc[] = { R_81_V_SYNC_FLD_ID_SRC_SEL_AND_RETIMED_V_F, 0x01, R_83_X_PORT_I_O_ENA_AND_OUT_CLK, 0x01, R_84_I_PORT_SIGNAL_DEF, 0x20, R_85_I_PORT_SIGNAL_POLAR, 0x21, R_86_I_PORT_FIFO_FLAG_CNTL_AND_ARBIT, 0xc5, R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01, /* Task A */ R_A0_A_HORIZ_PRESCALING, 0x01, R_A1_A_ACCUMULATION_LENGTH, 0x00, R_A2_A_PRESCALER_DC_GAIN_AND_FIR_PREFILTER, 0x00, /* Configure controls at nominal value*/ 
R_A4_A_LUMA_BRIGHTNESS_CNTL, 0x80, R_A5_A_LUMA_CONTRAST_CNTL, 0x40, R_A6_A_CHROMA_SATURATION_CNTL, 0x40, /* note: 2 x zoom ensures that VBI lines have same length as video lines. */ R_A8_A_HORIZ_LUMA_SCALING_INC, 0x00, R_A9_A_HORIZ_LUMA_SCALING_INC_MSB, 0x02, R_AA_A_HORIZ_LUMA_PHASE_OFF, 0x00, /* must be horiz lum scaling / 2 */ R_AC_A_HORIZ_CHROMA_SCALING_INC, 0x00, R_AD_A_HORIZ_CHROMA_SCALING_INC_MSB, 0x01, /* must be offset luma / 2 */ R_AE_A_HORIZ_CHROMA_PHASE_OFF, 0x00, R_B0_A_VERT_LUMA_SCALING_INC, 0x00, R_B1_A_VERT_LUMA_SCALING_INC_MSB, 0x04, R_B2_A_VERT_CHROMA_SCALING_INC, 0x00, R_B3_A_VERT_CHROMA_SCALING_INC_MSB, 0x04, R_B4_A_VERT_SCALING_MODE_CNTL, 0x01, R_B8_A_VERT_CHROMA_PHASE_OFF_00, 0x00, R_B9_A_VERT_CHROMA_PHASE_OFF_01, 0x00, R_BA_A_VERT_CHROMA_PHASE_OFF_10, 0x00, R_BB_A_VERT_CHROMA_PHASE_OFF_11, 0x00, R_BC_A_VERT_LUMA_PHASE_OFF_00, 0x00, R_BD_A_VERT_LUMA_PHASE_OFF_01, 0x00, R_BE_A_VERT_LUMA_PHASE_OFF_10, 0x00, R_BF_A_VERT_LUMA_PHASE_OFF_11, 0x00, /* Task B */ R_D0_B_HORIZ_PRESCALING, 0x01, R_D1_B_ACCUMULATION_LENGTH, 0x00, R_D2_B_PRESCALER_DC_GAIN_AND_FIR_PREFILTER, 0x00, /* Configure controls at nominal value*/ R_D4_B_LUMA_BRIGHTNESS_CNTL, 0x80, R_D5_B_LUMA_CONTRAST_CNTL, 0x40, R_D6_B_CHROMA_SATURATION_CNTL, 0x40, /* hor lum scaling 0x0400 = 1 */ R_D8_B_HORIZ_LUMA_SCALING_INC, 0x00, R_D9_B_HORIZ_LUMA_SCALING_INC_MSB, 0x04, R_DA_B_HORIZ_LUMA_PHASE_OFF, 0x00, /* must be hor lum scaling / 2 */ R_DC_B_HORIZ_CHROMA_SCALING, 0x00, R_DD_B_HORIZ_CHROMA_SCALING_MSB, 0x02, /* must be offset luma / 2 */ R_DE_B_HORIZ_PHASE_OFFSET_CRHOMA, 0x00, R_E0_B_VERT_LUMA_SCALING_INC, 0x00, R_E1_B_VERT_LUMA_SCALING_INC_MSB, 0x04, R_E2_B_VERT_CHROMA_SCALING_INC, 0x00, R_E3_B_VERT_CHROMA_SCALING_INC_MSB, 0x04, R_E4_B_VERT_SCALING_MODE_CNTL, 0x01, R_E8_B_VERT_CHROMA_PHASE_OFF_00, 0x00, R_E9_B_VERT_CHROMA_PHASE_OFF_01, 0x00, R_EA_B_VERT_CHROMA_PHASE_OFF_10, 0x00, R_EB_B_VERT_CHROMA_PHASE_OFF_11, 0x00, R_EC_B_VERT_LUMA_PHASE_OFF_00, 0x00, R_ED_B_VERT_LUMA_PHASE_OFF_01, 0x00, 
R_EE_B_VERT_LUMA_PHASE_OFF_10, 0x00, R_EF_B_VERT_LUMA_PHASE_OFF_11, 0x00, R_F2_NOMINAL_PLL2_DTO, 0x50, /* crystal clock = 24.576 MHz, target = 27MHz */ R_F3_PLL_INCREMENT, 0x46, R_F4_PLL2_STATUS, 0x00, R_F7_PULSE_A_POS_MSB, 0x4b, /* not the recommended settings! */ R_F8_PULSE_B_POS, 0x00, R_F9_PULSE_B_POS_MSB, 0x4b, R_FA_PULSE_C_POS, 0x00, R_FB_PULSE_C_POS_MSB, 0x4b, /* PLL2 lock detection settings: 71 lines 50% phase error */ R_FF_S_PLL_MAX_PHASE_ERR_THRESH_NUM_LINES, 0x88, /* Turn off VBI */ R_40_SLICER_CNTL_1, 0x20, /* No framing code errors allowed. */ R_41_LCR_BASE, 0xff, R_41_LCR_BASE+1, 0xff, R_41_LCR_BASE+2, 0xff, R_41_LCR_BASE+3, 0xff, R_41_LCR_BASE+4, 0xff, R_41_LCR_BASE+5, 0xff, R_41_LCR_BASE+6, 0xff, R_41_LCR_BASE+7, 0xff, R_41_LCR_BASE+8, 0xff, R_41_LCR_BASE+9, 0xff, R_41_LCR_BASE+10, 0xff, R_41_LCR_BASE+11, 0xff, R_41_LCR_BASE+12, 0xff, R_41_LCR_BASE+13, 0xff, R_41_LCR_BASE+14, 0xff, R_41_LCR_BASE+15, 0xff, R_41_LCR_BASE+16, 0xff, R_41_LCR_BASE+17, 0xff, R_41_LCR_BASE+18, 0xff, R_41_LCR_BASE+19, 0xff, R_41_LCR_BASE+20, 0xff, R_41_LCR_BASE+21, 0xff, R_41_LCR_BASE+22, 0xff, R_58_PROGRAM_FRAMING_CODE, 0x40, R_59_H_OFF_FOR_SLICER, 0x47, R_5B_FLD_OFF_AND_MSB_FOR_H_AND_V_OFF, 0x83, R_5D_DID, 0xbd, R_5E_SDID, 0x35, R_02_INPUT_CNTL_1, 0xc4, /* input tuner -> input 4, amplifier active */ R_80_GLOBAL_CNTL_1, 0x20, /* enable task B */ R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0, 0x00, 0x00 }; static int saa711x_odd_parity(u8 c) { c ^= (c >> 4); c ^= (c >> 2); c ^= (c >> 1); return c & 1; } static int saa711x_decode_vps(u8 *dst, u8 *p) { static const u8 biphase_tbl[] = { 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0, 0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96, 0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2, 0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94, 0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0, 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 
0x78, 0x70, 0xf0, 0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5, 0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1, 0xc3, 0x4b, 0x43, 0xc3, 0x87, 0x0f, 0x07, 0x87, 0x83, 0x0b, 0x03, 0x83, 0xc3, 0x4b, 0x43, 0xc3, 0xc1, 0x49, 0x41, 0xc1, 0x85, 0x0d, 0x05, 0x85, 0x81, 0x09, 0x01, 0x81, 0xc1, 0x49, 0x41, 0xc1, 0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5, 0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1, 0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4, 0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0, 0xc2, 0x4a, 0x42, 0xc2, 0x86, 0x0e, 0x06, 0x86, 0x82, 0x0a, 0x02, 0x82, 0xc2, 0x4a, 0x42, 0xc2, 0xc0, 0x48, 0x40, 0xc0, 0x84, 0x0c, 0x04, 0x84, 0x80, 0x08, 0x00, 0x80, 0xc0, 0x48, 0x40, 0xc0, 0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4, 0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0, 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0, 0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96, 0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2, 0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94, 0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0, 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0, }; int i; u8 c, err = 0; for (i = 0; i < 2 * 13; i += 2) { err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]]; c = (biphase_tbl[p[i + 1]] & 0xf) | ((biphase_tbl[p[i]] & 0xf) << 4); dst[i / 2] = c; } return err & 0xf0; } static int saa711x_decode_wss(u8 *p) { static const int wss_bits[8] = { 0, 0, 0, 1, 0, 1, 1, 1 }; unsigned char parity; int wss = 0; int i; for (i = 0; i < 16; i++) { int b1 = wss_bits[p[i] & 7]; int b2 = wss_bits[(p[i] >> 3) & 7]; if (b1 == b2) return -1; wss |= b2 << i; } parity = wss & 15; parity ^= parity >> 2; parity ^= parity >> 1; if (!(parity & 1)) return -1; return wss; } static int saa711x_s_clock_freq(struct v4l2_subdev *sd, u32 freq) { struct saa711x_state *state = to_state(sd); u32 acpf; u32 acni; u32 hz; u64 f; u8 acc = 0; /* reg 0x3a, audio clock control */ /* Checks for chips that don't 
have audio clock (saa7111, saa7113) */ if (!saa711x_has_reg(state->ident, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD)) return 0; v4l2_dbg(1, debug, sd, "set audio clock freq: %d\n", freq); /* sanity check */ if (freq < 32000 || freq > 48000) return -EINVAL; /* hz is the refresh rate times 100 */ hz = (state->std & V4L2_STD_525_60) ? 5994 : 5000; /* acpf = (256 * freq) / field_frequency == (256 * 100 * freq) / hz */ acpf = (25600 * freq) / hz; /* acni = (256 * freq * 2^23) / crystal_frequency = (freq * 2^(8+23)) / crystal_frequency = (freq << 31) / crystal_frequency */ f = freq; f = f << 31; do_div(f, state->crystal_freq); acni = f; if (state->ucgc) { acpf = acpf * state->cgcdiv / 16; acni = acni * state->cgcdiv / 16; acc = 0x80; if (state->cgcdiv == 3) acc |= 0x40; } if (state->apll) acc |= 0x08; if (state->double_asclk) { acpf <<= 1; acni <<= 1; } saa711x_write(sd, R_38_CLK_RATIO_AMXCLK_TO_ASCLK, 0x03); saa711x_write(sd, R_39_CLK_RATIO_ASCLK_TO_ALRCLK, 0x10 << state->double_asclk); saa711x_write(sd, R_3A_AUD_CLK_GEN_BASIC_SETUP, acc); saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD, acpf & 0xff); saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD+1, (acpf >> 8) & 0xff); saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD+2, (acpf >> 16) & 0x03); saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC, acni & 0xff); saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC+1, (acni >> 8) & 0xff); saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC+2, (acni >> 16) & 0x3f); state->audclk_freq = freq; return 0; } static int saa711x_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct saa711x_state *state = to_state(sd); switch (ctrl->id) { case V4L2_CID_CHROMA_AGC: /* chroma gain cluster */ if (state->agc->val) state->gain->val = saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f; break; } return 0; } static int saa711x_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct saa711x_state *state = to_state(sd); switch (ctrl->id) { case 
V4L2_CID_BRIGHTNESS: saa711x_write(sd, R_0A_LUMA_BRIGHT_CNTL, ctrl->val); break; case V4L2_CID_CONTRAST: saa711x_write(sd, R_0B_LUMA_CONTRAST_CNTL, ctrl->val); break; case V4L2_CID_SATURATION: saa711x_write(sd, R_0C_CHROMA_SAT_CNTL, ctrl->val); break; case V4L2_CID_HUE: saa711x_write(sd, R_0D_CHROMA_HUE_CNTL, ctrl->val); break; case V4L2_CID_CHROMA_AGC: /* chroma gain cluster */ if (state->agc->val) saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, state->gain->val); else saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, state->gain->val | 0x80); break; default: return -EINVAL; } return 0; } static int saa711x_set_size(struct v4l2_subdev *sd, int width, int height) { struct saa711x_state *state = to_state(sd); int HPSC, HFSC; int VSCY; int res; int is_50hz = state->std & V4L2_STD_625_50; int Vsrc = is_50hz ? 576 : 480; v4l2_dbg(1, debug, sd, "decoder set size to %ix%i\n", width, height); /* FIXME need better bounds checking here */ if ((width < 1) || (width > 1440)) return -EINVAL; if ((height < 1) || (height > Vsrc)) return -EINVAL; if (!saa711x_has_reg(state->ident, R_D0_B_HORIZ_PRESCALING)) { /* Decoder only supports 720 columns and 480 or 576 lines */ if (width != 720) return -EINVAL; if (height != Vsrc) return -EINVAL; } state->width = width; state->height = height; if (!saa711x_has_reg(state->ident, R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH)) return 0; /* probably have a valid size, let's set it */ /* Set output width/height */ /* width */ saa711x_write(sd, R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH, (u8) (width & 0xff)); saa711x_write(sd, R_CD_B_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, (u8) ((width >> 8) & 0xff)); /* Vertical Scaling uses height/2 */ res = height / 2; /* On 60Hz, it is using a higher Vertical Output Size */ if (!is_50hz) res += (VRES_60HZ - 480) >> 1; /* height */ saa711x_write(sd, R_CE_B_VERT_OUTPUT_WINDOW_LENGTH, (u8) (res & 0xff)); saa711x_write(sd, R_CF_B_VERT_OUTPUT_WINDOW_LENGTH_MSB, (u8) ((res >> 8) & 0xff)); /* Scaling settings */ /* Hprescaler is floor(inres/outres) */ HPSC = 
(int)(720 / width); /* 0 is not allowed (div. by zero) */ HPSC = HPSC ? HPSC : 1; HFSC = (int)((1024 * 720) / (HPSC * width)); /* FIXME hardcodes to "Task B" * write H prescaler integer */ saa711x_write(sd, R_D0_B_HORIZ_PRESCALING, (u8) (HPSC & 0x3f)); v4l2_dbg(1, debug, sd, "Hpsc: 0x%05x, Hfsc: 0x%05x\n", HPSC, HFSC); /* write H fine-scaling (luminance) */ saa711x_write(sd, R_D8_B_HORIZ_LUMA_SCALING_INC, (u8) (HFSC & 0xff)); saa711x_write(sd, R_D9_B_HORIZ_LUMA_SCALING_INC_MSB, (u8) ((HFSC >> 8) & 0xff)); /* write H fine-scaling (chrominance) * must be lum/2, so i'll just bitshift :) */ saa711x_write(sd, R_DC_B_HORIZ_CHROMA_SCALING, (u8) ((HFSC >> 1) & 0xff)); saa711x_write(sd, R_DD_B_HORIZ_CHROMA_SCALING_MSB, (u8) ((HFSC >> 9) & 0xff)); VSCY = (int)((1024 * Vsrc) / height); v4l2_dbg(1, debug, sd, "Vsrc: %d, Vscy: 0x%05x\n", Vsrc, VSCY); /* Correct Contrast and Luminance */ saa711x_write(sd, R_D5_B_LUMA_CONTRAST_CNTL, (u8) (64 * 1024 / VSCY)); saa711x_write(sd, R_D6_B_CHROMA_SATURATION_CNTL, (u8) (64 * 1024 / VSCY)); /* write V fine-scaling (luminance) */ saa711x_write(sd, R_E0_B_VERT_LUMA_SCALING_INC, (u8) (VSCY & 0xff)); saa711x_write(sd, R_E1_B_VERT_LUMA_SCALING_INC_MSB, (u8) ((VSCY >> 8) & 0xff)); /* write V fine-scaling (chrominance) */ saa711x_write(sd, R_E2_B_VERT_CHROMA_SCALING_INC, (u8) (VSCY & 0xff)); saa711x_write(sd, R_E3_B_VERT_CHROMA_SCALING_INC_MSB, (u8) ((VSCY >> 8) & 0xff)); saa711x_writeregs(sd, saa7115_cfg_reset_scaler); /* Activates task "B" */ saa711x_write(sd, R_80_GLOBAL_CNTL_1, saa711x_read(sd, R_80_GLOBAL_CNTL_1) | 0x20); return 0; } static void saa711x_set_v4lstd(struct v4l2_subdev *sd, v4l2_std_id std) { struct saa711x_state *state = to_state(sd); /* Prevent unnecessary standard changes. During a standard change the I-Port is temporarily disabled. Any devices reading from that port can get confused. 
Note that s_std is also used to switch from radio to TV mode, so if a s_std is broadcast to all I2C devices then you do not want to have an unwanted side-effect here. */ if (std == state->std) return; state->std = std; // This works for NTSC-M, SECAM-L and the 50Hz PAL variants. if (std & V4L2_STD_525_60) { v4l2_dbg(1, debug, sd, "decoder set standard 60 Hz\n"); if (state->ident == GM7113C) { u8 reg = saa711x_read(sd, R_08_SYNC_CNTL); reg &= ~(SAA7113_R_08_FSEL | SAA7113_R_08_AUFD); reg |= SAA7113_R_08_FSEL; saa711x_write(sd, R_08_SYNC_CNTL, reg); } else { saa711x_writeregs(sd, saa7115_cfg_60hz_video); } saa711x_set_size(sd, 720, 480); } else { v4l2_dbg(1, debug, sd, "decoder set standard 50 Hz\n"); if (state->ident == GM7113C) { u8 reg = saa711x_read(sd, R_08_SYNC_CNTL); reg &= ~(SAA7113_R_08_FSEL | SAA7113_R_08_AUFD); saa711x_write(sd, R_08_SYNC_CNTL, reg); } else { saa711x_writeregs(sd, saa7115_cfg_50hz_video); } saa711x_set_size(sd, 720, 576); } /* Register 0E - Bits D6-D4 on NO-AUTO mode (SAA7111 and SAA7113 doesn't have auto mode) 50 Hz / 625 lines 60 Hz / 525 lines 000 PAL BGDHI (4.43Mhz) NTSC M (3.58MHz) 001 NTSC 4.43 (50 Hz) PAL 4.43 (60 Hz) 010 Combination-PAL N (3.58MHz) NTSC 4.43 (60 Hz) 011 NTSC N (3.58MHz) PAL M (3.58MHz) 100 reserved NTSC-Japan (3.58MHz) */ if (state->ident <= SAA7113 || state->ident == GM7113C) { u8 reg = saa711x_read(sd, R_0E_CHROMA_CNTL_1) & 0x8f; if (std == V4L2_STD_PAL_M) { reg |= 0x30; } else if (std == V4L2_STD_PAL_Nc) { reg |= 0x20; } else if (std == V4L2_STD_PAL_60) { reg |= 0x10; } else if (std == V4L2_STD_NTSC_M_JP) { reg |= 0x40; } else if (std & V4L2_STD_SECAM) { reg |= 0x50; } saa711x_write(sd, R_0E_CHROMA_CNTL_1, reg); } else { /* restart task B if needed */ int taskb = saa711x_read(sd, R_80_GLOBAL_CNTL_1) & 0x10; if (taskb && state->ident == SAA7114) saa711x_writeregs(sd, saa7115_cfg_vbi_on); /* switch audio mode too! 
*/ saa711x_s_clock_freq(sd, state->audclk_freq); } } /* setup the sliced VBI lcr registers according to the sliced VBI format */ static void saa711x_set_lcr(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt) { struct saa711x_state *state = to_state(sd); int is_50hz = (state->std & V4L2_STD_625_50); u8 lcr[24]; int i, x; #if 1 /* saa7113/7114/7118 VBI support are experimental */ if (!saa711x_has_reg(state->ident, R_41_LCR_BASE)) return; #else /* SAA7113 and SAA7118 also should support VBI - Need testing */ if (state->ident != SAA7115) return; #endif for (i = 0; i <= 23; i++) lcr[i] = 0xff; if (fmt == NULL) { /* raw VBI */ if (is_50hz) for (i = 6; i <= 23; i++) lcr[i] = 0xdd; else for (i = 10; i <= 21; i++) lcr[i] = 0xdd; } else { /* sliced VBI */ /* first clear lines that cannot be captured */ if (is_50hz) { for (i = 0; i <= 5; i++) fmt->service_lines[0][i] = fmt->service_lines[1][i] = 0; } else { for (i = 0; i <= 9; i++) fmt->service_lines[0][i] = fmt->service_lines[1][i] = 0; for (i = 22; i <= 23; i++) fmt->service_lines[0][i] = fmt->service_lines[1][i] = 0; } /* Now set the lcr values according to the specified service */ for (i = 6; i <= 23; i++) { lcr[i] = 0; for (x = 0; x <= 1; x++) { switch (fmt->service_lines[1-x][i]) { case 0: lcr[i] |= 0xf << (4 * x); break; case V4L2_SLICED_TELETEXT_B: lcr[i] |= 1 << (4 * x); break; case V4L2_SLICED_CAPTION_525: lcr[i] |= 4 << (4 * x); break; case V4L2_SLICED_WSS_625: lcr[i] |= 5 << (4 * x); break; case V4L2_SLICED_VPS: lcr[i] |= 7 << (4 * x); break; } } } } /* write the lcr registers */ for (i = 2; i <= 23; i++) { saa711x_write(sd, i - 2 + R_41_LCR_BASE, lcr[i]); } /* enable/disable raw VBI capturing */ saa711x_writeregs(sd, fmt == NULL ? 
saa7115_cfg_vbi_on : saa7115_cfg_vbi_off); } static int saa711x_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *sliced) { static u16 lcr2vbi[] = { 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */ 0, V4L2_SLICED_CAPTION_525, /* 4 */ V4L2_SLICED_WSS_625, 0, /* 5 */ V4L2_SLICED_VPS, 0, 0, 0, 0, /* 7 */ 0, 0, 0, 0 }; int i; memset(sliced->service_lines, 0, sizeof(sliced->service_lines)); sliced->service_set = 0; /* done if using raw VBI */ if (saa711x_read(sd, R_80_GLOBAL_CNTL_1) & 0x10) return 0; for (i = 2; i <= 23; i++) { u8 v = saa711x_read(sd, i - 2 + R_41_LCR_BASE); sliced->service_lines[0][i] = lcr2vbi[v >> 4]; sliced->service_lines[1][i] = lcr2vbi[v & 0xf]; sliced->service_set |= sliced->service_lines[0][i] | sliced->service_lines[1][i]; } return 0; } static int saa711x_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt) { saa711x_set_lcr(sd, NULL); return 0; } static int saa711x_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt) { saa711x_set_lcr(sd, fmt); return 0; } static int saa711x_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; if (format->pad || fmt->code != MEDIA_BUS_FMT_FIXED) return -EINVAL; fmt->field = V4L2_FIELD_INTERLACED; fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; if (format->which == V4L2_SUBDEV_FORMAT_TRY) return 0; return saa711x_set_size(sd, fmt->width, fmt->height); } /* Decode the sliced VBI data stream as created by the saa7115. The format is described in the saa7115 datasheet in Tables 25 and 26 and in Figure 33. The current implementation uses SAV/EAV codes and not the ancillary data headers. The vbi->p pointer points to the R_5E_SDID byte right after the SAV code. 
*/ static int saa711x_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi) { struct saa711x_state *state = to_state(sd); static const char vbi_no_data_pattern[] = { 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0 }; u8 *p = vbi->p; u32 wss; int id1, id2; /* the ID1 and ID2 bytes from the internal header */ vbi->type = 0; /* mark result as a failure */ id1 = p[2]; id2 = p[3]; /* Note: the field bit is inverted for 60 Hz video */ if (state->std & V4L2_STD_525_60) id1 ^= 0x40; /* Skip internal header, p now points to the start of the payload */ p += 4; vbi->p = p; /* calculate field and line number of the VBI packet (1-23) */ vbi->is_second_field = ((id1 & 0x40) != 0); vbi->line = (id1 & 0x3f) << 3; vbi->line |= (id2 & 0x70) >> 4; /* Obtain data type */ id2 &= 0xf; /* If the VBI slicer does not detect any signal it will fill up the payload buffer with 0xa0 bytes. */ if (!memcmp(p, vbi_no_data_pattern, sizeof(vbi_no_data_pattern))) return 0; /* decode payloads */ switch (id2) { case 1: vbi->type = V4L2_SLICED_TELETEXT_B; break; case 4: if (!saa711x_odd_parity(p[0]) || !saa711x_odd_parity(p[1])) return 0; vbi->type = V4L2_SLICED_CAPTION_525; break; case 5: wss = saa711x_decode_wss(p); if (wss == -1) return 0; p[0] = wss & 0xff; p[1] = wss >> 8; vbi->type = V4L2_SLICED_WSS_625; break; case 7: if (saa711x_decode_vps(p, p) != 0) return 0; vbi->type = V4L2_SLICED_VPS; break; default: break; } return 0; } /* ============ SAA7115 AUDIO settings (end) ============= */ static int saa711x_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct saa711x_state *state = to_state(sd); int status; if (state->radio) return 0; status = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC); v4l2_dbg(1, debug, sd, "status: 0x%02x\n", status); vt->signal = ((status & (1 << 6)) == 0) ? 
0xffff : 0x0; return 0; } static int saa711x_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct saa711x_state *state = to_state(sd); state->radio = 0; saa711x_set_v4lstd(sd, std); return 0; } static int saa711x_s_radio(struct v4l2_subdev *sd) { struct saa711x_state *state = to_state(sd); state->radio = 1; return 0; } static int saa711x_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct saa711x_state *state = to_state(sd); u8 mask = (state->ident <= SAA7111A) ? 0xf8 : 0xf0; v4l2_dbg(1, debug, sd, "decoder set input %d output %d\n", input, output); /* saa7111/3 does not have these inputs */ if ((state->ident <= SAA7113 || state->ident == GM7113C) && (input == SAA7115_COMPOSITE4 || input == SAA7115_COMPOSITE5)) { return -EINVAL; } if (input > SAA7115_SVIDEO3) return -EINVAL; if (state->input == input && state->output == output) return 0; v4l2_dbg(1, debug, sd, "now setting %s input %s output\n", (input >= SAA7115_SVIDEO0) ? "S-Video" : "Composite", (output == SAA7115_IPORT_ON) ? "iport on" : "iport off"); state->input = input; /* saa7111 has slightly different input numbering */ if (state->ident <= SAA7111A) { if (input >= SAA7115_COMPOSITE4) input -= 2; /* saa7111 specific */ saa711x_write(sd, R_10_CHROMA_CNTL_2, (saa711x_read(sd, R_10_CHROMA_CNTL_2) & 0x3f) | ((output & 0xc0) ^ 0x40)); saa711x_write(sd, R_13_RT_X_PORT_OUT_CNTL, (saa711x_read(sd, R_13_RT_X_PORT_OUT_CNTL) & 0xf0) | ((output & 2) ? 0x0a : 0)); } /* select mode */ saa711x_write(sd, R_02_INPUT_CNTL_1, (saa711x_read(sd, R_02_INPUT_CNTL_1) & mask) | input); /* bypass chrominance trap for S-Video modes */ saa711x_write(sd, R_09_LUMA_CNTL, (saa711x_read(sd, R_09_LUMA_CNTL) & 0x7f) | (state->input >= SAA7115_SVIDEO0 ? 
0x80 : 0x0));

    state->output = output;
    /* SAA7114/7115 only: mirror the output selection into the X-port clock enable bit. */
    if (state->ident == SAA7114 || state->ident == SAA7115) {
        saa711x_write(sd, R_83_X_PORT_I_O_ENA_AND_OUT_CLK,
            (saa711x_read(sd, R_83_X_PORT_I_O_ENA_AND_OUT_CLK) & 0xfe) |
            (state->output & 0x01));
    }
    /* Chips newer than SAA7111A: program the I-port signal polarity (IDQ default vs. inverted). */
    if (state->ident > SAA7111A) {
        if (config & SAA7115_IDQ_IS_DEFAULT)
            saa711x_write(sd, R_85_I_PORT_SIGNAL_POLAR, 0x20);
        else
            saa711x_write(sd, R_85_I_PORT_SIGNAL_POLAR, 0x21);
    }
    return 0;
}

/*
 * Drive the GPIO line via bit 7 of register 0x11.
 * Only the SAA7111/7111A expose this; newer variants return -EINVAL.
 */
static int saa711x_s_gpio(struct v4l2_subdev *sd, u32 val)
{
    struct saa711x_state *state = to_state(sd);

    if (state->ident > SAA7111A)
        return -EINVAL;
    saa711x_write(sd, 0x11, (saa711x_read(sd, 0x11) & 0x7f) |
        (val ? 0x80 : 0));
    return 0;
}

/*
 * Enable/disable the image-port output.  A no-op when the requested
 * state matches the cached one or when the chip variant has no
 * I-port gating register.
 */
static int saa711x_s_stream(struct v4l2_subdev *sd, int enable)
{
    struct saa711x_state *state = to_state(sd);

    v4l2_dbg(1, debug, sd, "%s output\n",
            enable ? "enable" : "disable");

    if (state->enable == enable)
        return 0;
    state->enable = enable;
    if (!saa711x_has_reg(state->ident, R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED))
        return 0;
    saa711x_write(sd, R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, state->enable);
    return 0;
}

/*
 * Select the crystal frequency and derived audio clock options, then
 * reprogram the audio clock for the currently selected sample rate.
 * Only the two frequencies the hardware supports are accepted.
 */
static int saa711x_s_crystal_freq(struct v4l2_subdev *sd, u32 freq, u32 flags)
{
    struct saa711x_state *state = to_state(sd);

    if (freq != SAA7115_FREQ_32_11_MHZ && freq != SAA7115_FREQ_24_576_MHZ)
        return -EINVAL;
    state->crystal_freq = freq;
    state->double_asclk = flags & SAA7115_FREQ_FL_DOUBLE_ASCLK;
    state->cgcdiv = (flags & SAA7115_FREQ_FL_CGCDIV) ? 3 : 4;
    state->ucgc = flags & SAA7115_FREQ_FL_UCGC;
    state->apll = flags & SAA7115_FREQ_FL_APLL;
    /* Re-derive the audio clock registers from the new crystal settings. */
    saa711x_s_clock_freq(sd, state->audclk_freq);
    return 0;
}

/* Reset the scaler by replaying its register init sequence ('val' is unused). */
static int saa711x_reset(struct v4l2_subdev *sd, u32 val)
{
    v4l2_dbg(1, debug, sd, "decoder RESET\n");
    saa711x_writeregs(sd, saa7115_cfg_reset_scaler);
    return 0;
}

/* Read back one captured sliced-VBI payload (WSS or closed caption/XDS). */
static int saa711x_g_vbi_data(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *data)
{
    /* Note: the internal field ID is inverted for NTSC, so data->field 0
       maps to the saa7115 even field, whereas for PAL it maps to the
       saa7115 odd field.
*/ switch (data->id) { case V4L2_SLICED_WSS_625: if (saa711x_read(sd, 0x6b) & 0xc0) return -EIO; data->data[0] = saa711x_read(sd, 0x6c); data->data[1] = saa711x_read(sd, 0x6d); return 0; case V4L2_SLICED_CAPTION_525: if (data->field == 0) { /* CC */ if (saa711x_read(sd, 0x66) & 0x30) return -EIO; data->data[0] = saa711x_read(sd, 0x69); data->data[1] = saa711x_read(sd, 0x6a); return 0; } /* XDS */ if (saa711x_read(sd, 0x66) & 0xc0) return -EIO; data->data[0] = saa711x_read(sd, 0x67); data->data[1] = saa711x_read(sd, 0x68); return 0; default: return -EINVAL; } } static int saa711x_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) { struct saa711x_state *state = to_state(sd); int reg1f, reg1e; /* * The V4L2 core already initializes std with all supported * Standards. All driver needs to do is to mask it, to remove * standards that don't apply from the mask */ reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC); if (state->ident == SAA7115) { reg1e = saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC); v4l2_dbg(1, debug, sd, "Status byte 1 (0x1e)=0x%02x\n", reg1e); switch (reg1e & 0x03) { case 1: *std &= V4L2_STD_NTSC; break; case 2: /* * V4L2_STD_PAL just cover the european PAL standards. * This is wrong, as the device could also be using an * other PAL standard. 
*/ *std &= V4L2_STD_PAL | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | V4L2_STD_PAL_M | V4L2_STD_PAL_60; break; case 3: *std &= V4L2_STD_SECAM; break; default: *std = V4L2_STD_UNKNOWN; /* Can't detect anything */ break; } } v4l2_dbg(1, debug, sd, "Status byte 2 (0x1f)=0x%02x\n", reg1f); /* horizontal/vertical not locked */ if (reg1f & 0x40) { *std = V4L2_STD_UNKNOWN; goto ret; } if (reg1f & 0x20) *std &= V4L2_STD_525_60; else *std &= V4L2_STD_625_50; ret: v4l2_dbg(1, debug, sd, "detected std mask = %08Lx\n", *std); return 0; } static int saa711x_g_input_status(struct v4l2_subdev *sd, u32 *status) { struct saa711x_state *state = to_state(sd); int reg1e = 0x80; int reg1f; *status = V4L2_IN_ST_NO_SIGNAL; if (state->ident == SAA7115) reg1e = saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC); reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC); if ((reg1f & 0xc1) == 0x81 && (reg1e & 0xc0) == 0x80) *status = 0; return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int saa711x_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { reg->val = saa711x_read(sd, reg->reg & 0xff); reg->size = 1; return 0; } static int saa711x_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg) { saa711x_write(sd, reg->reg & 0xff, reg->val & 0xff); return 0; } #endif static int saa711x_log_status(struct v4l2_subdev *sd) { struct saa711x_state *state = to_state(sd); int reg1e, reg1f; int signalOk; int vcr; v4l2_info(sd, "Audio frequency: %d Hz\n", state->audclk_freq); if (state->ident != SAA7115) { /* status for the saa7114 */ reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC); signalOk = (reg1f & 0xc1) == 0x81; v4l2_info(sd, "Video signal: %s\n", signalOk ? "ok" : "bad"); v4l2_info(sd, "Frequency: %s\n", (reg1f & 0x20) ? 
"60 Hz" : "50 Hz"); return 0; } /* status for the saa7115 */ reg1e = saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC); reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC); signalOk = (reg1f & 0xc1) == 0x81 && (reg1e & 0xc0) == 0x80; vcr = !(reg1f & 0x10); if (state->input >= 6) v4l2_info(sd, "Input: S-Video %d\n", state->input - 6); else v4l2_info(sd, "Input: Composite %d\n", state->input); v4l2_info(sd, "Video signal: %s\n", signalOk ? (vcr ? "VCR" : "broadcast/DVD") : "bad"); v4l2_info(sd, "Frequency: %s\n", (reg1f & 0x20) ? "60 Hz" : "50 Hz"); switch (reg1e & 0x03) { case 1: v4l2_info(sd, "Detected format: NTSC\n"); break; case 2: v4l2_info(sd, "Detected format: PAL\n"); break; case 3: v4l2_info(sd, "Detected format: SECAM\n"); break; default: v4l2_info(sd, "Detected format: BW/No color\n"); break; } v4l2_info(sd, "Width, Height: %d, %d\n", state->width, state->height); v4l2_ctrl_handler_log_status(&state->hdl, sd->name); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops saa711x_ctrl_ops = { .s_ctrl = saa711x_s_ctrl, .g_volatile_ctrl = saa711x_g_volatile_ctrl, }; static const struct v4l2_subdev_core_ops saa711x_core_ops = { .log_status = saa711x_log_status, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, .reset = saa711x_reset, .s_gpio = saa711x_s_gpio, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = saa711x_g_register, .s_register = saa711x_s_register, #endif }; static const struct v4l2_subdev_tuner_ops saa711x_tuner_ops = { .s_radio = saa711x_s_radio, .g_tuner = saa711x_g_tuner, }; static const struct v4l2_subdev_audio_ops saa711x_audio_ops = { .s_clock_freq = saa711x_s_clock_freq, }; static const struct v4l2_subdev_video_ops saa711x_video_ops = { .s_std = saa711x_s_std, .s_routing = 
saa711x_s_routing, .s_crystal_freq = saa711x_s_crystal_freq, .s_stream = saa711x_s_stream, .querystd = saa711x_querystd, .g_input_status = saa711x_g_input_status, }; static const struct v4l2_subdev_vbi_ops saa711x_vbi_ops = { .g_vbi_data = saa711x_g_vbi_data, .decode_vbi_line = saa711x_decode_vbi_line, .g_sliced_fmt = saa711x_g_sliced_fmt, .s_sliced_fmt = saa711x_s_sliced_fmt, .s_raw_fmt = saa711x_s_raw_fmt, }; static const struct v4l2_subdev_pad_ops saa711x_pad_ops = { .set_fmt = saa711x_set_fmt, }; static const struct v4l2_subdev_ops saa711x_ops = { .core = &saa711x_core_ops, .tuner = &saa711x_tuner_ops, .audio = &saa711x_audio_ops, .video = &saa711x_video_ops, .vbi = &saa711x_vbi_ops, .pad = &saa711x_pad_ops, }; #define CHIP_VER_SIZE 16 /* ----------------------------------------------------------------------- */ static void saa711x_write_platform_data(struct saa711x_state *state, struct saa7115_platform_data *data) { struct v4l2_subdev *sd = &state->sd; u8 work; if (state->ident != GM7113C && state->ident != SAA7113) return; if (data->saa7113_r08_htc) { work = saa711x_read(sd, R_08_SYNC_CNTL); work &= ~SAA7113_R_08_HTC_MASK; work |= ((*data->saa7113_r08_htc) << SAA7113_R_08_HTC_OFFSET); saa711x_write(sd, R_08_SYNC_CNTL, work); } if (data->saa7113_r10_vrln) { work = saa711x_read(sd, R_10_CHROMA_CNTL_2); work &= ~SAA7113_R_10_VRLN_MASK; if (*data->saa7113_r10_vrln) work |= (1 << SAA7113_R_10_VRLN_OFFSET); saa711x_write(sd, R_10_CHROMA_CNTL_2, work); } if (data->saa7113_r10_ofts) { work = saa711x_read(sd, R_10_CHROMA_CNTL_2); work &= ~SAA7113_R_10_OFTS_MASK; work |= (*data->saa7113_r10_ofts << SAA7113_R_10_OFTS_OFFSET); saa711x_write(sd, R_10_CHROMA_CNTL_2, work); } if (data->saa7113_r12_rts0) { work = saa711x_read(sd, R_12_RT_SIGNAL_CNTL); work &= ~SAA7113_R_12_RTS0_MASK; work |= (*data->saa7113_r12_rts0 << SAA7113_R_12_RTS0_OFFSET); /* According to the datasheet, * SAA7113_RTS_DOT_IN should only be used on RTS1 */ WARN_ON(*data->saa7113_r12_rts0 == 
SAA7113_RTS_DOT_IN); saa711x_write(sd, R_12_RT_SIGNAL_CNTL, work); } if (data->saa7113_r12_rts1) { work = saa711x_read(sd, R_12_RT_SIGNAL_CNTL); work &= ~SAA7113_R_12_RTS1_MASK; work |= (*data->saa7113_r12_rts1 << SAA7113_R_12_RTS1_OFFSET); saa711x_write(sd, R_12_RT_SIGNAL_CNTL, work); } if (data->saa7113_r13_adlsb) { work = saa711x_read(sd, R_13_RT_X_PORT_OUT_CNTL); work &= ~SAA7113_R_13_ADLSB_MASK; if (*data->saa7113_r13_adlsb) work |= (1 << SAA7113_R_13_ADLSB_OFFSET); saa711x_write(sd, R_13_RT_X_PORT_OUT_CNTL, work); } } /** * saa711x_detect_chip - Detects the saa711x (or clone) variant * @client: I2C client structure. * @id: I2C device ID structure. * @name: Name of the device to be filled. * * Detects the Philips/NXP saa711x chip, or some clone of it. * if 'id' is NULL or id->driver_data is equal to 1, it auto-probes * the analog demod. * If the tuner is not found, it returns -ENODEV. * If auto-detection is disabled and the tuner doesn't match what it was * required, it returns -EINVAL and fills 'name'. * If the chip is found, it returns the chip ID and fills 'name'. 
*/
static int saa711x_detect_chip(struct i2c_client *client,
                   const struct i2c_device_id *id,
                   char *name)
{
    char chip_ver[CHIP_VER_SIZE];
    char chip_id;
    int i;
    int autodetect;

    /* Auto-probe when no id was given or when the "saa7115_auto" alias (driver_data == 1) was used. */
    autodetect = !id || id->driver_data == 1;

    /* Read the chip version register: each of the 16 reads returns one
       version nibble, which is also converted to a hex digit of 'name'. */
    for (i = 0; i < CHIP_VER_SIZE; i++) {
        i2c_smbus_write_byte_data(client, 0, i);
        chip_ver[i] = i2c_smbus_read_byte_data(client, 0);
        name[i] = (chip_ver[i] & 0x0f) + '0';
        if (name[i] > '9')
            name[i] += 'a' - '9' - 1;
    }
    name[i] = '\0';

    /* Check if it is a Philips/NXP chip */
    if (!memcmp(name + 1, "f711", 4)) {
        chip_id = name[5];
        snprintf(name, CHIP_VER_SIZE, "saa711%c", chip_id);

        /* Not auto-probing and the detected chip differs from the requested one. */
        if (!autodetect && strcmp(name, id->name))
            return -EINVAL;

        switch (chip_id) {
        case '1':
            /* A non-zero upper version nibble distinguishes the 7111A revision. */
            if (chip_ver[0] & 0xf0) {
                snprintf(name, CHIP_VER_SIZE, "saa711%ca", chip_id);
                v4l_info(client, "saa7111a variant found\n");
                return SAA7111A;
            }
            return SAA7111;
        case '3':
            return SAA7113;
        case '4':
            return SAA7114;
        case '5':
            return SAA7115;
        case '8':
            return SAA7118;
        default:
            /* NOTE(review): this message uses v4l2_info() with an i2c_client
               while the other messages here use v4l_info() — confirm the
               mixed usage is intentional. */
            v4l2_info(client,
                "WARNING: Philips/NXP chip unknown - Falling back to saa7111\n");
            return SAA7111;
        }
    }

    /* Check if it is a gm7113c */
    if (!memcmp(name, "0000", 4)) {
        /* Assemble a 4-bit id from the MSBs of the first four version reads. */
        chip_id = 0;
        for (i = 0; i < 4; i++) {
            chip_id = chip_id << 1;
            chip_id |= (chip_ver[i] & 0x80) ? 1 : 0;
        }

        /*
         * Note: From the datasheet, only versions 1 and 2
         * exists. However, tests on a device labeled as:
         * "GM7113C 1145" returned "10" on all 16 chip
         * version (reg 0x00) reads. So, we need to also
         * accept at least version 0. For now, let's just
         * assume that a device that returns "0000" for
         * the lower nibble is a gm7113c.
         */

        strlcpy(name, "gm7113c", CHIP_VER_SIZE);

        if (!autodetect && strcmp(name, id->name))
            return -EINVAL;

        v4l_dbg(1, debug, client,
            "It seems to be a %s chip (%*ph) @ 0x%x.\n",
            name, 16, chip_ver, client->addr << 1);

        return GM7113C;
    }

    /* Chip was not discovered.
Return its ID and don't bind */ v4l_dbg(1, debug, client, "chip %*ph @ 0x%x is unknown.\n", 16, chip_ver, client->addr << 1); return -ENODEV; } static int saa711x_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct saa711x_state *state; struct v4l2_subdev *sd; struct v4l2_ctrl_handler *hdl; struct saa7115_platform_data *pdata; int ident; char name[CHIP_VER_SIZE + 1]; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; ident = saa711x_detect_chip(client, id, name); if (ident == -EINVAL) { /* Chip exists, but doesn't match */ v4l_warn(client, "found %s while %s was expected\n", name, id->name); return -ENODEV; } if (ident < 0) return ident; strlcpy(client->name, name, sizeof(client->name)); state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &saa711x_ops); v4l_info(client, "%s found @ 0x%x (%s)\n", name, client->addr << 1, client->adapter->name); hdl = &state->hdl; v4l2_ctrl_handler_init(hdl, 6); /* add in ascending ID order */ v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_CONTRAST, 0, 127, 1, 64); v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_SATURATION, 0, 127, 1, 64); v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0); state->agc = v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_CHROMA_AGC, 0, 1, 1, 1); state->gain = v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_CHROMA_GAIN, 0, 127, 1, 40); sd->ctrl_handler = hdl; if (hdl->error) { int err = hdl->error; v4l2_ctrl_handler_free(hdl); return err; } v4l2_ctrl_auto_cluster(2, &state->agc, 0, true); state->input = -1; state->output = SAA7115_IPORT_ON; state->enable = 1; state->radio = 0; state->ident = ident; state->audclk_freq = 48000; v4l2_dbg(1, debug, sd, "writing init values\n"); /* init to 
60hz/48khz */ state->crystal_freq = SAA7115_FREQ_24_576_MHZ; pdata = client->dev.platform_data; switch (state->ident) { case SAA7111: case SAA7111A: saa711x_writeregs(sd, saa7111_init); break; case GM7113C: saa711x_writeregs(sd, gm7113c_init); break; case SAA7113: if (pdata && pdata->saa7113_force_gm7113c_init) saa711x_writeregs(sd, gm7113c_init); else saa711x_writeregs(sd, saa7113_init); break; default: state->crystal_freq = SAA7115_FREQ_32_11_MHZ; saa711x_writeregs(sd, saa7115_init_auto_input); } if (state->ident > SAA7111A && state->ident != GM7113C) saa711x_writeregs(sd, saa7115_init_misc); if (pdata) saa711x_write_platform_data(state, pdata); saa711x_set_v4lstd(sd, V4L2_STD_NTSC); v4l2_ctrl_handler_setup(hdl); v4l2_dbg(1, debug, sd, "status: (1E) 0x%02x, (1F) 0x%02x\n", saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC), saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC)); return 0; } /* ----------------------------------------------------------------------- */ static int saa711x_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(sd->ctrl_handler); return 0; } static const struct i2c_device_id saa711x_id[] = { { "saa7115_auto", 1 }, /* autodetect */ { "saa7111", 0 }, { "saa7113", 0 }, { "saa7114", 0 }, { "saa7115", 0 }, { "saa7118", 0 }, { "gm7113c", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, saa711x_id); static struct i2c_driver saa711x_driver = { .driver = { .name = "saa7115", }, .probe = saa711x_probe, .remove = saa711x_remove, .id_table = saa711x_id, }; module_i2c_driver(saa711x_driver);
gpl-2.0
priyatransbit/linux
drivers/usb/gadget/legacy/cdc2.c
346
6039
/* * cdc2.c -- CDC Composite driver, with ECM and ACM support * * Copyright (C) 2008 David Brownell * Copyright (C) 2008 Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include "u_ether.h" #include "u_serial.h" #include "u_ecm.h" #define DRIVER_DESC "CDC Composite Gadget" #define DRIVER_VERSION "King Kamehameha Day 2008" /*-------------------------------------------------------------------------*/ /* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! * Instead: allocate your own, using normal USB-IF procedures. */ /* Thanks to NetChip Technologies for donating this product ID. * It's for devices with only this composite CDC configuration. */ #define CDC_VENDOR_NUM 0x0525 /* NetChip */ #define CDC_PRODUCT_NUM 0xa4aa /* CDC Composite: ECM + ACM */ USB_GADGET_COMPOSITE_OPTIONS(); USB_ETHERNET_MODULE_PARAMETERS(); /*-------------------------------------------------------------------------*/ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = cpu_to_le16(0x0200), .bDeviceClass = USB_CLASS_COMM, .bDeviceSubClass = 0, .bDeviceProtocol = 0, /* .bMaxPacketSize0 = f(hardware) */ /* Vendor and product id can be overridden by module parameters. */ .idVendor = cpu_to_le16(CDC_VENDOR_NUM), .idProduct = cpu_to_le16(CDC_PRODUCT_NUM), /* .bcdDevice = f(hardware) */ /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ /* NO SERIAL NUMBER */ .bNumConfigurations = 1, }; static struct usb_otg_descriptor otg_descriptor = { .bLength = sizeof otg_descriptor, .bDescriptorType = USB_DT_OTG, /* REVISIT SRP-only hardware is possible, although * it would not be called "OTG" ... 
*/
    .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
};

static const struct usb_descriptor_header *otg_desc[] = {
    (struct usb_descriptor_header *) &otg_descriptor,
    NULL,
};

/* string IDs are assigned dynamically */
static struct usb_string strings_dev[] = {
    [USB_GADGET_MANUFACTURER_IDX].s = "",
    [USB_GADGET_PRODUCT_IDX].s = DRIVER_DESC,
    [USB_GADGET_SERIAL_IDX].s = "",
    {  } /* end of list */
};

static struct usb_gadget_strings stringtab_dev = {
    .language	= 0x0409,	/* en-us */
    .strings	= strings_dev,
};

static struct usb_gadget_strings *dev_strings[] = {
    &stringtab_dev,
    NULL,
};

/*-------------------------------------------------------------------------*/

/* ACM (serial) function handles, created from the "acm" function instance. */
static struct usb_function *f_acm;
static struct usb_function_instance *fi_serial;

/* ECM (ethernet) function handles, created from the "ecm" function instance. */
static struct usb_function *f_ecm;
static struct usb_function_instance *fi_ecm;

/*
 * We _always_ have both CDC ECM and CDC ACM functions.
 *
 * Configuration callback: adds ECM first, then ACM, to the single
 * configuration.  On any failure the already-acquired/added functions
 * are unwound in reverse order via the goto chain.
 */
static int cdc_do_config(struct usb_configuration *c)
{
    int	status;

    if (gadget_is_otg(c->cdev->gadget)) {
        c->descriptors = otg_desc;
        c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
    }

    f_ecm = usb_get_function(fi_ecm);
    if (IS_ERR(f_ecm)) {
        status = PTR_ERR(f_ecm);
        goto err_get_ecm;
    }

    status = usb_add_function(c, f_ecm);
    if (status)
        goto err_add_ecm;

    f_acm = usb_get_function(fi_serial);
    if (IS_ERR(f_acm)) {
        status = PTR_ERR(f_acm);
        goto err_get_acm;
    }

    status = usb_add_function(c, f_acm);
    if (status)
        goto err_add_acm;
    return 0;

err_add_acm:
    usb_put_function(f_acm);
err_get_acm:
    usb_remove_function(c, f_ecm);
err_add_ecm:
    usb_put_function(f_ecm);
err_get_ecm:
    return status;
}

static struct usb_configuration cdc_config_driver = {
    .label			= "CDC Composite (ECM + ACM)",
    .bConfigurationValue	= 1,
    /* .iConfiguration = DYNAMIC */
    .bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
};

/*-------------------------------------------------------------------------*/

/*
 * Composite bind: acquires the ECM and ACM function instances,
 * applies the module-parameter ethernet addresses, allocates string
 * IDs and registers the single configuration.
 */
static int cdc_bind(struct usb_composite_dev *cdev)
{
    struct usb_gadget	*gadget = cdev->gadget;
    struct f_ecm_opts	*ecm_opts;
    int			status;

    /* ECM needs hardware support (e.g. enough endpoints); bail out early otherwise. */
    if (!can_support_ecm(cdev->gadget)) {
dev_err(&gadget->dev, "controller '%s' not usable\n",
            gadget->name);
        return -EINVAL;
    }

    fi_ecm = usb_get_function_instance("ecm");
    if (IS_ERR(fi_ecm))
        return PTR_ERR(fi_ecm);

    ecm_opts = container_of(fi_ecm, struct f_ecm_opts, func_inst);

    /* Apply module-parameter queue multiplier and MAC addresses to the ECM link. */
    gether_set_qmult(ecm_opts->net, qmult);
    if (!gether_set_host_addr(ecm_opts->net, host_addr))
        pr_info("using host ethernet address: %s", host_addr);
    if (!gether_set_dev_addr(ecm_opts->net, dev_addr))
        pr_info("using self ethernet address: %s", dev_addr);

    fi_serial = usb_get_function_instance("acm");
    if (IS_ERR(fi_serial)) {
        status = PTR_ERR(fi_serial);
        goto fail;
    }

    /* Allocate string descriptor numbers ... note that string
     * contents can be overridden by the composite_dev glue.
     */

    status = usb_string_ids_tab(cdev, strings_dev);
    if (status < 0)
        goto fail1;
    device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
    device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;

    /* register our configuration */
    status = usb_add_config(cdev, &cdc_config_driver, cdc_do_config);
    if (status < 0)
        goto fail1;

    usb_composite_overwrite_options(cdev, &coverwrite);
    dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
            DRIVER_DESC);

    return 0;

    /* Unwind function instances acquired above, in reverse order. */
fail1:
    usb_put_function_instance(fi_serial);
fail:
    usb_put_function_instance(fi_ecm);
    return status;
}

/*
 * Composite unbind: release functions and their instances.
 * f_ecm/fi_ecm may hold error values if config setup failed, hence
 * the IS_ERR_OR_NULL guards.
 */
static int cdc_unbind(struct usb_composite_dev *cdev)
{
    usb_put_function(f_acm);
    usb_put_function_instance(fi_serial);
    if (!IS_ERR_OR_NULL(f_ecm))
        usb_put_function(f_ecm);
    if (!IS_ERR_OR_NULL(fi_ecm))
        usb_put_function_instance(fi_ecm);
    return 0;
}

static struct usb_composite_driver cdc_driver = {
    .name		= "g_cdc",
    .dev		= &device_desc,
    .strings	= dev_strings,
    .max_speed	= USB_SPEED_HIGH,
    .bind		= cdc_bind,
    .unbind		= cdc_unbind,
};

module_usb_composite_driver(cdc_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
gpl-2.0
gshwang/kernel-3.1.4
drivers/net/wireless/ath/ath9k/hw.c
346
73033
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/io.h> #include <linux/slab.h> #include <asm/unaligned.h> #include "hw.h" #include "hw-ops.h" #include "rc.h" #include "ar9003_mac.h" static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); static int __init ath9k_init(void) { return 0; } module_init(ath9k_init); static void __exit ath9k_exit(void) { return; } module_exit(ath9k_exit); /* Private hardware callbacks */ static void ath9k_hw_init_cal_settings(struct ath_hw *ah) { ath9k_hw_private_ops(ah)->init_cal_settings(ah); } static void ath9k_hw_init_mode_regs(struct ath_hw *ah) { ath9k_hw_private_ops(ah)->init_mode_regs(ah); } static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah, struct ath9k_channel *chan) { return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan); } static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah) { if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs) return; ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah); } static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah) { /* You will not 
have this callback if using the old ANI */ if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs) return; ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah); } /********************/ /* Helper Functions */ /********************/ static void ath9k_hw_set_clockrate(struct ath_hw *ah) { struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; struct ath_common *common = ath9k_hw_common(ah); unsigned int clockrate; /* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */ if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) clockrate = 117; else if (!ah->curchan) /* should really check for CCK instead */ clockrate = ATH9K_CLOCK_RATE_CCK; else if (conf->channel->band == IEEE80211_BAND_2GHZ) clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM; else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK) clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM; else clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM; if (conf_is_ht40(conf)) clockrate *= 2; if (ah->curchan) { if (IS_CHAN_HALF_RATE(ah->curchan)) clockrate /= 2; if (IS_CHAN_QUARTER_RATE(ah->curchan)) clockrate /= 4; } common->clockrate = clockrate; } static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs) { struct ath_common *common = ath9k_hw_common(ah); return usecs * common->clockrate; } bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout) { int i; BUG_ON(timeout < AH_TIME_QUANTUM); for (i = 0; i < (timeout / AH_TIME_QUANTUM); i++) { if ((REG_READ(ah, reg) & mask) == val) return true; udelay(AH_TIME_QUANTUM); } ath_dbg(ath9k_hw_common(ah), ATH_DBG_ANY, "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n", timeout, reg, REG_READ(ah, reg), mask, val); return false; } EXPORT_SYMBOL(ath9k_hw_wait); void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array, int column, unsigned int *writecnt) { int r; ENABLE_REGWRITE_BUFFER(ah); for (r = 0; r < array->ia_rows; r++) { REG_WRITE(ah, INI_RA(array, r, 0), INI_RA(array, r, column)); DO_DELAY(*writecnt); } REGWRITE_BUFFER_FLUSH(ah); } u32 
ath9k_hw_reverse_bits(u32 val, u32 n) { u32 retval; int i; for (i = 0, retval = 0; i < n; i++) { retval = (retval << 1) | (val & 1); val >>= 1; } return retval; } u16 ath9k_hw_computetxtime(struct ath_hw *ah, u8 phy, int kbps, u32 frameLen, u16 rateix, bool shortPreamble) { u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime; if (kbps == 0) return 0; switch (phy) { case WLAN_RC_PHY_CCK: phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS; if (shortPreamble) phyTime >>= 1; numBits = frameLen << 3; txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps); break; case WLAN_RC_PHY_OFDM: if (ah->curchan && IS_CHAN_QUARTER_RATE(ah->curchan)) { bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000; numBits = OFDM_PLCP_BITS + (frameLen << 3); numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); txTime = OFDM_SIFS_TIME_QUARTER + OFDM_PREAMBLE_TIME_QUARTER + (numSymbols * OFDM_SYMBOL_TIME_QUARTER); } else if (ah->curchan && IS_CHAN_HALF_RATE(ah->curchan)) { bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000; numBits = OFDM_PLCP_BITS + (frameLen << 3); numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); txTime = OFDM_SIFS_TIME_HALF + OFDM_PREAMBLE_TIME_HALF + (numSymbols * OFDM_SYMBOL_TIME_HALF); } else { bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000; numBits = OFDM_PLCP_BITS + (frameLen << 3); numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME + (numSymbols * OFDM_SYMBOL_TIME); } break; default: ath_err(ath9k_hw_common(ah), "Unknown phy %u (rate ix %u)\n", phy, rateix); txTime = 0; break; } return txTime; } EXPORT_SYMBOL(ath9k_hw_computetxtime); void ath9k_hw_get_channel_centers(struct ath_hw *ah, struct ath9k_channel *chan, struct chan_centers *centers) { int8_t extoff; if (!IS_CHAN_HT40(chan)) { centers->ctl_center = centers->ext_center = centers->synth_center = chan->channel; return; } if ((chan->chanmode == CHANNEL_A_HT40PLUS) || (chan->chanmode == CHANNEL_G_HT40PLUS)) { centers->synth_center = chan->channel + 
HT40_CHANNEL_CENTER_SHIFT; extoff = 1; } else { centers->synth_center = chan->channel - HT40_CHANNEL_CENTER_SHIFT; extoff = -1; } centers->ctl_center = centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT); /* 25 MHz spacing is supported by hw but not on upper layers */ centers->ext_center = centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT); } /******************/ /* Chip Revisions */ /******************/ static void ath9k_hw_read_revisions(struct ath_hw *ah) { u32 val; switch (ah->hw_version.devid) { case AR5416_AR9100_DEVID: ah->hw_version.macVersion = AR_SREV_VERSION_9100; break; case AR9300_DEVID_AR9330: ah->hw_version.macVersion = AR_SREV_VERSION_9330; if (ah->get_mac_revision) { ah->hw_version.macRev = ah->get_mac_revision(); } else { val = REG_READ(ah, AR_SREV); ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); } return; case AR9300_DEVID_AR9340: ah->hw_version.macVersion = AR_SREV_VERSION_9340; val = REG_READ(ah, AR_SREV); ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); return; } val = REG_READ(ah, AR_SREV) & AR_SREV_ID; if (val == 0xFF) { val = REG_READ(ah, AR_SREV); ah->hw_version.macVersion = (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); ah->is_pciexpress = (val & AR_SREV_TYPE2_HOST_MODE) ? 
0 : 1; } else { if (!AR_SREV_9100(ah)) ah->hw_version.macVersion = MS(val, AR_SREV_VERSION); ah->hw_version.macRev = val & AR_SREV_REVISION; if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE) ah->is_pciexpress = true; } } /************************************/ /* HW Attach, Detach, Init Routines */ /************************************/ static void ath9k_hw_disablepcie(struct ath_hw *ah) { if (!AR_SREV_5416(ah)) return; REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029); REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824); REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579); REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000); REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007); REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); } static void ath9k_hw_aspm_init(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); if (common->bus_ops->aspm_init) common->bus_ops->aspm_init(common); } /* This should work for all families including legacy */ static bool ath9k_hw_chip_test(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); u32 regAddr[2] = { AR_STA_ID0 }; u32 regHold[2]; static const u32 patternData[4] = { 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999 }; int i, j, loop_max; if (!AR_SREV_9300_20_OR_LATER(ah)) { loop_max = 2; regAddr[1] = AR_PHY_BASE + (8 << 2); } else loop_max = 1; for (i = 0; i < loop_max; i++) { u32 addr = regAddr[i]; u32 wrData, rdData; regHold[i] = REG_READ(ah, addr); for (j = 0; j < 0x100; j++) { wrData = (j << 16) | j; REG_WRITE(ah, addr, wrData); rdData = REG_READ(ah, addr); if (rdData != wrData) { ath_err(common, "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", addr, wrData, rdData); return false; } } for (j = 0; j < 4; j++) { wrData = patternData[j]; REG_WRITE(ah, addr, wrData); rdData = REG_READ(ah, addr); if (wrData != rdData) { ath_err(common, "address test failed addr: 
0x%08x - wr:0x%08x != rd:0x%08x\n", addr, wrData, rdData); return false; } } REG_WRITE(ah, regAddr[i], regHold[i]); } udelay(100); return true; } static void ath9k_hw_init_config(struct ath_hw *ah) { int i; ah->config.dma_beacon_response_time = 2; ah->config.sw_beacon_response_time = 10; ah->config.additional_swba_backoff = 0; ah->config.ack_6mb = 0x0; ah->config.cwm_ignore_extcca = 0; ah->config.pcie_clock_req = 0; ah->config.pcie_waen = 0; ah->config.analog_shiftreg = 1; ah->config.enable_ani = true; for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { ah->config.spurchans[i][0] = AR_NO_SPUR; ah->config.spurchans[i][1] = AR_NO_SPUR; } /* PAPRD needs some more work to be enabled */ ah->config.paprd_disable = 1; ah->config.rx_intr_mitigation = true; ah->config.pcieSerDesWrite = true; /* * We need this for PCI devices only (Cardbus, PCI, miniPCI) * _and_ if on non-uniprocessor systems (Multiprocessor/HT). * This means we use it for all AR5416 devices, and the few * minor PCI AR9280 devices out there. * * Serialization is required because these devices do not handle * well the case of two concurrent reads/writes due to the latency * involved. During one read/write another read/write can be issued * on another CPU while the previous read/write may still be working * on our hardware, if we hit this case the hardware poops in a loop. * We prevent this by serializing reads and writes. * * This issue is not present on PCI-Express devices or pre-AR5416 * devices (legacy, 802.11abg). 
*/ if (num_possible_cpus() > 1) ah->config.serialize_regmode = SER_REG_MODE_AUTO; } static void ath9k_hw_init_defaults(struct ath_hw *ah) { struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); regulatory->country_code = CTRY_DEFAULT; regulatory->power_limit = MAX_RATE_POWER; regulatory->tp_scale = ATH9K_TP_SCALE_MAX; ah->hw_version.magic = AR5416_MAGIC; ah->hw_version.subvendorid = 0; ah->atim_window = 0; ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE | AR_STA_ID1_MCAST_KSRCH; if (AR_SREV_9100(ah)) ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX; ah->enable_32kHz_clock = DONT_USE_32KHZ; ah->slottime = 20; ah->globaltxtimeout = (u32) -1; ah->power_mode = ATH9K_PM_UNDEFINED; } static int ath9k_hw_init_macaddr(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); u32 sum; int i; u16 eeval; static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW }; sum = 0; for (i = 0; i < 3; i++) { eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]); sum += eeval; common->macaddr[2 * i] = eeval >> 8; common->macaddr[2 * i + 1] = eeval & 0xff; } if (sum == 0 || sum == 0xffff * 3) return -EADDRNOTAVAIL; return 0; } static int ath9k_hw_post_init(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); int ecode; if (common->bus_ops->ath_bus_type != ATH_USB) { if (!ath9k_hw_chip_test(ah)) return -ENODEV; } if (!AR_SREV_9300_20_OR_LATER(ah)) { ecode = ar9002_hw_rf_claim(ah); if (ecode != 0) return ecode; } ecode = ath9k_hw_eeprom_init(ah); if (ecode != 0) return ecode; ath_dbg(ath9k_hw_common(ah), ATH_DBG_CONFIG, "Eeprom VER: %d, REV: %d\n", ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah)); ecode = ath9k_hw_rf_alloc_ext_banks(ah); if (ecode) { ath_err(ath9k_hw_common(ah), "Failed allocating banks for external radio\n"); ath9k_hw_rf_free_ext_banks(ah); return ecode; } if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) { ath9k_hw_ani_setup(ah); ath9k_hw_ani_init(ah); } return 0; } static void ath9k_hw_attach_ops(struct ath_hw *ah) { 
if (AR_SREV_9300_20_OR_LATER(ah)) ar9003_hw_attach_ops(ah); else ar9002_hw_attach_ops(ah); } /* Called for all hardware families */ static int __ath9k_hw_init(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); int r = 0; ath9k_hw_read_revisions(ah); /* * Read back AR_WA into a permanent copy and set bits 14 and 17. * We need to do this to avoid RMW of this register. We cannot * read the reg when chip is asleep. */ ah->WARegVal = REG_READ(ah, AR_WA); ah->WARegVal |= (AR_WA_D3_L1_DISABLE | AR_WA_ASPM_TIMER_BASED_DISABLE); if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { ath_err(common, "Couldn't reset chip\n"); return -EIO; } ath9k_hw_init_defaults(ah); ath9k_hw_init_config(ah); ath9k_hw_attach_ops(ah); if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { ath_err(common, "Couldn't wakeup chip\n"); return -EIO; } if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) { if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI || ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) && !ah->is_pciexpress)) { ah->config.serialize_regmode = SER_REG_MODE_ON; } else { ah->config.serialize_regmode = SER_REG_MODE_OFF; } } ath_dbg(common, ATH_DBG_RESET, "serialize_regmode is %d\n", ah->config.serialize_regmode); if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1; else ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD; switch (ah->hw_version.macVersion) { case AR_SREV_VERSION_5416_PCI: case AR_SREV_VERSION_5416_PCIE: case AR_SREV_VERSION_9160: case AR_SREV_VERSION_9100: case AR_SREV_VERSION_9280: case AR_SREV_VERSION_9285: case AR_SREV_VERSION_9287: case AR_SREV_VERSION_9271: case AR_SREV_VERSION_9300: case AR_SREV_VERSION_9330: case AR_SREV_VERSION_9485: case AR_SREV_VERSION_9340: break; default: ath_err(common, "Mac Chip Rev 0x%02x.%x is not supported by this driver\n", ah->hw_version.macVersion, ah->hw_version.macRev); return -EOPNOTSUPP; } if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) || 
AR_SREV_9330(ah)) ah->is_pciexpress = false; ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID); ath9k_hw_init_cal_settings(ah); ah->ani_function = ATH9K_ANI_ALL; if (AR_SREV_9280_20_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah)) ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; if (!AR_SREV_9300_20_OR_LATER(ah)) ah->ani_function &= ~ATH9K_ANI_MRC_CCK; ath9k_hw_init_mode_regs(ah); if (ah->is_pciexpress) ath9k_hw_aspm_init(ah); else ath9k_hw_disablepcie(ah); if (!AR_SREV_9300_20_OR_LATER(ah)) ar9002_hw_cck_chan14_spread(ah); r = ath9k_hw_post_init(ah); if (r) return r; ath9k_hw_init_mode_gain_regs(ah); r = ath9k_hw_fill_cap_info(ah); if (r) return r; r = ath9k_hw_init_macaddr(ah); if (r) { ath_err(common, "Failed to initialize MAC address\n"); return r; } if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S); else ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); if (AR_SREV_9330(ah)) ah->bb_watchdog_timeout_ms = 85; else ah->bb_watchdog_timeout_ms = 25; common->state = ATH_HW_INITIALIZED; return 0; } int ath9k_hw_init(struct ath_hw *ah) { int ret; struct ath_common *common = ath9k_hw_common(ah); /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */ switch (ah->hw_version.devid) { case AR5416_DEVID_PCI: case AR5416_DEVID_PCIE: case AR5416_AR9100_DEVID: case AR9160_DEVID_PCI: case AR9280_DEVID_PCI: case AR9280_DEVID_PCIE: case AR9285_DEVID_PCIE: case AR9287_DEVID_PCI: case AR9287_DEVID_PCIE: case AR2427_DEVID_PCIE: case AR9300_DEVID_PCIE: case AR9300_DEVID_AR9485_PCIE: case AR9300_DEVID_AR9330: case AR9300_DEVID_AR9340: break; default: if (common->bus_ops->ath_bus_type == ATH_USB) break; ath_err(common, "Hardware device ID 0x%04x not supported\n", ah->hw_version.devid); return -EOPNOTSUPP; } ret = __ath9k_hw_init(ah); if (ret) { ath_err(common, "Unable to initialize hardware; initialization status: %d\n", ret); return ret; } return 0; } EXPORT_SYMBOL(ath9k_hw_init); static void ath9k_hw_init_qos(struct 
ath_hw *ah) { ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa); REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210); REG_WRITE(ah, AR_QOS_NO_ACK, SM(2, AR_QOS_NO_ACK_TWO_BIT) | SM(5, AR_QOS_NO_ACK_BIT_OFF) | SM(0, AR_QOS_NO_ACK_BYTE_OFF)); REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL); REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF); REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF); REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF); REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); REGWRITE_BUFFER_FLUSH(ah); } u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah) { REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK); udelay(100); REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK); while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) udelay(100); return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3; } EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc); static void ath9k_hw_init_pll(struct ath_hw *ah, struct ath9k_channel *chan) { u32 pll; if (AR_SREV_9485(ah)) { /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_BB_DPLL2_PLL_PWD, 0x1); REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KD, 0x40); REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KI, 0x4); REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1, AR_CH0_BB_DPLL1_REFDIV, 0x5); REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1, AR_CH0_BB_DPLL1_NINI, 0x58); REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1, AR_CH0_BB_DPLL1_NFRAC, 0x0); REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_BB_DPLL2_OUTDIV, 0x1); REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_BB_DPLL2_LOCAL_PLL, 0x1); REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_BB_DPLL2_EN_NEGTRIG, 0x1); /* program BB PLL phase_shift to 0x6 */ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3, AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x6); REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_BB_DPLL2_PLL_PWD, 0x0); udelay(1000); } else if (AR_SREV_9330(ah)) { u32 ddr_dpll2, pll_control2, kd; if (ah->is_clk_25mhz) { ddr_dpll2 = 0x18e82f01; pll_control2 = 0xe04a3d; kd = 0x1d; } else { ddr_dpll2 = 0x19e82f01; pll_control2 = 0x886666; kd = 0x3d; } /* program DDR PLL ki and kd value */ REG_WRITE(ah, 
AR_CH0_DDR_DPLL2, ddr_dpll2); /* program DDR PLL phase_shift */ REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3, AR_CH0_DPLL3_PHASE_SHIFT, 0x1); REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c); udelay(1000); /* program refdiv, nint, frac to RTC register */ REG_WRITE(ah, AR_RTC_PLL_CONTROL2, pll_control2); /* program BB PLL kd and ki value */ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KD, kd); REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KI, 0x06); /* program BB PLL phase_shift */ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3, AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1); } else if (AR_SREV_9340(ah)) { u32 regval, pll2_divint, pll2_divfrac, refdiv; REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c); udelay(1000); REG_SET_BIT(ah, AR_PHY_PLL_MODE, 0x1 << 16); udelay(100); if (ah->is_clk_25mhz) { pll2_divint = 0x54; pll2_divfrac = 0x1eb85; refdiv = 3; } else { pll2_divint = 88; pll2_divfrac = 0; refdiv = 5; } regval = REG_READ(ah, AR_PHY_PLL_MODE); regval |= (0x1 << 16); REG_WRITE(ah, AR_PHY_PLL_MODE, regval); udelay(100); REG_WRITE(ah, AR_PHY_PLL_CONTROL, (refdiv << 27) | (pll2_divint << 18) | pll2_divfrac); udelay(100); regval = REG_READ(ah, AR_PHY_PLL_MODE); regval = (regval & 0x80071fff) | (0x1 << 30) | (0x1 << 13) | (0x4 << 26) | (0x18 << 19); REG_WRITE(ah, AR_PHY_PLL_MODE, regval); REG_WRITE(ah, AR_PHY_PLL_MODE, REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff); udelay(1000); } pll = ath9k_hw_compute_pll_control(ah, chan); REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) udelay(1000); /* Switch the core clock for ar9271 to 117Mhz */ if (AR_SREV_9271(ah)) { udelay(500); REG_WRITE(ah, 0x50040, 0x304); } udelay(RTC_PLL_SETTLE_DELAY); REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); if (AR_SREV_9340(ah)) { if (ah->is_clk_25mhz) { REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); } else { REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); REG_WRITE(ah, AR_SLP32_MODE, 
0x0010f400); REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); } udelay(100); } } static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, enum nl80211_iftype opmode) { u32 sync_default = AR_INTR_SYNC_DEFAULT; u32 imr_reg = AR_IMR_TXERR | AR_IMR_TXURN | AR_IMR_RXERR | AR_IMR_RXORN | AR_IMR_BCNMISC; if (AR_SREV_9340(ah)) sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; if (AR_SREV_9300_20_OR_LATER(ah)) { imr_reg |= AR_IMR_RXOK_HP; if (ah->config.rx_intr_mitigation) imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; else imr_reg |= AR_IMR_RXOK_LP; } else { if (ah->config.rx_intr_mitigation) imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; else imr_reg |= AR_IMR_RXOK; } if (ah->config.tx_intr_mitigation) imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR; else imr_reg |= AR_IMR_TXOK; if (opmode == NL80211_IFTYPE_AP) imr_reg |= AR_IMR_MIB; ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_IMR, imr_reg); ah->imrs2_reg |= AR_IMR_S2_GTT; REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); if (!AR_SREV_9100(ah)) { REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF); REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default); REG_WRITE(ah, AR_INTR_SYNC_MASK, 0); } REGWRITE_BUFFER_FLUSH(ah); if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0); REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0); REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0); REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0); } } static void ath9k_hw_set_sifs_time(struct ath_hw *ah, u32 us) { u32 val = ath9k_hw_mac_to_clks(ah, us - 2); val = min(val, (u32) 0xFFFF); REG_WRITE(ah, AR_D_GBL_IFS_SIFS, val); } static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us) { u32 val = ath9k_hw_mac_to_clks(ah, us); val = min(val, (u32) 0xFFFF); REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val); } static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) { u32 val = ath9k_hw_mac_to_clks(ah, us); val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK)); REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val); } static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) { u32 val = 
ath9k_hw_mac_to_clks(ah, us); val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS)); REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val); } static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) { if (tu > 0xFFFF) { ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT, "bad global tx timeout %u\n", tu); ah->globaltxtimeout = (u32) -1; return false; } else { REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu); ah->globaltxtimeout = tu; return true; } } void ath9k_hw_init_global_settings(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &common->hw->conf; const struct ath9k_channel *chan = ah->curchan; int acktimeout; int slottime; int sifstime; int rx_lat = 0, tx_lat = 0, eifs = 0; u32 reg; ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n", ah->misc_mode); if (!chan) return; if (ah->misc_mode != 0) REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode); rx_lat = 37; tx_lat = 54; if (IS_CHAN_HALF_RATE(chan)) { eifs = 175; rx_lat *= 2; tx_lat *= 2; if (IS_CHAN_A_FAST_CLOCK(ah, chan)) tx_lat += 11; slottime = 13; sifstime = 32; } else if (IS_CHAN_QUARTER_RATE(chan)) { eifs = 340; rx_lat *= 4; tx_lat *= 4; if (IS_CHAN_A_FAST_CLOCK(ah, chan)) tx_lat += 22; slottime = 21; sifstime = 64; } else { eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS); reg = REG_READ(ah, AR_USEC); rx_lat = MS(reg, AR_USEC_RX_LAT); tx_lat = MS(reg, AR_USEC_TX_LAT); slottime = ah->slottime; if (IS_CHAN_5GHZ(chan)) sifstime = 16; else sifstime = 10; } /* As defined by IEEE 802.11-2007 17.3.8.6 */ acktimeout = slottime + sifstime + 3 * ah->coverage_class; /* * Workaround for early ACK timeouts, add an offset to match the * initval's 64us ack timeout value. * This was initially only meant to work around an issue with delayed * BA frames in some implementations, but it has been found to fix ACK * timeout issues in other cases as well. 
*/ if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) acktimeout += 64 - sifstime - ah->slottime; ath9k_hw_set_sifs_time(ah, sifstime); ath9k_hw_setslottime(ah, slottime); ath9k_hw_set_ack_timeout(ah, acktimeout); ath9k_hw_set_cts_timeout(ah, acktimeout); if (ah->globaltxtimeout != (u32) -1) ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout); REG_WRITE(ah, AR_D_GBL_IFS_EIFS, ath9k_hw_mac_to_clks(ah, eifs)); REG_RMW(ah, AR_USEC, (common->clockrate - 1) | SM(rx_lat, AR_USEC_RX_LAT) | SM(tx_lat, AR_USEC_TX_LAT), AR_USEC_TX_LAT | AR_USEC_RX_LAT | AR_USEC_USEC); } EXPORT_SYMBOL(ath9k_hw_init_global_settings); void ath9k_hw_deinit(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); if (common->state < ATH_HW_INITIALIZED) goto free_hw; ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); free_hw: ath9k_hw_rf_free_ext_banks(ah); } EXPORT_SYMBOL(ath9k_hw_deinit); /*******/ /* INI */ /*******/ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan) { u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band); if (IS_CHAN_B(chan)) ctl |= CTL_11B; else if (IS_CHAN_G(chan)) ctl |= CTL_11G; else ctl |= CTL_11A; return ctl; } /****************************************/ /* Reset and Channel Switching Routines */ /****************************************/ static inline void ath9k_hw_set_dma(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); ENABLE_REGWRITE_BUFFER(ah); /* * set AHB_MODE not to do cacheline prefetches */ if (!AR_SREV_9300_20_OR_LATER(ah)) REG_SET_BIT(ah, AR_AHB_MODE, AR_AHB_PREFETCH_RD_EN); /* * let mac dma reads be in 128 byte chunks */ REG_RMW(ah, AR_TXCFG, AR_TXCFG_DMASZ_128B, AR_TXCFG_DMASZ_MASK); REGWRITE_BUFFER_FLUSH(ah); /* * Restore TX Trigger Level to its pre-reset value. * The initial value depends on whether aggregation is enabled, and is * adjusted whenever underruns are detected. 
*/ if (!AR_SREV_9300_20_OR_LATER(ah)) REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level); ENABLE_REGWRITE_BUFFER(ah); /* * let mac dma writes be in 128 byte chunks */ REG_RMW(ah, AR_RXCFG, AR_RXCFG_DMASZ_128B, AR_RXCFG_DMASZ_MASK); /* * Setup receive FIFO threshold to hold off TX activities */ REG_WRITE(ah, AR_RXFIFO_CFG, 0x200); if (AR_SREV_9300_20_OR_LATER(ah)) { REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1); REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1); ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - ah->caps.rx_status_len); } /* * reduce the number of usable entries in PCU TXBUF to avoid * wrap around issues. */ if (AR_SREV_9285(ah)) { /* For AR9285 the number of Fifos are reduced to half. * So set the usable tx buf size also to half to * avoid data/delimiter underruns */ REG_WRITE(ah, AR_PCU_TXBUF_CTRL, AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE); } else if (!AR_SREV_9271(ah)) { REG_WRITE(ah, AR_PCU_TXBUF_CTRL, AR_PCU_TXBUF_CTRL_USABLE_SIZE); } REGWRITE_BUFFER_FLUSH(ah); if (AR_SREV_9300_20_OR_LATER(ah)) ath9k_hw_reset_txstatus_ring(ah); } static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) { u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC; u32 set = AR_STA_ID1_KSRCH_MODE; switch (opmode) { case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: set |= AR_STA_ID1_ADHOC; REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); break; case NL80211_IFTYPE_AP: set |= AR_STA_ID1_STA_AP; /* fall through */ case NL80211_IFTYPE_STATION: REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); break; default: if (!ah->is_monitoring) set = 0; break; } REG_RMW(ah, AR_STA_ID1, set, mask); } void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled, u32 *coef_mantissa, u32 *coef_exponent) { u32 coef_exp, coef_man; for (coef_exp = 31; coef_exp > 0; coef_exp--) if ((coef_scaled >> coef_exp) & 0x1) break; coef_exp = 14 - (coef_exp - COEF_SCALE_S); coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1)); *coef_mantissa = 
coef_man >> (COEF_SCALE_S - coef_exp); *coef_exponent = coef_exp - 16; } static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) { u32 rst_flags; u32 tmpReg; if (AR_SREV_9100(ah)) { REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK, AR_RTC_DERIVED_CLK_PERIOD, 1); (void)REG_READ(ah, AR_RTC_DERIVED_CLK); } ENABLE_REGWRITE_BUFFER(ah); if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_WA, ah->WARegVal); udelay(10); } REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); if (AR_SREV_9100(ah)) { rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD | AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET; } else { tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE); if (tmpReg & (AR_INTR_SYNC_LOCAL_TIMEOUT | AR_INTR_SYNC_RADM_CPL_TIMEOUT)) { u32 val; REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); val = AR_RC_HOSTIF; if (!AR_SREV_9300_20_OR_LATER(ah)) val |= AR_RC_AHB; REG_WRITE(ah, AR_RC, val); } else if (!AR_SREV_9300_20_OR_LATER(ah)) REG_WRITE(ah, AR_RC, AR_RC_AHB); rst_flags = AR_RTC_RC_MAC_WARM; if (type == ATH9K_RESET_COLD) rst_flags |= AR_RTC_RC_MAC_COLD; } if (AR_SREV_9330(ah)) { int npend = 0; int i; /* AR9330 WAR: * call external reset function to reset WMAC if: * - doing a cold reset * - we have pending frames in the TX queues */ for (i = 0; i < AR_NUM_QCU; i++) { npend = ath9k_hw_numtxpending(ah, i); if (npend) break; } if (ah->external_reset && (npend || type == ATH9K_RESET_COLD)) { int reset_err = 0; ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "reset MAC via external reset\n"); reset_err = ah->external_reset(); if (reset_err) { ath_err(ath9k_hw_common(ah), "External reset failed, err=%d\n", reset_err); return false; } REG_WRITE(ah, AR_RTC_RESET, 1); } } REG_WRITE(ah, AR_RTC_RC, rst_flags); REGWRITE_BUFFER_FLUSH(ah); udelay(50); REG_WRITE(ah, AR_RTC_RC, 0); if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) { ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "RTC stuck in MAC reset\n"); return false; } if (!AR_SREV_9100(ah)) REG_WRITE(ah, AR_RC, 0); if 
(AR_SREV_9100(ah)) udelay(50); return true; } static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) { ENABLE_REGWRITE_BUFFER(ah); if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_WA, ah->WARegVal); udelay(10); } REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah)) REG_WRITE(ah, AR_RC, AR_RC_AHB); REG_WRITE(ah, AR_RTC_RESET, 0); REGWRITE_BUFFER_FLUSH(ah); if (!AR_SREV_9300_20_OR_LATER(ah)) udelay(2); if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah)) REG_WRITE(ah, AR_RC, 0); REG_WRITE(ah, AR_RTC_RESET, 1); if (!ath9k_hw_wait(ah, AR_RTC_STATUS, AR_RTC_STATUS_M, AR_RTC_STATUS_ON, AH_WAIT_TIMEOUT)) { ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "RTC not waking up\n"); return false; } return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); } static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) { if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_WA, ah->WARegVal); udelay(10); } REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); switch (type) { case ATH9K_RESET_POWER_ON: return ath9k_hw_set_reset_power_on(ah); case ATH9K_RESET_WARM: case ATH9K_RESET_COLD: return ath9k_hw_set_reset(ah, type); default: return false; } } static bool ath9k_hw_chip_reset(struct ath_hw *ah, struct ath9k_channel *chan) { if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) { if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) return false; } else if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) return false; if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) return false; ah->chip_fullsleep = false; ath9k_hw_init_pll(ah, chan); ath9k_hw_set_rfmode(ah, chan); return true; } static bool ath9k_hw_channel_change(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_channel *channel = chan->chan; u32 qnum; int r; for (qnum = 0; 
qnum < AR_NUM_QCU; qnum++) { if (ath9k_hw_numtxpending(ah, qnum)) { ath_dbg(common, ATH_DBG_QUEUE, "Transmit frames pending on queue %d\n", qnum); return false; } } if (!ath9k_hw_rfbus_req(ah)) { ath_err(common, "Could not kill baseband RX\n"); return false; } ath9k_hw_set_channel_regs(ah, chan); r = ath9k_hw_rf_set_freq(ah, chan); if (r) { ath_err(common, "Failed to set channel\n"); return false; } ath9k_hw_set_clockrate(ah); ah->eep_ops->set_txpower(ah, chan, ath9k_regd_get_ctl(regulatory, chan), channel->max_antenna_gain * 2, channel->max_power * 2, min((u32) MAX_RATE_POWER, (u32) regulatory->power_limit), false); ath9k_hw_rfbus_done(ah); if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) ath9k_hw_set_delta_slope(ah, chan); ath9k_hw_spur_mitigate_freq(ah, chan); return true; } static void ath9k_hw_apply_gpio_override(struct ath_hw *ah) { u32 gpio_mask = ah->gpio_mask; int i; for (i = 0; gpio_mask; i++, gpio_mask >>= 1) { if (!(gpio_mask & 1)) continue; ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i))); } } bool ath9k_hw_check_alive(struct ath_hw *ah) { int count = 50; u32 reg; if (AR_SREV_9285_12_OR_LATER(ah)) return true; do { reg = REG_READ(ah, AR_OBS_BUS_1); if ((reg & 0x7E7FFFEF) == 0x00702400) continue; switch (reg & 0x7E000B00) { case 0x1E000000: case 0x52000B00: case 0x18000B00: continue; default: return true; } } while (count-- > 0); return false; } EXPORT_SYMBOL(ath9k_hw_check_alive); int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, struct ath9k_hw_cal_data *caldata, bool bChannelChange) { struct ath_common *common = ath9k_hw_common(ah); u32 saveLedState; struct ath9k_channel *curchan = ah->curchan; u32 saveDefAntenna; u32 macStaId1; u64 tsf = 0; int i, r; ah->txchainmask = common->tx_chainmask; ah->rxchainmask = common->rx_chainmask; if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) return -EIO; if (curchan && !ah->chip_fullsleep) ath9k_hw_getnf(ah, curchan); ah->caldata = caldata; if 
(caldata &&
	    (chan->channel != caldata->channel ||
	     (chan->channelFlags & ~CHANNEL_CW_INT) !=
	     (caldata->channelFlags & ~CHANNEL_CW_INT))) {
		/* Operating channel changed, reset channel calibration data */
		memset(caldata, 0, sizeof(*caldata));
		ath9k_init_nfcal_hist_buffer(ah, chan);
	}

	/*
	 * Try the fast channel-change path: only when awake, an old
	 * channel exists, the band/mode flags match, and the hardware
	 * permits it (AR9280 PCI parts are excluded, AR7010 is not).
	 */
	if (bChannelChange &&
	    (ah->chip_fullsleep != true) &&
	    (ah->curchan != NULL) &&
	    (chan->channel != ah->curchan->channel) &&
	    ((chan->channelFlags & CHANNEL_ALL) ==
	     (ah->curchan->channelFlags & CHANNEL_ALL)) &&
	    (!AR_SREV_9280(ah) || AR_DEVID_7010(ah))) {

		if (ath9k_hw_channel_change(ah, chan)) {
			ath9k_hw_loadnf(ah, ah->curchan);
			ath9k_hw_start_nfcal(ah, true);
			if (AR_SREV_9271(ah))
				ar9002_hw_load_ani_reg(ah, chan);
			return 0;
		}
	}

	saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
	if (saveDefAntenna == 0)
		saveDefAntenna = 1;

	macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;

	/* For chips on which RTC reset is done, save TSF before it gets cleared */
	if (AR_SREV_9100(ah) ||
	    (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
		tsf = ath9k_hw_gettsf64(ah);

	saveLedState = REG_READ(ah, AR_CFG_LED) &
		(AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
		 AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);

	ath9k_hw_mark_phy_inactive(ah);

	ah->paprd_table_write_done = false;

	/* Only required on the first reset */
	if (AR_SREV_9271(ah) && ah->htc_reset_init) {
		REG_WRITE(ah,
			  AR9271_RESET_POWER_DOWN_CONTROL,
			  AR9271_RADIO_RF_RST);
		udelay(50);
	}

	if (!ath9k_hw_chip_reset(ah, chan)) {
		ath_err(common, "Chip reset failed\n");
		return -EINVAL;
	}

	/* Only required on the first reset */
	if (AR_SREV_9271(ah) && ah->htc_reset_init) {
		ah->htc_reset_init = false;
		REG_WRITE(ah,
			  AR9271_RESET_POWER_DOWN_CONTROL,
			  AR9271_GATE_MAC_CTL);
		udelay(50);
	}

	/* Restore TSF */
	if (tsf)
		ath9k_hw_settsf64(ah, tsf);

	if (AR_SREV_9280_20_OR_LATER(ah))
		REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);

	if (!AR_SREV_9300_20_OR_LATER(ah))
		ar9002_hw_enable_async_fifo(ah);

	r = ath9k_hw_process_ini(ah, chan);
	if (r)
		return r;

	/*
	 * Some AR91xx SoC devices frequently fail to accept TSF writes
	 * right after the chip reset. When that happens, write a new
	 * value after the initvals have been applied, with an offset
	 * based on measured time difference
	 */
	if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
		tsf += 1500;
		ath9k_hw_settsf64(ah, tsf);
	}

	/* Setup MFP options for CCMP */
	if (AR_SREV_9280_20_OR_LATER(ah)) {
		/* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
		 * frames when constructing CCMP AAD. */
		REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
			      0xc7ff);
		ah->sw_mgmt_crypto = false;
	} else if (AR_SREV_9160_10_OR_LATER(ah)) {
		/* Disable hardware crypto for management frames */
		REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
			    AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
		REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
			    AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
		ah->sw_mgmt_crypto = true;
	} else
		ah->sw_mgmt_crypto = true;

	if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
		ath9k_hw_set_delta_slope(ah, chan);

	ath9k_hw_spur_mitigate_freq(ah, chan);
	ah->eep_ops->set_board_values(ah, chan);

	ENABLE_REGWRITE_BUFFER(ah);

	/* Restore station identity and the saved default antenna */
	REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
	REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
		  | macStaId1
		  | AR_STA_ID1_RTS_USE_DEF
		  | (ah->config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
		  | ah->sta_id1_defaults);
	ath_hw_setbssidmask(common);
	REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
	ath9k_hw_write_associd(ah);
	REG_WRITE(ah, AR_ISR, ~0);
	REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);

	REGWRITE_BUFFER_FLUSH(ah);

	ath9k_hw_set_operating_mode(ah, ah->opmode);

	r = ath9k_hw_rf_set_freq(ah, chan);
	if (r)
		return r;

	ath9k_hw_set_clockrate(ah);

	ENABLE_REGWRITE_BUFFER(ah);

	/* One-to-one mapping of DCUs to QCUs */
	for (i = 0; i < AR_NUM_DCU; i++)
		REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);

	REGWRITE_BUFFER_FLUSH(ah);

	ah->intr_txqs = 0;
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		ath9k_hw_resettxqueue(ah, i);

	ath9k_hw_init_interrupt_masks(ah, ah->opmode);
	ath9k_hw_ani_cache_ini_regs(ah);
	ath9k_hw_init_qos(ah);

	if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
		ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);

	ath9k_hw_init_global_settings(ah);

	if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
		REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
			    AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
		REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
			      AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
		REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
			    AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
	}

	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);

	ath9k_hw_set_dma(ah);

	REG_WRITE(ah, AR_OBS, 8);

	/* RX/TX interrupt mitigation thresholds (microseconds) */
	if (ah->config.rx_intr_mitigation) {
		REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
		REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
	}

	if (ah->config.tx_intr_mitigation) {
		REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
		REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
	}

	ath9k_hw_init_bb(ah, chan);

	if (!ath9k_hw_init_cal(ah, chan))
		return -EIO;

	ENABLE_REGWRITE_BUFFER(ah);

	ath9k_hw_restore_chainmask(ah);
	REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);

	REGWRITE_BUFFER_FLUSH(ah);

	/*
	 * For big endian systems turn on swapping for descriptors
	 */
	if (AR_SREV_9100(ah)) {
		u32 mask;
		mask = REG_READ(ah, AR_CFG);
		if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
			ath_dbg(common, ATH_DBG_RESET,
				"CFG Byte Swap Set 0x%x\n", mask);
		} else {
			mask =
				INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
			REG_WRITE(ah, AR_CFG, mask);
			ath_dbg(common, ATH_DBG_RESET,
				"Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
		}
	} else {
		if (common->bus_ops->ath_bus_type == ATH_USB) {
			/* Configure AR9271 target WLAN */
			if (AR_SREV_9271(ah))
				REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
			else
				REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
		}
#ifdef __BIG_ENDIAN
		else if (AR_SREV_9330(ah) || AR_SREV_9340(ah))
			REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
		else
			REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
#endif
	}

	if (ah->btcoex_hw.enabled)
		ath9k_hw_btcoex_enable(ah);

	if (AR_SREV_9300_20_OR_LATER(ah)) {
		ar9003_hw_bb_watchdog_config(ah);
		ar9003_hw_disable_phy_restart(ah);
	}

	ath9k_hw_apply_gpio_override(ah);

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_reset);

/******************************/
/* Power Management (Chipset) */
/******************************/

/*
 * Notify Power Mgt is disabled in self-generated frames.
 * If requested, force chip to sleep.
 */
static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
{
	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
	if (setChip) {
		/*
		 * Clear the RTC force wake bit to allow the
		 * mac to go to sleep.
		 */
		REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);

		if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
			REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);

		/* Shutdown chip. Active low */
		if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah))
			REG_CLR_BIT(ah, (AR_RTC_RESET), AR_RTC_RESET_EN);
	}

	/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}

/*
 * Notify Power Management is enabled in self-generating
 * frames. If request, set power mode of chip to
 * auto/normal. Duration in units of 128us (1/8 TU).
*/
static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
{
	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
	if (setChip) {
		struct ath9k_hw_capabilities *pCap = &ah->caps;

		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
			/* Set WakeOnInterrupt bit; clear ForceWake bit */
			REG_WRITE(ah, AR_RTC_FORCE_WAKE,
				  AR_RTC_FORCE_WAKE_ON_INT);
		} else {
			/*
			 * Clear the RTC force wake bit to allow the
			 * mac to go to sleep.
			 */
			REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
				    AR_RTC_FORCE_WAKE_EN);
		}
	}

	/* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}

/*
 * Force the chip awake. Polls the RTC status until it reports ON,
 * re-asserting force-wake every 50us. Returns false on wakeup timeout
 * or if a needed power-on reset fails.
 */
static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
{
	u32 val;
	int i;

	/* Set Bits 14 and 17 of AR_WA before powering on the chip. */
	if (AR_SREV_9300_20_OR_LATER(ah)) {
		REG_WRITE(ah, AR_WA, ah->WARegVal);
		udelay(10);
	}

	if (setChip) {
		if ((REG_READ(ah, AR_RTC_STATUS) &
		     AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
			/* Chip is fully powered down: bring it back up */
			if (ath9k_hw_set_reset_reg(ah,
					   ATH9K_RESET_POWER_ON) != true) {
				return false;
			}
			if (!AR_SREV_9300_20_OR_LATER(ah))
				ath9k_hw_init_pll(ah, NULL);
		}
		if (AR_SREV_9100(ah))
			REG_SET_BIT(ah, AR_RTC_RESET,
				    AR_RTC_RESET_EN);

		REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
			    AR_RTC_FORCE_WAKE_EN);
		udelay(50);

		for (i = POWER_UP_TIME / 50; i > 0; i--) {
			val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
			if (val == AR_RTC_STATUS_ON)
				break;
			udelay(50);
			REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
				    AR_RTC_FORCE_WAKE_EN);
		}
		if (i == 0) {
			ath_err(ath9k_hw_common(ah),
				"Failed to wakeup in %uus\n",
				POWER_UP_TIME / 20);
			return false;
		}
	}

	REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);

	return true;
}

/*
 * Switch the chip between AWAKE, FULL-SLEEP and NETWORK SLEEP.
 * Returns the status of the mode transition (false on failure or
 * unknown mode); no-op when already in the requested mode.
 */
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
	struct ath_common *common = ath9k_hw_common(ah);
	int status = true, setChip = true;
	static const char *modes[] = {
		"AWAKE",
		"FULL-SLEEP",
		"NETWORK SLEEP",
		"UNDEFINED"
	};

	if (ah->power_mode == mode)
		return status;

	ath_dbg(common, ATH_DBG_RESET, "%s -> %s\n",
		modes[ah->power_mode], modes[mode]);

	switch (mode) {
	case ATH9K_PM_AWAKE:
		status = ath9k_hw_set_power_awake(ah, setChip);
		break;
	case ATH9K_PM_FULL_SLEEP:
		ath9k_set_power_sleep(ah, setChip);
		ah->chip_fullsleep = true;
		break;
	case ATH9K_PM_NETWORK_SLEEP:
		ath9k_set_power_network_sleep(ah, setChip);
		break;
	default:
		ath_err(common, "Unknown power mode %u\n", mode);
		return false;
	}
	ah->power_mode = mode;

	/*
	 * XXX: If this warning never comes up after a while then
	 * simply keep the ATH_DBG_WARN_ON_ONCE() but make
	 * ath9k_hw_setpower() return type void.
	 */
	if (!(ah->ah_flags & AH_UNPLUGGED))
		ATH_DBG_WARN_ON_ONCE(!status);

	return status;
}
EXPORT_SYMBOL(ath9k_hw_setpower);

/*******************/
/* Beacon Handling */
/*******************/

/*
 * Program the beacon timers for the current operating mode.
 * @next_beacon: time of the next beacon (usecs); @beacon_period: the
 * beacon interval programmed into all period registers.
 */
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
{
	int flags = 0;

	ENABLE_REGWRITE_BUFFER(ah);

	switch (ah->opmode) {
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_MESH_POINT:
		REG_SET_BIT(ah, AR_TXCFG,
			    AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
		REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
			  TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
		flags |= AR_NDP_TIMER_EN;
		/* fall through: ADHOC/mesh also need the AP timer set below */
	case NL80211_IFTYPE_AP:
		REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
		REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon -
			  TU_TO_USEC(ah->config.dma_beacon_response_time));
		REG_WRITE(ah, AR_NEXT_SWBA, next_beacon -
			  TU_TO_USEC(ah->config.sw_beacon_response_time));
		flags |=
			AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
		break;
	default:
		ath_dbg(ath9k_hw_common(ah), ATH_DBG_BEACON,
			"%s: unsupported opmode: %d\n", __func__, ah->opmode);
		return;
		break;	/* NOTE(review): unreachable after the return above */
	}

	REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
	REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
	REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
	REG_WRITE(ah, AR_NDP_PERIOD, beacon_period);

	REGWRITE_BUFFER_FLUSH(ah);

	REG_SET_BIT(ah, AR_TIMER_MODE, flags);
}
EXPORT_SYMBOL(ath9k_hw_beaconinit);

/*
 * Program station-mode beacon/DTIM sleep timers from @bs. Beacon and
 * DTIM periods are stretched to at least the requested sleep duration.
 */
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
				    const struct ath9k_beacon_state *bs)
{
	u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));

	REG_WRITE(ah, AR_BEACON_PERIOD,
		  TU_TO_USEC(bs->bs_intval));
	REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
		  TU_TO_USEC(bs->bs_intval));

	REGWRITE_BUFFER_FLUSH(ah);

	REG_RMW_FIELD(ah, AR_RSSI_THR,
		      AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);

	beaconintval = bs->bs_intval;

	if (bs->bs_sleepduration > beaconintval)
		beaconintval = bs->bs_sleepduration;

	dtimperiod = bs->bs_dtimperiod;
	if (bs->bs_sleepduration > dtimperiod)
		dtimperiod = bs->bs_sleepduration;

	/* When every beacon is a DTIM beacon, wake at the DTIM time */
	if (beaconintval == dtimperiod)
		nextTbtt = bs->bs_nextdtim;
	else
		nextTbtt = bs->bs_nexttbtt;

	ath_dbg(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
	ath_dbg(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
	ath_dbg(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
	ath_dbg(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_NEXT_DTIM,
		  TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));

	REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));

	REG_WRITE(ah, AR_SLEEP1,
		  SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
		  | AR_SLEEP1_ASSUME_DTIM);

	if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)
		beacontimeout = (BEACON_TIMEOUT_VAL << 3);
	else
		beacontimeout = MIN_BEACON_TIMEOUT_VAL;

	REG_WRITE(ah, AR_SLEEP2,
		  SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));

	REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
	REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));

	REGWRITE_BUFFER_FLUSH(ah);

	REG_SET_BIT(ah, AR_TIMER_MODE,
		    AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
		    AR_DTIM_TIMER_EN);

	/* TSF Out of Range Threshold */
	REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold);
}
EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);

/*******************/
/* HW Capabilities */
/*******************/

/* Constrain the EEPROM chainmask to what the chip supports; fall back
 * to the full chip chainmask if the EEPROM value is empty. */
static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
{
	eeprom_chainmask &= chip_chainmask;
	if (eeprom_chainmask)
		return eeprom_chainmask;
	else
		return chip_chainmask;
}

/*
 * Populate ah->caps (and the regulatory state) from EEPROM contents
 * and chip revision. Returns 0 on success, -EINVAL if the EEPROM
 * marks no supported band.
 */
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
	unsigned int chip_chainmask;

	u16 eeval;
	u8 ant_div_ctl1, tx_chainmask, rx_chainmask;

	eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
	regulatory->current_rd = eeval;

	eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_1);
	if (AR_SREV_9285_12_OR_LATER(ah))
		eeval |= AR9285_RDEXT_DEFAULT;
	regulatory->current_rd_ext = eeval;

	/* Map some special regdomain codes for non-AP subvendor parts */
	if (ah->opmode != NL80211_IFTYPE_AP &&
	    ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) {
		if (regulatory->current_rd == 0x64 ||
		    regulatory->current_rd == 0x65)
			regulatory->current_rd += 5;
		else if (regulatory->current_rd == 0x41)
			regulatory->current_rd = 0x43;
		ath_dbg(common, ATH_DBG_REGULATORY,
			"regdomain mapped to 0x%x\n", regulatory->current_rd);
	}

	eeval = ah->eep_ops->get_eeprom(ah,
EEP_OP_MODE);
	if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) {
		ath_err(common,
			"no band has been marked as supported in EEPROM\n");
		return -EINVAL;
	}

	if (eeval & AR5416_OPFLAGS_11A)
		pCap->hw_caps |= ATH9K_HW_CAP_5GHZ;

	if (eeval & AR5416_OPFLAGS_11G)
		pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;

	/* Chip-level chainmask limit by hardware revision */
	if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
		chip_chainmask = 1;
	else if (!AR_SREV_9280_20_OR_LATER(ah))
		chip_chainmask = 7;
	else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
		chip_chainmask = 3;
	else
		chip_chainmask = 7;

	pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
	/*
	 * For AR9271 we will temporarilly uses the rx chainmax as read from
	 * the EEPROM.
	 */
	if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
	    !(eeval & AR5416_OPFLAGS_11A) &&
	    !(AR_SREV_9271(ah)))
		/* CB71: GPIO 0 is pulled down to indicate 3 rx chains */
		pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7;
	else if (AR_SREV_9100(ah))
		pCap->rx_chainmask = 0x7;
	else
		/* Use rx_chainmask from EEPROM. */
		pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);

	pCap->tx_chainmask = fixup_chainmask(chip_chainmask, pCap->tx_chainmask);
	pCap->rx_chainmask = fixup_chainmask(chip_chainmask, pCap->rx_chainmask);

	ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;

	/* enable key search for every frame in an aggregate */
	if (AR_SREV_9300_20_OR_LATER(ah))
		ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;

	common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;

	/* AR2427 is the only HT-less part */
	if (ah->hw_version.devid != AR2427_DEVID_PCIE)
		pCap->hw_caps |= ATH9K_HW_CAP_HT;
	else
		pCap->hw_caps &= ~ATH9K_HW_CAP_HT;

	if (AR_SREV_9271(ah))
		pCap->num_gpio_pins = AR9271_NUM_GPIO;
	else if (AR_DEVID_7010(ah))
		pCap->num_gpio_pins = AR7010_NUM_GPIO;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		pCap->num_gpio_pins = AR9300_NUM_GPIO;
	else if (AR_SREV_9287_11_OR_LATER(ah))
		pCap->num_gpio_pins = AR9287_NUM_GPIO;
	else if (AR_SREV_9285_12_OR_LATER(ah))
		pCap->num_gpio_pins = AR9285_NUM_GPIO;
	else if (AR_SREV_9280_20_OR_LATER(ah))
		pCap->num_gpio_pins = AR928X_NUM_GPIO;
	else
		pCap->num_gpio_pins = AR_NUM_GPIO;

	if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
		pCap->hw_caps |= ATH9K_HW_CAP_CST;
		pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
	} else {
		pCap->rts_aggr_limit = (8 * 1024);
	}

#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
	ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
	if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
		ah->rfkill_gpio =
			MS(ah->rfsilent, EEP_RFSILENT_GPIO_SEL);
		ah->rfkill_polarity =
			MS(ah->rfsilent, EEP_RFSILENT_POLARITY);

		pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
	}
#endif

	if (AR_SREV_9271(ah) || AR_SREV_9300_20_OR_LATER(ah))
		pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
	else
		pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;

	if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
		pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
	else
		pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;

	/* Bluetooth coexistence scheme and GPIO selection by chip rev */
	if (common->btcoex_enabled) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
			btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
			btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
			btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9300;
		} else if (AR_SREV_9280_20_OR_LATER(ah)) {
			btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9280;
			btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9280;

			if (AR_SREV_9285(ah)) {
				btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
				btcoex_hw->btpriority_gpio =
						ATH_BTPRIORITY_GPIO_9285;
			} else {
				btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
			}
		}
	} else {
		btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
	}

	if (AR_SREV_9300_20_OR_LATER(ah)) {
		pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
		if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah))
			pCap->hw_caps |= ATH9K_HW_CAP_LDPC;

		pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
		pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
		pCap->rx_status_len = sizeof(struct ar9003_rxs);
		pCap->tx_desc_len = sizeof(struct ar9003_txc);
		pCap->txs_len = sizeof(struct ar9003_txs);
		if (!ah->config.paprd_disable &&
		    ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
			pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
	} else {
		pCap->tx_desc_len = sizeof(struct ath_desc);
		if (AR_SREV_9280_20(ah))
			pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;

	if (AR_SREV_9300_20_OR_LATER(ah))
		ah->ent_mode = REG_READ(ah, AR_ENT_OTP);

	if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
		pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;

	if (AR_SREV_9285(ah))
		if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
			ant_div_ctl1 =
				ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
			if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
				pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
		}
	if (AR_SREV_9300_20_OR_LATER(ah)) {
		if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
			pCap->hw_caps |= ATH9K_HW_CAP_APM;
	}

	if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
		ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
		/*
		 * enable the diversity-combining algorithm only when
		 * both enable_lna_div and enable_fast_div are set
		 *		Table for Diversity
		 *
ant_div_alt_lnaconf		bit 0-1
		 *	ant_div_main_lnaconf		bit 2-3
		 *	ant_div_alt_gaintb		bit 4
		 *	ant_div_main_gaintb		bit 5
		 *	enable_ant_div_lnadiv		bit 6
		 *	enable_ant_fast_div		bit 7
		 */
		if ((ant_div_ctl1 >> 0x6) == 0x3)
			pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
	}

	if (AR_SREV_9485_10(ah)) {
		pCap->pcie_lcr_extsync_en = true;
		pCap->pcie_lcr_offset = 0x80;
	}

	/* Count the set bits in each chainmask to get the chain counts */
	tx_chainmask = pCap->tx_chainmask;
	rx_chainmask = pCap->rx_chainmask;
	while (tx_chainmask || rx_chainmask) {
		if (tx_chainmask & BIT(0))
			pCap->max_txchains++;
		if (rx_chainmask & BIT(0))
			pCap->max_rxchains++;

		tx_chainmask >>= 1;
		rx_chainmask >>= 1;
	}

	return 0;
}

/****************************/
/* GPIO / RFKILL / Antennae */
/****************************/

/* Select the signal routed to @gpio via the output-mux registers
 * (5 bits per pin, 6 pins per register). Pre-AR9280 MUX1 writes need
 * a read-modify-write workaround to avoid clobbering bits 0x1F0. */
static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
					 u32 gpio, u32 type)
{
	int addr;
	u32 gpio_shift, tmp;

	if (gpio > 11)
		addr = AR_GPIO_OUTPUT_MUX3;
	else if (gpio > 5)
		addr = AR_GPIO_OUTPUT_MUX2;
	else
		addr = AR_GPIO_OUTPUT_MUX1;

	gpio_shift = (gpio % 6) * 5;

	if (AR_SREV_9280_20_OR_LATER(ah)
	    || (addr != AR_GPIO_OUTPUT_MUX1)) {
		REG_RMW(ah, addr, (type << gpio_shift),
			(0x1f << gpio_shift));
	} else {
		tmp = REG_READ(ah, addr);
		tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0);
		tmp &= ~(0x1f << gpio_shift);
		tmp |= (type << gpio_shift);
		REG_WRITE(ah, addr, tmp);
	}
}

/* Configure @gpio as an input pin. */
void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
{
	u32 gpio_shift;

	BUG_ON(gpio >= ah->caps.num_gpio_pins);

	if (AR_DEVID_7010(ah)) {
		gpio_shift = gpio;
		REG_RMW(ah, AR7010_GPIO_OE,
			(AR7010_GPIO_OE_AS_INPUT << gpio_shift),
			(AR7010_GPIO_OE_MASK << gpio_shift));
		return;
	}

	gpio_shift = gpio << 1;
	REG_RMW(ah,
		AR_GPIO_OE_OUT,
		(AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
		(AR_GPIO_OE_OUT_DRV << gpio_shift));
}
EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);

/* Read @gpio. Returns 0/1 for valid pins (AR7010 input is active-low),
 * or 0xffffffff for an out-of-range pin number. */
u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
{
#define MS_REG_READ(x, y) \
	(MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y)))

	if (gpio >= ah->caps.num_gpio_pins)
		return 0xffffffff;

	if (AR_DEVID_7010(ah)) {
		u32 val;
		val = REG_READ(ah, AR7010_GPIO_IN);
		return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
	} else if (AR_SREV_9300_20_OR_LATER(ah))
		return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
			AR_GPIO_BIT(gpio)) != 0;
	else if (AR_SREV_9271(ah))
		return MS_REG_READ(AR9271, gpio) != 0;
	else if (AR_SREV_9287_11_OR_LATER(ah))
		return MS_REG_READ(AR9287, gpio) != 0;
	else if (AR_SREV_9285_12_OR_LATER(ah))
		return MS_REG_READ(AR9285, gpio) != 0;
	else if (AR_SREV_9280_20_OR_LATER(ah))
		return MS_REG_READ(AR928X, gpio) != 0;
	else
		return MS_REG_READ(AR, gpio) != 0;
}
EXPORT_SYMBOL(ath9k_hw_gpio_get);

/* Configure @gpio as an output driving @ah_signal_type. */
void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, u32 ah_signal_type)
{
	u32 gpio_shift;

	if (AR_DEVID_7010(ah)) {
		gpio_shift = gpio;
		REG_RMW(ah, AR7010_GPIO_OE,
			(AR7010_GPIO_OE_AS_OUTPUT << gpio_shift),
			(AR7010_GPIO_OE_MASK << gpio_shift));
		return;
	}

	ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
	gpio_shift = 2 * gpio;
	REG_RMW(ah,
		AR_GPIO_OE_OUT,
		(AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
		(AR_GPIO_OE_OUT_DRV << gpio_shift));
}
EXPORT_SYMBOL(ath9k_hw_cfg_output);

/* Drive @gpio to @val; AR7010 and AR9271 pins are inverted. */
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
{
	if (AR_DEVID_7010(ah)) {
		val = val ? 0 : 1;
		REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio),
			AR_GPIO_BIT(gpio));
		return;
	}

	if (AR_SREV_9271(ah))
		val = ~val;

	REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
		AR_GPIO_BIT(gpio));
}
EXPORT_SYMBOL(ath9k_hw_set_gpio);

u32 ath9k_hw_getdefantenna(struct ath_hw *ah)
{
	return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
}
EXPORT_SYMBOL(ath9k_hw_getdefantenna);

void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
{
	REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
}
EXPORT_SYMBOL(ath9k_hw_setantenna);

/*********************/
/* General Operation */
/*********************/

/* Return the RX filter, folding the PHY-error enables into the
 * software filter bits. */
u32 ath9k_hw_getrxfilter(struct ath_hw *ah)
{
	u32 bits = REG_READ(ah, AR_RX_FILTER);
	u32 phybits = REG_READ(ah, AR_PHY_ERR);

	if (phybits & AR_PHY_ERR_RADAR)
		bits |= ATH9K_RX_FILTER_PHYRADAR;
	if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
		bits |= ATH9K_RX_FILTER_PHYERR;

	return bits;
}
EXPORT_SYMBOL(ath9k_hw_getrxfilter);

/* Program the RX filter, translating the radar/phyerr software bits
 * into the AR_PHY_ERR register and toggling zero-length DMA. */
void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
{
	u32 phybits;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_RX_FILTER, bits);

	phybits = 0;
	if (bits & ATH9K_RX_FILTER_PHYRADAR)
		phybits |= AR_PHY_ERR_RADAR;
	if (bits & ATH9K_RX_FILTER_PHYERR)
		phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
	REG_WRITE(ah, AR_PHY_ERR, phybits);

	if (phybits)
		REG_SET_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
	else
		REG_CLR_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);

	REGWRITE_BUFFER_FLUSH(ah);
}
EXPORT_SYMBOL(ath9k_hw_setrxfilter);

/* Warm-reset the PHY and reinit the PLL; false on reset failure. */
bool ath9k_hw_phy_disable(struct ath_hw *ah)
{
	if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
		return false;

	ath9k_hw_init_pll(ah, NULL);
	return true;
}
EXPORT_SYMBOL(ath9k_hw_phy_disable);

/* Wake the chip and cold-reset it; false on failure. */
bool ath9k_hw_disable(struct ath_hw *ah)
{
	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
		return false;

	if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD))
		return false;

	ath9k_hw_init_pll(ah, NULL);
	return true;
}
EXPORT_SYMBOL(ath9k_hw_disable);

/* Apply a new regulatory TX power @limit (capped at MAX_RATE_POWER)
 * to the current channel. */
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
{
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ieee80211_channel *channel = chan->chan;

	regulatory->power_limit = min(limit, (u32) MAX_RATE_POWER);

	ah->eep_ops->set_txpower(ah, chan,
				 ath9k_regd_get_ctl(regulatory, chan),
				 channel->max_antenna_gain * 2,
				 channel->max_power * 2,
				 min((u32) MAX_RATE_POWER,
				     (u32) regulatory->power_limit), test);
}
EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);

void ath9k_hw_setopmode(struct ath_hw *ah)
{
	ath9k_hw_set_operating_mode(ah, ah->opmode);
}
EXPORT_SYMBOL(ath9k_hw_setopmode);

void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1)
{
	REG_WRITE(ah, AR_MCAST_FIL0, filter0);
	REG_WRITE(ah, AR_MCAST_FIL1, filter1);
}
EXPORT_SYMBOL(ath9k_hw_setmcastfilter);

/* Program the BSSID and AID registers from common state. */
void ath9k_hw_write_associd(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid));
	REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) |
		  ((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
}
EXPORT_SYMBOL(ath9k_hw_write_associd);

#define ATH9K_MAX_TSF_READ 10

/* Read the 64-bit TSF, retrying on upper-word rollover between the
 * two 32-bit register reads. */
u64 ath9k_hw_gettsf64(struct ath_hw *ah)
{
	u32 tsf_lower, tsf_upper1, tsf_upper2;
	int i;

	tsf_upper1 = REG_READ(ah, AR_TSF_U32);
	for (i = 0; i < ATH9K_MAX_TSF_READ; i++) {
		tsf_lower = REG_READ(ah, AR_TSF_L32);
		tsf_upper2 = REG_READ(ah, AR_TSF_U32);
		if (tsf_upper2 == tsf_upper1)
			break;
		tsf_upper1 = tsf_upper2;
	}

	WARN_ON( i == ATH9K_MAX_TSF_READ );

	return (((u64)tsf_upper1 << 32) | tsf_lower);
}
EXPORT_SYMBOL(ath9k_hw_gettsf64);

void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64)
{
	REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff);
	REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff);
}
EXPORT_SYMBOL(ath9k_hw_settsf64);

/* Reset the TSF after any pending sleep-mode TSF write completes. */
void ath9k_hw_reset_tsf(struct ath_hw *ah)
{
	if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
			   AH_TSF_WRITE_TIMEOUT))
		ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
			"AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");

	REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
}
EXPORT_SYMBOL(ath9k_hw_reset_tsf);
/*
 * Enable/disable per-VIF TSF adjustment on transmitted beacons.
 * When enabled, the PCU adds the TSF offset to outgoing frames.
 * Only updates the cached misc_mode; the register is written elsewhere.
 */
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
{
	if (setting)
		ah->misc_mode |= AR_PCU_TX_ADD_TSF;
	else
		ah->misc_mode &= ~AR_PCU_TX_ADD_TSF;
}
EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);

/*
 * Program the MAC 20/40 MHz operating mode register from the current
 * mac80211 channel configuration.  In HT40, joined RX clear is used
 * unless extension-channel CCA is configured to be ignored.
 */
void ath9k_hw_set11nmac2040(struct ath_hw *ah)
{
	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
	u32 macmode;

	if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
		macmode = AR_2040_JOINED_RX_CLEAR;
	else
		macmode = 0;

	REG_WRITE(ah, AR_2040_MODE, macmode);
}

/* HW Generic timers configuration */
/*
 * Per-timer register layout: next-event address, period address, mode
 * register and the mode-enable mask.  Indices 0-7 share the NDP timer
 * registers; indices 8-15 map onto the NDP2 timer register banks, each
 * bank spaced 4 bytes apart with its own enable bit in the mode mask.
 */
static const struct ath_gen_timer_configuration gen_tmr_configuration[] =
{
	{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
	{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
	{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
	{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
	{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
	{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
	{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
	{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
	{AR_NEXT_NDP2_TIMER, AR_NDP2_PERIOD, AR_NDP2_TIMER_MODE, 0x0001},
	{AR_NEXT_NDP2_TIMER + 1*4, AR_NDP2_PERIOD + 1*4,
				AR_NDP2_TIMER_MODE, 0x0002},
	{AR_NEXT_NDP2_TIMER + 2*4, AR_NDP2_PERIOD + 2*4,
				AR_NDP2_TIMER_MODE, 0x0004},
	{AR_NEXT_NDP2_TIMER + 3*4, AR_NDP2_PERIOD + 3*4,
				AR_NDP2_TIMER_MODE, 0x0008},
	{AR_NEXT_NDP2_TIMER + 4*4, AR_NDP2_PERIOD + 4*4,
				AR_NDP2_TIMER_MODE, 0x0010},
	{AR_NEXT_NDP2_TIMER + 5*4, AR_NDP2_PERIOD + 5*4,
				AR_NDP2_TIMER_MODE, 0x0020},
	{AR_NEXT_NDP2_TIMER + 6*4, AR_NDP2_PERIOD + 6*4,
				AR_NDP2_TIMER_MODE, 0x0040},
	{AR_NEXT_NDP2_TIMER + 7*4, AR_NDP2_PERIOD + 7*4,
				AR_NDP2_TIMER_MODE, 0x0080}
};

/* HW generic timer primitives */

/* compute and clear index of rightmost 1 */
/*
 * Isolate the lowest set bit of *mask with b & (0-b), clear it from
 * *mask, then use a de Bruijn multiply/shift to turn the isolated bit
 * into a table index mapped through gen_timer_index.
 * (debruijn32 is defined elsewhere in this file.)
 */
static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
{
	u32 b;

	b = *mask;
	b &= (0-b);		/* keep only the rightmost set bit */
	*mask &= ~b;		/* consume that bit from the caller's mask */
	b *= debruijn32;
	b >>= 27;		/* top 5 bits select the de Bruijn slot */

	return timer_table->gen_timer_index[b];
}

/* Read the low 32 bits of the hardware TSF counter. */
u32 ath9k_hw_gettsf32(struct ath_hw *ah)
{
	return REG_READ(ah, AR_TSF_L32);
}
EXPORT_SYMBOL(ath9k_hw_gettsf32);

/*
 * Allocate a generic hardware timer and install it in the per-hw timer
 * table at @timer_index.  Returns NULL on allocation failure.
 * NOTE(review): any timer already occupying this slot is silently
 * overwritten — callers are presumably expected to manage slot
 * ownership; verify against callers.
 */
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
					  void (*trigger)(void *),
					  void (*overflow)(void *),
					  void *arg,
					  u8 timer_index)
{
	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
	struct ath_gen_timer *timer;

	timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);

	if (timer == NULL) {
		ath_err(ath9k_hw_common(ah),
			"Failed to allocate memory for hw timer[%d]\n",
			timer_index);
		return NULL;
	}

	/* allocate a hardware generic timer slot */
	timer_table->timers[timer_index] = timer;
	timer->index = timer_index;
	timer->trigger = trigger;
	timer->overflow = overflow;
	timer->arg = arg;

	return timer;
}
EXPORT_SYMBOL(ath_gen_timer_alloc);

/*
 * Arm a generic timer: first trigger at (current TSF + @trig_timeout),
 * repeating every @timer_period.  Also marks the timer active in the
 * software mask and unmasks its trigger/threshold interrupts.
 * @timer_period must be non-zero.
 */
void ath9k_hw_gen_timer_start(struct ath_hw *ah,
			      struct ath_gen_timer *timer,
			      u32 trig_timeout,
			      u32 timer_period)
{
	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
	u32 tsf, timer_next;

	BUG_ON(!timer_period);

	set_bit(timer->index, &timer_table->timer_mask.timer_bits);

	tsf = ath9k_hw_gettsf32(ah);

	timer_next = tsf + trig_timeout;

	ath_dbg(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
		"current tsf %x period %x timer_next %x\n",
		tsf, timer_period, timer_next);

	/*
	 * Program generic timer registers
	 */
	REG_WRITE(ah, gen_tmr_configuration[timer->index].next_addr,
		  timer_next);
	REG_WRITE(ah, gen_tmr_configuration[timer->index].period_addr,
		  timer_period);
	REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
		    gen_tmr_configuration[timer->index].mode_mask);

	/* Enable both trigger and thresh interrupt masks */
	REG_SET_BIT(ah, AR_IMR_S5,
		    (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
		     SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
}
EXPORT_SYMBOL(ath9k_hw_gen_timer_start);

/*
 * Disarm a generic timer: clear its mode-enable bit, mask both of its
 * interrupt sources and remove it from the active-timer mask.
 * Silently ignores indices outside [AR_FIRST_NDP_TIMER,
 * ATH_MAX_GEN_TIMER).
 */
void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
{
	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;

	if ((timer->index < AR_FIRST_NDP_TIMER) ||
	    (timer->index >= ATH_MAX_GEN_TIMER)) {
		return;
	}

	/* Clear generic timer enable bits. */
	REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
		    gen_tmr_configuration[timer->index].mode_mask);

	/* Disable both trigger and thresh interrupt masks */
	REG_CLR_BIT(ah, AR_IMR_S5,
		    (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
		     SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));

	clear_bit(timer->index, &timer_table->timer_mask.timer_bits);
}
EXPORT_SYMBOL(ath9k_hw_gen_timer_stop);

/*
 * Release a timer previously returned by ath_gen_timer_alloc() and
 * free its table slot.  Does not touch hardware; callers presumably
 * stop the timer first — confirm against callers.
 */
void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer)
{
	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;

	/* free the hardware generic timer slot */
	timer_table->timers[timer->index] = NULL;
	kfree(timer);
}
EXPORT_SYMBOL(ath_gen_timer_free);

/*
 * Generic Timer Interrupts handling
 */
/*
 * Dispatch pending generic-timer interrupts.  The raw trigger/threshold
 * status (captured earlier into ah->intr_gen_timer_*) is filtered by
 * the software active-timer mask; threshold (TSF overflow) events take
 * precedence over plain triggers for the same timer.  Each pending
 * timer's callback runs with the argument registered at alloc time.
 */
void ath_gen_timer_isr(struct ath_hw *ah)
{
	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
	struct ath_gen_timer *timer;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 trigger_mask, thresh_mask, index;

	/* get hardware generic timer interrupt status */
	trigger_mask = ah->intr_gen_timer_trigger;
	thresh_mask = ah->intr_gen_timer_thresh;
	trigger_mask &= timer_table->timer_mask.val;
	thresh_mask &= timer_table->timer_mask.val;

	/* a timer that overflowed is reported only via its overflow hook */
	trigger_mask &= ~thresh_mask;

	while (thresh_mask) {
		index = rightmost_index(timer_table, &thresh_mask);
		timer = timer_table->timers[index];
		BUG_ON(!timer);
		ath_dbg(common, ATH_DBG_HWTIMER,
			"TSF overflow for Gen timer %d\n", index);
		timer->overflow(timer->arg);
	}

	while (trigger_mask) {
		index = rightmost_index(timer_table, &trigger_mask);
		timer = timer_table->timers[index];
		BUG_ON(!timer);
		ath_dbg(common, ATH_DBG_HWTIMER,
			"Gen timer[%d] trigger\n", index);
		timer->trigger(timer->arg);
	}
}
EXPORT_SYMBOL(ath_gen_timer_isr);

/********/
/* HTC  */
/********/

/* Record that the HTC (host target communication) reset init was done. */
void ath9k_hw_htc_resetinit(struct ath_hw *ah)
{
	ah->htc_reset_init = true;
}
EXPORT_SYMBOL(ath9k_hw_htc_resetinit);

/* MAC/BB silicon revision -> marketing name lookup table. */
static struct {
	u32 version;
	const char * name;
} ath_mac_bb_names[] = {
	/* Devices with external radios */
	{ AR_SREV_VERSION_5416_PCI,	"5416" },
	{ AR_SREV_VERSION_5416_PCIE,	"5418" },
	{ AR_SREV_VERSION_9100,		"9100" },
	{ AR_SREV_VERSION_9160,		"9160" },
	/* Single-chip solutions */
	{ AR_SREV_VERSION_9280,		"9280" },
	{ AR_SREV_VERSION_9285,		"9285" },
	{ AR_SREV_VERSION_9287,		"9287" },
	{ AR_SREV_VERSION_9271,		"9271" },
	{ AR_SREV_VERSION_9300,		"9300" },
	{ AR_SREV_VERSION_9330,		"9330" },
	{ AR_SREV_VERSION_9485,		"9485" },
};

/* For devices with external radios */
/* Radio revision -> RF chip name lookup table (revision 0 maps to 5133). */
static struct {
	u16 version;
	const char * name;
} ath_rf_names[] = {
	{ 0,				"5133" },
	{ AR_RAD5133_SREV_MAJOR,	"5133" },
	{ AR_RAD5122_SREV_MAJOR,	"5122" },
	{ AR_RAD2133_SREV_MAJOR,	"2133" },
	{ AR_RAD2122_SREV_MAJOR,	"2122" }
};

/*
 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
 */
static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version)
{
	int i;

	for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
		if (ath_mac_bb_names[i].version == mac_bb_version) {
			return ath_mac_bb_names[i].name;
		}
	}

	return "????";
}

/*
 * Return the RF name. "????" is returned if the RF is unknown.
 * Used for devices with external radios.
 */
static const char *ath9k_hw_rf_name(u16 rf_version)
{
	int i;

	for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
		if (ath_rf_names[i].version == rf_version) {
			return ath_rf_names[i].name;
		}
	}

	return "????";
}

/*
 * Format a human-readable hardware description into @hw_name (at most
 * @len bytes).  Single-chip parts report only the MAC/BB revision;
 * older parts additionally report the external RF chip revision.
 */
void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
{
	int used;

	/* chipsets >= AR9280 are single-chip */
	if (AR_SREV_9280_20_OR_LATER(ah)) {
		used = snprintf(hw_name, len,
				"Atheros AR%s Rev:%x",
				ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
				ah->hw_version.macRev);
	} else {
		used = snprintf(hw_name, len,
				"Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
				ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
				ah->hw_version.macRev,
				ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
						  AR_RADIO_SREV_MAJOR)),
				ah->hw_version.phyRev);
	}

	hw_name[used] = '\0';
}
EXPORT_SYMBOL(ath9k_hw_name);
gpl-2.0
shangdawei/artekit_vga
STM32F10x_StdPeriph_Driver/src/stm32f10x_exti.c
602
6690
/**
  ******************************************************************************
  * @file    stm32f10x_exti.c
  * @author  MCD Application Team
  * @version V3.5.0
  * @date    11-March-2011
  * @brief   EXTI (external interrupt/event controller) firmware functions.
  *
  * COPYRIGHT 2011 STMicroelectronics.  Provided for guidance only; see the
  * original ST firmware license notice for the full disclaimer.
  ******************************************************************************
  */

/* Includes ------------------------------------------------------------------*/
#include "stm32f10x_exti.h"

/* Sentinel meaning "no EXTI line selected" for EXTI_StructInit(). */
#define EXTI_LINENONE    ((uint32_t)0x00000)  /* No interrupt selected */

/**
  * @brief  Deinitializes the EXTI peripheral registers to their default
  *         reset values.
  * @param  None
  * @retval None
  */
void EXTI_DeInit(void)
{
  EXTI->IMR = 0x00000000;
  EXTI->EMR = 0x00000000;
  EXTI->RTSR = 0x00000000;
  EXTI->FTSR = 0x00000000;
  /* Writing 1s clears any pending requests (same mechanism as
     EXTI_ClearFlag). */
  EXTI->PR = 0x000FFFFF;
}

/**
  * @brief  Initializes the EXTI peripheral according to the specified
  *         parameters in the EXTI_InitStruct.
  * @param  EXTI_InitStruct: pointer to a EXTI_InitTypeDef structure
  *         that contains the configuration information for the EXTI
  *         peripheral.
  * @retval None
  */
void EXTI_Init(EXTI_InitTypeDef* EXTI_InitStruct)
{
  uint32_t mode_reg_addr = (uint32_t)EXTI_BASE;

  /* Check the parameters */
  assert_param(IS_EXTI_MODE(EXTI_InitStruct->EXTI_Mode));
  assert_param(IS_EXTI_TRIGGER(EXTI_InitStruct->EXTI_Trigger));
  assert_param(IS_EXTI_LINE(EXTI_InitStruct->EXTI_Line));
  assert_param(IS_FUNCTIONAL_STATE(EXTI_InitStruct->EXTI_LineCmd));

  /* EXTI_Mode is the byte offset of IMR or EMR from EXTI_BASE, so the
     mask register to touch can be computed directly. */
  mode_reg_addr += EXTI_InitStruct->EXTI_Mode;

  if (EXTI_InitStruct->EXTI_LineCmd == DISABLE)
  {
    /* Disable the selected external lines and stop here. */
    *(__IO uint32_t *) mode_reg_addr &= ~EXTI_InitStruct->EXTI_Line;
    return;
  }

  /* Clear EXTI line configuration */
  EXTI->IMR &= ~EXTI_InitStruct->EXTI_Line;
  EXTI->EMR &= ~EXTI_InitStruct->EXTI_Line;

  /* Enable the line in the requested mask register (IMR or EMR). */
  *(__IO uint32_t *) mode_reg_addr |= EXTI_InitStruct->EXTI_Line;

  /* Clear Rising Falling edge configuration */
  EXTI->RTSR &= ~EXTI_InitStruct->EXTI_Line;
  EXTI->FTSR &= ~EXTI_InitStruct->EXTI_Line;

  if (EXTI_InitStruct->EXTI_Trigger == EXTI_Trigger_Rising_Falling)
  {
    /* Sensitive to both edges: set rising and falling selectors. */
    EXTI->RTSR |= EXTI_InitStruct->EXTI_Line;
    EXTI->FTSR |= EXTI_InitStruct->EXTI_Line;
  }
  else
  {
    /* EXTI_Trigger is likewise the byte offset of RTSR or FTSR. */
    uint32_t trig_reg_addr = (uint32_t)EXTI_BASE;

    trig_reg_addr += EXTI_InitStruct->EXTI_Trigger;
    *(__IO uint32_t *) trig_reg_addr |= EXTI_InitStruct->EXTI_Line;
  }
}

/**
  * @brief  Fills each EXTI_InitStruct member with its reset value.
  * @param  EXTI_InitStruct: pointer to a EXTI_InitTypeDef structure
  *         which will be initialized.
  * @retval None
  */
void EXTI_StructInit(EXTI_InitTypeDef* EXTI_InitStruct)
{
  EXTI_InitStruct->EXTI_Line = EXTI_LINENONE;
  EXTI_InitStruct->EXTI_Mode = EXTI_Mode_Interrupt;
  EXTI_InitStruct->EXTI_Trigger = EXTI_Trigger_Falling;
  EXTI_InitStruct->EXTI_LineCmd = DISABLE;
}

/**
  * @brief  Generates a Software interrupt.
  * @param  EXTI_Line: specifies the EXTI lines to be enabled or disabled.
  *         This parameter can be any combination of EXTI_Linex where x
  *         can be (0..19).
  * @retval None
  */
void EXTI_GenerateSWInterrupt(uint32_t EXTI_Line)
{
  /* Check the parameters */
  assert_param(IS_EXTI_LINE(EXTI_Line));

  EXTI->SWIER |= EXTI_Line;
}

/**
  * @brief  Checks whether the specified EXTI line flag is set or not.
  * @param  EXTI_Line: specifies the EXTI line flag to check.
  *         This parameter can be:
  *           @arg EXTI_Linex: External interrupt line x where x(0..19)
  * @retval The new state of EXTI_Line (SET or RESET).
  */
FlagStatus EXTI_GetFlagStatus(uint32_t EXTI_Line)
{
  /* Check the parameters */
  assert_param(IS_GET_EXTI_LINE(EXTI_Line));

  return ((EXTI->PR & EXTI_Line) != (uint32_t)RESET) ? SET : RESET;
}

/**
  * @brief  Clears the EXTI's line pending flags.
  * @param  EXTI_Line: specifies the EXTI lines flags to clear.
  *         This parameter can be any combination of EXTI_Linex where x
  *         can be (0..19).
  * @retval None
  */
void EXTI_ClearFlag(uint32_t EXTI_Line)
{
  /* Check the parameters */
  assert_param(IS_EXTI_LINE(EXTI_Line));

  /* Pending bits are cleared by writing 1 to them. */
  EXTI->PR = EXTI_Line;
}

/**
  * @brief  Checks whether the specified EXTI line is asserted or not.
  * @param  EXTI_Line: specifies the EXTI line to check.
  *         This parameter can be:
  *           @arg EXTI_Linex: External interrupt line x where x(0..19)
  * @retval The new state of EXTI_Line (SET or RESET).
  */
ITStatus EXTI_GetITStatus(uint32_t EXTI_Line)
{
  uint32_t line_enabled;
  uint32_t line_pending;

  /* Check the parameters */
  assert_param(IS_GET_EXTI_LINE(EXTI_Line));

  /* An interrupt is reported only when the line is both unmasked in
     IMR and pending in PR. */
  line_enabled = EXTI->IMR & EXTI_Line;
  line_pending = EXTI->PR & EXTI_Line;

  if ((line_pending != (uint32_t)RESET) && (line_enabled != (uint32_t)RESET))
  {
    return SET;
  }

  return RESET;
}

/**
  * @brief  Clears the EXTI's line pending bits.
  * @param  EXTI_Line: specifies the EXTI lines to clear.
  *         This parameter can be any combination of EXTI_Linex where x
  *         can be (0..19).
  * @retval None
  */
void EXTI_ClearITPendingBit(uint32_t EXTI_Line)
{
  /* Check the parameters */
  assert_param(IS_EXTI_LINE(EXTI_Line));

  /* Same write-1-to-clear mechanism as EXTI_ClearFlag. */
  EXTI->PR = EXTI_Line;
}

/******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE****/
gpl-2.0
Herna1994/android_kernel_bq_vegetalte
arch/mips/kernel/unaligned.c
1114
41461
/* * Handle unaligned accesses by emulation. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle * Copyright (C) 1999 Silicon Graphics, Inc. * * This file contains exception handler for address error exception with the * special capability to execute faulting instructions in software. The * handler does not try to handle the case when the program counter points * to an address not aligned to a word boundary. * * Putting data to unaligned addresses is a bad practice even on Intel where * only the performance is affected. Much worse is that such code is non- * portable. Due to several programs that die on MIPS due to alignment * problems I decided to implement this handler anyway though I originally * didn't intend to do this at all for user code. * * For now I enable fixing of address errors by default to make life easier. * I however intend to disable this somewhen in the future when the alignment * problems with user programs have been fixed. For programmers this is the * right way to go. * * Fixing address errors is a per process option. The option is inherited * across fork(2) and execve(2) calls. If you really want to use the * option in your user programs - I discourage the use of the software * emulation strongly - use the following code in your userland stuff: * * #include <sys/sysmips.h> * * ... * sysmips(MIPS_FIXADE, x); * ... * * The argument x is 0 for disabling software emulation, enabled otherwise. * * Below a little program to play around with this feature. 
* * #include <stdio.h> * #include <sys/sysmips.h> * * struct foo { * unsigned char bar[8]; * }; * * main(int argc, char *argv[]) * { * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7}; * unsigned int *p = (unsigned int *) (x.bar + 3); * int i; * * if (argc > 1) * sysmips(MIPS_FIXADE, atoi(argv[1])); * * printf("*p = %08lx\n", *p); * * *p = 0xdeadface; * * for(i = 0; i <= 7; i++) * printf("%02x ", x.bar[i]); * printf("\n"); * } * * Coprocessor loads are not supported; I think this case is unimportant * in the practice. * * TODO: Handle ndc (attempted store to doubleword in uncached memory) * exception for the R6000. * A store crossing a page boundary might be executed only partially. * Undo the partial store in this case. */ #include <linux/mm.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/sched.h> #include <linux/debugfs.h> #include <linux/perf_event.h> #include <asm/asm.h> #include <asm/branch.h> #include <asm/byteorder.h> #include <asm/cop2.h> #include <asm/fpu.h> #include <asm/fpu_emulator.h> #include <asm/inst.h> #include <asm/uaccess.h> #include <asm/fpu.h> #include <asm/fpu_emulator.h> #define STR(x) __STR(x) #define __STR(x) #x enum { UNALIGNED_ACTION_QUIET, UNALIGNED_ACTION_SIGNAL, UNALIGNED_ACTION_SHOW, }; #ifdef CONFIG_DEBUG_FS static u32 unaligned_instructions; static u32 unaligned_action; #else #define unaligned_action UNALIGNED_ACTION_QUIET #endif extern void show_registers(struct pt_regs *regs); #ifdef __BIG_ENDIAN #define LoadHW(addr, value, res) \ __asm__ __volatile__ (".set\tnoat\n" \ "1:\tlb\t%0, 0(%2)\n" \ "2:\tlbu\t$1, 1(%2)\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ "3:\t.set\tat\n\t" \ ".insn\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%1, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); #define LoadW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tlwl\t%0, 
(%2)\n" \ "2:\tlwr\t%0, 3(%2)\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%1, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); #define LoadHWU(addr, value, res) \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\tlbu\t%0, 0(%2)\n" \ "2:\tlbu\t$1, 1(%2)\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ ".set\tat\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%1, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); #define LoadWU(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tlwl\t%0, (%2)\n" \ "2:\tlwr\t%0, 3(%2)\n\t" \ "dsll\t%0, %0, 32\n\t" \ "dsrl\t%0, %0, 32\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ "\t.section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%1, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); #define LoadDW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tldl\t%0, (%2)\n" \ "2:\tldr\t%0, 7(%2)\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ "\t.section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%1, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); #define StoreHW(addr, value, res) \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\tsb\t%1, 1(%2)\n\t" \ "srl\t$1, %1, 0x8\n" \ "2:\tsb\t$1, 0(%2)\n\t" \ ".set\tat\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%0, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 
4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); #define StoreW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tswl\t%1,(%2)\n" \ "2:\tswr\t%1, 3(%2)\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%0, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); #define StoreDW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tsdl\t%1,(%2)\n" \ "2:\tsdr\t%1, 7(%2)\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%0, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); #endif #ifdef __LITTLE_ENDIAN #define LoadHW(addr, value, res) \ __asm__ __volatile__ (".set\tnoat\n" \ "1:\tlb\t%0, 1(%2)\n" \ "2:\tlbu\t$1, 0(%2)\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ "3:\t.set\tat\n\t" \ ".insn\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%1, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); #define LoadW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tlwl\t%0, 3(%2)\n" \ "2:\tlwr\t%0, (%2)\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%1, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); #define LoadHWU(addr, value, res) \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\tlbu\t%0, 1(%2)\n" \ "2:\tlbu\t$1, 0(%2)\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ 
".set\tat\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%1, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); #define LoadWU(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tlwl\t%0, 3(%2)\n" \ "2:\tlwr\t%0, (%2)\n\t" \ "dsll\t%0, %0, 32\n\t" \ "dsrl\t%0, %0, 32\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ "\t.section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%1, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); #define LoadDW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tldl\t%0, 7(%2)\n" \ "2:\tldr\t%0, (%2)\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ "\t.section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%1, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); #define StoreHW(addr, value, res) \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\tsb\t%1, 0(%2)\n\t" \ "srl\t$1,%1, 0x8\n" \ "2:\tsb\t$1, 1(%2)\n\t" \ ".set\tat\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%0, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); #define StoreW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\tswl\t%1, 3(%2)\n" \ "2:\tswr\t%1, (%2)\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%0, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); #define StoreDW(addr, value, res) \ 
__asm__ __volatile__ ( \ "1:\tsdl\t%1, 7(%2)\n" \ "2:\tsdr\t%1, (%2)\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ ".section\t.fixup,\"ax\"\n\t" \ "4:\tli\t%0, %3\n\t" \ "j\t3b\n\t" \ ".previous\n\t" \ ".section\t__ex_table,\"a\"\n\t" \ STR(PTR)"\t1b, 4b\n\t" \ STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); #endif static void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int __user *pc) { union mips_instruction insn; unsigned long value; unsigned int res; unsigned long origpc; unsigned long orig31; void __user *fault_addr = NULL; origpc = (unsigned long)pc; orig31 = regs->regs[31]; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); /* * This load never faults. */ __get_user(insn.word, pc); switch (insn.i_format.opcode) { /* * These are instructions that a compiler doesn't generate. We * can assume therefore that the code is MIPS-aware and * really buggy. Emulating these instructions would break the * semantics anyway. */ case ll_op: case lld_op: case sc_op: case scd_op: /* * For these instructions the only way to create an address * error is an attempted access to kernel/supervisor address * space. */ case ldl_op: case ldr_op: case lwl_op: case lwr_op: case sdl_op: case sdr_op: case swl_op: case swr_op: case lb_op: case lbu_op: case sb_op: goto sigbus; /* * The remaining opcodes are the ones that are really of * interest. 
*/ case lh_op: if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; LoadHW(addr, value, res); if (res) goto fault; compute_return_epc(regs); regs->regs[insn.i_format.rt] = value; break; case lw_op: if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; LoadW(addr, value, res); if (res) goto fault; compute_return_epc(regs); regs->regs[insn.i_format.rt] = value; break; case lhu_op: if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; LoadHWU(addr, value, res); if (res) goto fault; compute_return_epc(regs); regs->regs[insn.i_format.rt] = value; break; case lwu_op: #ifdef CONFIG_64BIT /* * A 32-bit kernel might be running on a 64-bit processor. But * if we're on a 32-bit processor and an i-cache incoherency * or race makes us see a 64-bit instruction here the sdl/sdr * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; LoadWU(addr, value, res); if (res) goto fault; compute_return_epc(regs); regs->regs[insn.i_format.rt] = value; break; #endif /* CONFIG_64BIT */ /* Cannot handle 64-bit instructions in 32-bit kernel */ goto sigill; case ld_op: #ifdef CONFIG_64BIT /* * A 32-bit kernel might be running on a 64-bit processor. But * if we're on a 32-bit processor and an i-cache incoherency * or race makes us see a 64-bit instruction here the sdl/sdr * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. 
*/ if (!access_ok(VERIFY_READ, addr, 8)) goto sigbus; LoadDW(addr, value, res); if (res) goto fault; compute_return_epc(regs); regs->regs[insn.i_format.rt] = value; break; #endif /* CONFIG_64BIT */ /* Cannot handle 64-bit instructions in 32-bit kernel */ goto sigill; case sh_op: if (!access_ok(VERIFY_WRITE, addr, 2)) goto sigbus; compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; StoreHW(addr, value, res); if (res) goto fault; break; case sw_op: if (!access_ok(VERIFY_WRITE, addr, 4)) goto sigbus; compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; StoreW(addr, value, res); if (res) goto fault; break; case sd_op: #ifdef CONFIG_64BIT /* * A 32-bit kernel might be running on a 64-bit processor. But * if we're on a 32-bit processor and an i-cache incoherency * or race makes us see a 64-bit instruction here the sdl/sdr * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ if (!access_ok(VERIFY_WRITE, addr, 8)) goto sigbus; compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; StoreDW(addr, value, res); if (res) goto fault; break; #endif /* CONFIG_64BIT */ /* Cannot handle 64-bit instructions in 32-bit kernel */ goto sigill; case lwc1_op: case ldc1_op: case swc1_op: case sdc1_op: die_if_kernel("Unaligned FP access in kernel code", regs); BUG_ON(!used_math()); BUG_ON(!is_fpu_owner()); lose_fpu(1); /* Save FPU state for the emulator. */ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, &fault_addr); own_fpu(1); /* Restore FPU state. */ /* Signal if something went wrong. */ process_fpemu_return(res, fault_addr); if (res == 0) break; return; /* * COP2 is available to implementor for application specific use. * It's up to applications to register a notifier chain and do * whatever they have to do, including possible sending of signals. 
*/ case lwc2_op: cu2_notifier_call_chain(CU2_LWC2_OP, regs); break; case ldc2_op: cu2_notifier_call_chain(CU2_LDC2_OP, regs); break; case swc2_op: cu2_notifier_call_chain(CU2_SWC2_OP, regs); break; case sdc2_op: cu2_notifier_call_chain(CU2_SDC2_OP, regs); break; default: /* * Pheeee... We encountered an yet unknown instruction or * cache coherence problem. Die sucker, die ... */ goto sigill; } #ifdef CONFIG_DEBUG_FS unaligned_instructions++; #endif return; fault: /* roll back jump/branch */ regs->cp0_epc = origpc; regs->regs[31] = orig31; /* Did we have an exception handler installed? */ if (fixup_exception(regs)) return; die_if_kernel("Unhandled kernel unaligned access", regs); force_sig(SIGSEGV, current); return; sigbus: die_if_kernel("Unhandled kernel unaligned access", regs); force_sig(SIGBUS, current); return; sigill: die_if_kernel ("Unhandled kernel unaligned access or invalid instruction", regs); force_sig(SIGILL, current); } /* Recode table from 16-bit register notation to 32-bit GPR. */ const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; /* Recode table from 16-bit STORE register notation to 32-bit GPR. */ const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr) { unsigned long value; unsigned int res; int i; unsigned int reg = 0, rvar; unsigned long orig31; u16 __user *pc16; u16 halfword; unsigned int word; unsigned long origpc, contpc; union mips_instruction insn; struct mm_decoded_insn mminsn; void __user *fault_addr = NULL; origpc = regs->cp0_epc; orig31 = regs->regs[31]; mminsn.micro_mips_mode = 1; /* * This load never faults. 
*/ pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); __get_user(halfword, pc16); pc16++; contpc = regs->cp0_epc + 2; word = ((unsigned int)halfword << 16); mminsn.pc_inc = 2; if (!mm_insn_16bit(halfword)) { __get_user(halfword, pc16); pc16++; contpc = regs->cp0_epc + 4; mminsn.pc_inc = 4; word |= halfword; } mminsn.insn = word; if (get_user(halfword, pc16)) goto fault; mminsn.next_pc_inc = 2; word = ((unsigned int)halfword << 16); if (!mm_insn_16bit(halfword)) { pc16++; if (get_user(halfword, pc16)) goto fault; mminsn.next_pc_inc = 4; word |= halfword; } mminsn.next_insn = word; insn = (union mips_instruction)(mminsn.insn); if (mm_isBranchInstr(regs, mminsn, &contpc)) insn = (union mips_instruction)(mminsn.next_insn); /* Parse instruction to find what to do */ switch (insn.mm_i_format.opcode) { case mm_pool32a_op: switch (insn.mm_x_format.func) { case mm_lwxs_op: reg = insn.mm_x_format.rd; goto loadW; } goto sigbus; case mm_pool32b_op: switch (insn.mm_m_format.func) { case mm_lwp_func: reg = insn.mm_m_format.rd; if (reg == 31) goto sigbus; if (!access_ok(VERIFY_READ, addr, 8)) goto sigbus; LoadW(addr, value, res); if (res) goto fault; regs->regs[reg] = value; addr += 4; LoadW(addr, value, res); if (res) goto fault; regs->regs[reg + 1] = value; goto success; case mm_swp_func: reg = insn.mm_m_format.rd; if (reg == 31) goto sigbus; if (!access_ok(VERIFY_WRITE, addr, 8)) goto sigbus; value = regs->regs[reg]; StoreW(addr, value, res); if (res) goto fault; addr += 4; value = regs->regs[reg + 1]; StoreW(addr, value, res); if (res) goto fault; goto success; case mm_ldp_func: #ifdef CONFIG_64BIT reg = insn.mm_m_format.rd; if (reg == 31) goto sigbus; if (!access_ok(VERIFY_READ, addr, 16)) goto sigbus; LoadDW(addr, value, res); if (res) goto fault; regs->regs[reg] = value; addr += 8; LoadDW(addr, value, res); if (res) goto fault; regs->regs[reg + 1] = value; goto success; #endif /* CONFIG_64BIT */ goto sigill; case mm_sdp_func: #ifdef CONFIG_64BIT reg = 
insn.mm_m_format.rd; if (reg == 31) goto sigbus; if (!access_ok(VERIFY_WRITE, addr, 16)) goto sigbus; value = regs->regs[reg]; StoreDW(addr, value, res); if (res) goto fault; addr += 8; value = regs->regs[reg + 1]; StoreDW(addr, value, res); if (res) goto fault; goto success; #endif /* CONFIG_64BIT */ goto sigill; case mm_lwm32_func: reg = insn.mm_m_format.rd; rvar = reg & 0xf; if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { if (!access_ok (VERIFY_READ, addr, 4 * (rvar + 1))) goto sigbus; } else { if (!access_ok(VERIFY_READ, addr, 4 * rvar)) goto sigbus; } if (rvar == 9) rvar = 8; for (i = 16; rvar; rvar--, i++) { LoadW(addr, value, res); if (res) goto fault; addr += 4; regs->regs[i] = value; } if ((reg & 0xf) == 9) { LoadW(addr, value, res); if (res) goto fault; addr += 4; regs->regs[30] = value; } if (reg & 0x10) { LoadW(addr, value, res); if (res) goto fault; regs->regs[31] = value; } goto success; case mm_swm32_func: reg = insn.mm_m_format.rd; rvar = reg & 0xf; if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { if (!access_ok (VERIFY_WRITE, addr, 4 * (rvar + 1))) goto sigbus; } else { if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) goto sigbus; } if (rvar == 9) rvar = 8; for (i = 16; rvar; rvar--, i++) { value = regs->regs[i]; StoreW(addr, value, res); if (res) goto fault; addr += 4; } if ((reg & 0xf) == 9) { value = regs->regs[30]; StoreW(addr, value, res); if (res) goto fault; addr += 4; } if (reg & 0x10) { value = regs->regs[31]; StoreW(addr, value, res); if (res) goto fault; } goto success; case mm_ldm_func: #ifdef CONFIG_64BIT reg = insn.mm_m_format.rd; rvar = reg & 0xf; if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { if (!access_ok (VERIFY_READ, addr, 8 * (rvar + 1))) goto sigbus; } else { if (!access_ok(VERIFY_READ, addr, 8 * rvar)) goto sigbus; } if (rvar == 9) rvar = 8; for (i = 16; rvar; rvar--, i++) { LoadDW(addr, value, res); if (res) goto fault; addr += 4; regs->regs[i] = value; } if ((reg & 0xf) == 9) { LoadDW(addr, value, res); if 
(res) goto fault; addr += 8; regs->regs[30] = value; } if (reg & 0x10) { LoadDW(addr, value, res); if (res) goto fault; regs->regs[31] = value; } goto success; #endif /* CONFIG_64BIT */ goto sigill; case mm_sdm_func: #ifdef CONFIG_64BIT reg = insn.mm_m_format.rd; rvar = reg & 0xf; if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { if (!access_ok (VERIFY_WRITE, addr, 8 * (rvar + 1))) goto sigbus; } else { if (!access_ok(VERIFY_WRITE, addr, 8 * rvar)) goto sigbus; } if (rvar == 9) rvar = 8; for (i = 16; rvar; rvar--, i++) { value = regs->regs[i]; StoreDW(addr, value, res); if (res) goto fault; addr += 8; } if ((reg & 0xf) == 9) { value = regs->regs[30]; StoreDW(addr, value, res); if (res) goto fault; addr += 8; } if (reg & 0x10) { value = regs->regs[31]; StoreDW(addr, value, res); if (res) goto fault; } goto success; #endif /* CONFIG_64BIT */ goto sigill; /* LWC2, SWC2, LDC2, SDC2 are not serviced */ } goto sigbus; case mm_pool32c_op: switch (insn.mm_m_format.func) { case mm_lwu_func: reg = insn.mm_m_format.rd; goto loadWU; } /* LL,SC,LLD,SCD are not serviced */ goto sigbus; case mm_pool32f_op: switch (insn.mm_x_format.func) { case mm_lwxc1_func: case mm_swxc1_func: case mm_ldxc1_func: case mm_sdxc1_func: goto fpu_emul; } goto sigbus; case mm_ldc132_op: case mm_sdc132_op: case mm_lwc132_op: case mm_swc132_op: fpu_emul: /* roll back jump/branch */ regs->cp0_epc = origpc; regs->regs[31] = orig31; die_if_kernel("Unaligned FP access in kernel code", regs); BUG_ON(!used_math()); BUG_ON(!is_fpu_owner()); lose_fpu(1); /* save the FPU state for the emulator */ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, &fault_addr); own_fpu(1); /* restore FPU state */ /* If something went wrong, signal */ process_fpemu_return(res, fault_addr); if (res == 0) goto success; return; case mm_lh32_op: reg = insn.mm_i_format.rt; goto loadHW; case mm_lhu32_op: reg = insn.mm_i_format.rt; goto loadHWU; case mm_lw32_op: reg = insn.mm_i_format.rt; goto loadW; case mm_sh32_op: reg 
= insn.mm_i_format.rt; goto storeHW; case mm_sw32_op: reg = insn.mm_i_format.rt; goto storeW; case mm_ld32_op: reg = insn.mm_i_format.rt; goto loadDW; case mm_sd32_op: reg = insn.mm_i_format.rt; goto storeDW; case mm_pool16c_op: switch (insn.mm16_m_format.func) { case mm_lwm16_op: reg = insn.mm16_m_format.rlist; rvar = reg + 1; if (!access_ok(VERIFY_READ, addr, 4 * rvar)) goto sigbus; for (i = 16; rvar; rvar--, i++) { LoadW(addr, value, res); if (res) goto fault; addr += 4; regs->regs[i] = value; } LoadW(addr, value, res); if (res) goto fault; regs->regs[31] = value; goto success; case mm_swm16_op: reg = insn.mm16_m_format.rlist; rvar = reg + 1; if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) goto sigbus; for (i = 16; rvar; rvar--, i++) { value = regs->regs[i]; StoreW(addr, value, res); if (res) goto fault; addr += 4; } value = regs->regs[31]; StoreW(addr, value, res); if (res) goto fault; goto success; } goto sigbus; case mm_lhu16_op: reg = reg16to32[insn.mm16_rb_format.rt]; goto loadHWU; case mm_lw16_op: reg = reg16to32[insn.mm16_rb_format.rt]; goto loadW; case mm_sh16_op: reg = reg16to32st[insn.mm16_rb_format.rt]; goto storeHW; case mm_sw16_op: reg = reg16to32st[insn.mm16_rb_format.rt]; goto storeW; case mm_lwsp16_op: reg = insn.mm16_r5_format.rt; goto loadW; case mm_swsp16_op: reg = insn.mm16_r5_format.rt; goto storeW; case mm_lwgp16_op: reg = reg16to32[insn.mm16_r3_format.rt]; goto loadW; default: goto sigill; } loadHW: if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; LoadHW(addr, value, res); if (res) goto fault; regs->regs[reg] = value; goto success; loadHWU: if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; LoadHWU(addr, value, res); if (res) goto fault; regs->regs[reg] = value; goto success; loadW: if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; LoadW(addr, value, res); if (res) goto fault; regs->regs[reg] = value; goto success; loadWU: #ifdef CONFIG_64BIT /* * A 32-bit kernel might be running on a 64-bit processor. 
But * if we're on a 32-bit processor and an i-cache incoherency * or race makes us see a 64-bit instruction here the sdl/sdr * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; LoadWU(addr, value, res); if (res) goto fault; regs->regs[reg] = value; goto success; #endif /* CONFIG_64BIT */ /* Cannot handle 64-bit instructions in 32-bit kernel */ goto sigill; loadDW: #ifdef CONFIG_64BIT /* * A 32-bit kernel might be running on a 64-bit processor. But * if we're on a 32-bit processor and an i-cache incoherency * or race makes us see a 64-bit instruction here the sdl/sdr * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ if (!access_ok(VERIFY_READ, addr, 8)) goto sigbus; LoadDW(addr, value, res); if (res) goto fault; regs->regs[reg] = value; goto success; #endif /* CONFIG_64BIT */ /* Cannot handle 64-bit instructions in 32-bit kernel */ goto sigill; storeHW: if (!access_ok(VERIFY_WRITE, addr, 2)) goto sigbus; value = regs->regs[reg]; StoreHW(addr, value, res); if (res) goto fault; goto success; storeW: if (!access_ok(VERIFY_WRITE, addr, 4)) goto sigbus; value = regs->regs[reg]; StoreW(addr, value, res); if (res) goto fault; goto success; storeDW: #ifdef CONFIG_64BIT /* * A 32-bit kernel might be running on a 64-bit processor. But * if we're on a 32-bit processor and an i-cache incoherency * or race makes us see a 64-bit instruction here the sdl/sdr * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. 
*/ if (!access_ok(VERIFY_WRITE, addr, 8)) goto sigbus; value = regs->regs[reg]; StoreDW(addr, value, res); if (res) goto fault; goto success; #endif /* CONFIG_64BIT */ /* Cannot handle 64-bit instructions in 32-bit kernel */ goto sigill; success: regs->cp0_epc = contpc; /* advance or branch */ #ifdef CONFIG_DEBUG_FS unaligned_instructions++; #endif return; fault: /* roll back jump/branch */ regs->cp0_epc = origpc; regs->regs[31] = orig31; /* Did we have an exception handler installed? */ if (fixup_exception(regs)) return; die_if_kernel("Unhandled kernel unaligned access", regs); force_sig(SIGSEGV, current); return; sigbus: die_if_kernel("Unhandled kernel unaligned access", regs); force_sig(SIGBUS, current); return; sigill: die_if_kernel ("Unhandled kernel unaligned access or invalid instruction", regs); force_sig(SIGILL, current); } static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) { unsigned long value; unsigned int res; int reg; unsigned long orig31; u16 __user *pc16; unsigned long origpc; union mips16e_instruction mips16inst, oldinst; origpc = regs->cp0_epc; orig31 = regs->regs[31]; pc16 = (unsigned short __user *)msk_isa16_mode(origpc); /* * This load never faults. 
*/ __get_user(mips16inst.full, pc16); oldinst = mips16inst; /* skip EXTEND instruction */ if (mips16inst.ri.opcode == MIPS16e_extend_op) { pc16++; __get_user(mips16inst.full, pc16); } else if (delay_slot(regs)) { /* skip jump instructions */ /* JAL/JALX are 32 bits but have OPCODE in first short int */ if (mips16inst.ri.opcode == MIPS16e_jal_op) pc16++; pc16++; if (get_user(mips16inst.full, pc16)) goto sigbus; } switch (mips16inst.ri.opcode) { case MIPS16e_i64_op: /* I64 or RI64 instruction */ switch (mips16inst.i64.func) { /* I64/RI64 func field check */ case MIPS16e_ldpc_func: case MIPS16e_ldsp_func: reg = reg16to32[mips16inst.ri64.ry]; goto loadDW; case MIPS16e_sdsp_func: reg = reg16to32[mips16inst.ri64.ry]; goto writeDW; case MIPS16e_sdrasp_func: reg = 29; /* GPRSP */ goto writeDW; } goto sigbus; case MIPS16e_swsp_op: case MIPS16e_lwpc_op: case MIPS16e_lwsp_op: reg = reg16to32[mips16inst.ri.rx]; break; case MIPS16e_i8_op: if (mips16inst.i8.func != MIPS16e_swrasp_func) goto sigbus; reg = 29; /* GPRSP */ break; default: reg = reg16to32[mips16inst.rri.ry]; break; } switch (mips16inst.ri.opcode) { case MIPS16e_lb_op: case MIPS16e_lbu_op: case MIPS16e_sb_op: goto sigbus; case MIPS16e_lh_op: if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; LoadHW(addr, value, res); if (res) goto fault; MIPS16e_compute_return_epc(regs, &oldinst); regs->regs[reg] = value; break; case MIPS16e_lhu_op: if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; LoadHWU(addr, value, res); if (res) goto fault; MIPS16e_compute_return_epc(regs, &oldinst); regs->regs[reg] = value; break; case MIPS16e_lw_op: case MIPS16e_lwpc_op: case MIPS16e_lwsp_op: if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; LoadW(addr, value, res); if (res) goto fault; MIPS16e_compute_return_epc(regs, &oldinst); regs->regs[reg] = value; break; case MIPS16e_lwu_op: #ifdef CONFIG_64BIT /* * A 32-bit kernel might be running on a 64-bit processor. 
But * if we're on a 32-bit processor and an i-cache incoherency * or race makes us see a 64-bit instruction here the sdl/sdr * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; LoadWU(addr, value, res); if (res) goto fault; MIPS16e_compute_return_epc(regs, &oldinst); regs->regs[reg] = value; break; #endif /* CONFIG_64BIT */ /* Cannot handle 64-bit instructions in 32-bit kernel */ goto sigill; case MIPS16e_ld_op: loadDW: #ifdef CONFIG_64BIT /* * A 32-bit kernel might be running on a 64-bit processor. But * if we're on a 32-bit processor and an i-cache incoherency * or race makes us see a 64-bit instruction here the sdl/sdr * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ if (!access_ok(VERIFY_READ, addr, 8)) goto sigbus; LoadDW(addr, value, res); if (res) goto fault; MIPS16e_compute_return_epc(regs, &oldinst); regs->regs[reg] = value; break; #endif /* CONFIG_64BIT */ /* Cannot handle 64-bit instructions in 32-bit kernel */ goto sigill; case MIPS16e_sh_op: if (!access_ok(VERIFY_WRITE, addr, 2)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); value = regs->regs[reg]; StoreHW(addr, value, res); if (res) goto fault; break; case MIPS16e_sw_op: case MIPS16e_swsp_op: case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ if (!access_ok(VERIFY_WRITE, addr, 4)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); value = regs->regs[reg]; StoreW(addr, value, res); if (res) goto fault; break; case MIPS16e_sd_op: writeDW: #ifdef CONFIG_64BIT /* * A 32-bit kernel might be running on a 64-bit processor. But * if we're on a 32-bit processor and an i-cache incoherency * or race makes us see a 64-bit instruction here the sdl/sdr * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. 
*/ if (!access_ok(VERIFY_WRITE, addr, 8)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); value = regs->regs[reg]; StoreDW(addr, value, res); if (res) goto fault; break; #endif /* CONFIG_64BIT */ /* Cannot handle 64-bit instructions in 32-bit kernel */ goto sigill; default: /* * Pheeee... We encountered an yet unknown instruction or * cache coherence problem. Die sucker, die ... */ goto sigill; } #ifdef CONFIG_DEBUG_FS unaligned_instructions++; #endif return; fault: /* roll back jump/branch */ regs->cp0_epc = origpc; regs->regs[31] = orig31; /* Did we have an exception handler installed? */ if (fixup_exception(regs)) return; die_if_kernel("Unhandled kernel unaligned access", regs); force_sig(SIGSEGV, current); return; sigbus: die_if_kernel("Unhandled kernel unaligned access", regs); force_sig(SIGBUS, current); return; sigill: die_if_kernel ("Unhandled kernel unaligned access or invalid instruction", regs); force_sig(SIGILL, current); } asmlinkage void do_ade(struct pt_regs *regs) { unsigned int __user *pc; mm_segment_t seg; perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->cp0_badvaddr); /* * Did we catch a fault trying to load an instruction? */ if (regs->cp0_badvaddr == regs->cp0_epc) goto sigbus; if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) goto sigbus; if (unaligned_action == UNALIGNED_ACTION_SIGNAL) goto sigbus; /* * Do branch emulation only if we didn't forward the exception. * This is all so but ugly ... */ /* * Are we running in microMIPS mode? */ if (get_isa16_mode(regs->cp0_epc)) { /* * Did we catch a fault trying to load an instruction in * 16-bit mode? 
*/ if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc)) goto sigbus; if (unaligned_action == UNALIGNED_ACTION_SHOW) show_registers(regs); if (cpu_has_mmips) { seg = get_fs(); if (!user_mode(regs)) set_fs(KERNEL_DS); emulate_load_store_microMIPS(regs, (void __user *)regs->cp0_badvaddr); set_fs(seg); return; } if (cpu_has_mips16) { seg = get_fs(); if (!user_mode(regs)) set_fs(KERNEL_DS); emulate_load_store_MIPS16e(regs, (void __user *)regs->cp0_badvaddr); set_fs(seg); return; } goto sigbus; } if (unaligned_action == UNALIGNED_ACTION_SHOW) show_registers(regs); pc = (unsigned int __user *)exception_epc(regs); seg = get_fs(); if (!user_mode(regs)) set_fs(KERNEL_DS); emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); set_fs(seg); return; sigbus: die_if_kernel("Kernel unaligned instruction access", regs); force_sig(SIGBUS, current); /* * XXX On return from the signal handler we should advance the epc */ } #ifdef CONFIG_DEBUG_FS extern struct dentry *mips_debugfs_dir; static int __init debugfs_unaligned(void) { struct dentry *d; if (!mips_debugfs_dir) return -ENODEV; d = debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir, &unaligned_instructions); if (!d) return -ENOMEM; d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, mips_debugfs_dir, &unaligned_action); if (!d) return -ENOMEM; return 0; } __initcall(debugfs_unaligned); #endif
gpl-2.0
chrbayer/linux-sunxi
arch/powerpc/platforms/85xx/sbc8548.c
1626
3323
/* * Wind River SBC8548 setup and early boot code. * * Copyright 2007 Wind River Systems Inc. * * By Paul Gortmaker (see MAINTAINERS for contact information) * * Based largely on the MPC8548CDS support - Copyright 2005 Freescale Inc. * * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/major.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/initrd.h> #include <linux/interrupt.h> #include <linux/fsl_devices.h> #include <linux/of_platform.h> #include <asm/pgtable.h> #include <asm/page.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/pci-bridge.h> #include <asm/irq.h> #include <mm/mmu_decl.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" static int sbc_rev; static void __init sbc8548_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } /* Extract the HW Rev from the EPLD on the board */ static int __init sbc8548_hw_rev(void) { struct device_node *np; struct resource res; unsigned int *rev; int board_rev = 0; np = of_find_compatible_node(NULL, NULL, "hw-rev"); if (np == NULL) { printk("No HW-REV found in DTB.\n"); return -ENODEV; } of_address_to_resource(np, 0, &res); of_node_put(np); rev = ioremap(res.start,sizeof(unsigned int)); board_rev = (*rev) >> 28; iounmap(rev); return board_rev; } /* * Setup the architecture */ static void __init sbc8548_setup_arch(void) { if 
(ppc_md.progress) ppc_md.progress("sbc8548_setup_arch()", 0); fsl_pci_assign_primary(); sbc_rev = sbc8548_hw_rev(); } static void sbc8548_show_cpuinfo(struct seq_file *m) { uint pvid, svid, phid1; pvid = mfspr(SPRN_PVR); svid = mfspr(SPRN_SVR); seq_printf(m, "Vendor\t\t: Wind River\n"); seq_printf(m, "Machine\t\t: SBC8548 v%d\n", sbc_rev); seq_printf(m, "PVR\t\t: 0x%x\n", pvid); seq_printf(m, "SVR\t\t: 0x%x\n", svid); /* Display cpu Pll setting */ phid1 = mfspr(SPRN_HID1); seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } machine_arch_initcall(sbc8548, mpc85xx_common_publish_devices); /* * Called very early, device-tree isn't unflattened */ static int __init sbc8548_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "SBC8548"); } define_machine(sbc8548) { .name = "SBC8548", .probe = sbc8548_probe, .setup_arch = sbc8548_setup_arch, .init_IRQ = sbc8548_pic_init, .show_cpuinfo = sbc8548_show_cpuinfo, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
hiikezoe/android_kernel_fujitsu_f12nad
ipc/ipc_sysctl.c
3674
5210
/* * Copyright (C) 2007 * * Author: Eric Biederman <ebiederm@xmision.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/module.h> #include <linux/ipc.h> #include <linux/nsproxy.h> #include <linux/sysctl.h> #include <linux/uaccess.h> #include <linux/ipc_namespace.h> #include <linux/msg.h> #include "util.h" static void *get_ipc(ctl_table *table) { char *which = table->data; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns; return which; } #ifdef CONFIG_PROC_SYSCTL static int proc_ipc_dointvec(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); return proc_dointvec(&ipc_table, write, buffer, lenp, ppos); } static int proc_ipc_callback_dointvec(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; size_t lenp_bef = *lenp; int rc; memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); rc = proc_dointvec(&ipc_table, write, buffer, lenp, ppos); if (write && !rc && lenp_bef == *lenp) /* * Tunable has successfully been changed by hand. Disable its * automatic adjustment. This simply requires unregistering * the notifiers that trigger recalculation. */ unregister_ipcns_notifier(current->nsproxy->ipc_ns); return rc; } static int proc_ipc_doulongvec_minmax(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); return proc_doulongvec_minmax(&ipc_table, write, buffer, lenp, ppos); } /* * Routine that is called when the file "auto_msgmni" has successfully been * written. 
* Two values are allowed: * 0: unregister msgmni's callback routine from the ipc namespace notifier * chain. This means that msgmni won't be recomputed anymore upon memory * add/remove or ipc namespace creation/removal. * 1: register back the callback routine. */ static void ipc_auto_callback(int val) { if (!val) unregister_ipcns_notifier(current->nsproxy->ipc_ns); else { /* * Re-enable automatic recomputing only if not already * enabled. */ recompute_msgmni(current->nsproxy->ipc_ns); cond_register_ipcns_notifier(current->nsproxy->ipc_ns); } } static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; size_t lenp_bef = *lenp; int oldval; int rc; memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); oldval = *((int *)(ipc_table.data)); rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); if (write && !rc && lenp_bef == *lenp) { int newval = *((int *)(ipc_table.data)); /* * The file "auto_msgmni" has correctly been set. * React by (un)registering the corresponding tunable, if the * value has changed. 
*/ if (newval != oldval) ipc_auto_callback(newval); } return rc; } #else #define proc_ipc_doulongvec_minmax NULL #define proc_ipc_dointvec NULL #define proc_ipc_callback_dointvec NULL #define proc_ipcauto_dointvec_minmax NULL #endif static int zero; static int one = 1; static struct ctl_table ipc_kern_table[] = { { .procname = "shmmax", .data = &init_ipc_ns.shm_ctlmax, .maxlen = sizeof (init_ipc_ns.shm_ctlmax), .mode = 0644, .proc_handler = proc_ipc_doulongvec_minmax, }, { .procname = "shmall", .data = &init_ipc_ns.shm_ctlall, .maxlen = sizeof (init_ipc_ns.shm_ctlall), .mode = 0644, .proc_handler = proc_ipc_doulongvec_minmax, }, { .procname = "shmmni", .data = &init_ipc_ns.shm_ctlmni, .maxlen = sizeof (init_ipc_ns.shm_ctlmni), .mode = 0644, .proc_handler = proc_ipc_dointvec, }, { .procname = "msgmax", .data = &init_ipc_ns.msg_ctlmax, .maxlen = sizeof (init_ipc_ns.msg_ctlmax), .mode = 0644, .proc_handler = proc_ipc_dointvec, }, { .procname = "msgmni", .data = &init_ipc_ns.msg_ctlmni, .maxlen = sizeof (init_ipc_ns.msg_ctlmni), .mode = 0644, .proc_handler = proc_ipc_callback_dointvec, }, { .procname = "msgmnb", .data = &init_ipc_ns.msg_ctlmnb, .maxlen = sizeof (init_ipc_ns.msg_ctlmnb), .mode = 0644, .proc_handler = proc_ipc_dointvec, }, { .procname = "sem", .data = &init_ipc_ns.sem_ctls, .maxlen = 4*sizeof (int), .mode = 0644, .proc_handler = proc_ipc_dointvec, }, { .procname = "auto_msgmni", .data = &init_ipc_ns.auto_msgmni, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_ipcauto_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, {} }; static struct ctl_table ipc_root_table[] = { { .procname = "kernel", .mode = 0555, .child = ipc_kern_table, }, {} }; static int __init ipc_sysctl_init(void) { register_sysctl_table(ipc_root_table); return 0; } __initcall(ipc_sysctl_init);
gpl-2.0
MoKee/android_kernel_lge_star
arch/powerpc/kernel/rtas_pci.c
3930
7202
/* * Copyright (C) 2001 Dave Engebretsen, IBM Corporation * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM * * RTAS specific routines for PCI. * * Based on code from pci.c, chrp_pci.c and pSeries_pci.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/threads.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/iommu.h> #include <asm/rtas.h> #include <asm/mpic.h> #include <asm/ppc-pci.h> #include <asm/eeh.h> /* RTAS tokens */ static int read_pci_config; static int write_pci_config; static int ibm_read_pci_config; static int ibm_write_pci_config; static inline int config_access_valid(struct pci_dn *dn, int where) { if (where < 256) return 1; if (where < 4096 && dn->pci_ext_config_space) return 1; return 0; } int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val) { int returnval = -1; unsigned long buid, addr; int ret; if (!pdn) return PCIBIOS_DEVICE_NOT_FOUND; if (!config_access_valid(pdn, where)) return PCIBIOS_BAD_REGISTER_NUMBER; addr = rtas_config_addr(pdn->busno, pdn->devfn, where); buid = pdn->phb->buid; if (buid) { ret = 
rtas_call(ibm_read_pci_config, 4, 2, &returnval, addr, BUID_HI(buid), BUID_LO(buid), size); } else { ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size); } *val = returnval; if (ret) return PCIBIOS_DEVICE_NOT_FOUND; if (returnval == EEH_IO_ERROR_VALUE(size) && eeh_dn_check_failure (pdn->node, NULL)) return PCIBIOS_DEVICE_NOT_FOUND; return PCIBIOS_SUCCESSFUL; } static int rtas_pci_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct device_node *busdn, *dn; busdn = pci_bus_to_OF_node(bus); /* Search only direct children of the bus */ for (dn = busdn->child; dn; dn = dn->sibling) { struct pci_dn *pdn = PCI_DN(dn); if (pdn && pdn->devfn == devfn && of_device_is_available(dn)) return rtas_read_config(pdn, where, size, val); } return PCIBIOS_DEVICE_NOT_FOUND; } int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val) { unsigned long buid, addr; int ret; if (!pdn) return PCIBIOS_DEVICE_NOT_FOUND; if (!config_access_valid(pdn, where)) return PCIBIOS_BAD_REGISTER_NUMBER; addr = rtas_config_addr(pdn->busno, pdn->devfn, where); buid = pdn->phb->buid; if (buid) { ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, BUID_HI(buid), BUID_LO(buid), size, (ulong) val); } else { ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val); } if (ret) return PCIBIOS_DEVICE_NOT_FOUND; return PCIBIOS_SUCCESSFUL; } static int rtas_pci_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct device_node *busdn, *dn; busdn = pci_bus_to_OF_node(bus); /* Search only direct children of the bus */ for (dn = busdn->child; dn; dn = dn->sibling) { struct pci_dn *pdn = PCI_DN(dn); if (pdn && pdn->devfn == devfn && of_device_is_available(dn)) return rtas_write_config(pdn, where, size, val); } return PCIBIOS_DEVICE_NOT_FOUND; } static struct pci_ops rtas_pci_ops = { .read = rtas_pci_read_config, .write = rtas_pci_write_config, }; static int is_python(struct device_node *dev) { const 
char *model = of_get_property(dev, "model", NULL); if (model && strstr(model, "Python")) return 1; return 0; } static void python_countermeasures(struct device_node *dev) { struct resource registers; void __iomem *chip_regs; volatile u32 val; if (of_address_to_resource(dev, 0, &registers)) { printk(KERN_ERR "Can't get address for Python workarounds !\n"); return; } /* Python's register file is 1 MB in size. */ chip_regs = ioremap(registers.start & ~(0xfffffUL), 0x100000); /* * Firmware doesn't always clear this bit which is critical * for good performance - Anton */ #define PRG_CL_RESET_VALID 0x00010000 val = in_be32(chip_regs + 0xf6030); if (val & PRG_CL_RESET_VALID) { printk(KERN_INFO "Python workaround: "); val &= ~PRG_CL_RESET_VALID; out_be32(chip_regs + 0xf6030, val); /* * We must read it back for changes to * take effect */ val = in_be32(chip_regs + 0xf6030); printk("reg0: %x\n", val); } iounmap(chip_regs); } void __init init_pci_config_tokens (void) { read_pci_config = rtas_token("read-pci-config"); write_pci_config = rtas_token("write-pci-config"); ibm_read_pci_config = rtas_token("ibm,read-pci-config"); ibm_write_pci_config = rtas_token("ibm,write-pci-config"); } unsigned long __devinit get_phb_buid (struct device_node *phb) { struct resource r; if (ibm_read_pci_config == -1) return 0; if (of_address_to_resource(phb, 0, &r)) return 0; return r.start; } static int phb_set_bus_ranges(struct device_node *dev, struct pci_controller *phb) { const int *bus_range; unsigned int len; bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { return 1; } phb->first_busno = bus_range[0]; phb->last_busno = bus_range[1]; return 0; } int __devinit rtas_setup_phb(struct pci_controller *phb) { struct device_node *dev = phb->dn; if (is_python(dev)) python_countermeasures(dev); if (phb_set_bus_ranges(dev, phb)) return 1; phb->ops = &rtas_pci_ops; phb->buid = get_phb_buid(dev); return 0; } void __init find_and_init_phbs(void) { 
struct device_node *node; struct pci_controller *phb; struct device_node *root = of_find_node_by_path("/"); for_each_child_of_node(root, node) { if (node->type == NULL || (strcmp(node->type, "pci") != 0 && strcmp(node->type, "pciex") != 0)) continue; phb = pcibios_alloc_controller(node); if (!phb) continue; rtas_setup_phb(phb); pci_process_bridge_OF_ranges(phb, node, 0); isa_bridge_find_early(phb); } of_node_put(root); pci_devs_phb_init(); /* * pci_probe_only and pci_assign_all_buses can be set via properties * in chosen. */ if (of_chosen) { const int *prop; prop = of_get_property(of_chosen, "linux,pci-probe-only", NULL); if (prop) pci_probe_only = *prop; #ifdef CONFIG_PPC32 /* Will be made generic soon */ prop = of_get_property(of_chosen, "linux,pci-assign-all-buses", NULL); if (prop && *prop) ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS; #endif /* CONFIG_PPC32 */ } }
gpl-2.0
CarbonTEAM/android_kernel_htc_msm8974
fs/efs/super.c
4698
8610
/* * super.c * * Copyright (c) 1999 Al Smith * * Portions derived from work (c) 1995,1996 Christian Vogelgsang. */ #include <linux/init.h> #include <linux/module.h> #include <linux/exportfs.h> #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/vfs.h> #include "efs.h" #include <linux/efs_vh.h> #include <linux/efs_fs_sb.h> static int efs_statfs(struct dentry *dentry, struct kstatfs *buf); static int efs_fill_super(struct super_block *s, void *d, int silent); static struct dentry *efs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, efs_fill_super); } static struct file_system_type efs_fs_type = { .owner = THIS_MODULE, .name = "efs", .mount = efs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static struct pt_types sgi_pt_types[] = { {0x00, "SGI vh"}, {0x01, "SGI trkrepl"}, {0x02, "SGI secrepl"}, {0x03, "SGI raw"}, {0x04, "SGI bsd"}, {SGI_SYSV, "SGI sysv"}, {0x06, "SGI vol"}, {SGI_EFS, "SGI efs"}, {0x08, "SGI lv"}, {0x09, "SGI rlv"}, {0x0A, "SGI xfs"}, {0x0B, "SGI xfslog"}, {0x0C, "SGI xlv"}, {0x82, "Linux swap"}, {0x83, "Linux native"}, {0, NULL} }; static struct kmem_cache * efs_inode_cachep; static struct inode *efs_alloc_inode(struct super_block *sb) { struct efs_inode_info *ei; ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void efs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(efs_inode_cachep, INODE_INFO(inode)); } static void efs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, efs_i_callback); } static void init_once(void *foo) { struct efs_inode_info *ei = (struct efs_inode_info *) foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { efs_inode_cachep = kmem_cache_create("efs_inode_cache", sizeof(struct efs_inode_info), 0, 
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, init_once); if (efs_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { kmem_cache_destroy(efs_inode_cachep); } static void efs_put_super(struct super_block *s) { kfree(s->s_fs_info); s->s_fs_info = NULL; } static int efs_remount(struct super_block *sb, int *flags, char *data) { *flags |= MS_RDONLY; return 0; } static const struct super_operations efs_superblock_operations = { .alloc_inode = efs_alloc_inode, .destroy_inode = efs_destroy_inode, .put_super = efs_put_super, .statfs = efs_statfs, .remount_fs = efs_remount, }; static const struct export_operations efs_export_ops = { .fh_to_dentry = efs_fh_to_dentry, .fh_to_parent = efs_fh_to_parent, .get_parent = efs_get_parent, }; static int __init init_efs_fs(void) { int err; printk("EFS: "EFS_VERSION" - http://aeschi.ch.eu.org/efs/\n"); err = init_inodecache(); if (err) goto out1; err = register_filesystem(&efs_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_efs_fs(void) { unregister_filesystem(&efs_fs_type); destroy_inodecache(); } module_init(init_efs_fs) module_exit(exit_efs_fs) static efs_block_t efs_validate_vh(struct volume_header *vh) { int i; __be32 cs, *ui; int csum; efs_block_t sblock = 0; /* shuts up gcc */ struct pt_types *pt_entry; int pt_type, slice = -1; if (be32_to_cpu(vh->vh_magic) != VHMAGIC) { /* * assume that we're dealing with a partition and allow * read_super() to try and detect a valid superblock * on the next block. 
*/ return 0; } ui = ((__be32 *) (vh + 1)) - 1; for(csum = 0; ui >= ((__be32 *) vh);) { cs = *ui--; csum += be32_to_cpu(cs); } if (csum) { printk(KERN_INFO "EFS: SGI disklabel: checksum bad, label corrupted\n"); return 0; } #ifdef DEBUG printk(KERN_DEBUG "EFS: bf: \"%16s\"\n", vh->vh_bootfile); for(i = 0; i < NVDIR; i++) { int j; char name[VDNAMESIZE+1]; for(j = 0; j < VDNAMESIZE; j++) { name[j] = vh->vh_vd[i].vd_name[j]; } name[j] = (char) 0; if (name[0]) { printk(KERN_DEBUG "EFS: vh: %8s block: 0x%08x size: 0x%08x\n", name, (int) be32_to_cpu(vh->vh_vd[i].vd_lbn), (int) be32_to_cpu(vh->vh_vd[i].vd_nbytes)); } } #endif for(i = 0; i < NPARTAB; i++) { pt_type = (int) be32_to_cpu(vh->vh_pt[i].pt_type); for(pt_entry = sgi_pt_types; pt_entry->pt_name; pt_entry++) { if (pt_type == pt_entry->pt_type) break; } #ifdef DEBUG if (be32_to_cpu(vh->vh_pt[i].pt_nblks)) { printk(KERN_DEBUG "EFS: pt %2d: start: %08d size: %08d type: 0x%02x (%s)\n", i, (int) be32_to_cpu(vh->vh_pt[i].pt_firstlbn), (int) be32_to_cpu(vh->vh_pt[i].pt_nblks), pt_type, (pt_entry->pt_name) ? pt_entry->pt_name : "unknown"); } #endif if (IS_EFS(pt_type)) { sblock = be32_to_cpu(vh->vh_pt[i].pt_firstlbn); slice = i; } } if (slice == -1) { printk(KERN_NOTICE "EFS: partition table contained no EFS partitions\n"); #ifdef DEBUG } else { printk(KERN_INFO "EFS: using slice %d (type %s, offset 0x%x)\n", slice, (pt_entry->pt_name) ? 
pt_entry->pt_name : "unknown", sblock); #endif } return sblock; } static int efs_validate_super(struct efs_sb_info *sb, struct efs_super *super) { if (!IS_EFS_MAGIC(be32_to_cpu(super->fs_magic))) return -1; sb->fs_magic = be32_to_cpu(super->fs_magic); sb->total_blocks = be32_to_cpu(super->fs_size); sb->first_block = be32_to_cpu(super->fs_firstcg); sb->group_size = be32_to_cpu(super->fs_cgfsize); sb->data_free = be32_to_cpu(super->fs_tfree); sb->inode_free = be32_to_cpu(super->fs_tinode); sb->inode_blocks = be16_to_cpu(super->fs_cgisize); sb->total_groups = be16_to_cpu(super->fs_ncg); return 0; } static int efs_fill_super(struct super_block *s, void *d, int silent) { struct efs_sb_info *sb; struct buffer_head *bh; struct inode *root; int ret = -EINVAL; sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL); if (!sb) return -ENOMEM; s->s_fs_info = sb; s->s_magic = EFS_SUPER_MAGIC; if (!sb_set_blocksize(s, EFS_BLOCKSIZE)) { printk(KERN_ERR "EFS: device does not support %d byte blocks\n", EFS_BLOCKSIZE); goto out_no_fs_ul; } /* read the vh (volume header) block */ bh = sb_bread(s, 0); if (!bh) { printk(KERN_ERR "EFS: cannot read volume header\n"); goto out_no_fs_ul; } /* * if this returns zero then we didn't find any partition table. * this isn't (yet) an error - just assume for the moment that * the device is valid and go on to search for a superblock. 
*/ sb->fs_start = efs_validate_vh((struct volume_header *) bh->b_data); brelse(bh); if (sb->fs_start == -1) { goto out_no_fs_ul; } bh = sb_bread(s, sb->fs_start + EFS_SUPER); if (!bh) { printk(KERN_ERR "EFS: cannot read superblock\n"); goto out_no_fs_ul; } if (efs_validate_super(sb, (struct efs_super *) bh->b_data)) { #ifdef DEBUG printk(KERN_WARNING "EFS: invalid superblock at block %u\n", sb->fs_start + EFS_SUPER); #endif brelse(bh); goto out_no_fs_ul; } brelse(bh); if (!(s->s_flags & MS_RDONLY)) { #ifdef DEBUG printk(KERN_INFO "EFS: forcing read-only mode\n"); #endif s->s_flags |= MS_RDONLY; } s->s_op = &efs_superblock_operations; s->s_export_op = &efs_export_ops; root = efs_iget(s, EFS_ROOTINODE); if (IS_ERR(root)) { printk(KERN_ERR "EFS: get root inode failed\n"); ret = PTR_ERR(root); goto out_no_fs; } s->s_root = d_make_root(root); if (!(s->s_root)) { printk(KERN_ERR "EFS: get root dentry failed\n"); ret = -ENOMEM; goto out_no_fs; } return 0; out_no_fs_ul: out_no_fs: s->s_fs_info = NULL; kfree(sb); return ret; } static int efs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct efs_sb_info *sbi = SUPER_INFO(sb); u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = EFS_SUPER_MAGIC; /* efs magic number */ buf->f_bsize = EFS_BLOCKSIZE; /* blocksize */ buf->f_blocks = sbi->total_groups * /* total data blocks */ (sbi->group_size - sbi->inode_blocks); buf->f_bfree = sbi->data_free; /* free data blocks */ buf->f_bavail = sbi->data_free; /* free blocks for non-root */ buf->f_files = sbi->total_groups * /* total inodes */ sbi->inode_blocks * (EFS_BLOCKSIZE / sizeof(struct efs_dinode)); buf->f_ffree = sbi->inode_free; /* free inodes */ buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); buf->f_namelen = EFS_MAXNAMELEN; /* max filename length */ return 0; }
gpl-2.0
wan5xp/android_kernel_xiaomi_armani
drivers/mfd/s5m-core.c
4698
4789
/* * s5m87xx.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd * http://www.samsung.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> #include <linux/mutex.h> #include <linux/mfd/core.h> #include <linux/mfd/s5m87xx/s5m-core.h> #include <linux/mfd/s5m87xx/s5m-pmic.h> #include <linux/mfd/s5m87xx/s5m-rtc.h> #include <linux/regmap.h> static struct mfd_cell s5m8751_devs[] = { { .name = "s5m8751-pmic", }, { .name = "s5m-charger", }, { .name = "s5m8751-codec", }, }; static struct mfd_cell s5m8763_devs[] = { { .name = "s5m8763-pmic", }, { .name = "s5m-rtc", }, { .name = "s5m-charger", }, }; static struct mfd_cell s5m8767_devs[] = { { .name = "s5m8767-pmic", }, { .name = "s5m-rtc", }, }; int s5m_reg_read(struct s5m87xx_dev *s5m87xx, u8 reg, void *dest) { return regmap_read(s5m87xx->regmap, reg, dest); } EXPORT_SYMBOL_GPL(s5m_reg_read); int s5m_bulk_read(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf) { return regmap_bulk_read(s5m87xx->regmap, reg, buf, count); } EXPORT_SYMBOL_GPL(s5m_bulk_read); int s5m_reg_write(struct s5m87xx_dev *s5m87xx, u8 reg, u8 value) { return regmap_write(s5m87xx->regmap, reg, value); } EXPORT_SYMBOL_GPL(s5m_reg_write); int s5m_bulk_write(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf) { return regmap_raw_write(s5m87xx->regmap, reg, buf, count); } EXPORT_SYMBOL_GPL(s5m_bulk_write); int s5m_reg_update(struct s5m87xx_dev *s5m87xx, u8 reg, u8 val, u8 mask) { return regmap_update_bits(s5m87xx->regmap, reg, mask, val); } EXPORT_SYMBOL_GPL(s5m_reg_update); static struct regmap_config s5m_regmap_config = { .reg_bits = 8, 
.val_bits = 8, }; static int s5m87xx_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct s5m_platform_data *pdata = i2c->dev.platform_data; struct s5m87xx_dev *s5m87xx; int ret; s5m87xx = devm_kzalloc(&i2c->dev, sizeof(struct s5m87xx_dev), GFP_KERNEL); if (s5m87xx == NULL) return -ENOMEM; i2c_set_clientdata(i2c, s5m87xx); s5m87xx->dev = &i2c->dev; s5m87xx->i2c = i2c; s5m87xx->irq = i2c->irq; s5m87xx->type = id->driver_data; if (pdata) { s5m87xx->device_type = pdata->device_type; s5m87xx->ono = pdata->ono; s5m87xx->irq_base = pdata->irq_base; s5m87xx->wakeup = pdata->wakeup; } s5m87xx->regmap = regmap_init_i2c(i2c, &s5m_regmap_config); if (IS_ERR(s5m87xx->regmap)) { ret = PTR_ERR(s5m87xx->regmap); dev_err(&i2c->dev, "Failed to allocate register map: %d\n", ret); goto err; } s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); i2c_set_clientdata(s5m87xx->rtc, s5m87xx); if (pdata && pdata->cfg_pmic_irq) pdata->cfg_pmic_irq(); s5m_irq_init(s5m87xx); pm_runtime_set_active(s5m87xx->dev); switch (s5m87xx->device_type) { case S5M8751X: ret = mfd_add_devices(s5m87xx->dev, -1, s5m8751_devs, ARRAY_SIZE(s5m8751_devs), NULL, 0); break; case S5M8763X: ret = mfd_add_devices(s5m87xx->dev, -1, s5m8763_devs, ARRAY_SIZE(s5m8763_devs), NULL, 0); break; case S5M8767X: ret = mfd_add_devices(s5m87xx->dev, -1, s5m8767_devs, ARRAY_SIZE(s5m8767_devs), NULL, 0); break; default: /* If this happens the probe function is problem */ BUG(); } if (ret < 0) goto err; return ret; err: mfd_remove_devices(s5m87xx->dev); s5m_irq_exit(s5m87xx); i2c_unregister_device(s5m87xx->rtc); regmap_exit(s5m87xx->regmap); return ret; } static int s5m87xx_i2c_remove(struct i2c_client *i2c) { struct s5m87xx_dev *s5m87xx = i2c_get_clientdata(i2c); mfd_remove_devices(s5m87xx->dev); s5m_irq_exit(s5m87xx); i2c_unregister_device(s5m87xx->rtc); regmap_exit(s5m87xx->regmap); return 0; } static const struct i2c_device_id s5m87xx_i2c_id[] = { { "s5m87xx", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, 
s5m87xx_i2c_id); static struct i2c_driver s5m87xx_i2c_driver = { .driver = { .name = "s5m87xx", .owner = THIS_MODULE, }, .probe = s5m87xx_i2c_probe, .remove = s5m87xx_i2c_remove, .id_table = s5m87xx_i2c_id, }; static int __init s5m87xx_i2c_init(void) { return i2c_add_driver(&s5m87xx_i2c_driver); } subsys_initcall(s5m87xx_i2c_init); static void __exit s5m87xx_i2c_exit(void) { i2c_del_driver(&s5m87xx_i2c_driver); } module_exit(s5m87xx_i2c_exit); MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); MODULE_DESCRIPTION("Core support for the S5M MFD"); MODULE_LICENSE("GPL");
gpl-2.0
ahmedammar/linux-xlnx
drivers/video/hecubafb.c
8026
7692
/* * linux/drivers/video/hecubafb.c -- FB driver for Hecuba/Apollo controller * * Copyright (C) 2006, Jaya Kumar * This work was sponsored by CIS(M) Sdn Bhd * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven. * This work was possible because of apollo display code from E-Ink's website * http://support.eink.com/community * All information used to write this code is from public material made * available by E-Ink on its support site. Some commands such as 0xA4 * were found by looping through cmd=0x00 thru 0xFF and supplying random * values. There are other commands that the display is capable of, * beyond the 5 used here but they are more complex. * * This driver is written to be used with the Hecuba display architecture. * The actual display chip is called Apollo and the interface electronics * it needs is called Hecuba. * * It is intended to be architecture independent. A board specific driver * must be used to perform all the physical IO interactions. 
An example * is provided as n411.c * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/list.h> #include <linux/uaccess.h> #include <video/hecubafb.h> /* Display specific information */ #define DPY_W 600 #define DPY_H 800 static struct fb_fix_screeninfo hecubafb_fix __devinitdata = { .id = "hecubafb", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO01, .xpanstep = 0, .ypanstep = 0, .ywrapstep = 0, .line_length = DPY_W, .accel = FB_ACCEL_NONE, }; static struct fb_var_screeninfo hecubafb_var __devinitdata = { .xres = DPY_W, .yres = DPY_H, .xres_virtual = DPY_W, .yres_virtual = DPY_H, .bits_per_pixel = 1, .nonstd = 1, }; /* main hecubafb functions */ static void apollo_send_data(struct hecubafb_par *par, unsigned char data) { /* set data */ par->board->set_data(par, data); /* set DS low */ par->board->set_ctl(par, HCB_DS_BIT, 0); /* wait for ack */ par->board->wait_for_ack(par, 0); /* set DS hi */ par->board->set_ctl(par, HCB_DS_BIT, 1); /* wait for ack to clear */ par->board->wait_for_ack(par, 1); } static void apollo_send_command(struct hecubafb_par *par, unsigned char data) { /* command so set CD to high */ par->board->set_ctl(par, HCB_CD_BIT, 1); /* actually strobe with command */ apollo_send_data(par, data); /* clear CD back to low */ par->board->set_ctl(par, HCB_CD_BIT, 0); } static void hecubafb_dpy_update(struct hecubafb_par *par) { int i; unsigned char *buf = (unsigned char __force *)par->info->screen_base; apollo_send_command(par, APOLLO_START_NEW_IMG); for (i=0; i < (DPY_W*DPY_H/8); i++) { apollo_send_data(par, *(buf++)); } apollo_send_command(par, APOLLO_STOP_IMG_DATA); apollo_send_command(par, APOLLO_DISPLAY_IMG); } /* this is called back from the deferred io workqueue */ static void 
hecubafb_dpy_deferred_io(struct fb_info *info, struct list_head *pagelist) { hecubafb_dpy_update(info->par); } static void hecubafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct hecubafb_par *par = info->par; sys_fillrect(info, rect); hecubafb_dpy_update(par); } static void hecubafb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct hecubafb_par *par = info->par; sys_copyarea(info, area); hecubafb_dpy_update(par); } static void hecubafb_imageblit(struct fb_info *info, const struct fb_image *image) { struct hecubafb_par *par = info->par; sys_imageblit(info, image); hecubafb_dpy_update(par); } /* * this is the slow path from userspace. they can seek and write to * the fb. it's inefficient to do anything less than a full screen draw */ static ssize_t hecubafb_write(struct fb_info *info, const char __user *buf, size_t count, loff_t *ppos) { struct hecubafb_par *par = info->par; unsigned long p = *ppos; void *dst; int err = 0; unsigned long total_size; if (info->state != FBINFO_STATE_RUNNING) return -EPERM; total_size = info->fix.smem_len; if (p > total_size) return -EFBIG; if (count > total_size) { err = -EFBIG; count = total_size; } if (count + p > total_size) { if (!err) err = -ENOSPC; count = total_size - p; } dst = (void __force *) (info->screen_base + p); if (copy_from_user(dst, buf, count)) err = -EFAULT; if (!err) *ppos += count; hecubafb_dpy_update(par); return (err) ? 
err : count; } static struct fb_ops hecubafb_ops = { .owner = THIS_MODULE, .fb_read = fb_sys_read, .fb_write = hecubafb_write, .fb_fillrect = hecubafb_fillrect, .fb_copyarea = hecubafb_copyarea, .fb_imageblit = hecubafb_imageblit, }; static struct fb_deferred_io hecubafb_defio = { .delay = HZ, .deferred_io = hecubafb_dpy_deferred_io, }; static int __devinit hecubafb_probe(struct platform_device *dev) { struct fb_info *info; struct hecuba_board *board; int retval = -ENOMEM; int videomemorysize; unsigned char *videomemory; struct hecubafb_par *par; /* pick up board specific routines */ board = dev->dev.platform_data; if (!board) return -EINVAL; /* try to count device specific driver, if can't, platform recalls */ if (!try_module_get(board->owner)) return -ENODEV; videomemorysize = (DPY_W*DPY_H)/8; videomemory = vzalloc(videomemorysize); if (!videomemory) goto err_videomem_alloc; info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev); if (!info) goto err_fballoc; info->screen_base = (char __force __iomem *)videomemory; info->fbops = &hecubafb_ops; info->var = hecubafb_var; info->fix = hecubafb_fix; info->fix.smem_len = videomemorysize; par = info->par; par->info = info; par->board = board; par->send_command = apollo_send_command; par->send_data = apollo_send_data; info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB; info->fbdefio = &hecubafb_defio; fb_deferred_io_init(info); retval = register_framebuffer(info); if (retval < 0) goto err_fbreg; platform_set_drvdata(dev, info); printk(KERN_INFO "fb%d: Hecuba frame buffer device, using %dK of video memory\n", info->node, videomemorysize >> 10); /* this inits the dpy */ retval = par->board->init(par); if (retval < 0) goto err_fbreg; return 0; err_fbreg: framebuffer_release(info); err_fballoc: vfree(videomemory); err_videomem_alloc: module_put(board->owner); return retval; } static int __devexit hecubafb_remove(struct platform_device *dev) { struct fb_info *info = platform_get_drvdata(dev); if (info) { struct 
hecubafb_par *par = info->par; fb_deferred_io_cleanup(info); unregister_framebuffer(info); vfree((void __force *)info->screen_base); if (par->board->remove) par->board->remove(par); module_put(par->board->owner); framebuffer_release(info); } return 0; } static struct platform_driver hecubafb_driver = { .probe = hecubafb_probe, .remove = __devexit_p(hecubafb_remove), .driver = { .owner = THIS_MODULE, .name = "hecubafb", }, }; static int __init hecubafb_init(void) { return platform_driver_register(&hecubafb_driver); } static void __exit hecubafb_exit(void) { platform_driver_unregister(&hecubafb_driver); } module_init(hecubafb_init); module_exit(hecubafb_exit); MODULE_DESCRIPTION("fbdev driver for Hecuba/Apollo controller"); MODULE_AUTHOR("Jaya Kumar"); MODULE_LICENSE("GPL");
gpl-2.0
jyizheng/goldfish_pasr
drivers/staging/comedi/drivers/dt2815.c
8282
7564
/* comedi/drivers/dt2815.c Hardware driver for Data Translation DT2815 COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: dt2815 Description: Data Translation DT2815 Author: ds Status: mostly complete, untested Devices: [Data Translation] DT2815 (dt2815) I'm not sure anyone has ever tested this board. If you have information contrary, please update. 
Configuration options: [0] - I/O port base base address [1] - IRQ (unused) [2] - Voltage unipolar/bipolar configuration 0 == unipolar 5V (0V -- +5V) 1 == bipolar 5V (-5V -- +5V) [3] - Current offset configuration 0 == disabled (0mA -- +32mAV) 1 == enabled (+4mA -- +20mAV) [4] - Firmware program configuration 0 == program 1 (see manual table 5-4) 1 == program 2 (see manual table 5-4) 2 == program 3 (see manual table 5-4) 3 == program 4 (see manual table 5-4) [5] - Analog output 0 range configuration 0 == voltage 1 == current [6] - Analog output 1 range configuration (same options) [7] - Analog output 2 range configuration (same options) [8] - Analog output 3 range configuration (same options) [9] - Analog output 4 range configuration (same options) [10] - Analog output 5 range configuration (same options) [11] - Analog output 6 range configuration (same options) [12] - Analog output 7 range configuration (same options) */ #include "../comedidev.h" #include <linux/ioport.h> #include <linux/delay.h> static const struct comedi_lrange range_dt2815_ao_32_current = {1, {RANGE_mA(0, 32)} }; static const struct comedi_lrange range_dt2815_ao_20_current = {1, {RANGE_mA(4, 20)} }; #define DT2815_SIZE 2 #define DT2815_DATA 0 #define DT2815_STATUS 1 static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int dt2815_detach(struct comedi_device *dev); static struct comedi_driver driver_dt2815 = { .driver_name = "dt2815", .module = THIS_MODULE, .attach = dt2815_attach, .detach = dt2815_detach, }; static int __init driver_dt2815_init_module(void) { return comedi_driver_register(&driver_dt2815); } static void __exit driver_dt2815_cleanup_module(void) { comedi_driver_unregister(&driver_dt2815); } module_init(driver_dt2815_init_module); module_exit(driver_dt2815_cleanup_module); static void dt2815_free_resources(struct comedi_device *dev); struct dt2815_private { const struct comedi_lrange *range_type_list[8]; unsigned int ao_readback[8]; }; #define 
devpriv ((struct dt2815_private *)dev->private) static int dt2815_wait_for_status(struct comedi_device *dev, int status) { int i; for (i = 0; i < 100; i++) { if (inb(dev->iobase + DT2815_STATUS) == status) break; } return status; } static int dt2815_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) data[i] = devpriv->ao_readback[chan]; return i; } static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); unsigned int status; unsigned int lo, hi; for (i = 0; i < insn->n; i++) { lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01; hi = (data[i] & 0xff0) >> 4; status = dt2815_wait_for_status(dev, 0x00); if (status != 0) { printk(KERN_WARNING "dt2815: failed to write low byte " "on %d reason %x\n", chan, status); return -EBUSY; } outb(lo, dev->iobase + DT2815_DATA); status = dt2815_wait_for_status(dev, 0x10); if (status != 0x10) { printk(KERN_WARNING "dt2815: failed to write high byte " "on %d reason %x\n", chan, status); return -EBUSY; } devpriv->ao_readback[chan] = data[i]; } return i; } /* options[0] Board base address options[1] IRQ (not applicable) options[2] Voltage unipolar/bipolar configuration 0 == unipolar 5V (0V -- +5V) 1 == bipolar 5V (-5V -- +5V) options[3] Current offset configuration 0 == disabled (0mA -- +32mAV) 1 == enabled (+4mA -- +20mAV) options[4] Firmware program configuration 0 == program 1 (see manual table 5-4) 1 == program 2 (see manual table 5-4) 2 == program 3 (see manual table 5-4) 3 == program 4 (see manual table 5-4) options[5] Analog output 0 range configuration 0 == voltage 1 == current options[6] Analog output 1 range configuration ... 
options[12] Analog output 7 range configuration 0 == voltage 1 == current */ static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int i; const struct comedi_lrange *current_range_type, *voltage_range_type; unsigned long iobase; iobase = it->options[0]; printk(KERN_INFO "comedi%d: dt2815: 0x%04lx ", dev->minor, iobase); if (!request_region(iobase, DT2815_SIZE, "dt2815")) { printk(KERN_WARNING "I/O port conflict\n"); return -EIO; } dev->iobase = iobase; dev->board_name = "dt2815"; if (alloc_subdevices(dev, 1) < 0) return -ENOMEM; if (alloc_private(dev, sizeof(struct dt2815_private)) < 0) return -ENOMEM; s = dev->subdevices; /* ao subdevice */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->maxdata = 0xfff; s->n_chan = 8; s->insn_write = dt2815_ao_insn; s->insn_read = dt2815_ao_insn_read; s->range_table_list = devpriv->range_type_list; current_range_type = (it->options[3]) ? &range_dt2815_ao_20_current : &range_dt2815_ao_32_current; voltage_range_type = (it->options[2]) ? &range_bipolar5 : &range_unipolar5; for (i = 0; i < 8; i++) { devpriv->range_type_list[i] = (it->options[5 + i]) ? 
current_range_type : voltage_range_type; } /* Init the 2815 */ outb(0x00, dev->iobase + DT2815_STATUS); for (i = 0; i < 100; i++) { /* This is incredibly slow (approx 20 ms) */ unsigned int status; udelay(1000); status = inb(dev->iobase + DT2815_STATUS); if (status == 4) { unsigned int program; program = (it->options[4] & 0x3) << 3 | 0x7; outb(program, dev->iobase + DT2815_DATA); printk(KERN_INFO ", program: 0x%x (@t=%d)\n", program, i); break; } else if (status != 0x00) { printk(KERN_WARNING "dt2815: unexpected status 0x%x " "(@t=%d)\n", status, i); if (status & 0x60) outb(0x00, dev->iobase + DT2815_STATUS); } } return 0; } static void dt2815_free_resources(struct comedi_device *dev) { if (dev->iobase) release_region(dev->iobase, DT2815_SIZE); } static int dt2815_detach(struct comedi_device *dev) { printk(KERN_INFO "comedi%d: dt2815: remove\n", dev->minor); dt2815_free_resources(dev); return 0; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
kyleterry/linux
drivers/s390/cio/itcw.c
12890
11950
/* * Functions for incremental construction of fcx enabled I/O control blocks. * * Copyright IBM Corp. 2008 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/module.h> #include <asm/fcx.h> #include <asm/itcw.h> /** * struct itcw - incremental tcw helper data type * * This structure serves as a handle for the incremental construction of a * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate * tcw and associated data. The data structures are contained inside a single * contiguous buffer provided by the user. * * The itcw construction functions take care of overall data integrity: * - reset unused fields to zero * - fill in required pointers * - ensure required alignment for data structures * - prevent data structures to cross 4k-byte boundary where required * - calculate tccb-related length fields * - optionally provide ready-made interrogate tcw and associated structures * * Restrictions apply to the itcws created with these construction functions: * - tida only supported for data address, not for tccb * - only contiguous tidaw-lists (no ttic) * - total number of bytes required per itcw may not exceed 4k bytes * - either read or write operation (may not work with r=0 and w=0) * * Example: * struct itcw *itcw; * void *buffer; * size_t size; * * size = itcw_calc_size(1, 2, 0); * buffer = kmalloc(size, GFP_KERNEL | GFP_DMA); * if (!buffer) * return -ENOMEM; * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0); * if (IS_ERR(itcw)) * return PTR_ER(itcw); * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72); * itcw_add_tidaw(itcw, 0, 0x30000, 20); * itcw_add_tidaw(itcw, 0, 0x40000, 52); * itcw_finalize(itcw); * */ struct itcw { struct tcw *tcw; struct tcw *intrg_tcw; int num_tidaws; int max_tidaws; int intrg_num_tidaws; int intrg_max_tidaws; }; /** * itcw_get_tcw - return pointer to tcw associated 
with the itcw * @itcw: address of the itcw * * Return pointer to the tcw associated with the itcw. */ struct tcw *itcw_get_tcw(struct itcw *itcw) { return itcw->tcw; } EXPORT_SYMBOL(itcw_get_tcw); /** * itcw_calc_size - return the size of an itcw with the given parameters * @intrg: if non-zero, add an interrogate tcw * @max_tidaws: maximum number of tidaws to be used for data addressing or zero * if no tida is to be used. * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing * by the interrogate tcw, if specified * * Calculate and return the number of bytes required to hold an itcw with the * given parameters and assuming tccbs with maximum size. * * Note that the resulting size also contains bytes needed for alignment * padding as well as padding to ensure that data structures don't cross a * 4k-boundary where required. */ size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) { size_t len; int cross_count; /* Main data. */ len = sizeof(struct itcw); len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + /* TSB */ sizeof(struct tsb) + /* TIDAL */ max_tidaws * sizeof(struct tidaw); /* Interrogate data. */ if (intrg) { len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + /* TSB */ sizeof(struct tsb) + /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); } /* Maximum required alignment padding. */ len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; /* TIDAW lists may not cross a 4k boundary. To cross a * boundary we need to add a TTIC TIDAW. We need to reserve * one additional TIDAW for a TTIC that we may need to add due * to the placement of the data chunk in memory, and a further * TIDAW for each page boundary that the TIDAW list may cross * due to it's own size. 
*/ if (max_tidaws) { cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1) >> PAGE_SHIFT); len += cross_count * sizeof(struct tidaw); } if (intrg_max_tidaws) { cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1) >> PAGE_SHIFT); len += cross_count * sizeof(struct tidaw); } return len; } EXPORT_SYMBOL(itcw_calc_size); #define CROSS4K(x, l) (((x) & ~4095) != ((x + l) & ~4095)) static inline void *fit_chunk(addr_t *start, addr_t end, size_t len, int align, int check_4k) { addr_t addr; addr = ALIGN(*start, align); if (check_4k && CROSS4K(addr, len)) { addr = ALIGN(addr, 4096); addr = ALIGN(addr, align); } if (addr + len > end) return ERR_PTR(-ENOSPC); *start = addr + len; return (void *) addr; } /** * itcw_init - initialize incremental tcw data structure * @buffer: address of buffer to use for data structures * @size: number of bytes in buffer * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write * operation tcw * @intrg: if non-zero, add and initialize an interrogate tcw * @max_tidaws: maximum number of tidaws to be used for data addressing or zero * if no tida is to be used. * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing * by the interrogate tcw, if specified * * Prepare the specified buffer to be used as an incremental tcw, i.e. a * helper data structure that can be used to construct a valid tcw by * successive calls to other helper functions. Note: the buffer needs to be * located below the 2G address limit. The resulting tcw has the following * restrictions: * - no tccb tidal * - input/output tidal is contiguous (no ttic) * - total data should not exceed 4k * - tcw specifies either read or write operation * * On success, return pointer to the resulting incremental tcw data structure, * ERR_PTR otherwise. 
*/ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, int max_tidaws, int intrg_max_tidaws) { struct itcw *itcw; void *chunk; addr_t start; addr_t end; int cross_count; /* Check for 2G limit. */ start = (addr_t) buffer; end = start + size; if (end > (1 << 31)) return ERR_PTR(-EINVAL); memset(buffer, 0, size); /* ITCW. */ chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0); if (IS_ERR(chunk)) return chunk; itcw = chunk; /* allow for TTIC tidaws that may be needed to cross a page boundary */ cross_count = 0; if (max_tidaws) cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1) >> PAGE_SHIFT); itcw->max_tidaws = max_tidaws + cross_count; cross_count = 0; if (intrg_max_tidaws) cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1) >> PAGE_SHIFT); itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count; /* Main TCW. */ chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); if (IS_ERR(chunk)) return chunk; itcw->tcw = chunk; tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0, (op == ITCW_OP_WRITE) ? 1 : 0); /* Interrogate TCW. */ if (intrg) { chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); if (IS_ERR(chunk)) return chunk; itcw->intrg_tcw = chunk; tcw_init(itcw->intrg_tcw, 1, 0); tcw_set_intrg(itcw->tcw, itcw->intrg_tcw); } /* Data TIDAL. */ if (max_tidaws > 0) { chunk = fit_chunk(&start, end, sizeof(struct tidaw) * itcw->max_tidaws, 16, 0); if (IS_ERR(chunk)) return chunk; tcw_set_data(itcw->tcw, chunk, 1); } /* Interrogate data TIDAL. */ if (intrg && (intrg_max_tidaws > 0)) { chunk = fit_chunk(&start, end, sizeof(struct tidaw) * itcw->intrg_max_tidaws, 16, 0); if (IS_ERR(chunk)) return chunk; tcw_set_data(itcw->intrg_tcw, chunk, 1); } /* TSB. */ chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); if (IS_ERR(chunk)) return chunk; tsb_init(chunk); tcw_set_tsb(itcw->tcw, chunk); /* Interrogate TSB. 
*/ if (intrg) { chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); if (IS_ERR(chunk)) return chunk; tsb_init(chunk); tcw_set_tsb(itcw->intrg_tcw, chunk); } /* TCCB. */ chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); if (IS_ERR(chunk)) return chunk; tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT); tcw_set_tccb(itcw->tcw, chunk); /* Interrogate TCCB. */ if (intrg) { chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); if (IS_ERR(chunk)) return chunk; tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG); tcw_set_tccb(itcw->intrg_tcw, chunk); tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL, sizeof(struct dcw_intrg_data), 0); tcw_finalize(itcw->intrg_tcw, 0); } return itcw; } EXPORT_SYMBOL(itcw_init); /** * itcw_add_dcw - add a dcw to the itcw * @itcw: address of the itcw * @cmd: the dcw command * @flags: flags for the dcw * @cd: address of control data for this dcw or NULL if none is required * @cd_count: number of control data bytes for this dcw * @count: number of data bytes for this dcw * * Add a new dcw to the specified itcw by writing the dcw information specified * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw * would exceed the available space. * * Note: the tcal field of the tccb header will be updated to reflect added * content. */ struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd, u8 cd_count, u32 count) { return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd, flags, cd, cd_count, count); } EXPORT_SYMBOL(itcw_add_dcw); /** * itcw_add_tidaw - add a tidaw to the itcw * @itcw: address of the itcw * @flags: flags for the new tidaw * @addr: address value for the new tidaw * @count: count value for the new tidaw * * Add a new tidaw to the input/output data tidaw-list of the specified itcw * (depending on the value of the r-flag and w-flag). 
Return a pointer to * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the * available space. * * Note: TTIC tidaws are automatically added when needed, so explicitly calling * this interface with the TTIC flag is not supported. The last-tidaw flag * for the last tidaw in the list will be set by itcw_finalize. */ struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count) { struct tidaw *following; if (itcw->num_tidaws >= itcw->max_tidaws) return ERR_PTR(-ENOSPC); /* * Is the tidaw, which follows the one we are about to fill, on the next * page? Then we have to insert a TTIC tidaw first, that points to the * tidaw on the new page. */ following = ((struct tidaw *) tcw_get_data(itcw->tcw)) + itcw->num_tidaws + 1; if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) { tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, TIDAW_FLAGS_TTIC, following, 0); if (itcw->num_tidaws >= itcw->max_tidaws) return ERR_PTR(-ENOSPC); } return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count); } EXPORT_SYMBOL(itcw_add_tidaw); /** * itcw_set_data - set data address and tida flag of the itcw * @itcw: address of the itcw * @addr: the data address * @use_tidal: zero of the data address specifies a contiguous block of data, * non-zero if it specifies a list if tidaws. * * Set the input/output data address of the itcw (depending on the value of the * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag * is set as well. */ void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal) { tcw_set_data(itcw->tcw, addr, use_tidal); } EXPORT_SYMBOL(itcw_set_data); /** * itcw_finalize - calculate length and count fields of the itcw * @itcw: address of the itcw * * Calculate tcw input-/output-count and tccbl fields and add a tcat the tccb. * In case input- or output-tida is used, the tidaw-list must be stored in * continuous storage (no ttic). The tcal field in the tccb must be * up-to-date. 
*/ void itcw_finalize(struct itcw *itcw) { tcw_finalize(itcw->tcw, itcw->num_tidaws); } EXPORT_SYMBOL(itcw_finalize);
gpl-2.0
s05427226/linux
arch/mips/sibyte/common/cfe_console.c
13658
1737
#include <linux/init.h> #include <linux/errno.h> #include <linux/console.h> #include <asm/sibyte/board.h> #include <asm/fw/cfe/cfe_api.h> #include <asm/fw/cfe/cfe_error.h> extern int cfe_cons_handle; static void cfe_console_write(struct console *cons, const char *str, unsigned int count) { int i, last, written; for (i=0, last=0; i<count; i++) { if (!str[i]) /* XXXKW can/should this ever happen? */ return; if (str[i] == '\n') { do { written = cfe_write(cfe_cons_handle, &str[last], i-last); if (written < 0) ; last += written; } while (last < i); while (cfe_write(cfe_cons_handle, "\r", 1) <= 0) ; } } if (last != count) { do { written = cfe_write(cfe_cons_handle, &str[last], count-last); if (written < 0) ; last += written; } while (last < count); } } static int cfe_console_setup(struct console *cons, char *str) { char consdev[32]; /* XXXKW think about interaction with 'console=' cmdline arg */ /* If none of the console options are configured, the build will break. */ if (cfe_getenv("BOOT_CONSOLE", consdev, 32) >= 0) { #ifdef CONFIG_SERIAL_SB1250_DUART if (!strcmp(consdev, "uart0")) { setleds("u0cn"); } else if (!strcmp(consdev, "uart1")) { setleds("u1cn"); } else #endif #ifdef CONFIG_VGA_CONSOLE if (!strcmp(consdev, "pcconsole0")) { setleds("pccn"); } else #endif return -ENODEV; } return 0; } static struct console sb1250_cfe_cons = { .name = "cfe", .write = cfe_console_write, .setup = cfe_console_setup, .flags = CON_PRINTBUFFER, .index = -1, }; static int __init sb1250_cfe_console_init(void) { register_console(&sb1250_cfe_cons); return 0; } console_initcall(sb1250_cfe_console_init);
gpl-2.0
Dm47021/CAF-MSM-34
arch/mips/sibyte/common/cfe_console.c
13658
1737
#include <linux/init.h> #include <linux/errno.h> #include <linux/console.h> #include <asm/sibyte/board.h> #include <asm/fw/cfe/cfe_api.h> #include <asm/fw/cfe/cfe_error.h> extern int cfe_cons_handle; static void cfe_console_write(struct console *cons, const char *str, unsigned int count) { int i, last, written; for (i=0, last=0; i<count; i++) { if (!str[i]) /* XXXKW can/should this ever happen? */ return; if (str[i] == '\n') { do { written = cfe_write(cfe_cons_handle, &str[last], i-last); if (written < 0) ; last += written; } while (last < i); while (cfe_write(cfe_cons_handle, "\r", 1) <= 0) ; } } if (last != count) { do { written = cfe_write(cfe_cons_handle, &str[last], count-last); if (written < 0) ; last += written; } while (last < count); } } static int cfe_console_setup(struct console *cons, char *str) { char consdev[32]; /* XXXKW think about interaction with 'console=' cmdline arg */ /* If none of the console options are configured, the build will break. */ if (cfe_getenv("BOOT_CONSOLE", consdev, 32) >= 0) { #ifdef CONFIG_SERIAL_SB1250_DUART if (!strcmp(consdev, "uart0")) { setleds("u0cn"); } else if (!strcmp(consdev, "uart1")) { setleds("u1cn"); } else #endif #ifdef CONFIG_VGA_CONSOLE if (!strcmp(consdev, "pcconsole0")) { setleds("pccn"); } else #endif return -ENODEV; } return 0; } static struct console sb1250_cfe_cons = { .name = "cfe", .write = cfe_console_write, .setup = cfe_console_setup, .flags = CON_PRINTBUFFER, .index = -1, }; static int __init sb1250_cfe_console_init(void) { register_console(&sb1250_cfe_cons); return 0; } console_initcall(sb1250_cfe_console_init);
gpl-2.0
dirkbehme/linux-renesas-rcar-gen3
drivers/gpu/drm/armada/armada_fb.c
91
4440
/* * Copyright (C) 2012 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> #include "armada_drm.h" #include "armada_fb.h" #include "armada_gem.h" #include "armada_hw.h" static void armada_fb_destroy(struct drm_framebuffer *fb) { struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb); drm_framebuffer_cleanup(&dfb->fb); drm_gem_object_unreference_unlocked(&dfb->obj->obj); kfree(dfb); } static int armada_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *dfile, unsigned int *handle) { struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb); return drm_gem_handle_create(dfile, &dfb->obj->obj, handle); } static const struct drm_framebuffer_funcs armada_fb_funcs = { .destroy = armada_fb_destroy, .create_handle = armada_fb_create_handle, }; struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj) { struct armada_framebuffer *dfb; uint8_t format, config; int ret; switch (mode->pixel_format) { #define FMT(drm, fmt, mod) \ case DRM_FORMAT_##drm: \ format = CFG_##fmt; \ config = mod; \ break FMT(RGB565, 565, CFG_SWAPRB); FMT(BGR565, 565, 0); FMT(ARGB1555, 1555, CFG_SWAPRB); FMT(ABGR1555, 1555, 0); FMT(RGB888, 888PACK, CFG_SWAPRB); FMT(BGR888, 888PACK, 0); FMT(XRGB8888, X888, CFG_SWAPRB); FMT(XBGR8888, X888, 0); FMT(ARGB8888, 8888, CFG_SWAPRB); FMT(ABGR8888, 8888, 0); FMT(YUYV, 422PACK, CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV); FMT(UYVY, 422PACK, CFG_YUV2RGB); FMT(VYUY, 422PACK, CFG_YUV2RGB | CFG_SWAPUV); FMT(YVYU, 422PACK, CFG_YUV2RGB | CFG_SWAPYU); FMT(YUV422, 422, CFG_YUV2RGB); FMT(YVU422, 422, CFG_YUV2RGB | CFG_SWAPUV); FMT(YUV420, 420, CFG_YUV2RGB); FMT(YVU420, 420, CFG_YUV2RGB | CFG_SWAPUV); FMT(C8, PSEUDO8, 0); #undef FMT default: return 
ERR_PTR(-EINVAL); } dfb = kzalloc(sizeof(*dfb), GFP_KERNEL); if (!dfb) { DRM_ERROR("failed to allocate Armada fb object\n"); return ERR_PTR(-ENOMEM); } dfb->fmt = format; dfb->mod = config; dfb->obj = obj; drm_helper_mode_fill_fb_struct(&dfb->fb, mode); ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs); if (ret) { kfree(dfb); return ERR_PTR(ret); } /* * Take a reference on our object as we're successful - the * caller already holds a reference, which keeps us safe for * the above call, but the caller will drop their reference * to it. Hence we need to take our own reference. */ drm_gem_object_reference(&obj->obj); return dfb; } static struct drm_framebuffer *armada_fb_create(struct drm_device *dev, struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode) { struct armada_gem_object *obj; struct armada_framebuffer *dfb; int ret; DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n", mode->width, mode->height, mode->pixel_format, mode->flags, mode->pitches[0], mode->pitches[1], mode->pitches[2]); /* We can only handle a single plane at the moment */ if (drm_format_num_planes(mode->pixel_format) > 1 && (mode->handles[0] != mode->handles[1] || mode->handles[0] != mode->handles[2])) { ret = -EINVAL; goto err; } obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]); if (!obj) { ret = -ENOENT; goto err; } if (obj->obj.import_attach && !obj->sgt) { ret = armada_gem_map_import(obj); if (ret) goto err_unref; } /* Framebuffer objects must have a valid device address for scanout */ if (obj->dev_addr == DMA_ERROR_CODE) { ret = -EINVAL; goto err_unref; } dfb = armada_framebuffer_create(dev, mode, obj); if (IS_ERR(dfb)) { ret = PTR_ERR(dfb); goto err; } drm_gem_object_unreference_unlocked(&obj->obj); return &dfb->fb; err_unref: drm_gem_object_unreference_unlocked(&obj->obj); err: DRM_ERROR("failed to initialize framebuffer: %d\n", ret); return ERR_PTR(ret); } static void armada_output_poll_changed(struct drm_device *dev) { struct armada_private *priv = 
dev->dev_private; struct drm_fb_helper *fbh = priv->fbdev; if (fbh) drm_fb_helper_hotplug_event(fbh); } const struct drm_mode_config_funcs armada_drm_mode_config_funcs = { .fb_create = armada_fb_create, .output_poll_changed = armada_output_poll_changed, };
gpl-2.0
hernstrom/linux
drivers/hid/hid-multitouch.c
91
41569
/* * HID driver for multitouch panels * * Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr> * Copyright (c) 2010-2013 Benjamin Tissoires <benjamin.tissoires@gmail.com> * Copyright (c) 2010-2012 Ecole Nationale de l'Aviation Civile, France * Copyright (c) 2012-2013 Red Hat, Inc * * This code is partly based on hid-egalax.c: * * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr> * Copyright (c) 2010 Henrik Rydberg <rydberg@euromail.se> * Copyright (c) 2010 Canonical, Ltd. * * This code is partly based on hid-3m-pct.c: * * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr> * Copyright (c) 2010 Henrik Rydberg <rydberg@euromail.se> * Copyright (c) 2010 Canonical, Ltd. * */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ /* * This driver is regularly tested thanks to the tool hid-test[1]. * This tool relies on hid-replay[2] and a database of hid devices[3]. * Please run these regression tests before patching this module so that * your patch won't break existing known devices. 
* * [1] https://github.com/bentiss/hid-test * [2] https://github.com/bentiss/hid-replay * [3] https://github.com/bentiss/hid-devices */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/input/mt.h> #include <linux/string.h> MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>"); MODULE_DESCRIPTION("HID multitouch panels"); MODULE_LICENSE("GPL"); #include "hid-ids.h" /* quirks to control the device */ #define MT_QUIRK_NOT_SEEN_MEANS_UP (1 << 0) #define MT_QUIRK_SLOT_IS_CONTACTID (1 << 1) #define MT_QUIRK_CYPRESS (1 << 2) #define MT_QUIRK_SLOT_IS_CONTACTNUMBER (1 << 3) #define MT_QUIRK_ALWAYS_VALID (1 << 4) #define MT_QUIRK_VALID_IS_INRANGE (1 << 5) #define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6) #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8) #define MT_QUIRK_NO_AREA (1 << 9) #define MT_QUIRK_IGNORE_DUPLICATES (1 << 10) #define MT_QUIRK_HOVERING (1 << 11) #define MT_QUIRK_CONTACT_CNT_ACCURATE (1 << 12) struct mt_slot { __s32 x, y, cx, cy, p, w, h; __s32 contactid; /* the device ContactID assigned to this slot */ bool touch_state; /* is the touch valid? */ bool inrange_state; /* is the finger in proximity of the sensor? 
*/ }; struct mt_class { __s32 name; /* MT_CLS */ __s32 quirks; __s32 sn_move; /* Signal/noise ratio for move events */ __s32 sn_width; /* Signal/noise ratio for width events */ __s32 sn_height; /* Signal/noise ratio for height events */ __s32 sn_pressure; /* Signal/noise ratio for pressure events */ __u8 maxcontacts; bool is_indirect; /* true for touchpads */ }; struct mt_fields { unsigned usages[HID_MAX_FIELDS]; unsigned int length; }; struct mt_device { struct mt_slot curdata; /* placeholder of incoming data */ struct mt_class mtclass; /* our mt device class */ struct mt_fields *fields; /* temporary placeholder for storing the multitouch fields */ int cc_index; /* contact count field index in the report */ int cc_value_index; /* contact count value index in the field */ unsigned last_slot_field; /* the last field of a slot */ unsigned mt_report_id; /* the report ID of the multitouch device */ unsigned pen_report_id; /* the report ID of the pen device */ __s16 inputmode; /* InputMode HID feature, -1 if non-existent */ __s16 inputmode_index; /* InputMode HID feature index in the report */ __s16 maxcontact_report_id; /* Maximum Contact Number HID feature, -1 if non-existent */ __u8 num_received; /* how many contacts we received */ __u8 num_expected; /* expected last contact index */ __u8 maxcontacts; __u8 touches_by_report; /* how many touches are present in one report: * 1 means we should use a serial protocol * > 1 means hybrid (multitouch) protocol */ bool serial_maybe; /* need to check for serial protocol */ bool curvalid; /* is the current contact valid? 
*/ unsigned mt_flags; /* flags to pass to input-mt */ }; static void mt_post_parse_default_settings(struct mt_device *td); static void mt_post_parse(struct mt_device *td); /* classes of device behavior */ #define MT_CLS_DEFAULT 0x0001 #define MT_CLS_SERIAL 0x0002 #define MT_CLS_CONFIDENCE 0x0003 #define MT_CLS_CONFIDENCE_CONTACT_ID 0x0004 #define MT_CLS_CONFIDENCE_MINUS_ONE 0x0005 #define MT_CLS_DUAL_INRANGE_CONTACTID 0x0006 #define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0007 #define MT_CLS_DUAL_NSMU_CONTACTID 0x0008 #define MT_CLS_INRANGE_CONTACTNUMBER 0x0009 #define MT_CLS_NSMU 0x000a #define MT_CLS_DUAL_CONTACT_NUMBER 0x0010 #define MT_CLS_DUAL_CONTACT_ID 0x0011 /* vendor specific classes */ #define MT_CLS_3M 0x0101 #define MT_CLS_CYPRESS 0x0102 #define MT_CLS_EGALAX 0x0103 #define MT_CLS_EGALAX_SERIAL 0x0104 #define MT_CLS_TOPSEED 0x0105 #define MT_CLS_PANASONIC 0x0106 #define MT_CLS_FLATFROG 0x0107 #define MT_CLS_GENERALTOUCH_TWOFINGERS 0x0108 #define MT_CLS_GENERALTOUCH_PWT_TENFINGERS 0x0109 #define MT_DEFAULT_MAXCONTACT 10 #define MT_MAX_MAXCONTACT 250 #define MT_USB_DEVICE(v, p) HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH, v, p) #define MT_BT_DEVICE(v, p) HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH, v, p) /* * these device-dependent functions determine what slot corresponds * to a valid contact that was just read. 
*/ static int cypress_compute_slot(struct mt_device *td) { if (td->curdata.contactid != 0 || td->num_received == 0) return td->curdata.contactid; else return -1; } static struct mt_class mt_classes[] = { { .name = MT_CLS_DEFAULT, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_CONTACT_CNT_ACCURATE }, { .name = MT_CLS_NSMU, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP }, { .name = MT_CLS_SERIAL, .quirks = MT_QUIRK_ALWAYS_VALID}, { .name = MT_CLS_CONFIDENCE, .quirks = MT_QUIRK_VALID_IS_CONFIDENCE }, { .name = MT_CLS_CONFIDENCE_CONTACT_ID, .quirks = MT_QUIRK_VALID_IS_CONFIDENCE | MT_QUIRK_SLOT_IS_CONTACTID }, { .name = MT_CLS_CONFIDENCE_MINUS_ONE, .quirks = MT_QUIRK_VALID_IS_CONFIDENCE | MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE }, { .name = MT_CLS_DUAL_INRANGE_CONTACTID, .quirks = MT_QUIRK_VALID_IS_INRANGE | MT_QUIRK_SLOT_IS_CONTACTID, .maxcontacts = 2 }, { .name = MT_CLS_DUAL_INRANGE_CONTACTNUMBER, .quirks = MT_QUIRK_VALID_IS_INRANGE | MT_QUIRK_SLOT_IS_CONTACTNUMBER, .maxcontacts = 2 }, { .name = MT_CLS_DUAL_NSMU_CONTACTID, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | MT_QUIRK_SLOT_IS_CONTACTID, .maxcontacts = 2 }, { .name = MT_CLS_INRANGE_CONTACTNUMBER, .quirks = MT_QUIRK_VALID_IS_INRANGE | MT_QUIRK_SLOT_IS_CONTACTNUMBER }, { .name = MT_CLS_DUAL_CONTACT_NUMBER, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_SLOT_IS_CONTACTNUMBER, .maxcontacts = 2 }, { .name = MT_CLS_DUAL_CONTACT_ID, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_SLOT_IS_CONTACTID, .maxcontacts = 2 }, /* * vendor specific classes */ { .name = MT_CLS_3M, .quirks = MT_QUIRK_VALID_IS_CONFIDENCE | MT_QUIRK_SLOT_IS_CONTACTID, .sn_move = 2048, .sn_width = 128, .sn_height = 128, .maxcontacts = 60, }, { .name = MT_CLS_CYPRESS, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | MT_QUIRK_CYPRESS, .maxcontacts = 10 }, { .name = MT_CLS_EGALAX, .quirks = MT_QUIRK_SLOT_IS_CONTACTID | MT_QUIRK_VALID_IS_INRANGE, .sn_move = 4096, .sn_pressure = 32, }, { .name = MT_CLS_EGALAX_SERIAL, .quirks = 
MT_QUIRK_SLOT_IS_CONTACTID | MT_QUIRK_ALWAYS_VALID, .sn_move = 4096, .sn_pressure = 32, }, { .name = MT_CLS_TOPSEED, .quirks = MT_QUIRK_ALWAYS_VALID, .is_indirect = true, .maxcontacts = 2, }, { .name = MT_CLS_PANASONIC, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP, .maxcontacts = 4 }, { .name = MT_CLS_GENERALTOUCH_TWOFINGERS, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | MT_QUIRK_VALID_IS_INRANGE | MT_QUIRK_SLOT_IS_CONTACTID, .maxcontacts = 2 }, { .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | MT_QUIRK_SLOT_IS_CONTACTID }, { .name = MT_CLS_FLATFROG, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | MT_QUIRK_NO_AREA, .sn_move = 2048, .maxcontacts = 40, }, { } }; static void mt_free_input_name(struct hid_input *hi) { struct hid_device *hdev = hi->report->device; const char *name = hi->input->name; if (name != hdev->name) { hi->input->name = hdev->name; kfree(name); } } static ssize_t mt_show_quirks(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct mt_device *td = hid_get_drvdata(hdev); return sprintf(buf, "%u\n", td->mtclass.quirks); } static ssize_t mt_set_quirks(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct mt_device *td = hid_get_drvdata(hdev); unsigned long val; if (kstrtoul(buf, 0, &val)) return -EINVAL; td->mtclass.quirks = val; if (td->cc_index < 0) td->mtclass.quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE; return count; } static DEVICE_ATTR(quirks, S_IWUSR | S_IRUGO, mt_show_quirks, mt_set_quirks); static struct attribute *sysfs_attrs[] = { &dev_attr_quirks.attr, NULL }; static struct attribute_group mt_attribute_group = { .attrs = sysfs_attrs }; static void mt_feature_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { struct mt_device *td = hid_get_drvdata(hdev); switch (usage->hid) { case HID_DG_INPUTMODE: /* Ignore if 
value index is out of bounds. */ if (usage->usage_index >= field->report_count) { dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n"); break; } td->inputmode = field->report->id; td->inputmode_index = usage->usage_index; break; case HID_DG_CONTACTMAX: td->maxcontact_report_id = field->report->id; td->maxcontacts = field->value[0]; if (!td->maxcontacts && field->logical_maximum <= MT_MAX_MAXCONTACT) td->maxcontacts = field->logical_maximum; if (td->mtclass.maxcontacts) /* check if the maxcontacts is given by the class */ td->maxcontacts = td->mtclass.maxcontacts; break; case 0xff0000c5: if (field->report_count == 256 && field->report_size == 8) { /* Win 8 devices need special quirks */ __s32 *quirks = &td->mtclass.quirks; *quirks |= MT_QUIRK_ALWAYS_VALID; *quirks |= MT_QUIRK_IGNORE_DUPLICATES; *quirks |= MT_QUIRK_HOVERING; *quirks |= MT_QUIRK_CONTACT_CNT_ACCURATE; *quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP; *quirks &= ~MT_QUIRK_VALID_IS_INRANGE; *quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE; } break; } } static void set_abs(struct input_dev *input, unsigned int code, struct hid_field *field, int snratio) { int fmin = field->logical_minimum; int fmax = field->logical_maximum; int fuzz = snratio ? 
(fmax - fmin) / snratio : 0; input_set_abs_params(input, code, fmin, fmax, fuzz, 0); input_abs_set_res(input, code, hidinput_calc_abs_res(field, code)); } static void mt_store_field(struct hid_usage *usage, struct mt_device *td, struct hid_input *hi) { struct mt_fields *f = td->fields; if (f->length >= HID_MAX_FIELDS) return; f->usages[f->length++] = usage->hid; } static int mt_pen_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct mt_device *td = hid_get_drvdata(hdev); td->pen_report_id = field->report->id; return 0; } static int mt_pen_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { return 0; } static int mt_pen_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) { /* let hid-input handle it */ return 0; } static void mt_pen_report(struct hid_device *hid, struct hid_report *report) { struct hid_field *field = report->field[0]; input_sync(field->hidinput->input); } static void mt_pen_input_configured(struct hid_device *hdev, struct hid_input *hi) { char *name = kzalloc(strlen(hi->input->name) + 5, GFP_KERNEL); if (name) { sprintf(name, "%s Pen", hi->input->name); mt_free_input_name(hi); hi->input->name = name; } /* force BTN_STYLUS to allow tablet matching in udev */ __set_bit(BTN_STYLUS, hi->input->keybit); } static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct mt_device *td = hid_get_drvdata(hdev); struct mt_class *cls = &td->mtclass; int code; struct hid_usage *prev_usage = NULL; if (field->application == HID_DG_TOUCHSCREEN) td->mt_flags |= INPUT_MT_DIRECT; /* * Model touchscreens providing buttons as touchpads. 
*/ if (field->application == HID_DG_TOUCHPAD || (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) td->mt_flags |= INPUT_MT_POINTER; if (usage->usage_index) prev_usage = &field->usage[usage->usage_index - 1]; switch (usage->hid & HID_USAGE_PAGE) { case HID_UP_GENDESK: switch (usage->hid) { case HID_GD_X: if (prev_usage && (prev_usage->hid == usage->hid)) { hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_TOOL_X); set_abs(hi->input, ABS_MT_TOOL_X, field, cls->sn_move); } else { hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_POSITION_X); set_abs(hi->input, ABS_MT_POSITION_X, field, cls->sn_move); } mt_store_field(usage, td, hi); return 1; case HID_GD_Y: if (prev_usage && (prev_usage->hid == usage->hid)) { hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_TOOL_Y); set_abs(hi->input, ABS_MT_TOOL_Y, field, cls->sn_move); } else { hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_POSITION_Y); set_abs(hi->input, ABS_MT_POSITION_Y, field, cls->sn_move); } mt_store_field(usage, td, hi); return 1; } return 0; case HID_UP_DIGITIZER: switch (usage->hid) { case HID_DG_INRANGE: if (cls->quirks & MT_QUIRK_HOVERING) { hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_DISTANCE); input_set_abs_params(hi->input, ABS_MT_DISTANCE, 0, 1, 0, 0); } mt_store_field(usage, td, hi); return 1; case HID_DG_CONFIDENCE: mt_store_field(usage, td, hi); return 1; case HID_DG_TIPSWITCH: hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); input_set_capability(hi->input, EV_KEY, BTN_TOUCH); mt_store_field(usage, td, hi); return 1; case HID_DG_CONTACTID: mt_store_field(usage, td, hi); td->touches_by_report++; td->mt_report_id = field->report->id; return 1; case HID_DG_WIDTH: hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_TOUCH_MAJOR); if (!(cls->quirks & MT_QUIRK_NO_AREA)) set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field, cls->sn_width); mt_store_field(usage, td, hi); return 1; case HID_DG_HEIGHT: hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_TOUCH_MINOR); if (!(cls->quirks & MT_QUIRK_NO_AREA)) { 
set_abs(hi->input, ABS_MT_TOUCH_MINOR, field, cls->sn_height); input_set_abs_params(hi->input, ABS_MT_ORIENTATION, 0, 1, 0, 0); } mt_store_field(usage, td, hi); return 1; case HID_DG_TIPPRESSURE: hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_PRESSURE); set_abs(hi->input, ABS_MT_PRESSURE, field, cls->sn_pressure); mt_store_field(usage, td, hi); return 1; case HID_DG_CONTACTCOUNT: /* Ignore if indexes are out of bounds. */ if (field->index >= field->report->maxfield || usage->usage_index >= field->report_count) return 1; td->cc_index = field->index; td->cc_value_index = usage->usage_index; return 1; case HID_DG_CONTACTMAX: /* we don't set td->last_slot_field as contactcount and * contact max are global to the report */ return -1; case HID_DG_TOUCH: /* Legacy devices use TIPSWITCH and not TOUCH. * Let's just ignore this field. */ return -1; } /* let hid-input decide for the others */ return 0; case HID_UP_BUTTON: code = BTN_MOUSE + ((usage->hid - 1) & HID_USAGE); hid_map_usage(hi, usage, bit, max, EV_KEY, code); input_set_capability(hi->input, EV_KEY, code); return 1; case 0xff000000: /* we do not want to map these: no input-oriented meaning */ return -1; } return 0; } static int mt_touch_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if (usage->type == EV_KEY || usage->type == EV_ABS) set_bit(usage->type, hi->input->evbit); return -1; } static int mt_compute_slot(struct mt_device *td, struct input_dev *input) { __s32 quirks = td->mtclass.quirks; if (quirks & MT_QUIRK_SLOT_IS_CONTACTID) return td->curdata.contactid; if (quirks & MT_QUIRK_CYPRESS) return cypress_compute_slot(td); if (quirks & MT_QUIRK_SLOT_IS_CONTACTNUMBER) return td->num_received; if (quirks & MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE) return td->curdata.contactid - 1; return input_mt_get_slot_by_key(input, td->curdata.contactid); } /* * this function is called when a whole contact has been processed, * so 
that it can assign it to a slot and store the data there */ static void mt_complete_slot(struct mt_device *td, struct input_dev *input) { if ((td->mtclass.quirks & MT_QUIRK_CONTACT_CNT_ACCURATE) && td->num_received >= td->num_expected) return; if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) { int slotnum = mt_compute_slot(td, input); struct mt_slot *s = &td->curdata; struct input_mt *mt = input->mt; if (slotnum < 0 || slotnum >= td->maxcontacts) return; if ((td->mtclass.quirks & MT_QUIRK_IGNORE_DUPLICATES) && mt) { struct input_mt_slot *slot = &mt->slots[slotnum]; if (input_mt_is_active(slot) && input_mt_is_used(mt, slot)) return; } input_mt_slot(input, slotnum); input_mt_report_slot_state(input, MT_TOOL_FINGER, s->touch_state || s->inrange_state); if (s->touch_state || s->inrange_state) { /* this finger is in proximity of the sensor */ int wide = (s->w > s->h); /* divided by two to match visual scale of touch */ int major = max(s->w, s->h) >> 1; int minor = min(s->w, s->h) >> 1; input_event(input, EV_ABS, ABS_MT_POSITION_X, s->x); input_event(input, EV_ABS, ABS_MT_POSITION_Y, s->y); input_event(input, EV_ABS, ABS_MT_TOOL_X, s->cx); input_event(input, EV_ABS, ABS_MT_TOOL_Y, s->cy); input_event(input, EV_ABS, ABS_MT_DISTANCE, !s->touch_state); input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide); input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p); input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major); input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor); } } td->num_received++; } /* * this function is called when a whole packet has been received and processed, * so that it can decide what to send to the input layer. 
*/
/* NOTE(review): the "*" above closes a comment that begins earlier in the
 * file, outside this chunk.  Code below is byte-identical to the original;
 * only comments were added. */

/* Flush one complete multitouch frame to the input core and reset the
 * per-frame contact counter. */
static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
{
	input_mt_sync_frame(input);
	input_sync(input);
	td->num_received = 0;
}

/*
 * Raw event hook for the touch report: forward to hiddev if claimed.
 * Returning 1 tells hid-core the event was consumed (hidinput is driven
 * from mt_report() instead).
 */
static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
				struct hid_usage *usage, __s32 value)
{
	/* we will handle the hidinput part later, now remains hiddev */
	if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
		hid->hiddev_hid_event(hid, field, usage, value);

	return 1;
}

/*
 * Accumulate one usage/value pair of a touch report into td->curdata.
 * The per-class quirks decide which usage carries contact validity.
 * When the last usage of the per-contact field group is seen, the slot
 * is completed via mt_complete_slot().
 */
static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
				struct hid_usage *usage, __s32 value)
{
	struct mt_device *td = hid_get_drvdata(hid);
	__s32 quirks = td->mtclass.quirks;
	struct input_dev *input = field->hidinput->input;

	if (hid->claimed & HID_CLAIMED_INPUT) {
		switch (usage->hid) {
		case HID_DG_INRANGE:
			if (quirks & MT_QUIRK_VALID_IS_INRANGE)
				td->curvalid = value;
			if (quirks & MT_QUIRK_HOVERING)
				td->curdata.inrange_state = value;
			break;
		case HID_DG_TIPSWITCH:
			if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
				td->curvalid = value;
			td->curdata.touch_state = value;
			break;
		case HID_DG_CONFIDENCE:
			if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
				td->curvalid = value;
			break;
		case HID_DG_CONTACTID:
			td->curdata.contactid = value;
			break;
		case HID_DG_TIPPRESSURE:
			td->curdata.p = value;
			break;
		case HID_GD_X:
			/* usage->code distinguishes tool (cx) from contact (x)
			 * coordinates, set up at mapping time. */
			if (usage->code == ABS_MT_TOOL_X)
				td->curdata.cx = value;
			else
				td->curdata.x = value;
			break;
		case HID_GD_Y:
			if (usage->code == ABS_MT_TOOL_Y)
				td->curdata.cy = value;
			else
				td->curdata.y = value;
			break;
		case HID_DG_WIDTH:
			td->curdata.w = value;
			break;
		case HID_DG_HEIGHT:
			td->curdata.h = value;
			break;
		case HID_DG_CONTACTCOUNT:
			/* handled once per report in mt_touch_report() */
			break;
		case HID_DG_TOUCH:
			/* do nothing */
			break;

		default:
			/* anything else is passed straight to input-core */
			if (usage->type)
				input_event(input, usage->type, usage->code,
						value);
			return;
		}

		if (usage->usage_index + 1 == field->report_count) {
			/* we only take into account the last report. */
			if (usage->hid == td->last_slot_field)
				mt_complete_slot(td, field->hidinput->input);
		}
	}
}

/*
 * Process one complete touch report: pick up the (possibly zero)
 * contact count, feed every variable usage through
 * mt_process_mt_event(), and sync the frame once all expected
 * contacts have arrived.
 */
static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
{
	struct mt_device *td = hid_get_drvdata(hid);
	struct hid_field *field;
	unsigned count;
	int r, n;

	/*
	 * Includes multi-packet support where subsequent
	 * packets are sent with zero contactcount.
	 */
	if (td->cc_index >= 0) {
		/* NOTE(review): this inner 'field' intentionally shadows the
		 * outer declaration. */
		struct hid_field *field = report->field[td->cc_index];
		int value = field->value[td->cc_value_index];
		if (value)
			td->num_expected = value;
	}

	for (r = 0; r < report->maxfield; r++) {
		field = report->field[r];
		count = field->report_count;

		/* constant (array) fields carry no per-contact data */
		if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
			continue;

		for (n = 0; n < count; n++)
			mt_process_mt_event(hid, field, &field->usage[n],
					field->value[n]);
	}

	if (td->num_received >= td->num_expected)
		mt_sync_frame(td, report->field[0]->hidinput->input);
}

/* Final per-input setup for the touch interface: apply class quirks and
 * initialize the MT slots on the input device. */
static void mt_touch_input_configured(struct hid_device *hdev,
					struct hid_input *hi)
{
	struct mt_device *td = hid_get_drvdata(hdev);
	struct mt_class *cls = &td->mtclass;
	struct input_dev *input = hi->input;

	if (!td->maxcontacts)
		td->maxcontacts = MT_DEFAULT_MAXCONTACT;

	mt_post_parse(td);
	if (td->serial_maybe)
		mt_post_parse_default_settings(td);

	if (cls->is_indirect)
		td->mt_flags |= INPUT_MT_POINTER;

	if (cls->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
		td->mt_flags |= INPUT_MT_DROP_UNUSED;

	input_mt_init_slots(input, td->maxcontacts, td->mt_flags);

	/* flags were only needed for input_mt_init_slots() */
	td->mt_flags = 0;
}

static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	/* Only map fields from TouchScreen or TouchPad collections.
	 * We need to ignore fields that belong to other collections
	 * such as Mouse that might have the same GenericDesktop usages. */
	if (field->application != HID_DG_TOUCHSCREEN &&
	    field->application != HID_DG_PEN &&
	    field->application != HID_DG_TOUCHPAD)
		return -1;

	/* stylus usages are routed to the dedicated pen mapping */
	if (field->physical == HID_DG_STYLUS)
		return mt_pen_input_mapping(hdev, hi, field, usage, bit, max);

	return mt_touch_input_mapping(hdev, hi, field, usage, bit, max);
}

/* Dispatch the post-mapping hook to the pen or touch handler. */
static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	if (field->physical == HID_DG_STYLUS)
		return mt_pen_input_mapped(hdev, hi, field, usage, bit, max);

	return mt_touch_input_mapped(hdev, hi, field, usage, bit, max);
}

/* Per-event dispatch by report id; returns 1 (consumed) for everything. */
static int mt_event(struct hid_device *hid, struct hid_field *field,
				struct hid_usage *usage, __s32 value)
{
	struct mt_device *td = hid_get_drvdata(hid);

	if (field->report->id == td->mt_report_id)
		return mt_touch_event(hid, field, usage, value);

	if (field->report->id == td->pen_report_id)
		return mt_pen_event(hid, field, usage, value);

	/* ignore other reports */
	return 1;
}

/* Per-report dispatch by report id (touch vs. pen). */
static void mt_report(struct hid_device *hid, struct hid_report *report)
{
	struct mt_device *td = hid_get_drvdata(hid);

	if (!(hid->claimed & HID_CLAIMED_INPUT))
		return;

	if (report->id == td->mt_report_id)
		mt_touch_report(hid, report);

	if (report->id == td->pen_report_id)
		mt_pen_report(hid, report);
}

/* Send the feature report that switches the device into multitouch
 * input mode (value 0x02 per the HID digitizer spec). */
static void mt_set_input_mode(struct hid_device *hdev)
{
	struct mt_device *td = hid_get_drvdata(hdev);
	struct hid_report *r;
	struct hid_report_enum *re;

	if (td->inputmode < 0)
		return;

	re = &(hdev->report_enum[HID_FEATURE_REPORT]);
	r = re->report_id_hash[td->inputmode];
	if (r) {
		r->field[0]->value[td->inputmode_index] = 0x02;
		hid_hw_request(hdev, r, HID_REQ_SET_REPORT);
	}
}

/* Program the class-requested maximum contact count into the device,
 * clamped to the report field's logical maximum; skip the SET_REPORT
 * if the device already holds the desired value. */
static void mt_set_maxcontacts(struct hid_device *hdev)
{
	struct mt_device *td = hid_get_drvdata(hdev);
	struct hid_report *r;
	struct hid_report_enum *re;
	int fieldmax, max;

	if (td->maxcontact_report_id < 0)
		return;

	if (!td->mtclass.maxcontacts)
		return;

	re = &hdev->report_enum[HID_FEATURE_REPORT];
	r = re->report_id_hash[td->maxcontact_report_id];
	if (r) {
		max = td->mtclass.maxcontacts;
		fieldmax = r->field[0]->logical_maximum;
		max = min(fieldmax, max);
		if (r->field[0]->value[0] != max) {
			r->field[0]->value[0] = max;
			hid_hw_request(hdev, r, HID_REQ_SET_REPORT);
		}
	}
}

static void mt_post_parse_default_settings(struct mt_device *td)
{
	__s32 quirks = td->mtclass.quirks;

	/* unknown serial device needs special quirks */
	if (td->touches_by_report == 1) {
		quirks |= MT_QUIRK_ALWAYS_VALID;
		quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
		quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
		quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE;
		quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
	}

	td->mtclass.quirks = quirks;
}

/* Derive per-device settings once report parsing is done: remember the
 * usage that terminates a contact group, and drop the accurate-count
 * quirk when no contact-count field was found. */
static void mt_post_parse(struct mt_device *td)
{
	struct mt_fields *f = td->fields;
	struct mt_class *cls = &td->mtclass;

	if (td->touches_by_report > 0) {
		int field_count_per_touch = f->length / td->touches_by_report;
		td->last_slot_field = f->usages[field_count_per_touch - 1];
	}

	if (td->cc_index < 0)
		cls->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}

/* Name each sub-input after the device and finish configuring the
 * touch or pen interface it belongs to.  The kstrdup'd name is freed
 * by mt_free_input_name() on remove/probe-failure paths. */
static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
	struct mt_device *td = hid_get_drvdata(hdev);
	char *name = kstrdup(hdev->name, GFP_KERNEL);

	if (name)
		hi->input->name = name;

	if (hi->report->id == td->mt_report_id)
		mt_touch_input_configured(hdev, hi);

	if (hi->report->id == td->pen_report_id)
		mt_pen_input_configured(hdev, hi);
}

/*
 * Probe: select the MT class matching the device id, allocate and
 * initialize the driver state, parse reports, start the hardware and
 * push initial mode/maxcontact settings to the device.
 * td->fields is scratch data used only during parsing and is freed
 * before returning.
 */
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret, i;
	struct mt_device *td;
	struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
	struct hid_input *hi;

	for (i = 0; mt_classes[i].name ; i++) {
		if (id->driver_data == mt_classes[i].name) {
			mtclass = &(mt_classes[i]);
			break;
		}
	}

	/* This allows the driver to correctly support devices
	 * that emit events over several HID messages.
	 */
	hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;

	/*
	 * This allows the driver to handle different input sensors
	 * that emits events through different reports on the same HID
	 * device.
	 */
	hdev->quirks |= HID_QUIRK_MULTI_INPUT;
	hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;

	td = kzalloc(sizeof(struct mt_device), GFP_KERNEL);
	if (!td) {
		dev_err(&hdev->dev, "cannot allocate multitouch data\n");
		return -ENOMEM;
	}
	td->mtclass = *mtclass;
	/* -1 == "not discovered yet" for all report/feature indices */
	td->inputmode = -1;
	td->maxcontact_report_id = -1;
	td->cc_index = -1;
	td->mt_report_id = -1;
	td->pen_report_id = -1;
	hid_set_drvdata(hdev, td);

	td->fields = kzalloc(sizeof(struct mt_fields), GFP_KERNEL);
	if (!td->fields) {
		dev_err(&hdev->dev, "cannot allocate multitouch fields data\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
		td->serial_maybe = true;

	ret = hid_parse(hdev);
	if (ret != 0)
		goto fail;

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret)
		goto hid_fail;

	/* NOTE(review): sysfs_create_group() result is deliberately not
	 * fatal here; 'ret' is overwritten but the probe continues. */
	ret = sysfs_create_group(&hdev->dev.kobj,
				   &mt_attribute_group);

	mt_set_maxcontacts(hdev);
	mt_set_input_mode(hdev);

	/* parsing scratch data is no longer needed */
	kfree(td->fields);
	td->fields = NULL;

	return 0;

hid_fail:
	list_for_each_entry(hi, &hdev->inputs, list)
		mt_free_input_name(hi);
fail:
	kfree(td->fields);
	kfree(td);
	return ret;
}

#ifdef CONFIG_PM
/* Re-apply device settings lost across a reset-resume. */
static int mt_reset_resume(struct hid_device *hdev)
{
	mt_set_maxcontacts(hdev);
	mt_set_input_mode(hdev);
	return 0;
}

static int mt_resume(struct hid_device *hdev)
{
	/* Some Elan legacy devices require SET_IDLE to be set on resume.
	 * It should be safe to send it to other devices too.
	 * Tested on 3M, Stantum, Cypress, Zytronic, eGalax, and Elan panels. */

	hid_hw_idle(hdev, 0, 0, HID_REQ_SET_IDLE);

	return 0;
}
#endif

/* Teardown: remove sysfs group, free the duplicated input names, stop
 * the hardware and release driver state. */
static void mt_remove(struct hid_device *hdev)
{
	struct mt_device *td = hid_get_drvdata(hdev);
	struct hid_input *hi;

	sysfs_remove_group(&hdev->dev.kobj,
			   &mt_attribute_group);
	list_for_each_entry(hi, &hdev->inputs, list)
		mt_free_input_name(hi);
	hid_hw_stop(hdev);
	kfree(td);
	hid_set_drvdata(hdev, NULL);
}

/* Device match table: each entry binds a vendor/product id to the MT
 * class (quirk set) it needs; the final HID_DEVICE entry catches any
 * device in the generic multitouch HID group. */
static const struct hid_device_id mt_devices[] = {

	/* 3M panels */
	{ .driver_data = MT_CLS_3M,
		MT_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
	{ .driver_data = MT_CLS_3M,
		MT_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
	{ .driver_data = MT_CLS_3M,
		MT_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M3266) },

	/* ActionStar panels */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
			USB_DEVICE_ID_ACTIONSTAR_1011) },

	/* Atmel panels */
	{ .driver_data = MT_CLS_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_ATMEL,
			USB_DEVICE_ID_ATMEL_MULTITOUCH) },
	{ .driver_data = MT_CLS_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_ATMEL,
			USB_DEVICE_ID_ATMEL_MXT_DIGITIZER) },

	/* Baanto multitouch devices */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_BAANTO,
			USB_DEVICE_ID_BAANTO_MT_190W2) },

	/* Cando panels */
	{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
		MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
			USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
	{ .driver_data = MT_CLS_DUAL_CONTACT_NUMBER,
		MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
			USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
	{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
		MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
			USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
	{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
		MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
			USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },

	/* Chunghwa Telecom touch panels */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
			USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },

	/* CVTouch panels */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
			USB_DEVICE_ID_CVTOUCH_SCREEN) },

	/* Cypress panel */
	{ .driver_data = MT_CLS_CYPRESS,
		HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS,
			USB_DEVICE_ID_CYPRESS_TRUETOUCH) },

	/* eGalax devices (resistive) */
	{ .driver_data = MT_CLS_EGALAX,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
	{ .driver_data = MT_CLS_EGALAX,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },

	/* eGalax devices (capacitive) */
	{ .driver_data = MT_CLS_EGALAX,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
	{ .driver_data = MT_CLS_EGALAX_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7207) },
	{ .driver_data = MT_CLS_EGALAX_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_725E) },
	{ .driver_data = MT_CLS_EGALAX_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224) },
	{ .driver_data = MT_CLS_EGALAX_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_722A) },
	{ .driver_data = MT_CLS_EGALAX,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
	{ .driver_data = MT_CLS_EGALAX_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7262) },
	{ .driver_data = MT_CLS_EGALAX,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
	{ .driver_data = MT_CLS_EGALAX_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72AA) },
	{ .driver_data = MT_CLS_EGALAX,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
	{ .driver_data = MT_CLS_EGALAX,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
	{ .driver_data = MT_CLS_EGALAX_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349) },
	{ .driver_data = MT_CLS_EGALAX_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7) },
	{ .driver_data = MT_CLS_EGALAX_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
	{ .driver_data = MT_CLS_EGALAX,
		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224) },
	{ .driver_data = MT_CLS_EGALAX,
		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72D0) },
	{ .driver_data = MT_CLS_EGALAX,
		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4) },

	/* Elo TouchSystems IntelliTouch Plus panel */
	{ .driver_data = MT_CLS_DUAL_CONTACT_ID,
		MT_USB_DEVICE(USB_VENDOR_ID_ELO,
			USB_DEVICE_ID_ELO_TS2515) },

	/* Flatfrog Panels */
	{ .driver_data = MT_CLS_FLATFROG,
		MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
			USB_DEVICE_ID_MULTITOUCH_3200) },

	/* GeneralTouch panel */
	{ .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
			USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
	{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) },
	{ .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101) },
	{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102) },
	{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106) },
	{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A) },
	{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100) },

	/* Gametel game controller */
	{ .driver_data = MT_CLS_NSMU,
		MT_BT_DEVICE(USB_VENDOR_ID_FRUCTEL,
			USB_DEVICE_ID_GAMETEL_MT_MODE) },

	/* GoodTouch panels */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
			USB_DEVICE_ID_GOODTOUCH_000f) },

	/* Hanvon panels */
	{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
		MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
			USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },

	/* Ideacom panel */
	{ .driver_data = MT_CLS_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
			USB_DEVICE_ID_IDEACOM_IDC6650) },
	{ .driver_data = MT_CLS_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
			USB_DEVICE_ID_IDEACOM_IDC6651) },

	/* Ilitek dual touch panel */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
			USB_DEVICE_ID_ILITEK_MULTITOUCH) },

	/* IRTOUCH panels */
	{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
		MT_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS,
			USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },

	/* LG Display panels */
	{ .driver_data = MT_CLS_DEFAULT,
		MT_USB_DEVICE(USB_VENDOR_ID_LG,
			USB_DEVICE_ID_LG_MULTITOUCH) },

	/* Lumio panels */
	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
		MT_USB_DEVICE(USB_VENDOR_ID_LUMIO,
			USB_DEVICE_ID_CRYSTALTOUCH) },
	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
		MT_USB_DEVICE(USB_VENDOR_ID_LUMIO,
			USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },

	/* MosArt panels */
	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
		MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
			USB_DEVICE_ID_ASUS_T91MT)},
	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
		MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
			USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
		MT_USB_DEVICE(USB_VENDOR_ID_TURBOX,
			USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },

	/* Nexio panels */
	{ .driver_data = MT_CLS_DEFAULT,
		MT_USB_DEVICE(USB_VENDOR_ID_NEXIO,
			USB_DEVICE_ID_NEXIO_MULTITOUCH_420)},

	/* Panasonic panels */
	{ .driver_data = MT_CLS_PANASONIC,
		MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
			USB_DEVICE_ID_PANABOARD_UBT780) },
	{ .driver_data = MT_CLS_PANASONIC,
		MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
			USB_DEVICE_ID_PANABOARD_UBT880) },

	/* Novatek Panel */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
			USB_DEVICE_ID_NOVATEK_PCT) },

	/* PenMount panels */
	{ .driver_data = MT_CLS_CONFIDENCE,
		MT_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
			USB_DEVICE_ID_PENMOUNT_PCI) },

	/* PixArt optical touch screen */
	{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
		MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
			USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
	{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
		MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
			USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
	{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
		MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
			USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },

	/* PixCir-based panels */
	{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
		MT_USB_DEVICE(USB_VENDOR_ID_HANVON,
			USB_DEVICE_ID_HANVON_MULTITOUCH) },
	{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
		MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
			USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },

	/* Quanta-based panels */
	{ .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
		MT_USB_DEVICE(USB_VENDOR_ID_QUANTA,
			USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
	{ .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
		MT_USB_DEVICE(USB_VENDOR_ID_QUANTA,
			USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001) },
	{ .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
		MT_USB_DEVICE(USB_VENDOR_ID_QUANTA,
			USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) },

	/* SiS panels */
	{ .driver_data = MT_CLS_DEFAULT,
		HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
		USB_DEVICE_ID_SIS9200_TOUCH) },
	{ .driver_data = MT_CLS_DEFAULT,
		HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
		USB_DEVICE_ID_SIS817_TOUCH) },

	/* Stantum panels */
	{ .driver_data = MT_CLS_CONFIDENCE,
		MT_USB_DEVICE(USB_VENDOR_ID_STANTUM,
			USB_DEVICE_ID_MTP)},
	{ .driver_data = MT_CLS_CONFIDENCE,
		MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
			USB_DEVICE_ID_MTP_STM)},
	{ .driver_data = MT_CLS_DEFAULT,
		MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
			USB_DEVICE_ID_MTP_SITRONIX)},

	/* TopSeed panels */
	{ .driver_data = MT_CLS_TOPSEED,
		MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
			USB_DEVICE_ID_TOPSEED2_PERIPAD_701) },

	/* Touch International panels */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_TOUCH_INTL,
			USB_DEVICE_ID_TOUCH_INTL_MULTI_TOUCH) },

	/* Unitec panels */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
			USB_DEVICE_ID_UNITEC_USB_TOUCH_0709) },
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
			USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },

	/* Wistron panels */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_WISTRON,
			USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH) },

	/* XAT */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_XAT,
			USB_DEVICE_ID_XAT_CSR) },

	/* Xiroku */
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
			USB_DEVICE_ID_XIROKU_SPX) },
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
			USB_DEVICE_ID_XIROKU_MPX) },
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
			USB_DEVICE_ID_XIROKU_CSR) },
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
			USB_DEVICE_ID_XIROKU_SPX1) },
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
			USB_DEVICE_ID_XIROKU_MPX1) },
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
			USB_DEVICE_ID_XIROKU_CSR1) },
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
			USB_DEVICE_ID_XIROKU_SPX2) },
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
			USB_DEVICE_ID_XIROKU_MPX2) },
	{ .driver_data = MT_CLS_NSMU,
		MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
			USB_DEVICE_ID_XIROKU_CSR2) },

	/* Zytronic panels */
	{ .driver_data = MT_CLS_SERIAL,
		MT_USB_DEVICE(USB_VENDOR_ID_ZYTRONIC,
			USB_DEVICE_ID_ZYTRONIC_ZXY100) },

	/* Generic MT device */
	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
	{ }
};
MODULE_DEVICE_TABLE(hid, mt_devices);

/* Grab every usage so hid-core routes all events through this driver. */
static const struct hid_usage_id mt_grabbed_usages[] = {
	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
};

static struct hid_driver mt_driver = {
	.name = "hid-multitouch",
	.id_table = mt_devices,
	.probe = mt_probe,
	.remove = mt_remove,
	.input_mapping = mt_input_mapping,
	.input_mapped = mt_input_mapped,
	.input_configured = mt_input_configured,
	.feature_mapping = mt_feature_mapping,
	.usage_table = mt_grabbed_usages,
	.event = mt_event,
	.report = mt_report,
#ifdef CONFIG_PM
	.reset_resume = mt_reset_resume,
	.resume = mt_resume,
#endif
};
module_hid_driver(mt_driver);
gpl-2.0
juanfont/linux-sunxi
drivers/net/wireless/rtxx7x/common/rt_os_util.c
91
5809
/* ************************************************************************* * Ralink Tech Inc. * 5F., No.36, Taiyuan St., Jhubei City, * Hsinchu County 302, * Taiwan, R.O.C. * * (c) Copyright 2002-2010, Ralink Technology, Inc. * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * *************************************************************************/ #define RTMP_MODULE_OS #define RTMP_MODULE_OS_UTIL /*#include "rt_config.h" */ #include "rtmp_comm.h" #include "rt_os_util.h" #include "rtmp_osabl.h" UINT32 RalinkRate[256] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 109, 110, 111, 112, 13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78, 104, 156, 208, 234, 260, 39, 78, 117, 156, 234, 312, 351, 390, 27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540, 81, 162, 243, 324, 486, 648, 729, 810, 14, 29, 43, 57, 87, 115, 130, 144, 29, 59, 87, 115, 173, 230, 260, 288, 43, 87, 130, 173, 260, 317, 390, 433, 30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600, 90, 180, 270, 360, 540, 720, 810, 900, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19, 20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39, 40,41,42,43,44,45,46,47}; /* 3*3 */ VOID RtmpDrvMaxRateGet( IN VOID *pReserved, /* IN PHTTRANSMIT_SETTING pHtPhyMode, */ IN UINT8 MODE, IN UINT8 ShortGI, 
IN UINT8 BW, IN UINT8 MCS, OUT UINT32 *pRate) { int rate_index = 0; #ifdef DOT11_N_SUPPORT if (MODE >= MODE_HTMIX) { /* rate_index = 16 + ((UCHAR)pHtPhyMode->field.BW *16) + ((UCHAR)pHtPhyMode->field.ShortGI *32) + ((UCHAR)pHtPhyMode->field.MCS); */ rate_index = 16 + ((UCHAR)BW *24) + ((UCHAR)ShortGI *48) + ((UCHAR)MCS); } else #endif /* DOT11_N_SUPPORT */ if (MODE == MODE_OFDM) rate_index = (UCHAR)(MCS) + 4; else rate_index = (UCHAR)(MCS); if (rate_index < 0) rate_index = 0; if (rate_index > 255) rate_index = 255; *pRate = RalinkRate[rate_index] * 500000; } char * rtstrchr(const char * s, int c) { for(; *s != (char) c; ++s) if (*s == '\0') return NULL; return (char *) s; } VOID RtmpMeshDown( IN VOID *pDrvCtrlBK, IN BOOLEAN WaitFlag, IN BOOLEAN (*RtmpMeshLinkCheck)(IN VOID *pAd)) { } BOOLEAN RtmpOsCmdDisplayLenCheck( IN UINT32 LenSrc, IN UINT32 Offset) { if (LenSrc > (IW_PRIV_SIZE_MASK - Offset)) return FALSE; return TRUE; } #ifdef CONFIG_STA_SUPPORT #ifdef WPA_SUPPLICANT_SUPPORT VOID WpaSendMicFailureToWpaSupplicant( IN PNET_DEV pNetDev, IN BOOLEAN bUnicast) { char custom[IW_CUSTOM_MAX] = {0}; snprintf(custom, sizeof(custom), "MLME-MICHAELMICFAILURE.indication"); if(bUnicast) sprintf(custom, "%s unicast", custom); RtmpOSWrielessEventSend(pNetDev, RT_WLAN_EVENT_CUSTOM, -1, NULL, (PUCHAR)custom, strlen(custom)); return; } #endif /* WPA_SUPPLICANT_SUPPORT */ #endif /* CONFIG_STA_SUPPORT */ #ifdef NATIVE_WPA_SUPPLICANT_SUPPORT int wext_notify_event_assoc( IN PNET_DEV pNetDev, IN UCHAR *ReqVarIEs, IN UINT32 ReqVarIELen) { char custom[IW_CUSTOM_MAX] = {0}; #if WIRELESS_EXT > 17 if (ReqVarIELen <= IW_CUSTOM_MAX) { NdisMoveMemory(custom, ReqVarIEs, ReqVarIELen); RtmpOSWrielessEventSend(pNetDev, RT_WLAN_EVENT_ASSOC_REQ_IE, -1, NULL, (UCHAR *)custom, ReqVarIELen); } else DBGPRINT(RT_DEBUG_TRACE, ("pAd->StaCfg.ReqVarIELen > MAX_CUSTOM_LEN\n")); #else int len; len = (ReqVarIELen*2) + 17; if (len <= IW_CUSTOM_MAX) { UCHAR idx; snprintf(custom, sizeof(custom), 
"ASSOCINFO(ReqIEs="); for (idx=0; idx<ReqVarIELen; idx++) sprintf(custom, "%s%02x", custom, ReqVarIEs[idx]); RtmpOSWrielessEventSend(pNetDev, RT_WLAN_EVENT_CUSTOM, -1, NULL, custom, len); } else DBGPRINT(RT_DEBUG_TRACE, ("len(%d) > MAX_CUSTOM_LEN\n", len)); #endif return 0; } #endif /* NATIVE_WPA_SUPPLICANT_SUPPORT */ #ifdef WPA_SUPPLICANT_SUPPORT #ifndef NATIVE_WPA_SUPPLICANT_SUPPORT VOID SendAssocIEsToWpaSupplicant( IN PNET_DEV pNetDev, IN UCHAR *ReqVarIEs, IN UINT32 ReqVarIELen) { STRING custom[IW_CUSTOM_MAX] = {0}; if ((ReqVarIELen + 17) <= IW_CUSTOM_MAX) { snprintf(custom, sizeof(custom), "ASSOCINFO_ReqIEs="); NdisMoveMemory(custom+17, ReqVarIEs, ReqVarIELen); RtmpOSWrielessEventSend(pNetDev, RT_WLAN_EVENT_CUSTOM, RT_REQIE_EVENT_FLAG, NULL, (PUCHAR)custom, ReqVarIELen + 17); RtmpOSWrielessEventSend(pNetDev, RT_WLAN_EVENT_CUSTOM, RT_ASSOCINFO_EVENT_FLAG, NULL, NULL, 0); } else DBGPRINT(RT_DEBUG_TRACE, ("pAd->StaCfg.ReqVarIELen + 17 > MAX_CUSTOM_LEN\n")); return; } #endif /* NATIVE_WPA_SUPPLICANT_SUPPORT */ #endif /* WPA_SUPPLICANT_SUPPORT */ /* End of rtmp_os_util.c */
gpl-2.0
nc543/linux-release
drivers/staging/tm6000/tm6000-alsa.c
91
10713
/* * * Support for audio capture for tm5600/6000/6010 * (c) 2007-2008 Mauro Carvalho Chehab <mchehab@redhat.com> * * Based on cx88-alsa.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <asm/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/control.h> #include <sound/initval.h> #include "tm6000.h" #include "tm6000-regs.h" #undef dprintk #define dprintk(level, fmt, arg...) do { \ if (debug >= level) \ printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg); \ } while (0) /**************************************************************************** Module global static vars ****************************************************************************/ static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1}; module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable tm6000x soundcard. 
default enabled."); module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s)."); /**************************************************************************** Module macros ****************************************************************************/ MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Trident,tm5600}," "{{Trident,tm6000}," "{{Trident,tm6010}"); static unsigned int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); /**************************************************************************** Module specific funtions ****************************************************************************/ /* * BOARD Specific: Sets audio DMA */ static int _tm6000_start_audio_dma(struct snd_tm6000_card *chip) { struct tm6000_core *core = chip->core; int val; dprintk(1, "Starting audio DMA\n"); /* Enables audio */ val = tm6000_get_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0x0); val |= 0x20; tm6000_set_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val); tm6000_set_audio_bitrate(core, 48000); tm6000_set_reg(core, TM6010_REQ08_R01_A_INIT, 0x80); return 0; } /* * BOARD Specific: Resets audio DMA */ static int _tm6000_stop_audio_dma(struct snd_tm6000_card *chip) { struct tm6000_core *core = chip->core; int val; dprintk(1, "Stopping audio DMA\n"); /* Enables audio */ val = tm6000_get_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0x0); val &= ~0x20; tm6000_set_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val); tm6000_set_reg(core, TM6010_REQ08_R01_A_INIT, 0); return 0; } static void dsp_buffer_free(struct snd_pcm_substream *substream) { struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream); dprintk(2, "Freeing buffer\n"); vfree(substream->runtime->dma_area); substream->runtime->dma_area = NULL; substream->runtime->dma_bytes = 0; } 
/*
 * (Re)allocate the PCM data buffer for @substream.
 *
 * An already-present buffer larger than @size is reused as-is;
 * otherwise the old buffer is freed and a new one is vmalloc()ed.
 * NOTE(review): the test uses '>' so a buffer of exactly @size bytes
 * is freed and reallocated — harmless, but confirm intent.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int dsp_buffer_alloc(struct snd_pcm_substream *substream, int size)
{
	struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);

	dprintk(2, "Allocating buffer\n");

	if (substream->runtime->dma_area) {
		if (substream->runtime->dma_bytes > size)
			return 0;
		dsp_buffer_free(substream);
	}

	substream->runtime->dma_area = vmalloc(size);
	if (!substream->runtime->dma_area)
		return -ENOMEM;

	substream->runtime->dma_bytes = size;

	return 0;
}

/****************************************************************************
		ALSA PCM Interface
 ****************************************************************************/

/*
 * Digital hardware definition: capture-only, 16-bit LE stereo,
 * fixed 48 kHz, fixed 62720-byte periods.
 */
#define DEFAULT_FIFO_SIZE	4096

static struct snd_pcm_hardware snd_tm6000_digital_hw = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP_VALID,
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rates = SNDRV_PCM_RATE_48000,
	.rate_min = 48000,
	.rate_max = 48000,
	.channels_min = 2,
	.channels_max = 2,
	.period_bytes_min = 62720,
	.period_bytes_max = 62720,
	.periods_min = 1,
	.periods_max = 1024,
	.buffer_bytes_max = 62720 * 8,
};

/*
 * audio pcm capture open callback
 *
 * Constrains the period count to a power of two, remembers the
 * substream in the card context and installs the hardware limits.
 */
static int snd_tm6000_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	err = snd_pcm_hw_constraint_pow2(runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIODS);
	if (err < 0)
		goto _error;

	chip->substream = substream;

	runtime->hw = snd_tm6000_digital_hw;

	return 0;
_error:
	dprintk(1, "Error opening PCM!\n");
	return err;
}

/*
 * audio close callback - nothing to undo, open() only stored pointers
 */
static int snd_tm6000_close(struct snd_pcm_substream *substream)
{
	return 0;
}

/*
 * Core-driver fill callback: copy @size bytes of captured audio from
 * @buf into the ALSA ring buffer, wrapping at the buffer end, and
 * signal period completion to ALSA when a full period has arrived.
 */
static int tm6000_fillbuf(struct tm6000_core *core, char *buf, int size)
{
	struct snd_tm6000_card *chip = core->adev;
	struct snd_pcm_substream *substream = chip->substream;
	struct snd_pcm_runtime *runtime;
	int period_elapsed = 0;
	unsigned int stride, buf_pos;

	if (!size || !substream)
		return -EINVAL;

	runtime = substream->runtime;
	if (!runtime || !runtime->dma_area)
		return -EINVAL;

	buf_pos = chip->buf_pos;
	/* stride = bytes per frame (frame_bits / 8) */
	stride = runtime->frame_bits >> 3;

	dprintk(1, "Copying %d bytes at %p[%d] - buf size=%d x %d\n", size,
		runtime->dma_area, buf_pos,
		(unsigned int)runtime->buffer_size, stride);

	/* split the copy in two when it wraps past the ring-buffer end */
	if (buf_pos + size >= runtime->buffer_size * stride) {
		unsigned int cnt = runtime->buffer_size * stride - buf_pos;
		memcpy(runtime->dma_area + buf_pos, buf, cnt);
		memcpy(runtime->dma_area, buf + cnt, size - cnt);
	} else
		memcpy(runtime->dma_area + buf_pos, buf, size);

	/* advance the write position (in bytes), wrapping at buffer end */
	chip->buf_pos += size;
	if (chip->buf_pos >= runtime->buffer_size * stride)
		chip->buf_pos -= runtime->buffer_size * stride;

	/* NOTE(review): period_pos accumulates bytes but period_size is
	 * expressed in frames by ALSA — this comparison looks unit-
	 * inconsistent; confirm against the upstream driver.
	 */
	chip->period_pos += size;
	if (chip->period_pos >= runtime->period_size) {
		chip->period_pos -= runtime->period_size;
		period_elapsed = 1;
	}

	if (period_elapsed)
		snd_pcm_period_elapsed(substream);

	return 0;
}

/*
 * hw_params callback: size the intermediate buffer to hold the whole
 * ALSA ring buffer (period bytes * period count).
 */
static int snd_tm6000_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *hw_params)
{
	int size, rc;

	size = params_period_bytes(hw_params) * params_periods(hw_params);

	rc = dsp_buffer_alloc(substream, size);
	if (rc < 0)
		return rc;

	return 0;
}

/*
 * hw free callback: stop the capture DMA; the buffer itself is
 * released by dsp_buffer_free() on the next alloc or stream teardown.
 */
static int snd_tm6000_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);

	_tm6000_stop_audio_dma(chip);

	return 0;
}

/*
 * prepare callback: rewind both ring-buffer positions before a
 * (re)start of the stream.
 */
static int snd_tm6000_prepare(struct snd_pcm_substream *substream)
{
	struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);

	chip->buf_pos = 0;
	chip->period_pos = 0;

	return 0;
}

/*
 * trigger callback: start/stop the audio DMA under reg_lock.
 * Runs in atomic context per the ALSA trigger contract, hence the
 * plain spin_lock.
 */
static int snd_tm6000_card_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
	int err;

	spin_lock(&chip->reg_lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		err = _tm6000_start_audio_dma(chip);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		err = _tm6000_stop_audio_dma(chip);
		break;
	default:
		err = -EINVAL;
		break;
	}

	spin_unlock(&chip->reg_lock);

	return err;
}

/*
 * pointer callback
 *
 * NOTE(review): chip->buf_pos is maintained in bytes by
 * tm6000_fillbuf(), yet this callback's return type is
 * snd_pcm_uframes_t (frames) — confirm whether a division by the
 * frame stride is missing here.
 */
static snd_pcm_uframes_t snd_tm6000_pointer(struct snd_pcm_substream *substream)
{
	struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);

	return chip->buf_pos;
}

/*
 * operators
 */
static struct snd_pcm_ops snd_tm6000_pcm_ops = {
	.open = snd_tm6000_pcm_open,
	.close = snd_tm6000_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = snd_tm6000_hw_params,
	.hw_free = snd_tm6000_hw_free,
	.prepare = snd_tm6000_prepare,
	.trigger = snd_tm6000_card_trigger,
	.pointer = snd_tm6000_pointer,
};

/*
 * create a PCM device
 */

/* FIXME: Control interface - How to control volume/mute? */

/****************************************************************************
			Basic Flow for Sound Devices
 ****************************************************************************/

/*
 * Alsa Constructor - Component probe
 *
 * Creates the ALSA card, allocates the per-device context
 * (snd_tm6000_card), links it to the tm6000 core via dev->adev and
 * registers one capture-only PCM device.
 * Returns 0 on success or a negative errno; the card is freed on any
 * failure after creation (goto error).
 */
int tm6000_audio_init(struct tm6000_core *dev)
{
	struct snd_card *card;
	struct snd_tm6000_card *chip;
	int rc;
	static int devnr;	/* monotonically increasing card index */
	char component[14];
	struct snd_pcm *pcm;

	if (!dev)
		return 0;

	if (devnr >= SNDRV_CARDS)
		return -ENODEV;

	if (!enable[devnr])
		return -ENOENT;

	rc = snd_card_create(index[devnr], "tm6000", THIS_MODULE, 0, &card);
	if (rc < 0) {
		snd_printk(KERN_ERR "cannot create card instance %d\n", devnr);
		return rc;
	}
	strcpy(card->driver, "tm6000-alsa");
	strcpy(card->shortname, "TM5600/60x0");
	sprintf(card->longname, "TM5600/60x0 Audio at bus %d device %d",
		dev->udev->bus->busnum, dev->udev->devnum);

	sprintf(component, "USB%04x:%04x",
		le16_to_cpu(dev->udev->descriptor.idVendor),
		le16_to_cpu(dev->udev->descriptor.idProduct));
	snd_component_add(card, component);
	snd_card_set_dev(card, &dev->udev->dev);

	chip = kzalloc(sizeof(struct snd_tm6000_card), GFP_KERNEL);
	if (!chip) {
		rc = -ENOMEM;
		goto error;
	}

	chip->core = dev;
	chip->card = card;
	dev->adev = chip;
	spin_lock_init(&chip->reg_lock);

	rc = snd_pcm_new(card, "TM6000 Audio", 0, 0, 1, &pcm);
	if (rc < 0)
		goto error;

	pcm->info_flags = 0;
pcm->private_data = chip; strcpy(pcm->name, "Trident TM5600/60x0"); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_tm6000_pcm_ops); rc = snd_card_register(card); if (rc < 0) goto error; dprintk(1,"Registered audio driver for %s\n", card->longname); return 0; error: snd_card_free(card); return rc; } static int tm6000_audio_fini(struct tm6000_core *dev) { struct snd_tm6000_card *chip = dev->adev; if (!dev) return 0; if (!chip) return 0; if (!chip->card) return 0; snd_card_free(chip->card); chip->card = NULL; kfree(chip); dev->adev = NULL; return 0; } struct tm6000_ops audio_ops = { .type = TM6000_AUDIO, .name = "TM6000 Audio Extension", .init = tm6000_audio_init, .fini = tm6000_audio_fini, .fillbuf = tm6000_fillbuf, }; static int __init tm6000_alsa_register(void) { return tm6000_register_extension(&audio_ops); } static void __exit tm6000_alsa_unregister(void) { tm6000_unregister_extension(&audio_ops); } module_init(tm6000_alsa_register); module_exit(tm6000_alsa_unregister);
gpl-2.0
Shimejing/linux
net/batman-adv/soft-interface.c
347
31692
/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "soft-interface.h" #include "main.h" #include <linux/atomic.h> #include <linux/byteorder/generic.h> #include <linux/cache.h> #include <linux/compiler.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/fs.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/lockdep.h> #include <linux/netdevice.h> #include <linux/percpu.h> #include <linux/printk.h> #include <linux/random.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/socket.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/types.h> #include <linux/workqueue.h> #include "bridge_loop_avoidance.h" #include "debugfs.h" #include "distributed-arp-table.h" #include "gateway_client.h" #include "gateway_common.h" #include "hard-interface.h" #include "multicast.h" #include "network-coding.h" #include "packet.h" #include "send.h" #include "sysfs.h" #include "translation-table.h" static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); static void batadv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info); static u32 batadv_get_msglevel(struct net_device 
*dev);
static void batadv_set_msglevel(struct net_device *dev, u32 value);
static u32 batadv_get_link(struct net_device *dev);
static void batadv_get_strings(struct net_device *dev, u32 stringset,
			       u8 *data);
static void batadv_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data);
static int batadv_get_sset_count(struct net_device *dev, int stringset);

/* ethtool hooks for the batman-adv soft interface (implemented at the
 * bottom of this file)
 */
static const struct ethtool_ops batadv_ethtool_ops = {
	.get_settings = batadv_get_settings,
	.get_drvinfo = batadv_get_drvinfo,
	.get_msglevel = batadv_get_msglevel,
	.set_msglevel = batadv_set_msglevel,
	.get_link = batadv_get_link,
	.get_strings = batadv_get_strings,
	.get_ethtool_stats = batadv_get_ethtool_stats,
	.get_sset_count = batadv_get_sset_count,
};

/**
 * batadv_skb_head_push - grow the headroom of @skb by @len bytes
 * @skb: the packet buffer to modify
 * @len: number of bytes to make available in front of skb->data
 *
 * Unshares the header part via skb_cow_head() before pushing, so the
 * caller may write the new header area freely.
 *
 * Returns 0 on success or the negative error from skb_cow_head().
 */
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
{
	int result;

	/* TODO: We must check if we can release all references to non-payload
	 * data using skb_header_release in our skbs to allow skb_cow_header to
	 * work optimally. This means that those skbs are not allowed to read
	 * or write any data which is before the current position of skb->data
	 * after that call and thus allow other skbs with the same data buffer
	 * to write freely in that area.
	 */
	result = skb_cow_head(skb, len);
	if (result < 0)
		return result;

	skb_push(skb, len);
	return 0;
}

/* ndo_open: allow transmissions on the soft interface */
static int batadv_interface_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

/* ndo_stop: stop the transmit queue of the soft interface */
static int batadv_interface_release(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

/* ndo_get_stats: aggregate the per-cpu batman-adv counters into the
 * netdev stats structure
 */
static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct net_device_stats *stats = &bat_priv->stats;

	stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
	stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
	stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
	stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
	stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
	return stats;
}

/* ndo_set_mac_address: change the soft interface MAC and, when the
 * mesh is active, migrate the local translation-table entries of every
 * configured VLAN from the old to the new address
 */
static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;
	struct sockaddr *addr = p;
	u8 old_addr[ETH_ALEN];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(old_addr, dev->dev_addr);
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	/* only modify transtable if it has been initialized before */
	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		return 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
		batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
				       "mac address changed", false);
		batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
				    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
	}
	rcu_read_unlock();

	return 0;
}

/* ndo_change_mtu: accept an MTU between 68 bytes (minimum IPv4 MTU)
 * and whatever the attached hard interfaces can carry
 */
static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
{
	/* check ranges */
	if ((new_mtu < 68) || (new_mtu > batadv_hardif_min_mtu(dev)))
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}

/**
 * batadv_interface_set_rx_mode - set the rx mode of a device
 * @dev: registered network device to modify
 *
 * We do not actually need to set any rx filters for the virtual
batman * soft interface. However a dummy handler enables a user to set static * multicast listeners for instance. */ static void batadv_interface_set_rx_mode(struct net_device *dev) { } static int batadv_interface_tx(struct sk_buff *skb, struct net_device *soft_iface) { struct ethhdr *ethhdr; struct batadv_priv *bat_priv = netdev_priv(soft_iface); struct batadv_hard_iface *primary_if = NULL; struct batadv_bcast_packet *bcast_packet; __be16 ethertype = htons(ETH_P_BATMAN); static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}; static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00, 0x00, 0x00}; enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO; u8 *dst_hint = NULL, chaddr[ETH_ALEN]; struct vlan_ethhdr *vhdr; unsigned int header_len = 0; int data_len = skb->len, ret; unsigned long brd_delay = 1; bool do_bcast = false, client_added; unsigned short vid; u32 seqno; int gw_mode; enum batadv_forw_mode forw_mode; struct batadv_orig_node *mcast_single_orig = NULL; int network_offset = ETH_HLEN; if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) goto dropped; soft_iface->trans_start = jiffies; vid = batadv_get_vid(skb, 0); ethhdr = eth_hdr(skb); switch (ntohs(ethhdr->h_proto)) { case ETH_P_8021Q: vhdr = vlan_eth_hdr(skb); if (vhdr->h_vlan_encapsulated_proto != ethertype) { network_offset += VLAN_HLEN; break; } /* fall through */ case ETH_P_BATMAN: goto dropped; } skb_set_network_header(skb, network_offset); if (batadv_bla_tx(bat_priv, skb, vid)) goto dropped; /* skb->data might have been reallocated by batadv_bla_tx() */ ethhdr = eth_hdr(skb); /* Register the client MAC in the transtable */ if (!is_multicast_ether_addr(ethhdr->h_source)) { client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source, vid, skb->skb_iif, skb->mark); if (!client_added) goto dropped; } /* don't accept stp packets. STP does not help in meshes. * better use the bridge loop avoidance ... 
* * The same goes for ECTP sent at least by some Cisco Switches, * it might confuse the mesh when used with bridge loop avoidance. */ if (batadv_compare_eth(ethhdr->h_dest, stp_addr)) goto dropped; if (batadv_compare_eth(ethhdr->h_dest, ectp_addr)) goto dropped; gw_mode = atomic_read(&bat_priv->gw_mode); if (is_multicast_ether_addr(ethhdr->h_dest)) { /* if gw mode is off, broadcast every packet */ if (gw_mode == BATADV_GW_MODE_OFF) { do_bcast = true; goto send; } dhcp_rcp = batadv_gw_dhcp_recipient_get(skb, &header_len, chaddr); /* skb->data may have been modified by * batadv_gw_dhcp_recipient_get() */ ethhdr = eth_hdr(skb); /* if gw_mode is on, broadcast any non-DHCP message. * All the DHCP packets are going to be sent as unicast */ if (dhcp_rcp == BATADV_DHCP_NO) { do_bcast = true; goto send; } if (dhcp_rcp == BATADV_DHCP_TO_CLIENT) dst_hint = chaddr; else if ((gw_mode == BATADV_GW_MODE_SERVER) && (dhcp_rcp == BATADV_DHCP_TO_SERVER)) /* gateways should not forward any DHCP message if * directed to a DHCP server */ goto dropped; send: if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) { forw_mode = batadv_mcast_forw_mode(bat_priv, skb, &mcast_single_orig); if (forw_mode == BATADV_FORW_NONE) goto dropped; if (forw_mode == BATADV_FORW_SINGLE) do_bcast = false; } } batadv_skb_set_priority(skb, 0); /* ethernet packet should be broadcasted */ if (do_bcast) { primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto dropped; /* in case of ARP request, we do not immediately broadcasti the * packet, instead we first wait for DAT to try to retrieve the * correct ARP entry */ if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb)) brd_delay = msecs_to_jiffies(ARP_REQ_DELAY); if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0) goto dropped; bcast_packet = (struct batadv_bcast_packet *)skb->data; bcast_packet->version = BATADV_COMPAT_VERSION; bcast_packet->ttl = BATADV_TTL; /* batman packet type: broadcast */ bcast_packet->packet_type = 
BATADV_BCAST; bcast_packet->reserved = 0; /* hw address of first interface is the orig mac because only * this mac is known throughout the mesh */ ether_addr_copy(bcast_packet->orig, primary_if->net_dev->dev_addr); /* set broadcast sequence number */ seqno = atomic_inc_return(&bat_priv->bcast_seqno); bcast_packet->seqno = htonl(seqno); batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay); /* a copy is stored in the bcast list, therefore removing * the original skb. */ kfree_skb(skb); /* unicast packet */ } else { /* DHCP packets going to a server will use the GW feature */ if (dhcp_rcp == BATADV_DHCP_TO_SERVER) { ret = batadv_gw_out_of_range(bat_priv, skb); if (ret) goto dropped; ret = batadv_send_skb_via_gw(bat_priv, skb, vid); } else if (mcast_single_orig) { ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0, mcast_single_orig, vid); } else { if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb)) goto dropped; batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb); ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint, vid); } if (ret == NET_XMIT_DROP) goto dropped_freed; } batadv_inc_counter(bat_priv, BATADV_CNT_TX); batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len); goto end; dropped: kfree_skb(skb); dropped_freed: batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED); end: if (primary_if) batadv_hardif_free_ref(primary_if); return NETDEV_TX_OK; } void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb, struct batadv_hard_iface *recv_if, int hdr_size, struct batadv_orig_node *orig_node) { struct batadv_bcast_packet *batadv_bcast_packet; struct batadv_priv *bat_priv = netdev_priv(soft_iface); __be16 ethertype = htons(ETH_P_BATMAN); struct vlan_ethhdr *vhdr; struct ethhdr *ethhdr; unsigned short vid; bool is_bcast; batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data; is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST); /* check if enough space is available for pulling, and pull */ if 
(!pskb_may_pull(skb, hdr_size)) goto dropped; skb_pull_rcsum(skb, hdr_size); skb_reset_mac_header(skb); /* clean the netfilter state now that the batman-adv header has been * removed */ nf_reset(skb); vid = batadv_get_vid(skb, 0); ethhdr = eth_hdr(skb); switch (ntohs(ethhdr->h_proto)) { case ETH_P_8021Q: vhdr = (struct vlan_ethhdr *)skb->data; if (vhdr->h_vlan_encapsulated_proto != ethertype) break; /* fall through */ case ETH_P_BATMAN: goto dropped; } /* skb->dev & skb->pkt_type are set here */ if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) goto dropped; skb->protocol = eth_type_trans(skb, soft_iface); /* should not be necessary anymore as we use skb_pull_rcsum() * TODO: please verify this and remove this TODO * -- Dec 21st 2009, Simon Wunderlich */ /* skb->ip_summed = CHECKSUM_UNNECESSARY; */ batadv_inc_counter(bat_priv, BATADV_CNT_RX); batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN); soft_iface->last_rx = jiffies; /* Let the bridge loop avoidance check the packet. If will * not handle it, we can safely push it up. 
*/ if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) goto out; if (orig_node) batadv_tt_add_temporary_global_entry(bat_priv, orig_node, ethhdr->h_source, vid); if (is_multicast_ether_addr(ethhdr->h_dest)) { /* set the mark on broadcast packets if AP isolation is ON and * the packet is coming from an "isolated" client */ if (batadv_vlan_ap_isola_get(bat_priv, vid) && batadv_tt_global_is_isolated(bat_priv, ethhdr->h_source, vid)) { /* save bits in skb->mark not covered by the mask and * apply the mark on the rest */ skb->mark &= ~bat_priv->isolation_mark_mask; skb->mark |= bat_priv->isolation_mark; } } else if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest, vid)) { goto dropped; } netif_rx(skb); goto out; dropped: kfree_skb(skb); out: return; } /** * batadv_softif_vlan_free_ref - decrease the vlan object refcounter and * possibly free it * @softif_vlan: the vlan object to release */ void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan) { if (!vlan) return; if (atomic_dec_and_test(&vlan->refcount)) { spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock); hlist_del_rcu(&vlan->list); spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock); kfree_rcu(vlan, rcu); } } /** * batadv_softif_vlan_get - get the vlan object for a specific vid * @bat_priv: the bat priv with all the soft interface information * @vid: the identifier of the vlan object to retrieve * * Returns the private data of the vlan matching the vid passed as argument or * NULL otherwise. The refcounter of the returned object is incremented by 1. 
*/ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv, unsigned short vid) { struct batadv_softif_vlan *vlan_tmp, *vlan = NULL; rcu_read_lock(); hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) { if (vlan_tmp->vid != vid) continue; if (!atomic_inc_not_zero(&vlan_tmp->refcount)) continue; vlan = vlan_tmp; break; } rcu_read_unlock(); return vlan; } /** * batadv_create_vlan - allocate the needed resources for a new vlan * @bat_priv: the bat priv with all the soft interface information * @vid: the VLAN identifier * * Returns 0 on success, a negative error otherwise. */ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) { struct batadv_softif_vlan *vlan; int err; vlan = batadv_softif_vlan_get(bat_priv, vid); if (vlan) { batadv_softif_vlan_free_ref(vlan); return -EEXIST; } vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); if (!vlan) return -ENOMEM; vlan->bat_priv = bat_priv; vlan->vid = vid; atomic_set(&vlan->refcount, 1); atomic_set(&vlan->ap_isolation, 0); err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); if (err) { kfree(vlan); return err; } spin_lock_bh(&bat_priv->softif_vlan_list_lock); hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); spin_unlock_bh(&bat_priv->softif_vlan_list_lock); /* add a new TT local entry. 
This one will be marked with the NOPURGE * flag */ batadv_tt_local_add(bat_priv->soft_iface, bat_priv->soft_iface->dev_addr, vid, BATADV_NULL_IFINDEX, BATADV_NO_MARK); return 0; } /** * batadv_softif_destroy_vlan - remove and destroy a softif_vlan object * @bat_priv: the bat priv with all the soft interface information * @vlan: the object to remove */ static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv, struct batadv_softif_vlan *vlan) { /* explicitly remove the associated TT local entry because it is marked * with the NOPURGE flag */ batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr, vlan->vid, "vlan interface destroyed", false); batadv_sysfs_del_vlan(bat_priv, vlan); batadv_softif_vlan_free_ref(vlan); } /** * batadv_interface_add_vid - ndo_add_vid API implementation * @dev: the netdev of the mesh interface * @vid: identifier of the new vlan * * Set up all the internal structures for handling the new vlan on top of the * mesh interface * * Returns 0 on success or a negative error code in case of failure. */ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto, unsigned short vid) { struct batadv_priv *bat_priv = netdev_priv(dev); struct batadv_softif_vlan *vlan; int ret; /* only 802.1Q vlans are supported. * batman-adv does not know how to handle other types */ if (proto != htons(ETH_P_8021Q)) return -EINVAL; vid |= BATADV_VLAN_HAS_TAG; /* if a new vlan is getting created and it already exists, it means that * it was not deleted yet. batadv_softif_vlan_get() increases the * refcount in order to revive the object. * * if it does not exist then create it. 
*/ vlan = batadv_softif_vlan_get(bat_priv, vid); if (!vlan) return batadv_softif_create_vlan(bat_priv, vid); /* recreate the sysfs object if it was already destroyed (and it should * be since we received a kill_vid() for this vlan */ if (!vlan->kobj) { ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); if (ret) { batadv_softif_vlan_free_ref(vlan); return ret; } } /* add a new TT local entry. This one will be marked with the NOPURGE * flag. This must be added again, even if the vlan object already * exists, because the entry was deleted by kill_vid() */ batadv_tt_local_add(bat_priv->soft_iface, bat_priv->soft_iface->dev_addr, vid, BATADV_NULL_IFINDEX, BATADV_NO_MARK); return 0; } /** * batadv_interface_kill_vid - ndo_kill_vid API implementation * @dev: the netdev of the mesh interface * @vid: identifier of the deleted vlan * * Destroy all the internal structures used to handle the vlan identified by vid * on top of the mesh interface * * Returns 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q * or -ENOENT if the specified vlan id wasn't registered. */ static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto, unsigned short vid) { struct batadv_priv *bat_priv = netdev_priv(dev); struct batadv_softif_vlan *vlan; /* only 802.1Q vlans are supported. batman-adv does not know how to * handle other types */ if (proto != htons(ETH_P_8021Q)) return -EINVAL; vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG); if (!vlan) return -ENOENT; batadv_softif_destroy_vlan(bat_priv, vlan); /* finally free the vlan object */ batadv_softif_vlan_free_ref(vlan); return 0; } /* batman-adv network devices have devices nesting below it and are a special * "super class" of normal network devices; split their locks off into a * separate class since they always nest. 
*/ static struct lock_class_key batadv_netdev_xmit_lock_key; static struct lock_class_key batadv_netdev_addr_lock_key; /** * batadv_set_lockdep_class_one - Set lockdep class for a single tx queue * @dev: device which owns the tx queue * @txq: tx queue to modify * @_unused: always NULL */ static void batadv_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key); } /** * batadv_set_lockdep_class - Set txq and addr_list lockdep class * @dev: network device to modify */ static void batadv_set_lockdep_class(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL); } /** * batadv_softif_destroy_finish - cleans up the remains of a softif * @work: work queue item * * Free the parts of the soft interface which can not be removed under * rtnl lock (to prevent deadlock situations). */ static void batadv_softif_destroy_finish(struct work_struct *work) { struct batadv_softif_vlan *vlan; struct batadv_priv *bat_priv; struct net_device *soft_iface; bat_priv = container_of(work, struct batadv_priv, cleanup_work); soft_iface = bat_priv->soft_iface; /* destroy the "untagged" VLAN */ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS); if (vlan) { batadv_softif_destroy_vlan(bat_priv, vlan); batadv_softif_vlan_free_ref(vlan); } batadv_sysfs_del_meshif(soft_iface); unregister_netdev(soft_iface); } /** * batadv_softif_init_late - late stage initialization of soft interface * @dev: registered network device to modify * * Returns error code on failures */ static int batadv_softif_init_late(struct net_device *dev) { struct batadv_priv *bat_priv; u32 random_seqno; int ret; size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM; batadv_set_lockdep_class(dev); bat_priv = netdev_priv(dev); bat_priv->soft_iface = dev; INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish); /* 
batadv_interface_stats() needs to be available as soon as * register_netdevice() has been called */ bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64)); if (!bat_priv->bat_counters) return -ENOMEM; atomic_set(&bat_priv->aggregated_ogms, 1); atomic_set(&bat_priv->bonding, 0); #ifdef CONFIG_BATMAN_ADV_BLA atomic_set(&bat_priv->bridge_loop_avoidance, 1); #endif #ifdef CONFIG_BATMAN_ADV_DAT atomic_set(&bat_priv->distributed_arp_table, 1); #endif #ifdef CONFIG_BATMAN_ADV_MCAST bat_priv->mcast.flags = BATADV_NO_FLAGS; atomic_set(&bat_priv->multicast_mode, 1); atomic_set(&bat_priv->mcast.num_disabled, 0); atomic_set(&bat_priv->mcast.num_want_all_unsnoopables, 0); atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0); atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0); #endif atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF); atomic_set(&bat_priv->gw_sel_class, 20); atomic_set(&bat_priv->gw.bandwidth_down, 100); atomic_set(&bat_priv->gw.bandwidth_up, 20); atomic_set(&bat_priv->orig_interval, 1000); atomic_set(&bat_priv->hop_penalty, 30); #ifdef CONFIG_BATMAN_ADV_DEBUG atomic_set(&bat_priv->log_level, 0); #endif atomic_set(&bat_priv->fragmentation, 1); atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN); atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN); atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN); atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); atomic_set(&bat_priv->bcast_seqno, 1); atomic_set(&bat_priv->tt.vn, 0); atomic_set(&bat_priv->tt.local_changes, 0); atomic_set(&bat_priv->tt.ogm_append_cnt, 0); #ifdef CONFIG_BATMAN_ADV_BLA atomic_set(&bat_priv->bla.num_requests, 0); #endif bat_priv->tt.last_changeset = NULL; bat_priv->tt.last_changeset_len = 0; bat_priv->isolation_mark = 0; bat_priv->isolation_mark_mask = 0; /* randomize initial seqno to avoid collision */ get_random_bytes(&random_seqno, sizeof(random_seqno)); atomic_set(&bat_priv->frag_seqno, random_seqno); bat_priv->primary_if = NULL; bat_priv->num_ifaces = 
0; batadv_nc_init_bat_priv(bat_priv); ret = batadv_algo_select(bat_priv, batadv_routing_algo); if (ret < 0) goto free_bat_counters; ret = batadv_debugfs_add_meshif(dev); if (ret < 0) goto free_bat_counters; ret = batadv_mesh_init(dev); if (ret < 0) goto unreg_debugfs; return 0; unreg_debugfs: batadv_debugfs_del_meshif(dev); free_bat_counters: free_percpu(bat_priv->bat_counters); bat_priv->bat_counters = NULL; return ret; } /** * batadv_softif_slave_add - Add a slave interface to a batadv_soft_interface * @dev: batadv_soft_interface used as master interface * @slave_dev: net_device which should become the slave interface * * Return 0 if successful or error otherwise. */ static int batadv_softif_slave_add(struct net_device *dev, struct net_device *slave_dev) { struct batadv_hard_iface *hard_iface; int ret = -EINVAL; hard_iface = batadv_hardif_get_by_netdev(slave_dev); if (!hard_iface || hard_iface->soft_iface) goto out; ret = batadv_hardif_enable_interface(hard_iface, dev->name); out: if (hard_iface) batadv_hardif_free_ref(hard_iface); return ret; } /** * batadv_softif_slave_del - Delete a slave iface from a batadv_soft_interface * @dev: batadv_soft_interface used as master interface * @slave_dev: net_device which should be removed from the master interface * * Return 0 if successful or error otherwise. 
*/ static int batadv_softif_slave_del(struct net_device *dev, struct net_device *slave_dev) { struct batadv_hard_iface *hard_iface; int ret = -EINVAL; hard_iface = batadv_hardif_get_by_netdev(slave_dev); if (!hard_iface || hard_iface->soft_iface != dev) goto out; batadv_hardif_disable_interface(hard_iface, BATADV_IF_CLEANUP_KEEP); ret = 0; out: if (hard_iface) batadv_hardif_free_ref(hard_iface); return ret; } static const struct net_device_ops batadv_netdev_ops = { .ndo_init = batadv_softif_init_late, .ndo_open = batadv_interface_open, .ndo_stop = batadv_interface_release, .ndo_get_stats = batadv_interface_stats, .ndo_vlan_rx_add_vid = batadv_interface_add_vid, .ndo_vlan_rx_kill_vid = batadv_interface_kill_vid, .ndo_set_mac_address = batadv_interface_set_mac_addr, .ndo_change_mtu = batadv_interface_change_mtu, .ndo_set_rx_mode = batadv_interface_set_rx_mode, .ndo_start_xmit = batadv_interface_tx, .ndo_validate_addr = eth_validate_addr, .ndo_add_slave = batadv_softif_slave_add, .ndo_del_slave = batadv_softif_slave_del, }; /** * batadv_softif_free - Deconstructor of batadv_soft_interface * @dev: Device to cleanup and remove */ static void batadv_softif_free(struct net_device *dev) { batadv_debugfs_del_meshif(dev); batadv_mesh_free(dev); /* some scheduled RCU callbacks need the bat_priv struct to accomplish * their tasks. 
	 * Wait for them all to be finished before freeing the
	 * netdev and its private data (bat_priv)
	 */
	rcu_barrier();

	free_netdev(dev);
}

/**
 * batadv_softif_init_early - early stage initialization of soft interface
 * @dev: registered network device to modify
 */
static void batadv_softif_init_early(struct net_device *dev)
{
	struct batadv_priv *priv = netdev_priv(dev);

	ether_setup(dev);

	dev->netdev_ops = &batadv_netdev_ops;
	dev->destructor = batadv_softif_free;
	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* can't call min_mtu, because the needed variables
	 * have not been initialized yet
	 */
	dev->mtu = ETH_DATA_LEN;

	/* generate random address */
	eth_hw_addr_random(dev);

	dev->ethtool_ops = &batadv_ethtool_ops;

	memset(priv, 0, sizeof(*priv));
}

/* Allocate and register a new batman-adv soft interface named @name.
 * Returns the net_device on success, NULL on allocation or registration
 * failure.  Caller must hold RTNL (register_netdevice is used directly).
 */
struct net_device *batadv_softif_create(const char *name)
{
	struct net_device *soft_iface;
	int ret;

	soft_iface = alloc_netdev(sizeof(struct batadv_priv), name,
				  NET_NAME_UNKNOWN, batadv_softif_init_early);
	if (!soft_iface)
		return NULL;

	soft_iface->rtnl_link_ops = &batadv_link_ops;

	ret = register_netdevice(soft_iface);
	if (ret < 0) {
		pr_err("Unable to register the batman interface '%s': %i\n",
		       name, ret);
		free_netdev(soft_iface);
		return NULL;
	}

	return soft_iface;
}

/**
 * batadv_softif_destroy_sysfs - deletion of batadv_soft_interface via sysfs
 * @soft_iface: the to-be-removed batman-adv interface
 */
void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	/* teardown is deferred to a workqueue item; NOTE(review): presumably
	 * because the sysfs caller cannot unregister the device synchronously
	 * — confirm against cleanup_work's handler
	 */
	queue_work(batadv_event_workqueue, &bat_priv->cleanup_work);
}

/**
 * batadv_softif_destroy_netlink - deletion of batadv_soft_interface via netlink
 * @soft_iface: the to-be-removed batman-adv interface
 * @head: list pointer
 */
static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
					  struct list_head *head)
{
	struct batadv_hard_iface *hard_iface;

	/* detach every hard interface still enslaved to this soft iface */
	list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface == soft_iface)
			batadv_hardif_disable_interface(hard_iface,
							BATADV_IF_CLEANUP_KEEP);
	}

	batadv_sysfs_del_meshif(soft_iface);
	unregister_netdevice_queue(soft_iface, head);
}

/* Return 1 when @net_dev is a batman-adv soft interface (identified by its
 * xmit callback), 0 otherwise.
 */
int batadv_softif_is_valid(const struct net_device *net_dev)
{
	if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
		return 1;

	return 0;
}

struct rtnl_link_ops batadv_link_ops __read_mostly = {
	.kind = "batadv",
	.priv_size = sizeof(struct batadv_priv),
	.setup = batadv_softif_init_early,
	.dellink = batadv_softif_destroy_netlink,
};

/* ethtool */
static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	/* batman-adv is a virtual device; report fixed dummy link settings */
	cmd->supported = 0;
	cmd->advertising = 0;
	ethtool_cmd_speed_set(cmd, SPEED_10);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_TP;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	return 0;
}

static void batadv_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
	strlcpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "batman", sizeof(info->bus_info));
}

static u32 batadv_get_msglevel(struct net_device *dev)
{
	/* message levels are not supported; -EOPNOTSUPP is truncated to u32
	 * by the ethtool_ops prototype — upstream quirk kept as-is
	 */
	return -EOPNOTSUPP;
}

static void batadv_set_msglevel(struct net_device *dev, u32 value)
{
}

static u32 batadv_get_link(struct net_device *dev)
{
	return 1; /* virtual interface: always report link up */
}

/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
 * Declare each description string in struct.name[] to get fixed sized buffer
 * and compile time checking for strings longer than ETH_GSTRING_LEN.
 */
static const struct {
	const char name[ETH_GSTRING_LEN];
} batadv_counters_strings[] = {
	/* order must match the BATADV_CNT_* counter enum */
	{ "tx" },
	{ "tx_bytes" },
	{ "tx_dropped" },
	{ "rx" },
	{ "rx_bytes" },
	{ "forward" },
	{ "forward_bytes" },
	{ "mgmt_tx" },
	{ "mgmt_tx_bytes" },
	{ "mgmt_rx" },
	{ "mgmt_rx_bytes" },
	{ "frag_tx" },
	{ "frag_tx_bytes" },
	{ "frag_rx" },
	{ "frag_rx_bytes" },
	{ "frag_fwd" },
	{ "frag_fwd_bytes" },
	{ "tt_request_tx" },
	{ "tt_request_rx" },
	{ "tt_response_tx" },
	{ "tt_response_rx" },
	{ "tt_roam_adv_tx" },
	{ "tt_roam_adv_rx" },
#ifdef CONFIG_BATMAN_ADV_DAT
	{ "dat_get_tx" },
	{ "dat_get_rx" },
	{ "dat_put_tx" },
	{ "dat_put_rx" },
	{ "dat_cached_reply_tx" },
#endif
#ifdef CONFIG_BATMAN_ADV_NC
	{ "nc_code" },
	{ "nc_code_bytes" },
	{ "nc_recode" },
	{ "nc_recode_bytes" },
	{ "nc_buffer" },
	{ "nc_decode" },
	{ "nc_decode_bytes" },
	{ "nc_decode_failed" },
	{ "nc_sniffed" },
#endif
};

static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, batadv_counters_strings,
		       sizeof(batadv_counters_strings));
}

static void batadv_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	int i;

	/* sum each per-cpu counter into the flat array ethtool expects */
	for (i = 0; i < BATADV_CNT_NUM; i++)
		data[i] = batadv_sum_counter(bat_priv, i);
}

static int batadv_get_sset_count(struct net_device *dev, int stringset)
{
	if (stringset == ETH_SS_STATS)
		return BATADV_CNT_NUM;

	return -EOPNOTSUPP;
}
gpl-2.0
Pivosgroup/buildroot-linux-kernel
drivers/staging/comedi/drivers/ni_atmio16d.c
859
24827
/* comedi/drivers/ni_atmio16d.c Hardware driver for National Instruments AT-MIO16D board Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: ni_atmio16d Description: National Instruments AT-MIO-16D Author: Chris R. Baugher <baugher@enteract.com> Status: unknown Devices: [National Instruments] AT-MIO-16 (atmio16), AT-MIO-16D (atmio16d) */ /* * I must give credit here to Michal Dobes <dobes@tesnet.cz> who * wrote the driver for Advantec's pcl812 boards. I used the interrupt * handling code from his driver as an example for this one. 
* * Chris Baugher * 5/1/2000 * */ #include <linux/interrupt.h> #include "../comedidev.h" #include <linux/ioport.h> #include "8255.h" /* Configuration and Status Registers */ #define COM_REG_1 0x00 /* wo 16 */ #define STAT_REG 0x00 /* ro 16 */ #define COM_REG_2 0x02 /* wo 16 */ /* Event Strobe Registers */ #define START_CONVERT_REG 0x08 /* wo 16 */ #define START_DAQ_REG 0x0A /* wo 16 */ #define AD_CLEAR_REG 0x0C /* wo 16 */ #define EXT_STROBE_REG 0x0E /* wo 16 */ /* Analog Output Registers */ #define DAC0_REG 0x10 /* wo 16 */ #define DAC1_REG 0x12 /* wo 16 */ #define INT2CLR_REG 0x14 /* wo 16 */ /* Analog Input Registers */ #define MUX_CNTR_REG 0x04 /* wo 16 */ #define MUX_GAIN_REG 0x06 /* wo 16 */ #define AD_FIFO_REG 0x16 /* ro 16 */ #define DMA_TC_INT_CLR_REG 0x16 /* wo 16 */ /* AM9513A Counter/Timer Registers */ #define AM9513A_DATA_REG 0x18 /* rw 16 */ #define AM9513A_COM_REG 0x1A /* wo 16 */ #define AM9513A_STAT_REG 0x1A /* ro 16 */ /* MIO-16 Digital I/O Registers */ #define MIO_16_DIG_IN_REG 0x1C /* ro 16 */ #define MIO_16_DIG_OUT_REG 0x1C /* wo 16 */ /* RTSI Switch Registers */ #define RTSI_SW_SHIFT_REG 0x1E /* wo 8 */ #define RTSI_SW_STROBE_REG 0x1F /* wo 8 */ /* DIO-24 Registers */ #define DIO_24_PORTA_REG 0x00 /* rw 8 */ #define DIO_24_PORTB_REG 0x01 /* rw 8 */ #define DIO_24_PORTC_REG 0x02 /* rw 8 */ #define DIO_24_CNFG_REG 0x03 /* wo 8 */ /* Command Register bits */ #define COMREG1_2SCADC 0x0001 #define COMREG1_1632CNT 0x0002 #define COMREG1_SCANEN 0x0008 #define COMREG1_DAQEN 0x0010 #define COMREG1_DMAEN 0x0020 #define COMREG1_CONVINTEN 0x0080 #define COMREG2_SCN2 0x0010 #define COMREG2_INTEN 0x0080 #define COMREG2_DOUTEN0 0x0100 #define COMREG2_DOUTEN1 0x0200 /* Status Register bits */ #define STAT_AD_OVERRUN 0x0100 #define STAT_AD_OVERFLOW 0x0200 #define STAT_AD_DAQPROG 0x0800 #define STAT_AD_CONVAVAIL 0x2000 #define STAT_AD_DAQSTOPINT 0x4000 /* AM9513A Counter/Timer defines */ #define CLOCK_1_MHZ 0x8B25 #define CLOCK_100_KHZ 0x8C25 #define 
CLOCK_10_KHZ 0x8D25 #define CLOCK_1_KHZ 0x8E25 #define CLOCK_100_HZ 0x8F25 /* Other miscellaneous defines */ #define ATMIO16D_SIZE 32 /* bus address range */ #define devpriv ((struct atmio16d_private *)dev->private) #define ATMIO16D_TIMEOUT 10 struct atmio16_board_t { const char *name; int has_8255; }; static const struct atmio16_board_t atmio16_boards[] = { { .name = "atmio16", .has_8255 = 0, }, { .name = "atmio16d", .has_8255 = 1, }, }; #define n_atmio16_boards ARRAY_SIZE(atmio16_boards) #define boardtype ((const struct atmio16_board_t *)dev->board_ptr) /* function prototypes */ static int atmio16d_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int atmio16d_detach(struct comedi_device *dev); static irqreturn_t atmio16d_interrupt(int irq, void *d); static int atmio16d_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int atmio16d_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int atmio16d_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static void reset_counters(struct comedi_device *dev); static void reset_atmio16d(struct comedi_device *dev); /* main driver struct */ static struct comedi_driver driver_atmio16d = { .driver_name = "atmio16", .module = THIS_MODULE, .attach = atmio16d_attach, .detach = atmio16d_detach, .board_name = &atmio16_boards[0].name, .num_names = n_atmio16_boards, .offset = sizeof(struct atmio16_board_t), }; COMEDI_INITCLEANUP(driver_atmio16d); /* range structs */ static const struct comedi_lrange range_atmio16d_ai_10_bipolar = { 4, { BIP_RANGE (10), BIP_RANGE (1), BIP_RANGE (0.1), BIP_RANGE (0.02) } }; static const struct comedi_lrange range_atmio16d_ai_5_bipolar = { 4, { BIP_RANGE (5), BIP_RANGE (0.5), BIP_RANGE (0.05), BIP_RANGE (0.01) } }; static const struct comedi_lrange range_atmio16d_ai_unipolar = { 4, { UNI_RANGE (10), UNI_RANGE (1), UNI_RANGE (0.1), UNI_RANGE (0.02) } }; /* private data struct */ struct atmio16d_private { 
enum { adc_diff, adc_singleended } adc_mux; enum { adc_bipolar10, adc_bipolar5, adc_unipolar10 } adc_range; enum { adc_2comp, adc_straight } adc_coding; enum { dac_bipolar, dac_unipolar } dac0_range, dac1_range; enum { dac_internal, dac_external } dac0_reference, dac1_reference; enum { dac_2comp, dac_straight } dac0_coding, dac1_coding; const struct comedi_lrange *ao_range_type_list[2]; unsigned int ao_readback[2]; unsigned int com_reg_1_state; /* current state of command register 1 */ unsigned int com_reg_2_state; /* current state of command register 2 */ }; static void reset_counters(struct comedi_device *dev) { /* Counter 2 */ outw(0xFFC2, dev->iobase + AM9513A_COM_REG); outw(0xFF02, dev->iobase + AM9513A_COM_REG); outw(0x4, dev->iobase + AM9513A_DATA_REG); outw(0xFF0A, dev->iobase + AM9513A_COM_REG); outw(0x3, dev->iobase + AM9513A_DATA_REG); outw(0xFF42, dev->iobase + AM9513A_COM_REG); outw(0xFF42, dev->iobase + AM9513A_COM_REG); /* Counter 3 */ outw(0xFFC4, dev->iobase + AM9513A_COM_REG); outw(0xFF03, dev->iobase + AM9513A_COM_REG); outw(0x4, dev->iobase + AM9513A_DATA_REG); outw(0xFF0B, dev->iobase + AM9513A_COM_REG); outw(0x3, dev->iobase + AM9513A_DATA_REG); outw(0xFF44, dev->iobase + AM9513A_COM_REG); outw(0xFF44, dev->iobase + AM9513A_COM_REG); /* Counter 4 */ outw(0xFFC8, dev->iobase + AM9513A_COM_REG); outw(0xFF04, dev->iobase + AM9513A_COM_REG); outw(0x4, dev->iobase + AM9513A_DATA_REG); outw(0xFF0C, dev->iobase + AM9513A_COM_REG); outw(0x3, dev->iobase + AM9513A_DATA_REG); outw(0xFF48, dev->iobase + AM9513A_COM_REG); outw(0xFF48, dev->iobase + AM9513A_COM_REG); /* Counter 5 */ outw(0xFFD0, dev->iobase + AM9513A_COM_REG); outw(0xFF05, dev->iobase + AM9513A_COM_REG); outw(0x4, dev->iobase + AM9513A_DATA_REG); outw(0xFF0D, dev->iobase + AM9513A_COM_REG); outw(0x3, dev->iobase + AM9513A_DATA_REG); outw(0xFF50, dev->iobase + AM9513A_COM_REG); outw(0xFF50, dev->iobase + AM9513A_COM_REG); outw(0, dev->iobase + AD_CLEAR_REG); } static void 
reset_atmio16d(struct comedi_device *dev) { int i; /* now we need to initialize the board */ outw(0, dev->iobase + COM_REG_1); outw(0, dev->iobase + COM_REG_2); outw(0, dev->iobase + MUX_GAIN_REG); /* init AM9513A timer */ outw(0xFFFF, dev->iobase + AM9513A_COM_REG); outw(0xFFEF, dev->iobase + AM9513A_COM_REG); outw(0xFF17, dev->iobase + AM9513A_COM_REG); outw(0xF000, dev->iobase + AM9513A_DATA_REG); for (i = 1; i <= 5; ++i) { outw(0xFF00 + i, dev->iobase + AM9513A_COM_REG); outw(0x0004, dev->iobase + AM9513A_DATA_REG); outw(0xFF08 + i, dev->iobase + AM9513A_COM_REG); outw(0x3, dev->iobase + AM9513A_DATA_REG); } outw(0xFF5F, dev->iobase + AM9513A_COM_REG); /* timer init done */ outw(0, dev->iobase + AD_CLEAR_REG); outw(0, dev->iobase + INT2CLR_REG); /* select straight binary mode for Analog Input */ devpriv->com_reg_1_state |= 1; outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1); devpriv->adc_coding = adc_straight; /* zero the analog outputs */ outw(2048, dev->iobase + DAC0_REG); outw(2048, dev->iobase + DAC1_REG); } static irqreturn_t atmio16d_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->subdevices + 0; #ifdef DEBUG1 printk(KERN_DEBUG "atmio16d_interrupt!\n"); #endif comedi_buf_put(s->async, inw(dev->iobase + AD_FIFO_REG)); comedi_event(dev, s); return IRQ_HANDLED; } static int atmio16d_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0, tmp; #ifdef DEBUG1 printk(KERN_DEBUG "atmio16d_ai_cmdtest\n"); #endif /* make sure triggers are valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_FOLLOW | TRIG_TIMER; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_TIMER; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if 
(!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique & mutually compatible */ /* note that mutual compatibility is not an issue here */ if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->scan_begin_src != TRIG_EXT && cmd->scan_begin_src != TRIG_TIMER) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_src == TRIG_FOLLOW) { /* internal trigger */ if (cmd->scan_begin_arg != 0) { cmd->scan_begin_arg = 0; err++; } } else { #if 0 /* external trigger */ /* should be level/edge, hi/lo specification here */ if (cmd->scan_begin_arg != 0) { cmd->scan_begin_arg = 0; err++; } #endif } if (cmd->convert_arg < 10000) { cmd->convert_arg = 10000; err++; } #if 0 if (cmd->convert_arg > SLOWEST_TIMER) { cmd->convert_arg = SLOWEST_TIMER; err++; } #endif if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_COUNT) { /* any count is allowed */ } else { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; return 0; } static int atmio16d_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_cmd *cmd = &s->async->cmd; unsigned int timer, base_clock; unsigned int sample_count, tmp, chan, gain; int i; #ifdef DEBUG1 printk(KERN_DEBUG "atmio16d_ai_cmd\n"); #endif /* This is slowly becoming a working command interface. 
* * It is still uber-experimental */ reset_counters(dev); s->async->cur_chan = 0; /* check if scanning multiple channels */ if (cmd->chanlist_len < 2) { devpriv->com_reg_1_state &= ~COMREG1_SCANEN; outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1); } else { devpriv->com_reg_1_state |= COMREG1_SCANEN; devpriv->com_reg_2_state |= COMREG2_SCN2; outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1); outw(devpriv->com_reg_2_state, dev->iobase + COM_REG_2); } /* Setup the Mux-Gain Counter */ for (i = 0; i < cmd->chanlist_len; ++i) { chan = CR_CHAN(cmd->chanlist[i]); gain = CR_RANGE(cmd->chanlist[i]); outw(i, dev->iobase + MUX_CNTR_REG); tmp = chan | (gain << 6); if (i == cmd->scan_end_arg - 1) tmp |= 0x0010; /* set LASTONE bit */ outw(tmp, dev->iobase + MUX_GAIN_REG); } /* Now program the sample interval timer */ /* Figure out which clock to use then get an * appropriate timer value */ if (cmd->convert_arg < 65536000) { base_clock = CLOCK_1_MHZ; timer = cmd->convert_arg / 1000; } else if (cmd->convert_arg < 655360000) { base_clock = CLOCK_100_KHZ; timer = cmd->convert_arg / 10000; } else if (cmd->convert_arg <= 0xffffffff /* 6553600000 */) { base_clock = CLOCK_10_KHZ; timer = cmd->convert_arg / 100000; } else if (cmd->convert_arg <= 0xffffffff /* 65536000000 */) { base_clock = CLOCK_1_KHZ; timer = cmd->convert_arg / 1000000; } outw(0xFF03, dev->iobase + AM9513A_COM_REG); outw(base_clock, dev->iobase + AM9513A_DATA_REG); outw(0xFF0B, dev->iobase + AM9513A_COM_REG); outw(0x2, dev->iobase + AM9513A_DATA_REG); outw(0xFF44, dev->iobase + AM9513A_COM_REG); outw(0xFFF3, dev->iobase + AM9513A_COM_REG); outw(timer, dev->iobase + AM9513A_DATA_REG); outw(0xFF24, dev->iobase + AM9513A_COM_REG); /* Now figure out how many samples to get */ /* and program the sample counter */ sample_count = cmd->stop_arg * cmd->scan_end_arg; outw(0xFF04, dev->iobase + AM9513A_COM_REG); outw(0x1025, dev->iobase + AM9513A_DATA_REG); outw(0xFF0C, dev->iobase + AM9513A_COM_REG); if (sample_count 
< 65536) { /* use only Counter 4 */ outw(sample_count, dev->iobase + AM9513A_DATA_REG); outw(0xFF48, dev->iobase + AM9513A_COM_REG); outw(0xFFF4, dev->iobase + AM9513A_COM_REG); outw(0xFF28, dev->iobase + AM9513A_COM_REG); devpriv->com_reg_1_state &= ~COMREG1_1632CNT; outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1); } else { /* Counter 4 and 5 are needed */ tmp = sample_count & 0xFFFF; if (tmp) outw(tmp - 1, dev->iobase + AM9513A_DATA_REG); else outw(0xFFFF, dev->iobase + AM9513A_DATA_REG); outw(0xFF48, dev->iobase + AM9513A_COM_REG); outw(0, dev->iobase + AM9513A_DATA_REG); outw(0xFF28, dev->iobase + AM9513A_COM_REG); outw(0xFF05, dev->iobase + AM9513A_COM_REG); outw(0x25, dev->iobase + AM9513A_DATA_REG); outw(0xFF0D, dev->iobase + AM9513A_COM_REG); tmp = sample_count & 0xFFFF; if ((tmp == 0) || (tmp == 1)) { outw((sample_count >> 16) & 0xFFFF, dev->iobase + AM9513A_DATA_REG); } else { outw(((sample_count >> 16) & 0xFFFF) + 1, dev->iobase + AM9513A_DATA_REG); } outw(0xFF70, dev->iobase + AM9513A_COM_REG); devpriv->com_reg_1_state |= COMREG1_1632CNT; outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1); } /* Program the scan interval timer ONLY IF SCANNING IS ENABLED */ /* Figure out which clock to use then get an * appropriate timer value */ if (cmd->chanlist_len > 1) { if (cmd->scan_begin_arg < 65536000) { base_clock = CLOCK_1_MHZ; timer = cmd->scan_begin_arg / 1000; } else if (cmd->scan_begin_arg < 655360000) { base_clock = CLOCK_100_KHZ; timer = cmd->scan_begin_arg / 10000; } else if (cmd->scan_begin_arg < 0xffffffff /* 6553600000 */) { base_clock = CLOCK_10_KHZ; timer = cmd->scan_begin_arg / 100000; } else if (cmd->scan_begin_arg < 0xffffffff /* 65536000000 */) { base_clock = CLOCK_1_KHZ; timer = cmd->scan_begin_arg / 1000000; } outw(0xFF02, dev->iobase + AM9513A_COM_REG); outw(base_clock, dev->iobase + AM9513A_DATA_REG); outw(0xFF0A, dev->iobase + AM9513A_COM_REG); outw(0x2, dev->iobase + AM9513A_DATA_REG); outw(0xFF42, dev->iobase + 
AM9513A_COM_REG); outw(0xFFF2, dev->iobase + AM9513A_COM_REG); outw(timer, dev->iobase + AM9513A_DATA_REG); outw(0xFF22, dev->iobase + AM9513A_COM_REG); } /* Clear the A/D FIFO and reset the MUX counter */ outw(0, dev->iobase + AD_CLEAR_REG); outw(0, dev->iobase + MUX_CNTR_REG); outw(0, dev->iobase + INT2CLR_REG); /* enable this acquisition operation */ devpriv->com_reg_1_state |= COMREG1_DAQEN; outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1); /* enable interrupts for conversion completion */ devpriv->com_reg_1_state |= COMREG1_CONVINTEN; devpriv->com_reg_2_state |= COMREG2_INTEN; outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1); outw(devpriv->com_reg_2_state, dev->iobase + COM_REG_2); /* apply a trigger. this starts the counters! */ outw(0, dev->iobase + START_DAQ_REG); return 0; } /* This will cancel a running acquisition operation */ static int atmio16d_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { reset_atmio16d(dev); return 0; } /* Mode 0 is used to get a single conversion on demand */ static int atmio16d_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i, t; int chan; int gain; int status; #ifdef DEBUG1 printk(KERN_DEBUG "atmio16d_ai_insn_read\n"); #endif chan = CR_CHAN(insn->chanspec); gain = CR_RANGE(insn->chanspec); /* reset the Analog input circuitry */ /* outw( 0, dev->iobase+AD_CLEAR_REG ); */ /* reset the Analog Input MUX Counter to 0 */ /* outw( 0, dev->iobase+MUX_CNTR_REG ); */ /* set the Input MUX gain */ outw(chan | (gain << 6), dev->iobase + MUX_GAIN_REG); for (i = 0; i < insn->n; i++) { /* start the conversion */ outw(0, dev->iobase + START_CONVERT_REG); /* wait for it to finish */ for (t = 0; t < ATMIO16D_TIMEOUT; t++) { /* check conversion status */ status = inw(dev->iobase + STAT_REG); #ifdef DEBUG1 printk(KERN_DEBUG "status=%x\n", status); #endif if (status & STAT_AD_CONVAVAIL) { /* read the data now */ data[i] = inw(dev->iobase + 
AD_FIFO_REG); /* change to two's complement if need be */ if (devpriv->adc_coding == adc_2comp) data[i] ^= 0x800; break; } if (status & STAT_AD_OVERFLOW) { printk(KERN_INFO "atmio16d: a/d FIFO overflow\n"); outw(0, dev->iobase + AD_CLEAR_REG); return -ETIME; } } /* end waiting, now check if it timed out */ if (t == ATMIO16D_TIMEOUT) { printk(KERN_INFO "atmio16d: timeout\n"); return -ETIME; } } return i; } static int atmio16d_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; #ifdef DEBUG1 printk(KERN_DEBUG "atmio16d_ao_insn_read\n"); #endif for (i = 0; i < insn->n; i++) data[i] = devpriv->ao_readback[CR_CHAN(insn->chanspec)]; return i; } static int atmio16d_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan; int d; #ifdef DEBUG1 printk(KERN_DEBUG "atmio16d_ao_insn_write\n"); #endif chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) { d = data[i]; switch (chan) { case 0: if (devpriv->dac0_coding == dac_2comp) d ^= 0x800; outw(d, dev->iobase + DAC0_REG); break; case 1: if (devpriv->dac1_coding == dac_2comp) d ^= 0x800; outw(d, dev->iobase + DAC1_REG); break; default: return -EINVAL; } devpriv->ao_readback[chan] = data[i]; } return i; } static int atmio16d_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n != 2) return -EINVAL; if (data[0]) { s->state &= ~data[0]; s->state |= (data[0] | data[1]); outw(s->state, dev->iobase + MIO_16_DIG_OUT_REG); } data[1] = inw(dev->iobase + MIO_16_DIG_IN_REG); return 2; } static int atmio16d_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int mask; for (i = 0; i < insn->n; i++) { mask = (CR_CHAN(insn->chanspec) < 4) ? 
0x0f : 0xf0; s->io_bits &= ~mask; if (data[i]) s->io_bits |= mask; } devpriv->com_reg_2_state &= ~(COMREG2_DOUTEN0 | COMREG2_DOUTEN1); if (s->io_bits & 0x0f) devpriv->com_reg_2_state |= COMREG2_DOUTEN0; if (s->io_bits & 0xf0) devpriv->com_reg_2_state |= COMREG2_DOUTEN1; outw(devpriv->com_reg_2_state, dev->iobase + COM_REG_2); return i; } /* options[0] - I/O port options[1] - MIO irq 0 == no irq N == irq N {3,4,5,6,7,9,10,11,12,14,15} options[2] - DIO irq 0 == no irq N == irq N {3,4,5,6,7,9} options[3] - DMA1 channel 0 == no DMA N == DMA N {5,6,7} options[4] - DMA2 channel 0 == no DMA N == DMA N {5,6,7} options[5] - a/d mux 0=differential, 1=single options[6] - a/d range 0=bipolar10, 1=bipolar5, 2=unipolar10 options[7] - dac0 range 0=bipolar, 1=unipolar options[8] - dac0 reference 0=internal, 1=external options[9] - dac0 coding 0=2's comp, 1=straight binary options[10] - dac1 range options[11] - dac1 reference options[12] - dac1 coding */ static int atmio16d_attach(struct comedi_device *dev, struct comedi_devconfig *it) { unsigned int irq; unsigned long iobase; int ret; struct comedi_subdevice *s; /* make sure the address range is free and allocate it */ iobase = it->options[0]; printk(KERN_INFO "comedi%d: atmio16d: 0x%04lx ", dev->minor, iobase); if (!request_region(iobase, ATMIO16D_SIZE, "ni_atmio16d")) { printk("I/O port conflict\n"); return -EIO; } dev->iobase = iobase; /* board name */ dev->board_name = boardtype->name; ret = alloc_subdevices(dev, 4); if (ret < 0) return ret; ret = alloc_private(dev, sizeof(struct atmio16d_private)); if (ret < 0) return ret; /* reset the atmio16d hardware */ reset_atmio16d(dev); /* check if our interrupt is available and get it */ irq = it->options[1]; if (irq) { ret = request_irq(irq, atmio16d_interrupt, 0, "atmio16d", dev); if (ret < 0) { printk(KERN_INFO "failed to allocate irq %u\n", irq); return ret; } dev->irq = irq; printk(KERN_INFO "( irq = %u )\n", irq); } else { printk(KERN_INFO "( no irq )"); } /* set device options 
*/ devpriv->adc_mux = it->options[5]; devpriv->adc_range = it->options[6]; devpriv->dac0_range = it->options[7]; devpriv->dac0_reference = it->options[8]; devpriv->dac0_coding = it->options[9]; devpriv->dac1_range = it->options[10]; devpriv->dac1_reference = it->options[11]; devpriv->dac1_coding = it->options[12]; /* setup sub-devices */ s = dev->subdevices + 0; dev->read_subdev = s; /* ai subdevice */ s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ; s->n_chan = (devpriv->adc_mux ? 16 : 8); s->len_chanlist = 16; s->insn_read = atmio16d_ai_insn_read; s->do_cmdtest = atmio16d_ai_cmdtest; s->do_cmd = atmio16d_ai_cmd; s->cancel = atmio16d_ai_cancel; s->maxdata = 0xfff; /* 4095 decimal */ switch (devpriv->adc_range) { case adc_bipolar10: s->range_table = &range_atmio16d_ai_10_bipolar; break; case adc_bipolar5: s->range_table = &range_atmio16d_ai_5_bipolar; break; case adc_unipolar10: s->range_table = &range_atmio16d_ai_unipolar; break; } /* ao subdevice */ s++; s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 2; s->insn_read = atmio16d_ao_insn_read; s->insn_write = atmio16d_ao_insn_write; s->maxdata = 0xfff; /* 4095 decimal */ s->range_table_list = devpriv->ao_range_type_list; switch (devpriv->dac0_range) { case dac_bipolar: devpriv->ao_range_type_list[0] = &range_bipolar10; break; case dac_unipolar: devpriv->ao_range_type_list[0] = &range_unipolar10; break; } switch (devpriv->dac1_range) { case dac_bipolar: devpriv->ao_range_type_list[1] = &range_bipolar10; break; case dac_unipolar: devpriv->ao_range_type_list[1] = &range_unipolar10; break; } /* Digital I/O */ s++; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 8; s->insn_bits = atmio16d_dio_insn_bits; s->insn_config = atmio16d_dio_insn_config; s->maxdata = 1; s->range_table = &range_digital; /* 8255 subdevice */ s++; if (boardtype->has_8255) subdev_8255_init(dev, s, NULL, dev->iobase); else s->type = COMEDI_SUBD_UNUSED; /* 
don't yet know how to deal with counter/timers */ #if 0 s++; /* do */ s->type = COMEDI_SUBD_TIMER; s->n_chan = 0; s->maxdata = 0 #endif printk("\n"); return 0; } static int atmio16d_detach(struct comedi_device *dev) { printk(KERN_INFO "comedi%d: atmio16d: remove\n", dev->minor); if (dev->subdevices && boardtype->has_8255) subdev_8255_cleanup(dev, dev->subdevices + 3); if (dev->irq) free_irq(dev->irq, dev); reset_atmio16d(dev); if (dev->iobase) release_region(dev->iobase, ATMIO16D_SIZE); return 0; }
gpl-2.0
HydraCompany/HydraKernel
drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c
859
1456
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "nve0.h"

/* NV108 PFIFO engine class: a thin specialization of the NVE0 fifo
 * implementation — the ctor/dtor/init hooks are reused unchanged and only
 * the channel count (1024) differs.  The exported pointer aliases the
 * .base member of an anonymous compound literal.
 */
struct nouveau_oclass *
nv108_fifo_oclass = &(struct nve0_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0x08),
	.base.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_fifo_ctor,
		.dtor = nve0_fifo_dtor,
		.init = nve0_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
	.channels = 1024,
}.base;
gpl-2.0
ShinySide/G530P_Permissive
net/decnet/dn_dev.c
1371
32805
/* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket * interface as the means of communication with the user level. * * DECnet Device Layer * * Authors: Steve Whitehouse <SteveW@ACM.org> * Eduardo Marcelo Serrat <emserrat@geocities.com> * * Changes: * Steve Whitehouse : Devices now see incoming frames so they * can mark on who it came from. * Steve Whitehouse : Fixed bug in creating neighbours. Each neighbour * can now have a device specific setup func. * Steve Whitehouse : Added /proc/sys/net/decnet/conf/<dev>/ * Steve Whitehouse : Fixed bug which sometimes killed timer * Steve Whitehouse : Multiple ifaddr support * Steve Whitehouse : SIOCGIFCONF is now a compile time option * Steve Whitehouse : /proc/sys/net/decnet/conf/<sys>/forwarding * Steve Whitehouse : Removed timer1 - it's a user space issue now * Patrick Caulfield : Fixed router hello message format * Steve Whitehouse : Got rid of constant sizes for blksize for * devices. All mtu based now. 
*/ #include <linux/capability.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/if_addr.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/sysctl.h> #include <linux/notifier.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <net/net_namespace.h> #include <net/neighbour.h> #include <net/dst.h> #include <net/flow.h> #include <net/fib_rules.h> #include <net/netlink.h> #include <net/dn.h> #include <net/dn_dev.h> #include <net/dn_route.h> #include <net/dn_neigh.h> #include <net/dn_fib.h> #define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn)) static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00}; static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00}; static char dn_hiord[ETH_ALEN] = {0xAA,0x00,0x04,0x00,0x00,0x00}; static unsigned char dn_eco_version[3] = {0x02,0x00,0x00}; extern struct neigh_table dn_neigh_table; /* * decnet_address is kept in network order. 
*/ __le16 decnet_address = 0; static DEFINE_SPINLOCK(dndev_lock); static struct net_device *decnet_default_device; static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); static struct dn_dev *dn_dev_create(struct net_device *dev, int *err); static void dn_dev_delete(struct net_device *dev); static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa); static int dn_eth_up(struct net_device *); static void dn_eth_down(struct net_device *); static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa); static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa); static struct dn_dev_parms dn_dev_list[] = { { .type = ARPHRD_ETHER, /* Ethernet */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "ethernet", .up = dn_eth_up, .down = dn_eth_down, .timer3 = dn_send_brd_hello, }, { .type = ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "ipgre", .timer3 = dn_send_brd_hello, }, #if 0 { .type = ARPHRD_X25, /* Bog standard X.25 */ .mode = DN_DEV_UCAST, .state = DN_DEV_S_DS, .t2 = 1, .t3 = 120, .name = "x25", .timer3 = dn_send_ptp_hello, }, #endif #if 0 { .type = ARPHRD_PPP, /* DECnet over PPP */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "ppp", .timer3 = dn_send_brd_hello, }, #endif { .type = ARPHRD_DDCMP, /* DECnet over DDCMP */ .mode = DN_DEV_UCAST, .state = DN_DEV_S_DS, .t2 = 1, .t3 = 120, .name = "ddcmp", .timer3 = dn_send_ptp_hello, }, { .type = ARPHRD_LOOPBACK, /* Loopback interface - always last */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "loopback", .timer3 = dn_send_brd_hello, } }; #define DN_DEV_LIST_SIZE ARRAY_SIZE(dn_dev_list) #define DN_DEV_PARMS_OFFSET(x) offsetof(struct dn_dev_parms, x) #ifdef CONFIG_SYSCTL static int min_t2[] = { 1 }; static int max_t2[] = { 60 }; /* No max specified, but this seems sensible */ static int min_t3[] = { 1 }; static int max_t3[] = { 8191 }; /* Must fit in 
16 bits when multiplied by BCT3MULT or T3MULT */ static int min_priority[1]; static int max_priority[] = { 127 }; /* From DECnet spec */ static int dn_forwarding_proc(ctl_table *, int, void __user *, size_t *, loff_t *); static struct dn_dev_sysctl_table { struct ctl_table_header *sysctl_header; ctl_table dn_dev_vars[5]; } dn_dev_sysctl = { NULL, { { .procname = "forwarding", .data = (void *)DN_DEV_PARMS_OFFSET(forwarding), .maxlen = sizeof(int), .mode = 0644, .proc_handler = dn_forwarding_proc, }, { .procname = "priority", .data = (void *)DN_DEV_PARMS_OFFSET(priority), .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_priority, .extra2 = &max_priority }, { .procname = "t2", .data = (void *)DN_DEV_PARMS_OFFSET(t2), .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_t2, .extra2 = &max_t2 }, { .procname = "t3", .data = (void *)DN_DEV_PARMS_OFFSET(t3), .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_t3, .extra2 = &max_t3 }, {0} }, }; static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms) { struct dn_dev_sysctl_table *t; int i; char path[sizeof("net/decnet/conf/") + IFNAMSIZ]; t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL); if (t == NULL) return; for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) { long offset = (long)t->dn_dev_vars[i].data; t->dn_dev_vars[i].data = ((char *)parms) + offset; } snprintf(path, sizeof(path), "net/decnet/conf/%s", dev? 
dev->name : parms->name); t->dn_dev_vars[0].extra1 = (void *)dev; t->sysctl_header = register_net_sysctl(&init_net, path, t->dn_dev_vars); if (t->sysctl_header == NULL) kfree(t); else parms->sysctl = t; } static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms) { if (parms->sysctl) { struct dn_dev_sysctl_table *t = parms->sysctl; parms->sysctl = NULL; unregister_net_sysctl_table(t->sysctl_header); kfree(t); } } static int dn_forwarding_proc(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { #ifdef CONFIG_DECNET_ROUTER struct net_device *dev = table->extra1; struct dn_dev *dn_db; int err; int tmp, old; if (table->extra1 == NULL) return -EINVAL; dn_db = rcu_dereference_raw(dev->dn_ptr); old = dn_db->parms.forwarding; err = proc_dointvec(table, write, buffer, lenp, ppos); if ((err >= 0) && write) { if (dn_db->parms.forwarding < 0) dn_db->parms.forwarding = 0; if (dn_db->parms.forwarding > 2) dn_db->parms.forwarding = 2; /* * What an ugly hack this is... its works, just. 
It * would be nice if sysctl/proc were just that little * bit more flexible so I don't have to write a special * routine, or suffer hacks like this - SJW */ tmp = dn_db->parms.forwarding; dn_db->parms.forwarding = old; if (dn_db->parms.down) dn_db->parms.down(dev); dn_db->parms.forwarding = tmp; if (dn_db->parms.up) dn_db->parms.up(dev); } return err; #else return -EINVAL; #endif } #else /* CONFIG_SYSCTL */ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms) { } static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms) { } #endif /* CONFIG_SYSCTL */ static inline __u16 mtu2blksize(struct net_device *dev) { u32 blksize = dev->mtu; if (blksize > 0xffff) blksize = 0xffff; if (dev->type == ARPHRD_ETHER || dev->type == ARPHRD_PPP || dev->type == ARPHRD_IPGRE || dev->type == ARPHRD_LOOPBACK) blksize -= 2; return (__u16)blksize; } static struct dn_ifaddr *dn_dev_alloc_ifa(void) { struct dn_ifaddr *ifa; ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); return ifa; } static void dn_dev_free_ifa(struct dn_ifaddr *ifa) { kfree_rcu(ifa, rcu); } static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy) { struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap); unsigned char mac_addr[6]; struct net_device *dev = dn_db->dev; ASSERT_RTNL(); *ifap = ifa1->ifa_next; if (dn_db->dev->type == ARPHRD_ETHER) { if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { dn_dn2eth(mac_addr, ifa1->ifa_local); dev_mc_del(dev, mac_addr); } } dn_ifaddr_notify(RTM_DELADDR, ifa1); blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1); if (destroy) { dn_dev_free_ifa(ifa1); if (dn_db->ifa_list == NULL) dn_dev_delete(dn_db->dev); } } static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa) { struct net_device *dev = dn_db->dev; struct dn_ifaddr *ifa1; unsigned char mac_addr[6]; ASSERT_RTNL(); /* Check for duplicates */ for (ifa1 = rtnl_dereference(dn_db->ifa_list); ifa1 != NULL; ifa1 = rtnl_dereference(ifa1->ifa_next)) { 
if (ifa1->ifa_local == ifa->ifa_local) return -EEXIST; } if (dev->type == ARPHRD_ETHER) { if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { dn_dn2eth(mac_addr, ifa->ifa_local); dev_mc_add(dev, mac_addr); } } ifa->ifa_next = dn_db->ifa_list; rcu_assign_pointer(dn_db->ifa_list, ifa); dn_ifaddr_notify(RTM_NEWADDR, ifa); blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); return 0; } static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa) { struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); int rv; if (dn_db == NULL) { int err; dn_db = dn_dev_create(dev, &err); if (dn_db == NULL) return err; } ifa->ifa_dev = dn_db; if (dev->flags & IFF_LOOPBACK) ifa->ifa_scope = RT_SCOPE_HOST; rv = dn_dev_insert_ifa(dn_db, ifa); if (rv) dn_dev_free_ifa(ifa); return rv; } int dn_dev_ioctl(unsigned int cmd, void __user *arg) { char buffer[DN_IFREQ_SIZE]; struct ifreq *ifr = (struct ifreq *)buffer; struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr; struct dn_dev *dn_db; struct net_device *dev; struct dn_ifaddr *ifa = NULL; struct dn_ifaddr __rcu **ifap = NULL; int ret = 0; if (copy_from_user(ifr, arg, DN_IFREQ_SIZE)) return -EFAULT; ifr->ifr_name[IFNAMSIZ-1] = 0; dev_load(&init_net, ifr->ifr_name); switch (cmd) { case SIOCGIFADDR: break; case SIOCSIFADDR: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (sdn->sdn_family != AF_DECnet) return -EINVAL; break; default: return -EINVAL; } rtnl_lock(); if ((dev = __dev_get_by_name(&init_net, ifr->ifr_name)) == NULL) { ret = -ENODEV; goto done; } if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) { for (ifap = &dn_db->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL; ifap = &ifa->ifa_next) if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0) break; } if (ifa == NULL && cmd != SIOCSIFADDR) { ret = -EADDRNOTAVAIL; goto done; } switch (cmd) { case SIOCGIFADDR: *((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local; goto rarok; case SIOCSIFADDR: if (!ifa) { if ((ifa = dn_dev_alloc_ifa()) == NULL) { ret = -ENOBUFS; 
break; } memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); } else { if (ifa->ifa_local == dn_saddr2dn(sdn)) break; dn_dev_del_ifa(dn_db, ifap, 0); } ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn); ret = dn_dev_set_ifa(dev, ifa); } done: rtnl_unlock(); return ret; rarok: if (copy_to_user(arg, ifr, DN_IFREQ_SIZE)) ret = -EFAULT; goto done; } struct net_device *dn_dev_get_default(void) { struct net_device *dev; spin_lock(&dndev_lock); dev = decnet_default_device; if (dev) { if (dev->dn_ptr) dev_hold(dev); else dev = NULL; } spin_unlock(&dndev_lock); return dev; } int dn_dev_set_default(struct net_device *dev, int force) { struct net_device *old = NULL; int rv = -EBUSY; if (!dev->dn_ptr) return -ENODEV; spin_lock(&dndev_lock); if (force || decnet_default_device == NULL) { old = decnet_default_device; decnet_default_device = dev; rv = 0; } spin_unlock(&dndev_lock); if (old) dev_put(old); return rv; } static void dn_dev_check_default(struct net_device *dev) { spin_lock(&dndev_lock); if (dev == decnet_default_device) { decnet_default_device = NULL; } else { dev = NULL; } spin_unlock(&dndev_lock); if (dev) dev_put(dev); } /* * Called with RTNL */ static struct dn_dev *dn_dev_by_index(int ifindex) { struct net_device *dev; struct dn_dev *dn_dev = NULL; dev = __dev_get_by_index(&init_net, ifindex); if (dev) dn_dev = rtnl_dereference(dev->dn_ptr); return dn_dev; } static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = { [IFA_ADDRESS] = { .type = NLA_U16 }, [IFA_LOCAL] = { .type = NLA_U16 }, [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, }; static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; struct dn_dev *dn_db; struct ifaddrmsg *ifm; struct dn_ifaddr *ifa; struct dn_ifaddr __rcu **ifap; int err = -EINVAL; if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (!net_eq(net, &init_net)) goto errout; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); if (err 
< 0) goto errout; err = -ENODEV; ifm = nlmsg_data(nlh); if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL) goto errout; err = -EADDRNOTAVAIL; for (ifap = &dn_db->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL; ifap = &ifa->ifa_next) { if (tb[IFA_LOCAL] && nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) continue; if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) continue; dn_dev_del_ifa(dn_db, ifap, 1); return 0; } errout: return err; } static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; struct net_device *dev; struct dn_dev *dn_db; struct ifaddrmsg *ifm; struct dn_ifaddr *ifa; int err; if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (!net_eq(net, &init_net)) return -EINVAL; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); if (err < 0) return err; if (tb[IFA_LOCAL] == NULL) return -EINVAL; ifm = nlmsg_data(nlh); if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL) return -ENODEV; if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) { dn_db = dn_dev_create(dev, &err); if (!dn_db) return err; } if ((ifa = dn_dev_alloc_ifa()) == NULL) return -ENOBUFS; if (tb[IFA_ADDRESS] == NULL) tb[IFA_ADDRESS] = tb[IFA_LOCAL]; ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]); ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]); ifa->ifa_flags = ifm->ifa_flags; ifa->ifa_scope = ifm->ifa_scope; ifa->ifa_dev = dn_db; if (tb[IFA_LABEL]) nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); else memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); err = dn_dev_insert_ifa(dn_db, ifa); if (err) dn_dev_free_ifa(ifa); return err; } static inline size_t dn_ifaddr_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ + nla_total_size(2) /* IFA_ADDRESS */ + nla_total_size(2); /* IFA_LOCAL */ } static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa, u32 portid, u32 seq, int event, unsigned int 
flags) { struct ifaddrmsg *ifm; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags); if (nlh == NULL) return -EMSGSIZE; ifm = nlmsg_data(nlh); ifm->ifa_family = AF_DECnet; ifm->ifa_prefixlen = 16; ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT; ifm->ifa_scope = ifa->ifa_scope; ifm->ifa_index = ifa->ifa_dev->dev->ifindex; if ((ifa->ifa_address && nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) || (ifa->ifa_local && nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) || (ifa->ifa_label[0] && nla_put_string(skb, IFA_LABEL, ifa->ifa_label))) goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa) { struct sk_buff *skb; int err = -ENOBUFS; skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL); if (skb == NULL) goto errout; err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0); if (err < 0) { /* -EMSGSIZE implies BUG in dn_ifaddr_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, &init_net, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL); return; errout: if (err < 0) rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_IFADDR, err); } static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int idx, dn_idx = 0, skip_ndevs, skip_naddr; struct net_device *dev; struct dn_dev *dn_db; struct dn_ifaddr *ifa; if (!net_eq(net, &init_net)) return 0; skip_ndevs = cb->args[0]; skip_naddr = cb->args[1]; idx = 0; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if (idx < skip_ndevs) goto cont; else if (idx > skip_ndevs) { /* Only skip over addresses for first dev dumped * in this iteration (idx == skip_ndevs) */ skip_naddr = 0; } if ((dn_db = rcu_dereference(dev->dn_ptr)) == NULL) goto cont; for (ifa = rcu_dereference(dn_db->ifa_list), dn_idx = 0; ifa; ifa = rcu_dereference(ifa->ifa_next), dn_idx++) { if (dn_idx < skip_naddr) continue; if 
(dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWADDR, NLM_F_MULTI) < 0) goto done; } cont: idx++; } done: rcu_read_unlock(); cb->args[0] = idx; cb->args[1] = dn_idx; return skb->len; } static int dn_dev_get_first(struct net_device *dev, __le16 *addr) { struct dn_dev *dn_db; struct dn_ifaddr *ifa; int rv = -ENODEV; rcu_read_lock(); dn_db = rcu_dereference(dev->dn_ptr); if (dn_db == NULL) goto out; ifa = rcu_dereference(dn_db->ifa_list); if (ifa != NULL) { *addr = ifa->ifa_local; rv = 0; } out: rcu_read_unlock(); return rv; } /* * Find a default address to bind to. * * This is one of those areas where the initial VMS concepts don't really * map onto the Linux concepts, and since we introduced multiple addresses * per interface we have to cope with slightly odd ways of finding out what * "our address" really is. Mostly it's not a problem; for this we just guess * a sensible default. Eventually the routing code will take care of all the * nasties for us I hope. 
*/ int dn_dev_bind_default(__le16 *addr) { struct net_device *dev; int rv; dev = dn_dev_get_default(); last_chance: if (dev) { rv = dn_dev_get_first(dev, addr); dev_put(dev); if (rv == 0 || dev == init_net.loopback_dev) return rv; } dev = init_net.loopback_dev; dev_hold(dev); goto last_chance; } static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa) { struct endnode_hello_message *msg; struct sk_buff *skb = NULL; __le16 *pktlen; struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL) return; skb->dev = dev; msg = (struct endnode_hello_message *)skb_put(skb,sizeof(*msg)); msg->msgflg = 0x0D; memcpy(msg->tiver, dn_eco_version, 3); dn_dn2eth(msg->id, ifa->ifa_local); msg->iinfo = DN_RT_INFO_ENDN; msg->blksize = cpu_to_le16(mtu2blksize(dev)); msg->area = 0x00; memset(msg->seed, 0, 8); memcpy(msg->neighbor, dn_hiord, ETH_ALEN); if (dn_db->router) { struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; dn_dn2eth(msg->neighbor, dn->addr); } msg->timer = cpu_to_le16((unsigned short)dn_db->parms.t3); msg->mpd = 0x00; msg->datalen = 0x02; memset(msg->data, 0xAA, 2); pktlen = (__le16 *)skb_push(skb,2); *pktlen = cpu_to_le16(skb->len - 2); skb_reset_network_header(skb); dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id); } #define DRDELAY (5 * HZ) static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa) { /* First check time since device went up */ if ((jiffies - dn_db->uptime) < DRDELAY) return 0; /* If there is no router, then yes... */ if (!dn_db->router) return 1; /* otherwise only if we have a higher priority or.. 
*/ if (dn->priority < dn_db->parms.priority) return 1; /* if we have equal priority and a higher node number */ if (dn->priority != dn_db->parms.priority) return 0; if (le16_to_cpu(dn->addr) < le16_to_cpu(ifa->ifa_local)) return 1; return 0; } static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa) { int n; struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; struct sk_buff *skb; size_t size; unsigned char *ptr; unsigned char *i1, *i2; __le16 *pktlen; char *src; if (mtu2blksize(dev) < (26 + 7)) return; n = mtu2blksize(dev) - 26; n /= 7; if (n > 32) n = 32; size = 2 + 26 + 7 * n; if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL) return; skb->dev = dev; ptr = skb_put(skb, size); *ptr++ = DN_RT_PKT_CNTL | DN_RT_PKT_ERTH; *ptr++ = 2; /* ECO */ *ptr++ = 0; *ptr++ = 0; dn_dn2eth(ptr, ifa->ifa_local); src = ptr; ptr += ETH_ALEN; *ptr++ = dn_db->parms.forwarding == 1 ? DN_RT_INFO_L1RT : DN_RT_INFO_L2RT; *((__le16 *)ptr) = cpu_to_le16(mtu2blksize(dev)); ptr += 2; *ptr++ = dn_db->parms.priority; /* Priority */ *ptr++ = 0; /* Area: Reserved */ *((__le16 *)ptr) = cpu_to_le16((unsigned short)dn_db->parms.t3); ptr += 2; *ptr++ = 0; /* MPD: Reserved */ i1 = ptr++; memset(ptr, 0, 7); /* Name: Reserved */ ptr += 7; i2 = ptr++; n = dn_neigh_elist(dev, ptr, n); *i2 = 7 * n; *i1 = 8 + *i2; skb_trim(skb, (27 + *i2)); pktlen = (__le16 *)skb_push(skb, 2); *pktlen = cpu_to_le16(skb->len - 2); skb_reset_network_header(skb); if (dn_am_i_a_router(dn, dn_db, ifa)) { struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); if (skb2) { dn_rt_finish_output(skb2, dn_rt_all_end_mcast, src); } } dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src); } static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if (dn_db->parms.forwarding == 0) dn_send_endnode_hello(dev, ifa); else dn_send_router_hello(dev, ifa); } static void 
dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa) { int tdlen = 16; int size = dev->hard_header_len + 2 + 4 + tdlen; struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC); int i; unsigned char *ptr; char src[ETH_ALEN]; if (skb == NULL) return ; skb->dev = dev; skb_push(skb, dev->hard_header_len); ptr = skb_put(skb, 2 + 4 + tdlen); *ptr++ = DN_RT_PKT_HELO; *((__le16 *)ptr) = ifa->ifa_local; ptr += 2; *ptr++ = tdlen; for(i = 0; i < tdlen; i++) *ptr++ = 0252; dn_dn2eth(src, ifa->ifa_local); dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src); } static int dn_eth_up(struct net_device *dev) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if (dn_db->parms.forwarding == 0) dev_mc_add(dev, dn_rt_all_end_mcast); else dev_mc_add(dev, dn_rt_all_rt_mcast); dn_db->use_long = 1; return 0; } static void dn_eth_down(struct net_device *dev) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if (dn_db->parms.forwarding == 0) dev_mc_del(dev, dn_rt_all_end_mcast); else dev_mc_del(dev, dn_rt_all_rt_mcast); } static void dn_dev_set_timer(struct net_device *dev); static void dn_dev_timer_func(unsigned long arg) { struct net_device *dev = (struct net_device *)arg; struct dn_dev *dn_db; struct dn_ifaddr *ifa; rcu_read_lock(); dn_db = rcu_dereference(dev->dn_ptr); if (dn_db->t3 <= dn_db->parms.t2) { if (dn_db->parms.timer3) { for (ifa = rcu_dereference(dn_db->ifa_list); ifa; ifa = rcu_dereference(ifa->ifa_next)) { if (!(ifa->ifa_flags & IFA_F_SECONDARY)) dn_db->parms.timer3(dev, ifa); } } dn_db->t3 = dn_db->parms.t3; } else { dn_db->t3 -= dn_db->parms.t2; } rcu_read_unlock(); dn_dev_set_timer(dev); } static void dn_dev_set_timer(struct net_device *dev) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if (dn_db->parms.t2 > dn_db->parms.t3) dn_db->parms.t2 = dn_db->parms.t3; dn_db->timer.data = (unsigned long)dev; dn_db->timer.function = dn_dev_timer_func; dn_db->timer.expires = jiffies + (dn_db->parms.t2 * HZ); add_timer(&dn_db->timer); } static 
struct dn_dev *dn_dev_create(struct net_device *dev, int *err) { int i; struct dn_dev_parms *p = dn_dev_list; struct dn_dev *dn_db; for(i = 0; i < DN_DEV_LIST_SIZE; i++, p++) { if (p->type == dev->type) break; } *err = -ENODEV; if (i == DN_DEV_LIST_SIZE) return NULL; *err = -ENOBUFS; if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) return NULL; memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); rcu_assign_pointer(dev->dn_ptr, dn_db); dn_db->dev = dev; init_timer(&dn_db->timer); dn_db->uptime = jiffies; dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table); if (!dn_db->neigh_parms) { RCU_INIT_POINTER(dev->dn_ptr, NULL); kfree(dn_db); return NULL; } if (dn_db->parms.up) { if (dn_db->parms.up(dev) < 0) { neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); dev->dn_ptr = NULL; kfree(dn_db); return NULL; } } dn_dev_sysctl_register(dev, &dn_db->parms); dn_dev_set_timer(dev); *err = 0; return dn_db; } /* * This processes a device up event. We only start up * the loopback device & ethernet devices with correct * MAC addresses automatically. Others must be started * specifically. * * FIXME: How should we configure the loopback address ? If we could dispense * with using decnet_address here and for autobind, it will be one less thing * for users to worry about setting up. */ void dn_dev_up(struct net_device *dev) { struct dn_ifaddr *ifa; __le16 addr = decnet_address; int maybe_default = 0; struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) return; /* * Need to ensure that loopback device has a dn_db attached to it * to allow creation of neighbours against it, even though it might * not have a local address of its own. Might as well do the same for * all autoconfigured interfaces. 
*/ if (dn_db == NULL) { int err; dn_db = dn_dev_create(dev, &err); if (dn_db == NULL) return; } if (dev->type == ARPHRD_ETHER) { if (memcmp(dev->dev_addr, dn_hiord, 4) != 0) return; addr = dn_eth2dn(dev->dev_addr); maybe_default = 1; } if (addr == 0) return; if ((ifa = dn_dev_alloc_ifa()) == NULL) return; ifa->ifa_local = ifa->ifa_address = addr; ifa->ifa_flags = 0; ifa->ifa_scope = RT_SCOPE_UNIVERSE; strcpy(ifa->ifa_label, dev->name); dn_dev_set_ifa(dev, ifa); /* * Automagically set the default device to the first automatically * configured ethernet card in the system. */ if (maybe_default) { dev_hold(dev); if (dn_dev_set_default(dev, 0)) dev_put(dev); } } static void dn_dev_delete(struct net_device *dev) { struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); if (dn_db == NULL) return; del_timer_sync(&dn_db->timer); dn_dev_sysctl_unregister(&dn_db->parms); dn_dev_check_default(dev); neigh_ifdown(&dn_neigh_table, dev); if (dn_db->parms.down) dn_db->parms.down(dev); dev->dn_ptr = NULL; neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); neigh_ifdown(&dn_neigh_table, dev); if (dn_db->router) neigh_release(dn_db->router); if (dn_db->peer) neigh_release(dn_db->peer); kfree(dn_db); } void dn_dev_down(struct net_device *dev) { struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); struct dn_ifaddr *ifa; if (dn_db == NULL) return; while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) { dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0); dn_dev_free_ifa(ifa); } dn_dev_delete(dev); } void dn_dev_init_pkt(struct sk_buff *skb) { } void dn_dev_veri_pkt(struct sk_buff *skb) { } void dn_dev_hello(struct sk_buff *skb) { } void dn_dev_devices_off(void) { struct net_device *dev; rtnl_lock(); for_each_netdev(&init_net, dev) dn_dev_down(dev); rtnl_unlock(); } void dn_dev_devices_on(void) { struct net_device *dev; rtnl_lock(); for_each_netdev(&init_net, dev) { if (dev->flags & IFF_UP) dn_dev_up(dev); } rtnl_unlock(); } int register_dnaddr_notifier(struct notifier_block *nb) { return 
blocking_notifier_chain_register(&dnaddr_chain, nb); } int unregister_dnaddr_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&dnaddr_chain, nb); } #ifdef CONFIG_PROC_FS static inline int is_dn_dev(struct net_device *dev) { return dev->dn_ptr != NULL; } static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { int i; struct net_device *dev; rcu_read_lock(); if (*pos == 0) return SEQ_START_TOKEN; i = 1; for_each_netdev_rcu(&init_net, dev) { if (!is_dn_dev(dev)) continue; if (i++ == *pos) return dev; } return NULL; } static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net_device *dev; ++*pos; dev = v; if (v == SEQ_START_TOKEN) dev = net_device_entry(&init_net.dev_base_head); for_each_netdev_continue_rcu(&init_net, dev) { if (!is_dn_dev(dev)) continue; return dev; } return NULL; } static void dn_dev_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static char *dn_type2asc(char type) { switch (type) { case DN_DEV_BCAST: return "B"; case DN_DEV_UCAST: return "U"; case DN_DEV_MPOINT: return "M"; } return "?"; } static int dn_dev_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n"); else { struct net_device *dev = v; char peer_buf[DN_ASCBUF_LEN]; char router_buf[DN_ASCBUF_LEN]; struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr); seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu" " %04hu %03d %02x %-10s %-7s %-7s\n", dev->name ? dev->name : "???", dn_type2asc(dn_db->parms.mode), 0, 0, dn_db->t3, dn_db->parms.t3, mtu2blksize(dev), dn_db->parms.priority, dn_db->parms.state, dn_db->parms.name, dn_db->router ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->router->primary_key), router_buf) : "", dn_db->peer ? 
dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->peer->primary_key), peer_buf) : ""); } return 0; } static const struct seq_operations dn_dev_seq_ops = { .start = dn_dev_seq_start, .next = dn_dev_seq_next, .stop = dn_dev_seq_stop, .show = dn_dev_seq_show, }; static int dn_dev_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &dn_dev_seq_ops); } static const struct file_operations dn_dev_seq_fops = { .owner = THIS_MODULE, .open = dn_dev_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_PROC_FS */ static int addr[2]; module_param_array(addr, int, NULL, 0444); MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node"); void __init dn_dev_init(void) { if (addr[0] > 63 || addr[0] < 0) { printk(KERN_ERR "DECnet: Area must be between 0 and 63"); return; } if (addr[1] > 1023 || addr[1] < 0) { printk(KERN_ERR "DECnet: Node must be between 0 and 1023"); return; } decnet_address = cpu_to_le16((addr[0] << 10) | addr[1]); dn_dev_devices_on(); rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL, NULL); rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, NULL); rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, NULL); proc_create("decnet_dev", S_IRUGO, init_net.proc_net, &dn_dev_seq_fops); #ifdef CONFIG_SYSCTL { int i; for(i = 0; i < DN_DEV_LIST_SIZE; i++) dn_dev_sysctl_register(NULL, &dn_dev_list[i]); } #endif /* CONFIG_SYSCTL */ } void __exit dn_dev_cleanup(void) { #ifdef CONFIG_SYSCTL { int i; for(i = 0; i < DN_DEV_LIST_SIZE; i++) dn_dev_sysctl_unregister(&dn_dev_list[i]); } #endif /* CONFIG_SYSCTL */ remove_proc_entry("decnet_dev", init_net.proc_net); dn_dev_devices_off(); }
gpl-2.0
groeck/linux
tools/perf/ui/gtk/util.c
1883
2347
#include "../util.h" #include "../../util/debug.h" #include "gtk.h" #include <string.h> struct perf_gtk_context *pgctx; struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window) { struct perf_gtk_context *ctx; ctx = malloc(sizeof(*pgctx)); if (ctx) ctx->main_window = window; return ctx; } int perf_gtk__deactivate_context(struct perf_gtk_context **ctx) { if (!perf_gtk__is_active_context(*ctx)) return -1; zfree(ctx); return 0; } static int perf_gtk__error(const char *format, va_list args) { char *msg; GtkWidget *dialog; if (!perf_gtk__is_active_context(pgctx) || vasprintf(&msg, format, args) < 0) { fprintf(stderr, "Error:\n"); vfprintf(stderr, format, args); fprintf(stderr, "\n"); return -1; } dialog = gtk_message_dialog_new_with_markup(GTK_WINDOW(pgctx->main_window), GTK_DIALOG_DESTROY_WITH_PARENT, GTK_MESSAGE_ERROR, GTK_BUTTONS_CLOSE, "<b>Error</b>\n\n%s", msg); gtk_dialog_run(GTK_DIALOG(dialog)); gtk_widget_destroy(dialog); free(msg); return 0; } #ifdef HAVE_GTK_INFO_BAR_SUPPORT static int perf_gtk__warning_info_bar(const char *format, va_list args) { char *msg; if (!perf_gtk__is_active_context(pgctx) || vasprintf(&msg, format, args) < 0) { fprintf(stderr, "Warning:\n"); vfprintf(stderr, format, args); fprintf(stderr, "\n"); return -1; } gtk_label_set_text(GTK_LABEL(pgctx->message_label), msg); gtk_info_bar_set_message_type(GTK_INFO_BAR(pgctx->info_bar), GTK_MESSAGE_WARNING); gtk_widget_show(pgctx->info_bar); free(msg); return 0; } #else static int perf_gtk__warning_statusbar(const char *format, va_list args) { char *msg, *p; if (!perf_gtk__is_active_context(pgctx) || vasprintf(&msg, format, args) < 0) { fprintf(stderr, "Warning:\n"); vfprintf(stderr, format, args); fprintf(stderr, "\n"); return -1; } gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar), pgctx->statbar_ctx_id); /* Only first line can be displayed */ p = strchr(msg, '\n'); if (p) *p = '\0'; gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar), pgctx->statbar_ctx_id, msg); free(msg); return 0; } 
#endif struct perf_error_ops perf_gtk_eops = { .error = perf_gtk__error, #ifdef HAVE_GTK_INFO_BAR_SUPPORT .warning = perf_gtk__warning_info_bar, #else .warning = perf_gtk__warning_statusbar, #endif };
gpl-2.0
SaberMod/Linux-stable
tools/perf/ui/gtk/util.c
1883
2347
/*
 * Shared helpers for the perf GTK front-end: context (de)activation and
 * the error/warning callbacks plugged into struct perf_error_ops.
 */
#include "../util.h"
#include "../../util/debug.h"
#include "gtk.h"

#include <string.h>

/* Currently active GTK context; NULL when the GTK UI is not running. */
struct perf_gtk_context *pgctx;

/*
 * Allocate a context bound to @window.
 *
 * The context is zero-initialized so optional members (info_bar,
 * message_label, statbar, ...) read as NULL/0 until their setup code
 * runs, instead of containing malloc() garbage -- the warning handlers
 * below dereference those members.  Returns NULL on allocation failure.
 */
struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window)
{
	struct perf_gtk_context *ctx;

	/* sizeof(*ctx), not sizeof(*pgctx): tie the size to the object
	 * actually being allocated (same type today, but the idiom keeps
	 * them in lockstep if that ever changes). */
	ctx = calloc(1, sizeof(*ctx));
	if (ctx)
		ctx->main_window = window;

	return ctx;
}

/*
 * Free *@ctx and clear the caller's pointer (via zfree).
 * Returns 0 on success, -1 if *@ctx was not an active context.
 */
int perf_gtk__deactivate_context(struct perf_gtk_context **ctx)
{
	if (!perf_gtk__is_active_context(*ctx))
		return -1;

	zfree(ctx);
	return 0;
}

/*
 * Error hook: show a modal message dialog when the GTK context is up.
 * Falls back to stderr (and returns -1) when there is no active context
 * or the message cannot be formatted.
 */
static int perf_gtk__error(const char *format, va_list args)
{
	char *msg;
	GtkWidget *dialog;

	if (!perf_gtk__is_active_context(pgctx) ||
	    vasprintf(&msg, format, args) < 0) {
		fprintf(stderr, "Error:\n");
		vfprintf(stderr, format, args);
		fprintf(stderr, "\n");
		return -1;
	}

	dialog = gtk_message_dialog_new_with_markup(GTK_WINDOW(pgctx->main_window),
					GTK_DIALOG_DESTROY_WITH_PARENT,
					GTK_MESSAGE_ERROR,
					GTK_BUTTONS_CLOSE,
					"<b>Error</b>\n\n%s", msg);
	gtk_dialog_run(GTK_DIALOG(dialog));

	gtk_widget_destroy(dialog);
	free(msg);
	return 0;
}

#ifdef HAVE_GTK_INFO_BAR_SUPPORT
/*
 * Warning hook (info-bar flavor): put the message into the context's
 * info bar.  Same stderr fallback as perf_gtk__error().
 */
static int perf_gtk__warning_info_bar(const char *format, va_list args)
{
	char *msg;

	if (!perf_gtk__is_active_context(pgctx) ||
	    vasprintf(&msg, format, args) < 0) {
		fprintf(stderr, "Warning:\n");
		vfprintf(stderr, format, args);
		fprintf(stderr, "\n");
		return -1;
	}

	gtk_label_set_text(GTK_LABEL(pgctx->message_label), msg);
	gtk_info_bar_set_message_type(GTK_INFO_BAR(pgctx->info_bar),
				      GTK_MESSAGE_WARNING);
	gtk_widget_show(pgctx->info_bar);

	free(msg);
	return 0;
}
#else
/*
 * Warning hook (statusbar flavor): replace the current statusbar text.
 * Only the first line of the message fits, so truncate at '\n'.
 */
static int perf_gtk__warning_statusbar(const char *format, va_list args)
{
	char *msg, *p;

	if (!perf_gtk__is_active_context(pgctx) ||
	    vasprintf(&msg, format, args) < 0) {
		fprintf(stderr, "Warning:\n");
		vfprintf(stderr, format, args);
		fprintf(stderr, "\n");
		return -1;
	}

	gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar),
			  pgctx->statbar_ctx_id);

	/* Only first line can be displayed */
	p = strchr(msg, '\n');
	if (p)
		*p = '\0';

	gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar),
			   pgctx->statbar_ctx_id, msg);

	free(msg);
	return 0;
}
#endif

/* Error-reporting vtable installed while the GTK UI is active. */
struct perf_error_ops perf_gtk_eops = {
	.error		= perf_gtk__error,
#ifdef HAVE_GTK_INFO_BAR_SUPPORT
	.warning	= perf_gtk__warning_info_bar,
#else
	.warning	= perf_gtk__warning_statusbar,
#endif
};
gpl-2.0
bilalliberty/depricated-kernel-villec2--3.4-
net/ceph/auth_x.c
2139
17331
#include <linux/ceph/ceph_debug.h> #include <linux/err.h> #include <linux/module.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/ceph/decode.h> #include <linux/ceph/auth.h> #include "crypto.h" #include "auth_x.h" #include "auth_x_protocol.h" #define TEMP_TICKET_BUF_LEN 256 static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed); static int ceph_x_is_authenticated(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; int need; ceph_x_validate_tickets(ac, &need); dout("ceph_x_is_authenticated want=%d need=%d have=%d\n", ac->want_keys, need, xi->have_keys); return (ac->want_keys & xi->have_keys) == ac->want_keys; } static int ceph_x_should_authenticate(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; int need; ceph_x_validate_tickets(ac, &need); dout("ceph_x_should_authenticate want=%d need=%d have=%d\n", ac->want_keys, need, xi->have_keys); return need != 0; } static int ceph_x_encrypt_buflen(int ilen) { return sizeof(struct ceph_x_encrypt_header) + ilen + 16 + sizeof(u32); } static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *ibuf, int ilen, void *obuf, size_t olen) { struct ceph_x_encrypt_header head = { .struct_v = 1, .magic = cpu_to_le64(CEPHX_ENC_MAGIC) }; size_t len = olen - sizeof(u32); int ret; ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len, &head, sizeof(head), ibuf, ilen); if (ret) return ret; ceph_encode_32(&obuf, len); return len + sizeof(u32); } static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end, void *obuf, size_t olen) { struct ceph_x_encrypt_header head; size_t head_len = sizeof(head); int len, ret; len = ceph_decode_32(p); if (*p + len > end) return -EINVAL; dout("ceph_x_decrypt len %d\n", len); ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen, *p, len); if (ret) return ret; if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) return -EPERM; *p += len; return olen; } /* * get existing (or insert new) 
ticket handler */ static struct ceph_x_ticket_handler * get_ticket_handler(struct ceph_auth_client *ac, int service) { struct ceph_x_ticket_handler *th; struct ceph_x_info *xi = ac->private; struct rb_node *parent = NULL, **p = &xi->ticket_handlers.rb_node; while (*p) { parent = *p; th = rb_entry(parent, struct ceph_x_ticket_handler, node); if (service < th->service) p = &(*p)->rb_left; else if (service > th->service) p = &(*p)->rb_right; else return th; } /* add it */ th = kzalloc(sizeof(*th), GFP_NOFS); if (!th) return ERR_PTR(-ENOMEM); th->service = service; rb_link_node(&th->node, parent, p); rb_insert_color(&th->node, &xi->ticket_handlers); return th; } static void remove_ticket_handler(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th) { struct ceph_x_info *xi = ac->private; dout("remove_ticket_handler %p %d\n", th, th->service); rb_erase(&th->node, &xi->ticket_handlers); ceph_crypto_key_destroy(&th->session_key); if (th->ticket_blob) ceph_buffer_put(th->ticket_blob); kfree(th); } static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, struct ceph_crypto_key *secret, void *buf, void *end) { struct ceph_x_info *xi = ac->private; int num; void *p = buf; int ret; char *dbuf; char *ticket_buf; u8 reply_struct_v; dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); if (!dbuf) return -ENOMEM; ret = -ENOMEM; ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); if (!ticket_buf) goto out_dbuf; ceph_decode_need(&p, end, 1 + sizeof(u32), bad); reply_struct_v = ceph_decode_8(&p); if (reply_struct_v != 1) goto bad; num = ceph_decode_32(&p); dout("%d tickets\n", num); while (num--) { int type; u8 tkt_struct_v, blob_struct_v; struct ceph_x_ticket_handler *th; void *dp, *dend; int dlen; char is_enc; struct timespec validity; struct ceph_crypto_key old_key; void *tp, *tpend; struct ceph_timespec new_validity; struct ceph_crypto_key new_session_key; struct ceph_buffer *new_ticket_blob; unsigned long new_expires, new_renew_after; u64 new_secret_id; 
ceph_decode_need(&p, end, sizeof(u32) + 1, bad); type = ceph_decode_32(&p); dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); tkt_struct_v = ceph_decode_8(&p); if (tkt_struct_v != 1) goto bad; th = get_ticket_handler(ac, type); if (IS_ERR(th)) { ret = PTR_ERR(th); goto out; } /* blob for me */ dlen = ceph_x_decrypt(secret, &p, end, dbuf, TEMP_TICKET_BUF_LEN); if (dlen <= 0) { ret = dlen; goto out; } dout(" decrypted %d bytes\n", dlen); dend = dbuf + dlen; dp = dbuf; tkt_struct_v = ceph_decode_8(&dp); if (tkt_struct_v != 1) goto bad; memcpy(&old_key, &th->session_key, sizeof(old_key)); ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); if (ret) goto out; ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); ceph_decode_timespec(&validity, &new_validity); new_expires = get_seconds() + validity.tv_sec; new_renew_after = new_expires - (validity.tv_sec / 4); dout(" expires=%lu renew_after=%lu\n", new_expires, new_renew_after); /* ticket blob for service */ ceph_decode_8_safe(&p, end, is_enc, bad); tp = ticket_buf; if (is_enc) { /* encrypted */ dout(" encrypted ticket\n"); dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf, TEMP_TICKET_BUF_LEN); if (dlen < 0) { ret = dlen; goto out; } dlen = ceph_decode_32(&tp); } else { /* unencrypted */ ceph_decode_32_safe(&p, end, dlen, bad); ceph_decode_need(&p, end, dlen, bad); ceph_decode_copy(&p, ticket_buf, dlen); } tpend = tp + dlen; dout(" ticket blob is %d bytes\n", dlen); ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); blob_struct_v = ceph_decode_8(&tp); new_secret_id = ceph_decode_64(&tp); ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); if (ret) goto out; /* all is well, update our ticket */ ceph_crypto_key_destroy(&th->session_key); if (th->ticket_blob) ceph_buffer_put(th->ticket_blob); th->session_key = new_session_key; th->ticket_blob = new_ticket_blob; th->validity = new_validity; th->secret_id = new_secret_id; th->expires = new_expires; th->renew_after = new_renew_after; dout(" 
got ticket service %d (%s) secret_id %lld len %d\n", type, ceph_entity_type_name(type), th->secret_id, (int)th->ticket_blob->vec.iov_len); xi->have_keys |= th->service; } ret = 0; out: kfree(ticket_buf); out_dbuf: kfree(dbuf); return ret; bad: ret = -EINVAL; goto out; } static int ceph_x_build_authorizer(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th, struct ceph_x_authorizer *au) { int maxlen; struct ceph_x_authorize_a *msg_a; struct ceph_x_authorize_b msg_b; void *p, *end; int ret; int ticket_blob_len = (th->ticket_blob ? th->ticket_blob->vec.iov_len : 0); dout("build_authorizer for %s %p\n", ceph_entity_type_name(th->service), au); maxlen = sizeof(*msg_a) + sizeof(msg_b) + ceph_x_encrypt_buflen(ticket_blob_len); dout(" need len %d\n", maxlen); if (au->buf && au->buf->alloc_len < maxlen) { ceph_buffer_put(au->buf); au->buf = NULL; } if (!au->buf) { au->buf = ceph_buffer_new(maxlen, GFP_NOFS); if (!au->buf) return -ENOMEM; } au->service = th->service; au->secret_id = th->secret_id; msg_a = au->buf->vec.iov_base; msg_a->struct_v = 1; msg_a->global_id = cpu_to_le64(ac->global_id); msg_a->service_id = cpu_to_le32(th->service); msg_a->ticket_blob.struct_v = 1; msg_a->ticket_blob.secret_id = cpu_to_le64(th->secret_id); msg_a->ticket_blob.blob_len = cpu_to_le32(ticket_blob_len); if (ticket_blob_len) { memcpy(msg_a->ticket_blob.blob, th->ticket_blob->vec.iov_base, th->ticket_blob->vec.iov_len); } dout(" th %p secret_id %lld %lld\n", th, th->secret_id, le64_to_cpu(msg_a->ticket_blob.secret_id)); p = msg_a + 1; p += ticket_blob_len; end = au->buf->vec.iov_base + au->buf->vec.iov_len; get_random_bytes(&au->nonce, sizeof(au->nonce)); msg_b.struct_v = 1; msg_b.nonce = cpu_to_le64(au->nonce); ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b), p, end - p); if (ret < 0) goto out_buf; p += ret; au->buf->vec.iov_len = p - au->buf->vec.iov_base; dout(" built authorizer nonce %llx len %d\n", au->nonce, (int)au->buf->vec.iov_len); BUG_ON(au->buf->vec.iov_len 
> maxlen); return 0; out_buf: ceph_buffer_put(au->buf); au->buf = NULL; return ret; } static int ceph_x_encode_ticket(struct ceph_x_ticket_handler *th, void **p, void *end) { ceph_decode_need(p, end, 1 + sizeof(u64), bad); ceph_encode_8(p, 1); ceph_encode_64(p, th->secret_id); if (th->ticket_blob) { const char *buf = th->ticket_blob->vec.iov_base; u32 len = th->ticket_blob->vec.iov_len; ceph_encode_32_safe(p, end, len, bad); ceph_encode_copy_safe(p, end, buf, len, bad); } else { ceph_encode_32_safe(p, end, 0, bad); } return 0; bad: return -ERANGE; } static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed) { int want = ac->want_keys; struct ceph_x_info *xi = ac->private; int service; *pneed = ac->want_keys & ~(xi->have_keys); for (service = 1; service <= want; service <<= 1) { struct ceph_x_ticket_handler *th; if (!(ac->want_keys & service)) continue; if (*pneed & service) continue; th = get_ticket_handler(ac, service); if (IS_ERR(th)) { *pneed |= service; continue; } if (get_seconds() >= th->renew_after) *pneed |= service; if (get_seconds() >= th->expires) xi->have_keys &= ~service; } } static int ceph_x_build_request(struct ceph_auth_client *ac, void *buf, void *end) { struct ceph_x_info *xi = ac->private; int need; struct ceph_x_request_header *head = buf; int ret; struct ceph_x_ticket_handler *th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); if (IS_ERR(th)) return PTR_ERR(th); ceph_x_validate_tickets(ac, &need); dout("build_request want %x have %x need %x\n", ac->want_keys, xi->have_keys, need); if (need & CEPH_ENTITY_TYPE_AUTH) { struct ceph_x_authenticate *auth = (void *)(head + 1); void *p = auth + 1; struct ceph_x_challenge_blob tmp; char tmp_enc[40]; u64 *u; if (p > end) return -ERANGE; dout(" get_auth_session_key\n"); head->op = cpu_to_le16(CEPHX_GET_AUTH_SESSION_KEY); /* encrypt and hash */ get_random_bytes(&auth->client_challenge, sizeof(u64)); tmp.client_challenge = auth->client_challenge; tmp.server_challenge = 
cpu_to_le64(xi->server_challenge); ret = ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp), tmp_enc, sizeof(tmp_enc)); if (ret < 0) return ret; auth->struct_v = 1; auth->key = 0; for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++) auth->key ^= *(__le64 *)u; dout(" server_challenge %llx client_challenge %llx key %llx\n", xi->server_challenge, le64_to_cpu(auth->client_challenge), le64_to_cpu(auth->key)); /* now encode the old ticket if exists */ ret = ceph_x_encode_ticket(th, &p, end); if (ret < 0) return ret; return p - buf; } if (need) { void *p = head + 1; struct ceph_x_service_ticket_request *req; if (p > end) return -ERANGE; head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY); ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer); if (ret) return ret; ceph_encode_copy(&p, xi->auth_authorizer.buf->vec.iov_base, xi->auth_authorizer.buf->vec.iov_len); req = p; req->keys = cpu_to_le32(need); p += sizeof(*req); return p - buf; } return 0; } static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, void *buf, void *end) { struct ceph_x_info *xi = ac->private; struct ceph_x_reply_header *head = buf; struct ceph_x_ticket_handler *th; int len = end - buf; int op; int ret; if (result) return result; /* XXX hmm? 
*/ if (xi->starting) { /* it's a hello */ struct ceph_x_server_challenge *sc = buf; if (len != sizeof(*sc)) return -EINVAL; xi->server_challenge = le64_to_cpu(sc->server_challenge); dout("handle_reply got server challenge %llx\n", xi->server_challenge); xi->starting = false; xi->have_keys &= ~CEPH_ENTITY_TYPE_AUTH; return -EAGAIN; } op = le16_to_cpu(head->op); result = le32_to_cpu(head->result); dout("handle_reply op %d result %d\n", op, result); switch (op) { case CEPHX_GET_AUTH_SESSION_KEY: /* verify auth key */ ret = ceph_x_proc_ticket_reply(ac, &xi->secret, buf + sizeof(*head), end); break; case CEPHX_GET_PRINCIPAL_SESSION_KEY: th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); if (IS_ERR(th)) return PTR_ERR(th); ret = ceph_x_proc_ticket_reply(ac, &th->session_key, buf + sizeof(*head), end); break; default: return -EINVAL; } if (ret) return ret; if (ac->want_keys == xi->have_keys) return 0; return -EAGAIN; } static int ceph_x_create_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; int ret; th = get_ticket_handler(ac, peer_type); if (IS_ERR(th)) return PTR_ERR(th); au = kzalloc(sizeof(*au), GFP_NOFS); if (!au) return -ENOMEM; ret = ceph_x_build_authorizer(ac, th, au); if (ret) { kfree(au); return ret; } auth->authorizer = (struct ceph_authorizer *) au; auth->authorizer_buf = au->buf->vec.iov_base; auth->authorizer_buf_len = au->buf->vec.iov_len; auth->authorizer_reply_buf = au->reply_buf; auth->authorizer_reply_buf_len = sizeof (au->reply_buf); return 0; } static int ceph_x_update_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; th = get_ticket_handler(ac, peer_type); if (IS_ERR(th)) return PTR_ERR(th); au = (struct ceph_x_authorizer *)auth->authorizer; if (au->secret_id < th->secret_id) { dout("ceph_x_update_authorizer service %u secret %llu < 
%llu\n", au->service, au->secret_id, th->secret_id); return ceph_x_build_authorizer(ac, th, au); } return 0; } static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len) { struct ceph_x_authorizer *au = (void *)a; struct ceph_x_ticket_handler *th; int ret = 0; struct ceph_x_authorize_reply reply; void *p = au->reply_buf; void *end = p + sizeof(au->reply_buf); th = get_ticket_handler(ac, au->service); if (IS_ERR(th)) return PTR_ERR(th); ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply)); if (ret < 0) return ret; if (ret != sizeof(reply)) return -EPERM; if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one)) ret = -EPERM; else ret = 0; dout("verify_authorizer_reply nonce %llx got %llx ret %d\n", au->nonce, le64_to_cpu(reply.nonce_plus_one), ret); return ret; } static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac, struct ceph_authorizer *a) { struct ceph_x_authorizer *au = (void *)a; ceph_buffer_put(au->buf); kfree(au); } static void ceph_x_reset(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; dout("reset\n"); xi->starting = true; xi->server_challenge = 0; } static void ceph_x_destroy(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; struct rb_node *p; dout("ceph_x_destroy %p\n", ac); ceph_crypto_key_destroy(&xi->secret); while ((p = rb_first(&xi->ticket_handlers)) != NULL) { struct ceph_x_ticket_handler *th = rb_entry(p, struct ceph_x_ticket_handler, node); remove_ticket_handler(ac, th); } if (xi->auth_authorizer.buf) ceph_buffer_put(xi->auth_authorizer.buf); kfree(ac->private); ac->private = NULL; } static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type) { struct ceph_x_ticket_handler *th; th = get_ticket_handler(ac, peer_type); if (!IS_ERR(th)) memset(&th->validity, 0, sizeof(th->validity)); } static const struct ceph_auth_client_ops ceph_x_ops = { .name = "x", .is_authenticated = ceph_x_is_authenticated, 
.should_authenticate = ceph_x_should_authenticate, .build_request = ceph_x_build_request, .handle_reply = ceph_x_handle_reply, .create_authorizer = ceph_x_create_authorizer, .update_authorizer = ceph_x_update_authorizer, .verify_authorizer_reply = ceph_x_verify_authorizer_reply, .destroy_authorizer = ceph_x_destroy_authorizer, .invalidate_authorizer = ceph_x_invalidate_authorizer, .reset = ceph_x_reset, .destroy = ceph_x_destroy, }; int ceph_x_init(struct ceph_auth_client *ac) { struct ceph_x_info *xi; int ret; dout("ceph_x_init %p\n", ac); ret = -ENOMEM; xi = kzalloc(sizeof(*xi), GFP_NOFS); if (!xi) goto out; ret = -EINVAL; if (!ac->key) { pr_err("no secret set (for auth_x protocol)\n"); goto out_nomem; } ret = ceph_crypto_key_clone(&xi->secret, ac->key); if (ret < 0) { pr_err("cannot clone key: %d\n", ret); goto out_nomem; } xi->starting = true; xi->ticket_handlers = RB_ROOT; ac->protocol = CEPH_AUTH_CEPHX; ac->private = xi; ac->ops = &ceph_x_ops; return 0; out_nomem: kfree(xi); out: return ret; }
gpl-2.0
dinh-linux/linux-socfpga
drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
2395
3494
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

/*
 * NVA3 (GT21x) display engine: thin specialization of the NV50 display
 * code -- NVA3-specific object classes and method tables, with the heavy
 * lifting (interrupts, channel ofuncs, output control) delegated to the
 * shared nv50_disp_* implementations.
 */

#include <engine/software.h>
#include <engine/disp.h>

#include <core/class.h>

#include "nv50.h"

/* DMA object classes for the NVA3 display channels (master, sync, overlay,
 * overlay-immediate, cursor); all reuse the NV50 channel ofuncs. */
static struct nouveau_oclass
nva3_disp_sclass[] = {
	{ NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
	{ NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
	{ NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
	{ NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
	{ NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
	{}
};

/* Output-path methods exposed on the base display object.  Relative to
 * NV50 this adds NVA3_DISP_SOR_HDA_ELD (HDA audio ELD upload) alongside
 * the NV84 HDMI power method; dispatch goes through the shared
 * nv50_{sor,dac,pior}_mthd handlers.  Non-static: also used by other
 * chipset variants (declared elsewhere). */
struct nouveau_omthds
nva3_disp_base_omthds[] = {
	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
	{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD)     , nv50_sor_mthd },
	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
	{ DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
	{ PIOR_MTHD(NV50_DISP_PIOR_PWR)       , nv50_pior_mthd },
	{ PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR)  , nv50_pior_mthd },
	{ PIOR_MTHD(NV50_DISP_PIOR_DP_PWR)    , nv50_pior_mthd },
	{},
};

/* The base display object class users instantiate. */
static struct nouveau_oclass
nva3_disp_base_oclass[] = {
	{ NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
	{}
};

/*
 * Constructor for the NVA3 display engine object: create the common
 * display engine, then wire up NVA3's class tables, per-board output
 * counts and the output-control callbacks.
 */
static int
nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv50_disp_priv *priv;
	int ret;

	ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
				  "display", &priv);
	/* *pobject is set even on failure so the core can tear down the
	 * partially-constructed object (nouveau constructor convention). */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->sclass = nva3_disp_base_oclass;
	nv_engine(priv)->cclass = &nv50_disp_cclass;
	nv_subdev(priv)->intr = nv50_disp_intr;
	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
	priv->sclass = nva3_disp_sclass;
	/* Output counts for this chipset family. */
	priv->head.nr = 2;
	priv->dac.nr = 3;
	priv->sor.nr = 4;
	priv->pior.nr = 3;
	/* Output-control callbacks; NVA3-specific pieces are the HDA ELD
	 * and HDMI control, everything else is NV50/NV94 shared code. */
	priv->dac.power = nv50_dac_power;
	priv->dac.sense = nv50_dac_sense;
	priv->sor.power = nv50_sor_power;
	priv->sor.hda_eld = nva3_hda_eld;
	priv->sor.hdmi = nva3_hdmi_ctrl;
	priv->sor.dp = &nv94_sor_dp_func;
	priv->pior.power = nv50_pior_power;
	priv->pior.dp = &nv50_pior_dp_func;
	return 0;
}

/* Engine registration: NVA3 ctor plus the generic disp dtor/init/fini. */
struct nouveau_oclass
nva3_disp_oclass = {
	.handle = NV_ENGINE(DISP, 0x85),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nva3_disp_ctor,
		.dtor = _nouveau_disp_dtor,
		.init = _nouveau_disp_init,
		.fini = _nouveau_disp_fini,
	},
};
gpl-2.0
jetonbacaj/SomeKernel_920P_OL1
drivers/staging/iio/light/tsl2583.c
2395
25713
/* * Device driver for monitoring ambient light intensity (lux) * within the TAOS tsl258x family of devices (tsl2580, tsl2581). * * Copyright (c) 2011, TAOS Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/mutex.h> #include <linux/unistd.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/iio/iio.h> #define TSL258X_MAX_DEVICE_REGS 32 /* Triton register offsets */ #define TSL258X_REG_MAX 8 /* Device Registers and Masks */ #define TSL258X_CNTRL 0x00 #define TSL258X_ALS_TIME 0X01 #define TSL258X_INTERRUPT 0x02 #define TSL258X_GAIN 0x07 #define TSL258X_REVID 0x11 #define TSL258X_CHIPID 0x12 #define TSL258X_ALS_CHAN0LO 0x14 #define TSL258X_ALS_CHAN0HI 0x15 #define TSL258X_ALS_CHAN1LO 0x16 #define TSL258X_ALS_CHAN1HI 0x17 #define TSL258X_TMR_LO 0x18 #define TSL258X_TMR_HI 0x19 /* tsl2583 cmd reg masks */ #define TSL258X_CMD_REG 0x80 #define TSL258X_CMD_SPL_FN 0x60 #define TSL258X_CMD_ALS_INT_CLR 0X01 /* tsl2583 cntrl reg masks */ #define TSL258X_CNTL_ADC_ENBL 0x02 #define TSL258X_CNTL_PWR_ON 0x01 /* tsl2583 status reg masks */ #define TSL258X_STA_ADC_VALID 0x01 #define TSL258X_STA_ADC_INTR 0x10 /* Lux calculation constants */ #define 
TSL258X_LUX_CALC_OVER_FLOW 65535 enum { TSL258X_CHIP_UNKNOWN = 0, TSL258X_CHIP_WORKING = 1, TSL258X_CHIP_SUSPENDED = 2 }; /* Per-device data */ struct taos_als_info { u16 als_ch0; u16 als_ch1; u16 lux; }; struct taos_settings { int als_time; int als_gain; int als_gain_trim; int als_cal_target; }; struct tsl2583_chip { struct mutex als_mutex; struct i2c_client *client; struct taos_als_info als_cur_info; struct taos_settings taos_settings; int als_time_scale; int als_saturation; int taos_chip_status; u8 taos_config[8]; }; /* * Initial values for device - this values can/will be changed by driver. * and applications as needed. * These values are dynamic. */ static const u8 taos_config[8] = { 0x00, 0xee, 0x00, 0x03, 0x00, 0xFF, 0xFF, 0x00 }; /* cntrl atime intC Athl0 Athl1 Athh0 Athh1 gain */ struct taos_lux { unsigned int ratio; unsigned int ch0; unsigned int ch1; }; /* This structure is intentionally large to accommodate updates via sysfs. */ /* Sized to 11 = max 10 segments + 1 termination segment */ /* Assumption is one and only one type of glass used */ static struct taos_lux taos_device_lux[11] = { { 9830, 8520, 15729 }, { 12452, 10807, 23344 }, { 14746, 6383, 11705 }, { 17695, 4063, 6554 }, }; struct gainadj { s16 ch0; s16 ch1; }; /* Index = (0 - 3) Used to validate the gain selection index */ static const struct gainadj gainadj[] = { { 1, 1 }, { 8, 8 }, { 16, 16 }, { 107, 115 } }; /* * Provides initial operational parameter defaults. * These defaults may be changed through the device's sysfs files. 
*/ static void taos_defaults(struct tsl2583_chip *chip) { /* Operational parameters */ chip->taos_settings.als_time = 100; /* must be a multiple of 50mS */ chip->taos_settings.als_gain = 0; /* this is actually an index into the gain table */ /* assume clear glass as default */ chip->taos_settings.als_gain_trim = 1000; /* default gain trim to account for aperture effects */ chip->taos_settings.als_cal_target = 130; /* Known external ALS reading used for calibration */ } /* * Read a number of bytes starting at register (reg) location. * Return 0, or i2c_smbus_write_byte ERROR code. */ static int taos_i2c_read(struct i2c_client *client, u8 reg, u8 *val, unsigned int len) { int i, ret; for (i = 0; i < len; i++) { /* select register to write */ ret = i2c_smbus_write_byte(client, (TSL258X_CMD_REG | reg)); if (ret < 0) { dev_err(&client->dev, "taos_i2c_read failed to write" " register %x\n", reg); return ret; } /* read the data */ *val = i2c_smbus_read_byte(client); val++; reg++; } return 0; } /* * Reads and calculates current lux value. * The raw ch0 and ch1 values of the ambient light sensed in the last * integration cycle are read from the device. * Time scale factor array values are adjusted based on the integration time. * The raw values are multiplied by a scale factor, and device gain is obtained * using gain index. Limit checks are done next, then the ratio of a multiple * of ch1 value, to the ch0 value, is calculated. The array taos_device_lux[] * declared above is then scanned to find the first ratio value that is just * above the ratio we just calculated. The ch0 and ch1 multiplier constants in * the array are then used along with the time scale factor array values, to * calculate the lux. 
*/ static int taos_get_lux(struct iio_dev *indio_dev) { u16 ch0, ch1; /* separated ch0/ch1 data from device */ u32 lux; /* raw lux calculated from device data */ u64 lux64; u32 ratio; u8 buf[5]; struct taos_lux *p; struct tsl2583_chip *chip = iio_priv(indio_dev); int i, ret; u32 ch0lux = 0; u32 ch1lux = 0; if (mutex_trylock(&chip->als_mutex) == 0) { dev_info(&chip->client->dev, "taos_get_lux device is busy\n"); return chip->als_cur_info.lux; /* busy, so return LAST VALUE */ } if (chip->taos_chip_status != TSL258X_CHIP_WORKING) { /* device is not enabled */ dev_err(&chip->client->dev, "taos_get_lux device is not enabled\n"); ret = -EBUSY ; goto out_unlock; } ret = taos_i2c_read(chip->client, (TSL258X_CMD_REG), &buf[0], 1); if (ret < 0) { dev_err(&chip->client->dev, "taos_get_lux failed to read CMD_REG\n"); goto out_unlock; } /* is data new & valid */ if (!(buf[0] & TSL258X_STA_ADC_INTR)) { dev_err(&chip->client->dev, "taos_get_lux data not valid\n"); ret = chip->als_cur_info.lux; /* return LAST VALUE */ goto out_unlock; } for (i = 0; i < 4; i++) { int reg = TSL258X_CMD_REG | (TSL258X_ALS_CHAN0LO + i); ret = taos_i2c_read(chip->client, reg, &buf[i], 1); if (ret < 0) { dev_err(&chip->client->dev, "taos_get_lux failed to read" " register %x\n", reg); goto out_unlock; } } /* clear status, really interrupt status (interrupts are off), but * we use the bit anyway - don't forget 0x80 - this is a command*/ ret = i2c_smbus_write_byte(chip->client, (TSL258X_CMD_REG | TSL258X_CMD_SPL_FN | TSL258X_CMD_ALS_INT_CLR)); if (ret < 0) { dev_err(&chip->client->dev, "taos_i2c_write_command failed in taos_get_lux, err = %d\n", ret); goto out_unlock; /* have no data, so return failure */ } /* extract ALS/lux data */ ch0 = le16_to_cpup((const __le16 *)&buf[0]); ch1 = le16_to_cpup((const __le16 *)&buf[2]); chip->als_cur_info.als_ch0 = ch0; chip->als_cur_info.als_ch1 = ch1; if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation)) goto return_max; if (ch0 == 0) { /* have no data, 
so return LAST VALUE */ ret = chip->als_cur_info.lux = 0; goto out_unlock; } /* calculate ratio */ ratio = (ch1 << 15) / ch0; /* convert to unscaled lux using the pointer to the table */ for (p = (struct taos_lux *) taos_device_lux; p->ratio != 0 && p->ratio < ratio; p++) ; if (p->ratio == 0) { lux = 0; } else { ch0lux = ((ch0 * p->ch0) + (gainadj[chip->taos_settings.als_gain].ch0 >> 1)) / gainadj[chip->taos_settings.als_gain].ch0; ch1lux = ((ch1 * p->ch1) + (gainadj[chip->taos_settings.als_gain].ch1 >> 1)) / gainadj[chip->taos_settings.als_gain].ch1; lux = ch0lux - ch1lux; } /* note: lux is 31 bit max at this point */ if (ch1lux > ch0lux) { dev_dbg(&chip->client->dev, "No Data - Return last value\n"); ret = chip->als_cur_info.lux = 0; goto out_unlock; } /* adjust for active time scale */ if (chip->als_time_scale == 0) lux = 0; else lux = (lux + (chip->als_time_scale >> 1)) / chip->als_time_scale; /* Adjust for active gain scale. * The taos_device_lux tables above have a factor of 8192 built in, * so we need to shift right. * User-specified gain provides a multiplier. * Apply user-specified gain before shifting right to retain precision. * Use 64 bits to avoid overflow on multiplication. * Then go back to 32 bits before division to avoid using div_u64(). */ lux64 = lux; lux64 = lux64 * chip->taos_settings.als_gain_trim; lux64 >>= 13; lux = lux64; lux = (lux + 500) / 1000; if (lux > TSL258X_LUX_CALC_OVER_FLOW) { /* check for overflow */ return_max: lux = TSL258X_LUX_CALC_OVER_FLOW; } /* Update the structure with the latest VALID lux. */ chip->als_cur_info.lux = lux; ret = lux; out_unlock: mutex_unlock(&chip->als_mutex); return ret; } /* * Obtain single reading and calculate the als_gain_trim (later used * to derive actual lux). * Return updated gain_trim value. 
*/ static int taos_als_calibrate(struct iio_dev *indio_dev) { struct tsl2583_chip *chip = iio_priv(indio_dev); u8 reg_val; unsigned int gain_trim_val; int ret; int lux_val; ret = i2c_smbus_write_byte(chip->client, (TSL258X_CMD_REG | TSL258X_CNTRL)); if (ret < 0) { dev_err(&chip->client->dev, "taos_als_calibrate failed to reach the CNTRL register, ret=%d\n", ret); return ret; } reg_val = i2c_smbus_read_byte(chip->client); if ((reg_val & (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON)) != (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON)) { dev_err(&chip->client->dev, "taos_als_calibrate failed: device not powered on with ADC enabled\n"); return -1; } ret = i2c_smbus_write_byte(chip->client, (TSL258X_CMD_REG | TSL258X_CNTRL)); if (ret < 0) { dev_err(&chip->client->dev, "taos_als_calibrate failed to reach the STATUS register, ret=%d\n", ret); return ret; } reg_val = i2c_smbus_read_byte(chip->client); if ((reg_val & TSL258X_STA_ADC_VALID) != TSL258X_STA_ADC_VALID) { dev_err(&chip->client->dev, "taos_als_calibrate failed: STATUS - ADC not valid.\n"); return -ENODATA; } lux_val = taos_get_lux(indio_dev); if (lux_val < 0) { dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n"); return lux_val; } gain_trim_val = (unsigned int) (((chip->taos_settings.als_cal_target) * chip->taos_settings.als_gain_trim) / lux_val); if ((gain_trim_val < 250) || (gain_trim_val > 4000)) { dev_err(&chip->client->dev, "taos_als_calibrate failed: trim_val of %d is out of range\n", gain_trim_val); return -ENODATA; } chip->taos_settings.als_gain_trim = (int) gain_trim_val; return (int) gain_trim_val; } /* * Turn the device on. * Configuration must be set before calling this function. 
*/ static int taos_chip_on(struct iio_dev *indio_dev) { int i; int ret; u8 *uP; u8 utmp; int als_count; int als_time; struct tsl2583_chip *chip = iio_priv(indio_dev); /* and make sure we're not already on */ if (chip->taos_chip_status == TSL258X_CHIP_WORKING) { /* if forcing a register update - turn off, then on */ dev_info(&chip->client->dev, "device is already enabled\n"); return -EINVAL; } /* determine als integration register */ als_count = (chip->taos_settings.als_time * 100 + 135) / 270; if (als_count == 0) als_count = 1; /* ensure at least one cycle */ /* convert back to time (encompasses overrides) */ als_time = (als_count * 27 + 5) / 10; chip->taos_config[TSL258X_ALS_TIME] = 256 - als_count; /* Set the gain based on taos_settings struct */ chip->taos_config[TSL258X_GAIN] = chip->taos_settings.als_gain; /* set chip struct re scaling and saturation */ chip->als_saturation = als_count * 922; /* 90% of full scale */ chip->als_time_scale = (als_time + 25) / 50; /* TSL258x Specific power-on / adc enable sequence * Power on the device 1st. */ utmp = TSL258X_CNTL_PWR_ON; ret = i2c_smbus_write_byte_data(chip->client, TSL258X_CMD_REG | TSL258X_CNTRL, utmp); if (ret < 0) { dev_err(&chip->client->dev, "taos_chip_on failed on CNTRL reg.\n"); return -1; } /* Use the following shadow copy for our delay before enabling ADC. * Write all the registers. 
*/ for (i = 0, uP = chip->taos_config; i < TSL258X_REG_MAX; i++) { ret = i2c_smbus_write_byte_data(chip->client, TSL258X_CMD_REG + i, *uP++); if (ret < 0) { dev_err(&chip->client->dev, "taos_chip_on failed on reg %d.\n", i); return -1; } } msleep(3); /* NOW enable the ADC * initialize the desired mode of operation */ utmp = TSL258X_CNTL_PWR_ON | TSL258X_CNTL_ADC_ENBL; ret = i2c_smbus_write_byte_data(chip->client, TSL258X_CMD_REG | TSL258X_CNTRL, utmp); if (ret < 0) { dev_err(&chip->client->dev, "taos_chip_on failed on 2nd CTRL reg.\n"); return -1; } chip->taos_chip_status = TSL258X_CHIP_WORKING; return ret; } static int taos_chip_off(struct iio_dev *indio_dev) { struct tsl2583_chip *chip = iio_priv(indio_dev); int ret; /* turn device off */ chip->taos_chip_status = TSL258X_CHIP_SUSPENDED; ret = i2c_smbus_write_byte_data(chip->client, TSL258X_CMD_REG | TSL258X_CNTRL, 0x00); return ret; } /* Sysfs Interface Functions */ static ssize_t taos_power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); return sprintf(buf, "%d\n", chip->taos_chip_status); } static ssize_t taos_power_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); unsigned long value; if (strict_strtoul(buf, 0, &value)) return -EINVAL; if (value == 0) taos_chip_off(indio_dev); else taos_chip_on(indio_dev); return len; } static ssize_t taos_gain_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); char gain[4] = {0}; switch (chip->taos_settings.als_gain) { case 0: strcpy(gain, "001"); break; case 1: strcpy(gain, "008"); break; case 2: strcpy(gain, "016"); break; case 3: strcpy(gain, "111"); break; } return sprintf(buf, "%s\n", gain); } static ssize_t taos_gain_store(struct device *dev, 
struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); unsigned long value; if (strict_strtoul(buf, 0, &value)) return -EINVAL; switch (value) { case 1: chip->taos_settings.als_gain = 0; break; case 8: chip->taos_settings.als_gain = 1; break; case 16: chip->taos_settings.als_gain = 2; break; case 111: chip->taos_settings.als_gain = 3; break; default: dev_err(dev, "Invalid Gain Index (must be 1,8,16,111)\n"); return -1; } return len; } static ssize_t taos_gain_available_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", "1 8 16 111"); } static ssize_t taos_als_time_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); return sprintf(buf, "%d\n", chip->taos_settings.als_time); } static ssize_t taos_als_time_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); unsigned long value; if (strict_strtoul(buf, 0, &value)) return -EINVAL; if ((value < 50) || (value > 650)) return -EINVAL; if (value % 50) return -EINVAL; chip->taos_settings.als_time = value; return len; } static ssize_t taos_als_time_available_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", "50 100 150 200 250 300 350 400 450 500 550 600 650"); } static ssize_t taos_als_trim_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); return sprintf(buf, "%d\n", chip->taos_settings.als_gain_trim); } static ssize_t taos_als_trim_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); 
struct tsl2583_chip *chip = iio_priv(indio_dev); unsigned long value; if (strict_strtoul(buf, 0, &value)) return -EINVAL; if (value) chip->taos_settings.als_gain_trim = value; return len; } static ssize_t taos_als_cal_target_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); return sprintf(buf, "%d\n", chip->taos_settings.als_cal_target); } static ssize_t taos_als_cal_target_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); unsigned long value; if (strict_strtoul(buf, 0, &value)) return -EINVAL; if (value) chip->taos_settings.als_cal_target = value; return len; } static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; ret = taos_get_lux(dev_to_iio_dev(dev)); if (ret < 0) return ret; return sprintf(buf, "%d\n", ret); } static ssize_t taos_do_calibrate(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); unsigned long value; if (strict_strtoul(buf, 0, &value)) return -EINVAL; if (value == 1) taos_als_calibrate(indio_dev); return len; } static ssize_t taos_luxtable_show(struct device *dev, struct device_attribute *attr, char *buf) { int i; int offset = 0; for (i = 0; i < ARRAY_SIZE(taos_device_lux); i++) { offset += sprintf(buf + offset, "%d,%d,%d,", taos_device_lux[i].ratio, taos_device_lux[i].ch0, taos_device_lux[i].ch1); if (taos_device_lux[i].ratio == 0) { /* We just printed the first "0" entry. * Now get rid of the extra "," and break. 
*/ offset--; break; } } offset += sprintf(buf + offset, "\n"); return offset; } static ssize_t taos_luxtable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); int value[ARRAY_SIZE(taos_device_lux)*3 + 1]; int n; get_options(buf, ARRAY_SIZE(value), value); /* We now have an array of ints starting at value[1], and * enumerated by value[0]. * We expect each group of three ints is one table entry, * and the last table entry is all 0. */ n = value[0]; if ((n % 3) || n < 6 || n > ((ARRAY_SIZE(taos_device_lux) - 1) * 3)) { dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n); return -EINVAL; } if ((value[(n - 2)] | value[(n - 1)] | value[n]) != 0) { dev_info(dev, "LUX TABLE INPUT ERROR 2 Value[0]=%d\n", n); return -EINVAL; } if (chip->taos_chip_status == TSL258X_CHIP_WORKING) taos_chip_off(indio_dev); /* Zero out the table */ memset(taos_device_lux, 0, sizeof(taos_device_lux)); memcpy(taos_device_lux, &value[1], (value[0] * 4)); taos_chip_on(indio_dev); return len; } static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, taos_power_state_show, taos_power_state_store); static DEVICE_ATTR(illuminance0_calibscale, S_IRUGO | S_IWUSR, taos_gain_show, taos_gain_store); static DEVICE_ATTR(illuminance0_calibscale_available, S_IRUGO, taos_gain_available_show, NULL); static DEVICE_ATTR(illuminance0_integration_time, S_IRUGO | S_IWUSR, taos_als_time_show, taos_als_time_store); static DEVICE_ATTR(illuminance0_integration_time_available, S_IRUGO, taos_als_time_available_show, NULL); static DEVICE_ATTR(illuminance0_calibbias, S_IRUGO | S_IWUSR, taos_als_trim_show, taos_als_trim_store); static DEVICE_ATTR(illuminance0_input_target, S_IRUGO | S_IWUSR, taos_als_cal_target_show, taos_als_cal_target_store); static DEVICE_ATTR(illuminance0_input, S_IRUGO, taos_lux_show, NULL); static DEVICE_ATTR(illuminance0_calibrate, S_IWUSR, NULL, taos_do_calibrate); 
static DEVICE_ATTR(illuminance0_lux_table, S_IRUGO | S_IWUSR, taos_luxtable_show, taos_luxtable_store); static struct attribute *sysfs_attrs_ctrl[] = { &dev_attr_power_state.attr, &dev_attr_illuminance0_calibscale.attr, /* Gain */ &dev_attr_illuminance0_calibscale_available.attr, &dev_attr_illuminance0_integration_time.attr, /* I time*/ &dev_attr_illuminance0_integration_time_available.attr, &dev_attr_illuminance0_calibbias.attr, /* trim */ &dev_attr_illuminance0_input_target.attr, &dev_attr_illuminance0_input.attr, &dev_attr_illuminance0_calibrate.attr, &dev_attr_illuminance0_lux_table.attr, NULL }; static struct attribute_group tsl2583_attribute_group = { .attrs = sysfs_attrs_ctrl, }; /* Use the default register values to identify the Taos device */ static int taos_tsl258x_device(unsigned char *bufp) { return ((bufp[TSL258X_CHIPID] & 0xf0) == 0x90); } static const struct iio_info tsl2583_info = { .attrs = &tsl2583_attribute_group, .driver_module = THIS_MODULE, }; /* * Client probe function - When a valid device is found, the driver's device * data structure is updated, and initialization completes successfully. 
*/ static int taos_probe(struct i2c_client *clientp, const struct i2c_device_id *idp) { int i, ret; unsigned char buf[TSL258X_MAX_DEVICE_REGS]; struct tsl2583_chip *chip; struct iio_dev *indio_dev; if (!i2c_check_functionality(clientp->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&clientp->dev, "taos_probe() - i2c smbus byte data " "functions unsupported\n"); return -EOPNOTSUPP; } indio_dev = iio_device_alloc(sizeof(*chip)); if (indio_dev == NULL) { ret = -ENOMEM; dev_err(&clientp->dev, "iio allocation failed\n"); goto fail1; } chip = iio_priv(indio_dev); chip->client = clientp; i2c_set_clientdata(clientp, indio_dev); mutex_init(&chip->als_mutex); chip->taos_chip_status = TSL258X_CHIP_UNKNOWN; memcpy(chip->taos_config, taos_config, sizeof(chip->taos_config)); for (i = 0; i < TSL258X_MAX_DEVICE_REGS; i++) { ret = i2c_smbus_write_byte(clientp, (TSL258X_CMD_REG | (TSL258X_CNTRL + i))); if (ret < 0) { dev_err(&clientp->dev, "i2c_smbus_write_bytes() to cmd " "reg failed in taos_probe(), err = %d\n", ret); goto fail2; } ret = i2c_smbus_read_byte(clientp); if (ret < 0) { dev_err(&clientp->dev, "i2c_smbus_read_byte from " "reg failed in taos_probe(), err = %d\n", ret); goto fail2; } buf[i] = ret; } if (!taos_tsl258x_device(buf)) { dev_info(&clientp->dev, "i2c device found but does not match " "expected id in taos_probe()\n"); goto fail2; } ret = i2c_smbus_write_byte(clientp, (TSL258X_CMD_REG | TSL258X_CNTRL)); if (ret < 0) { dev_err(&clientp->dev, "i2c_smbus_write_byte() to cmd reg " "failed in taos_probe(), err = %d\n", ret); goto fail2; } indio_dev->info = &tsl2583_info; indio_dev->dev.parent = &clientp->dev; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->name = chip->client->name; ret = iio_device_register(indio_dev); if (ret) { dev_err(&clientp->dev, "iio registration failed\n"); goto fail2; } /* Load up the V2 defaults (these are hard coded defaults for now) */ taos_defaults(chip); /* Make sure the chip is on */ taos_chip_on(indio_dev); dev_info(&clientp->dev, 
"Light sensor found.\n"); return 0; fail1: iio_device_free(indio_dev); fail2: return ret; } #ifdef CONFIG_PM_SLEEP static int taos_suspend(struct device *dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct tsl2583_chip *chip = iio_priv(indio_dev); int ret = 0; mutex_lock(&chip->als_mutex); if (chip->taos_chip_status == TSL258X_CHIP_WORKING) { ret = taos_chip_off(indio_dev); chip->taos_chip_status = TSL258X_CHIP_SUSPENDED; } mutex_unlock(&chip->als_mutex); return ret; } static int taos_resume(struct device *dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct tsl2583_chip *chip = iio_priv(indio_dev); int ret = 0; mutex_lock(&chip->als_mutex); if (chip->taos_chip_status == TSL258X_CHIP_SUSPENDED) ret = taos_chip_on(indio_dev); mutex_unlock(&chip->als_mutex); return ret; } static SIMPLE_DEV_PM_OPS(taos_pm_ops, taos_suspend, taos_resume); #define TAOS_PM_OPS (&taos_pm_ops) #else #define TAOS_PM_OPS NULL #endif static int taos_remove(struct i2c_client *client) { iio_device_unregister(i2c_get_clientdata(client)); iio_device_free(i2c_get_clientdata(client)); return 0; } static struct i2c_device_id taos_idtable[] = { { "tsl2580", 0 }, { "tsl2581", 1 }, { "tsl2583", 2 }, {} }; MODULE_DEVICE_TABLE(i2c, taos_idtable); /* Driver definition */ static struct i2c_driver taos_driver = { .driver = { .name = "tsl2583", .pm = TAOS_PM_OPS, }, .id_table = taos_idtable, .probe = taos_probe, .remove = taos_remove, }; module_i2c_driver(taos_driver); MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>"); MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver"); MODULE_LICENSE("GPL");
gpl-2.0
kirananto/REDMI2_RAZOR
drivers/video/q40fb.c
2395
3398
/* * linux/drivers/video/q40fb.c -- Q40 frame buffer device * * Copyright (C) 2001 * * Richard Zidlicky <rz@linux-m68k.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <asm/uaccess.h> #include <asm/setup.h> #include <asm/q40_master.h> #include <linux/fb.h> #include <linux/module.h> #include <asm/pgtable.h> #define Q40_PHYS_SCREEN_ADDR 0xFE800000 static struct fb_fix_screeninfo q40fb_fix = { .id = "Q40", .smem_len = 1024*1024, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .line_length = 1024*2, .accel = FB_ACCEL_NONE, }; static struct fb_var_screeninfo q40fb_var = { .xres = 1024, .yres = 512, .xres_virtual = 1024, .yres_virtual = 512, .bits_per_pixel = 16, .red = {6, 5, 0}, .green = {11, 5, 0}, .blue = {0, 6, 0}, .activate = FB_ACTIVATE_NOW, .height = 230, .width = 300, .vmode = FB_VMODE_NONINTERLACED, }; static int q40fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { /* * Set a single color register. The values supplied have a 16 bit * magnitude. * Return != 0 for invalid regno. 
*/ if (regno > 255) return 1; red>>=11; green>>=11; blue>>=10; if (regno < 16) { ((u32 *)info->pseudo_palette)[regno] = ((red & 31) <<6) | ((green & 31) << 11) | (blue & 63); } return 0; } static struct fb_ops q40fb_ops = { .owner = THIS_MODULE, .fb_setcolreg = q40fb_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static int q40fb_probe(struct platform_device *dev) { struct fb_info *info; if (!MACH_IS_Q40) return -ENXIO; /* mapped in q40/config.c */ q40fb_fix.smem_start = Q40_PHYS_SCREEN_ADDR; info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev); if (!info) return -ENOMEM; info->var = q40fb_var; info->fix = q40fb_fix; info->fbops = &q40fb_ops; info->flags = FBINFO_DEFAULT; /* not as module for now */ info->pseudo_palette = info->par; info->par = NULL; info->screen_base = (char *) q40fb_fix.smem_start; if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { framebuffer_release(info); return -ENOMEM; } master_outb(3, DISPLAY_CONTROL_REG); if (register_framebuffer(info) < 0) { printk(KERN_ERR "Unable to register Q40 frame buffer\n"); fb_dealloc_cmap(&info->cmap); framebuffer_release(info); return -EINVAL; } printk(KERN_INFO "fb%d: Q40 frame buffer alive and kicking !\n", info->node); return 0; } static struct platform_driver q40fb_driver = { .probe = q40fb_probe, .driver = { .name = "q40fb", }, }; static struct platform_device q40fb_device = { .name = "q40fb", }; int __init q40fb_init(void) { int ret = 0; if (fb_get_options("q40fb", NULL)) return -ENODEV; ret = platform_driver_register(&q40fb_driver); if (!ret) { ret = platform_device_register(&q40fb_device); if (ret) platform_driver_unregister(&q40fb_driver); } return ret; } module_init(q40fb_init); MODULE_LICENSE("GPL");
gpl-2.0
vibhu0009/android_kernel_cyanogen_msm8916
drivers/input/joystick/grip_mp.c
2651
17042
/* * Driver for the Gravis Grip Multiport, a gamepad "hub" that * connects up to four 9-pin digital gamepads/joysticks. * Driver tested on SMP and UP kernel versions 2.4.18-4 and 2.4.18-5. * * Thanks to Chris Gassib for helpful advice. * * Copyright (c) 2002 Brian Bonnlander, Bill Soudan * Copyright (c) 1998-2000 Vojtech Pavlik */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/gameport.h> #include <linux/input.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/jiffies.h> #define DRIVER_DESC "Gravis Grip Multiport driver" MODULE_AUTHOR("Brian Bonnlander"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); #ifdef GRIP_DEBUG #define dbg(format, arg...) printk(KERN_ERR __FILE__ ": " format "\n" , ## arg) #else #define dbg(format, arg...) do {} while (0) #endif #define GRIP_MAX_PORTS 4 /* * Grip multiport state */ struct grip_port { struct input_dev *dev; int mode; int registered; /* individual gamepad states */ int buttons; int xaxes; int yaxes; int dirty; /* has the state been updated? 
*/ }; struct grip_mp { struct gameport *gameport; struct grip_port *port[GRIP_MAX_PORTS]; int reads; int bads; }; /* * Multiport packet interpretation */ #define PACKET_FULL 0x80000000 /* packet is full */ #define PACKET_IO_FAST 0x40000000 /* 3 bits per gameport read */ #define PACKET_IO_SLOW 0x20000000 /* 1 bit per gameport read */ #define PACKET_MP_MORE 0x04000000 /* multiport wants to send more */ #define PACKET_MP_DONE 0x02000000 /* multiport done sending */ /* * Packet status code interpretation */ #define IO_GOT_PACKET 0x0100 /* Got a packet */ #define IO_MODE_FAST 0x0200 /* Used 3 data bits per gameport read */ #define IO_SLOT_CHANGE 0x0800 /* Multiport physical slot status changed */ #define IO_DONE 0x1000 /* Multiport is done sending packets */ #define IO_RETRY 0x4000 /* Try again later to get packet */ #define IO_RESET 0x8000 /* Force multiport to resend all packets */ /* * Gamepad configuration data. Other 9-pin digital joystick devices * may work with the multiport, so this may not be an exhaustive list! * Commodore 64 joystick remains untested. 
*/ #define GRIP_INIT_DELAY 2000 /* 2 ms */ #define GRIP_MODE_NONE 0 #define GRIP_MODE_RESET 1 #define GRIP_MODE_GP 2 #define GRIP_MODE_C64 3 static const int grip_btn_gp[] = { BTN_TR, BTN_TL, BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z, -1 }; static const int grip_btn_c64[] = { BTN_JOYSTICK, -1 }; static const int grip_abs_gp[] = { ABS_X, ABS_Y, -1 }; static const int grip_abs_c64[] = { ABS_X, ABS_Y, -1 }; static const int *grip_abs[] = { NULL, NULL, grip_abs_gp, grip_abs_c64 }; static const int *grip_btn[] = { NULL, NULL, grip_btn_gp, grip_btn_c64 }; static const char *grip_name[] = { NULL, NULL, "Gravis Grip Pad", "Commodore 64 Joystick" }; static const int init_seq[] = { 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1 }; /* Maps multiport directional values to X,Y axis values (each axis encoded in 3 bits) */ static const int axis_map[] = { 5, 9, 1, 5, 6, 10, 2, 6, 4, 8, 0, 4, 5, 9, 1, 5 }; static int register_slot(int i, struct grip_mp *grip); /* * Returns whether an odd or even number of bits are on in pkt. */ static int bit_parity(u32 pkt) { int x = pkt ^ (pkt >> 16); x ^= x >> 8; x ^= x >> 4; x ^= x >> 2; x ^= x >> 1; return x & 1; } /* * Poll gameport; return true if all bits set in 'onbits' are on and * all bits set in 'offbits' are off. */ static inline int poll_until(u8 onbits, u8 offbits, int u_sec, struct gameport* gp, u8 *data) { int i, nloops; nloops = gameport_time(gp, u_sec); for (i = 0; i < nloops; i++) { *data = gameport_read(gp); if ((*data & onbits) == onbits && (~(*data) & offbits) == offbits) return 1; } dbg("gameport timed out after %d microseconds.\n", u_sec); return 0; } /* * Gets a 28-bit packet from the multiport. * * After getting a packet successfully, commands encoded by sendcode may * be sent to the multiport. 
* * The multiport clock value is reflected in gameport bit B4. * * Returns a packet status code indicating whether packet is valid, the transfer * mode, and any error conditions. * * sendflags: current I/O status * sendcode: data to send to the multiport if sendflags is nonzero */ static int mp_io(struct gameport* gameport, int sendflags, int sendcode, u32 *packet) { u8 raw_data; /* raw data from gameport */ u8 data_mask; /* packet data bits from raw_data */ u32 pkt; /* packet temporary storage */ int bits_per_read; /* num packet bits per gameport read */ int portvals = 0; /* used for port value sanity check */ int i; /* Gameport bits B0, B4, B5 should first be off, then B4 should come on. */ *packet = 0; raw_data = gameport_read(gameport); if (raw_data & 1) return IO_RETRY; for (i = 0; i < 64; i++) { raw_data = gameport_read(gameport); portvals |= 1 << ((raw_data >> 4) & 3); /* Demux B4, B5 */ } if (portvals == 1) { /* B4, B5 off */ raw_data = gameport_read(gameport); portvals = raw_data & 0xf0; if (raw_data & 0x31) return IO_RESET; gameport_trigger(gameport); if (!poll_until(0x10, 0, 308, gameport, &raw_data)) return IO_RESET; } else return IO_RETRY; /* Determine packet transfer mode and prepare for packet construction. */ if (raw_data & 0x20) { /* 3 data bits/read */ portvals |= raw_data >> 4; /* Compare B4-B7 before & after trigger */ if (portvals != 0xb) return 0; data_mask = 7; bits_per_read = 3; pkt = (PACKET_FULL | PACKET_IO_FAST) >> 28; } else { /* 1 data bit/read */ data_mask = 1; bits_per_read = 1; pkt = (PACKET_FULL | PACKET_IO_SLOW) >> 28; } /* Construct a packet. Final data bits must be zero. */ while (1) { if (!poll_until(0, 0x10, 77, gameport, &raw_data)) return IO_RESET; raw_data = (raw_data >> 5) & data_mask; if (pkt & PACKET_FULL) break; pkt = (pkt << bits_per_read) | raw_data; if (!poll_until(0x10, 0, 77, gameport, &raw_data)) return IO_RESET; } if (raw_data) return IO_RESET; /* If 3 bits/read used, drop from 30 bits to 28. 
*/ if (bits_per_read == 3) { pkt = (pkt & 0xffff0000) | ((pkt << 1) & 0xffff); pkt = (pkt >> 2) | 0xf0000000; } if (bit_parity(pkt) == 1) return IO_RESET; /* Acknowledge packet receipt */ if (!poll_until(0x30, 0, 77, gameport, &raw_data)) return IO_RESET; raw_data = gameport_read(gameport); if (raw_data & 1) return IO_RESET; gameport_trigger(gameport); if (!poll_until(0, 0x20, 77, gameport, &raw_data)) return IO_RESET; /* Return if we just wanted the packet or multiport wants to send more */ *packet = pkt; if ((sendflags == 0) || ((sendflags & IO_RETRY) && !(pkt & PACKET_MP_DONE))) return IO_GOT_PACKET; if (pkt & PACKET_MP_MORE) return IO_GOT_PACKET | IO_RETRY; /* Multiport is done sending packets and is ready to receive data */ if (!poll_until(0x20, 0, 77, gameport, &raw_data)) return IO_GOT_PACKET | IO_RESET; raw_data = gameport_read(gameport); if (raw_data & 1) return IO_GOT_PACKET | IO_RESET; /* Trigger gameport based on bits in sendcode */ gameport_trigger(gameport); do { if (!poll_until(0x20, 0x10, 116, gameport, &raw_data)) return IO_GOT_PACKET | IO_RESET; if (!poll_until(0x30, 0, 193, gameport, &raw_data)) return IO_GOT_PACKET | IO_RESET; if (raw_data & 1) return IO_GOT_PACKET | IO_RESET; if (sendcode & 1) gameport_trigger(gameport); sendcode >>= 1; } while (sendcode); return IO_GOT_PACKET | IO_MODE_FAST; } /* * Disables and restores interrupts for mp_io(), which does the actual I/O. */ static int multiport_io(struct gameport* gameport, int sendflags, int sendcode, u32 *packet) { int status; unsigned long flags; local_irq_save(flags); status = mp_io(gameport, sendflags, sendcode, packet); local_irq_restore(flags); return status; } /* * Puts multiport into digital mode. Multiport LED turns green. * * Returns true if a valid digital packet was received, false otherwise. 
*/ static int dig_mode_start(struct gameport *gameport, u32 *packet) { int i; int flags, tries = 0, bads = 0; for (i = 0; i < ARRAY_SIZE(init_seq); i++) { /* Send magic sequence */ if (init_seq[i]) gameport_trigger(gameport); udelay(GRIP_INIT_DELAY); } for (i = 0; i < 16; i++) /* Wait for multiport to settle */ udelay(GRIP_INIT_DELAY); while (tries < 64 && bads < 8) { /* Reset multiport and try getting a packet */ flags = multiport_io(gameport, IO_RESET, 0x27, packet); if (flags & IO_MODE_FAST) return 1; if (flags & IO_RETRY) tries++; else bads++; } return 0; } /* * Packet structure: B0-B15 => gamepad state * B16-B20 => gamepad device type * B21-B24 => multiport slot index (1-4) * * Known device types: 0x1f (grip pad), 0x0 (no device). Others may exist. * * Returns the packet status. */ static int get_and_decode_packet(struct grip_mp *grip, int flags) { struct grip_port *port; u32 packet; int joytype = 0; int slot; /* Get a packet and check for validity */ flags &= IO_RESET | IO_RETRY; flags = multiport_io(grip->gameport, flags, 0, &packet); grip->reads++; if (packet & PACKET_MP_DONE) flags |= IO_DONE; if (flags && !(flags & IO_GOT_PACKET)) { grip->bads++; return flags; } /* Ignore non-gamepad packets, e.g. multiport hardware version */ slot = ((packet >> 21) & 0xf) - 1; if ((slot < 0) || (slot > 3)) return flags; port = grip->port[slot]; /* * Handle "reset" packets, which occur at startup, and when gamepads * are removed or plugged in. May contain configuration of a new gamepad. 
*/ joytype = (packet >> 16) & 0x1f; if (!joytype) { if (port->registered) { printk(KERN_INFO "grip_mp: removing %s, slot %d\n", grip_name[port->mode], slot); input_unregister_device(port->dev); port->registered = 0; } dbg("Reset: grip multiport slot %d\n", slot); port->mode = GRIP_MODE_RESET; flags |= IO_SLOT_CHANGE; return flags; } /* Interpret a grip pad packet */ if (joytype == 0x1f) { int dir = (packet >> 8) & 0xf; /* eight way directional value */ port->buttons = (~packet) & 0xff; port->yaxes = ((axis_map[dir] >> 2) & 3) - 1; port->xaxes = (axis_map[dir] & 3) - 1; port->dirty = 1; if (port->mode == GRIP_MODE_RESET) flags |= IO_SLOT_CHANGE; port->mode = GRIP_MODE_GP; if (!port->registered) { dbg("New Grip pad in multiport slot %d.\n", slot); if (register_slot(slot, grip)) { port->mode = GRIP_MODE_RESET; port->dirty = 0; } } return flags; } /* Handle non-grip device codes. For now, just print diagnostics. */ { static int strange_code = 0; if (strange_code != joytype) { printk(KERN_INFO "Possible non-grip pad/joystick detected.\n"); printk(KERN_INFO "Got joy type 0x%x and packet 0x%x.\n", joytype, packet); strange_code = joytype; } } return flags; } /* * Returns true if all multiport slot states appear valid. */ static int slots_valid(struct grip_mp *grip) { int flags, slot, invalid = 0, active = 0; flags = get_and_decode_packet(grip, 0); if (!(flags & IO_GOT_PACKET)) return 0; for (slot = 0; slot < 4; slot++) { if (grip->port[slot]->mode == GRIP_MODE_RESET) invalid = 1; if (grip->port[slot]->mode != GRIP_MODE_NONE) active = 1; } /* Return true if no active slot but multiport sent all its data */ if (!active) return (flags & IO_DONE) ? 1 : 0; /* Return false if invalid device code received */ return invalid ? 0 : 1; } /* * Returns whether the multiport was placed into digital mode and * able to communicate its state successfully. 
*/ static int multiport_init(struct grip_mp *grip) { int dig_mode, initialized = 0, tries = 0; u32 packet; dig_mode = dig_mode_start(grip->gameport, &packet); while (!dig_mode && tries < 4) { dig_mode = dig_mode_start(grip->gameport, &packet); tries++; } if (dig_mode) dbg("multiport_init(): digital mode activated.\n"); else { dbg("multiport_init(): unable to activate digital mode.\n"); return 0; } /* Get packets, store multiport state, and check state's validity */ for (tries = 0; tries < 4096; tries++) { if (slots_valid(grip)) { initialized = 1; break; } } dbg("multiport_init(): initialized == %d\n", initialized); return initialized; } /* * Reports joystick state to the linux input layer. */ static void report_slot(struct grip_mp *grip, int slot) { struct grip_port *port = grip->port[slot]; int i; /* Store button states with linux input driver */ for (i = 0; i < 8; i++) input_report_key(port->dev, grip_btn_gp[i], (port->buttons >> i) & 1); /* Store axis states with linux driver */ input_report_abs(port->dev, ABS_X, port->xaxes); input_report_abs(port->dev, ABS_Y, port->yaxes); /* Tell the receiver of the events to process them */ input_sync(port->dev); port->dirty = 0; } /* * Get the multiport state. 
*/ static void grip_poll(struct gameport *gameport) { struct grip_mp *grip = gameport_get_drvdata(gameport); int i, npkts, flags; for (npkts = 0; npkts < 4; npkts++) { flags = IO_RETRY; for (i = 0; i < 32; i++) { flags = get_and_decode_packet(grip, flags); if ((flags & IO_GOT_PACKET) || !(flags & IO_RETRY)) break; } if (flags & IO_DONE) break; } for (i = 0; i < 4; i++) if (grip->port[i]->dirty) report_slot(grip, i); } /* * Called when a joystick device file is opened */ static int grip_open(struct input_dev *dev) { struct grip_mp *grip = input_get_drvdata(dev); gameport_start_polling(grip->gameport); return 0; } /* * Called when a joystick device file is closed */ static void grip_close(struct input_dev *dev) { struct grip_mp *grip = input_get_drvdata(dev); gameport_stop_polling(grip->gameport); } /* * Tell the linux input layer about a newly plugged-in gamepad. */ static int register_slot(int slot, struct grip_mp *grip) { struct grip_port *port = grip->port[slot]; struct input_dev *input_dev; int j, t; int err; port->dev = input_dev = input_allocate_device(); if (!input_dev) return -ENOMEM; input_dev->name = grip_name[port->mode]; input_dev->id.bustype = BUS_GAMEPORT; input_dev->id.vendor = GAMEPORT_ID_VENDOR_GRAVIS; input_dev->id.product = 0x0100 + port->mode; input_dev->id.version = 0x0100; input_dev->dev.parent = &grip->gameport->dev; input_set_drvdata(input_dev, grip); input_dev->open = grip_open; input_dev->close = grip_close; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); for (j = 0; (t = grip_abs[port->mode][j]) >= 0; j++) input_set_abs_params(input_dev, t, -1, 1, 0, 0); for (j = 0; (t = grip_btn[port->mode][j]) >= 0; j++) if (t > 0) set_bit(t, input_dev->keybit); err = input_register_device(port->dev); if (err) { input_free_device(port->dev); return err; } port->registered = 1; if (port->dirty) /* report initial state, if any */ report_slot(grip, slot); return 0; } static int grip_connect(struct gameport *gameport, struct gameport_driver *drv) 
{ struct grip_mp *grip; int err; if (!(grip = kzalloc(sizeof(struct grip_mp), GFP_KERNEL))) return -ENOMEM; grip->gameport = gameport; gameport_set_drvdata(gameport, grip); err = gameport_open(gameport, drv, GAMEPORT_MODE_RAW); if (err) goto fail1; gameport_set_poll_handler(gameport, grip_poll); gameport_set_poll_interval(gameport, 20); if (!multiport_init(grip)) { err = -ENODEV; goto fail2; } if (!grip->port[0]->mode && !grip->port[1]->mode && !grip->port[2]->mode && !grip->port[3]->mode) { /* nothing plugged in */ err = -ENODEV; goto fail2; } return 0; fail2: gameport_close(gameport); fail1: gameport_set_drvdata(gameport, NULL); kfree(grip); return err; } static void grip_disconnect(struct gameport *gameport) { struct grip_mp *grip = gameport_get_drvdata(gameport); int i; for (i = 0; i < 4; i++) if (grip->port[i]->registered) input_unregister_device(grip->port[i]->dev); gameport_close(gameport); gameport_set_drvdata(gameport, NULL); kfree(grip); } static struct gameport_driver grip_drv = { .driver = { .name = "grip_mp", }, .description = DRIVER_DESC, .connect = grip_connect, .disconnect = grip_disconnect, }; module_gameport_driver(grip_drv);
gpl-2.0
yashrastogi/Enigma_Kernel_MSM7x27A_ICS
sound/core/seq/seq_dummy.c
2907
6755
/*
 * ALSA sequencer MIDI-through client
 * Copyright (c) 1999-2000 by Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include "seq_clientmgr.h"
#include <sound/initval.h>
#include <sound/asoundef.h>

/*
  Sequencer MIDI-through client

  This gives a simple midi-through client.  All the normal input events
  are redirected to output port immediately.
  The routing can be done via aconnect program in alsa-utils.

  Each client has a static client number 62 (= SNDRV_SEQ_CLIENT_DUMMY).
  If you want to auto-load this module, you may add the following alias
  in your /etc/conf.modules file.

	alias snd-seq-client-62  snd-seq-dummy

  The module is loaded on demand for client 62, or /proc/asound/seq/
  is accessed.  If you don't need this module to be loaded, alias
  snd-seq-client-62 as "off".  This will help modprobe.

  The number of ports to be created can be specified via the module
  parameter "ports".  For example, to create four ports, add the
  following option in /etc/modprobe.conf:

	option snd-seq-dummy ports=4

  The model option "duplex=1" enables duplex operation to the port.
  In duplex mode, a pair of ports are created instead of single port,
  and events are tunneled between pair-ports.  For example, input to
  port A is sent to output port of another port B and vice versa.
  In duplex mode, each port has DUPLEX capability.
 */

MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA sequencer MIDI-through client");
MODULE_LICENSE("GPL");
MODULE_ALIAS("snd-seq-client-" __stringify(SNDRV_SEQ_CLIENT_DUMMY));

static int ports = 1;
/*
 * FIX: the variable backing module_param(..., bool, ...) must itself be
 * declared bool; it was previously "int", which is rejected by the strict
 * bool module-param type checking in modern kernels (and silently type-
 * confused on older ones).  User-visible behavior of the parameter is
 * unchanged.
 */
static bool duplex;

module_param(ports, int, 0444);
MODULE_PARM_DESC(ports, "number of ports to be created");
module_param(duplex, bool, 0444);
MODULE_PARM_DESC(duplex, "create DUPLEX ports");

/* Per-port bookkeeping record, attached as port private data. */
struct snd_seq_dummy_port {
	int client;	/* client id of the owning kernel client */
	int port;	/* this port's id */
	int duplex;	/* non-zero when this port is half of a duplex pair */
	int connect;	/* port id of the paired port (duplex mode only) */
};

static int my_client = -1;	/* kernel client id; -1 while unregistered */

/*
 * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
 * to subscribers.
 * Note: this callback is called only after all subscribers are removed.
 */
static int
dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
{
	struct snd_seq_dummy_port *p;
	int i;
	struct snd_seq_event ev;

	p = private_data;
	memset(&ev, 0, sizeof(ev));
	/* in duplex mode the "mirror" port is the one subscribers hear */
	if (p->duplex)
		ev.source.port = p->connect;
	else
		ev.source.port = p->port;
	ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
	ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
	/* quiesce all 16 MIDI channels */
	for (i = 0; i < 16; i++) {
		ev.data.control.channel = i;
		ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
		snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
		ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
		snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
	}
	return 0;
}

/*
 * event input callback - just redirect events to subscribers
 */
static int
dummy_input(struct snd_seq_event *ev, int direct, void *private_data,
	    int atomic, int hop)
{
	struct snd_seq_dummy_port *p;
	struct snd_seq_event tmpev;

	p = private_data;
	if (ev->source.client == SNDRV_SEQ_CLIENT_SYSTEM ||
	    ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR)
		return 0; /* ignore system messages */
	tmpev = *ev;
	if (p->duplex)
		tmpev.source.port = p->connect;
	else
		tmpev.source.port = p->port;
	tmpev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
	return snd_seq_kernel_client_dispatch(p->client, &tmpev, atomic, hop);
}

/*
 * free_private callback - release the per-port record
 */
static void
dummy_free(void *private_data)
{
	kfree(private_data);
}

/*
 * create a port
 * @idx: port pair index (used in the port name)
 * @type: 0 for the 'A' side, non-zero for the 'B' side (duplex only)
 * Returns the new port record, or NULL on allocation/ioctl failure.
 */
static struct snd_seq_dummy_port __init *
create_port(int idx, int type)
{
	struct snd_seq_port_info pinfo;
	struct snd_seq_port_callback pcb;
	struct snd_seq_dummy_port *rec;

	if ((rec = kzalloc(sizeof(*rec), GFP_KERNEL)) == NULL)
		return NULL;

	rec->client = my_client;
	rec->duplex = duplex;
	rec->connect = 0;
	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.addr.client = my_client;
	if (duplex)
		sprintf(pinfo.name, "Midi Through Port-%d:%c", idx,
			(type ? 'B' : 'A'));
	else
		sprintf(pinfo.name, "Midi Through Port-%d", idx);
	pinfo.capability = SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ;
	pinfo.capability |= SNDRV_SEQ_PORT_CAP_WRITE | SNDRV_SEQ_PORT_CAP_SUBS_WRITE;
	if (duplex)
		pinfo.capability |= SNDRV_SEQ_PORT_CAP_DUPLEX;
	pinfo.type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC
		| SNDRV_SEQ_PORT_TYPE_SOFTWARE
		| SNDRV_SEQ_PORT_TYPE_PORT;
	memset(&pcb, 0, sizeof(pcb));
	pcb.owner = THIS_MODULE;
	pcb.unuse = dummy_unuse;
	pcb.event_input = dummy_input;
	pcb.private_free = dummy_free;
	pcb.private_data = rec;
	pinfo.kernel = &pcb;
	if (snd_seq_kernel_client_ctl(my_client, SNDRV_SEQ_IOCTL_CREATE_PORT,
				      &pinfo) < 0) {
		kfree(rec);
		return NULL;
	}
	rec->port = pinfo.addr.port;
	return rec;
}

/*
 * register client and create ports
 * Returns 0 on success or a negative error code; on failure the kernel
 * client (if created) is torn down again.
 */
static int __init
register_client(void)
{
	struct snd_seq_dummy_port *rec1, *rec2;
	int i;

	if (ports < 1) {
		snd_printk(KERN_ERR "invalid number of ports %d\n", ports);
		return -EINVAL;
	}

	/* create client */
	my_client = snd_seq_create_kernel_client(NULL, SNDRV_SEQ_CLIENT_DUMMY,
						 "Midi Through");
	if (my_client < 0)
		return my_client;

	/* create ports */
	for (i = 0; i < ports; i++) {
		rec1 = create_port(i, 0);
		if (rec1 == NULL) {
			snd_seq_delete_kernel_client(my_client);
			return -ENOMEM;
		}
		if (duplex) {
			rec2 = create_port(i, 1);
			if (rec2 == NULL) {
				snd_seq_delete_kernel_client(my_client);
				return -ENOMEM;
			}
			/* cross-link the pair so events tunnel A <-> B */
			rec1->connect = rec2->port;
			rec2->connect = rec1->port;
		}
	}

	return 0;
}

/*
 * delete client if exists
 */
static void __exit
delete_client(void)
{
	if (my_client >= 0)
		snd_seq_delete_kernel_client(my_client);
}

/*
 *  Init part
 */

static int __init alsa_seq_dummy_init(void)
{
	int err;

	/* hold the autoload lock so demand-loading cannot race with us */
	snd_seq_autoload_lock();
	err = register_client();
	snd_seq_autoload_unlock();
	return err;
}

static void __exit alsa_seq_dummy_exit(void)
{
	delete_client();
}

module_init(alsa_seq_dummy_init)
module_exit(alsa_seq_dummy_exit)
gpl-2.0
upndwn4par/kernel_hammerhead_lollipop
net/ipv4/inet_timewait_sock.c
4699
14851
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code orinally in	TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>


/**
 *	inet_twsk_unhash - unhash a timewait socket from established hash
 *	@tw: timewait socket
 *
 *	unhash a timewait socket from established hash, if hashed.
 *	ehash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
	if (hlist_nulls_unhashed(&tw->tw_node))
		return 0;

	hlist_nulls_del_rcu(&tw->tw_node);
	sk_nulls_node_init(&tw->tw_node);
	/*
	 * We cannot call inet_twsk_put() ourself under lock,
	 * caller must call it for us.
	 */
	return 1;
}

/**
 *	inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 *	@tw: timewait socket
 *	@hashinfo: hashinfo pointer
 *
 *	unhash a timewait socket from bind hash, if hashed.
 *	bind hash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			  struct inet_hashinfo *hashinfo)
{
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return 0;

	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	/* destroys the bucket only if it became empty */
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	/*
	 * We cannot call inet_twsk_put() ourself under lock,
	 * caller must call it for us.
	 */
	return 1;
}

/* Must be called with locally disabled BHs. */
/*
 * Remove @tw from both the established and the bind hashes, then drop
 * the references those hash links held.  The refcount drops are deferred
 * until after both locks are released (see the helpers above).
 */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	int refcnt;
	/* Unlink from established hashes. */
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	spin_lock(lock);
	refcnt = inet_twsk_unhash(tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];

	spin_lock(&bhead->lock);
	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	/* drop one reference per hash link we removed above */
	while (refcnt) {
		inet_twsk_put(tw);
		refcnt--;
	}
}

/* Final teardown once the refcount hits zero: run the protocol destructor,
 * release the netns reference, and return the object to its slab cache. */
static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;
	twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
	release_net(twsk_net(tw));
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

/* Drop one reference; frees the timewait socket on the last put. */
void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note, that any socket with inet->num != 0 MUST be bound in
	   binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/*
	 * Step 2: Hash TW into TIMEWAIT chain.
	 * Should be done before removing sk from established chain
	 * because readers are lockless and search established first.
	 */
	inet_twsk_add_node_rcu(tw, &ehead->twchain);

	/* Step 3: Remove SK from established hash. */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/*
	 * Notes :
	 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
	 * - We add one reference for the bhash link
	 * - We add one reference for the ehash link
	 * - We want this refcnt update done before allowing other
	 *   threads to find this tw in ehash chain.
	 */
	atomic_add(1 + 1 + 1, &tw->tw_refcnt);

	spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

/* Allocate a timewait socket and copy the identity (addresses, ports,
 * netns, ...) from @sk.  Returned with tw_refcnt == 0 on purpose; see the
 * comment below and __inet_twsk_hashdance().  May return NULL (GFP_ATOMIC).
 */
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		kmemcheck_annotate_bitfield(tw, flags);

		/* Give us an identity. */
		tw->tw_daddr	    = inet->inet_daddr;
		tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_tos	    = inet->tos;
		tw->tw_num	    = inet->inet_num;
		tw->tw_state	    = TCP_TIME_WAIT;
		tw->tw_substate	    = state;
		tw->tw_sport	    = inet->inet_sport;
		tw->tw_dport	    = inet->inet_dport;
		tw->tw_family	    = sk->sk_family;
		tw->tw_reuse	    = sk->sk_reuse;
		tw->tw_hash	    = sk->sk_hash;
		tw->tw_ipv6only	    = 0;
		tw->tw_transparent  = inet->transparent;
		tw->tw_prot	    = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a non null value before everything is setup for this
		 * timewait socket.
		 */
		atomic_set(&tw->tw_refcnt, 0);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded.  */
/* Kill up to INET_TWDR_TWKILL_QUOTA timewait sockets queued on death-row
 * cell @slot.  Called with twdr->death_lock held; the lock is dropped
 * around each kill, hence the rescan-from-head loop below. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to previous version where lock
	 * was released after detaching chain. It was racy,
	 * because tw buckets are scheduled in not serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}

/* Slow ("death row") timer handler: reaps the current slot, deferring to
 * the workqueue (inet_twdr_twkill_work) when the per-run quota is hit. */
void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int unsigned need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
		twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	}
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);

/* Workqueue continuation of inet_twdr_hangman(): drains every slot whose
 * bit is set in thread_slots, yielding the CPU between quota batches. */
void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);

/* Queue @tw on the death row: short timeouts go to the fine-grained
 * "recycle" wheel, long ones to the coarse slow-timer cells. */
void inet_twsk_schedule(struct inet_timewait_sock *tw,
		       struct inet_timewait_death_row *twdr,
		       const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent
	 * retransmitted FINs (or previous seqments) are lost (probability
	 * of such event is p^(N+1), where p is probability to lose single
	 * packet and time to detect the loss is about RTO*(2^N - 1) with
	 * exponential backoff). Normal timewait length is calculated so,
	 * that we waited at least for one retransmitted FIN (maximal RTO
	 * is 120sec). [ BTW Linux. following BSD, violates this requirement
	 * waiting only for 60sec, we should wait at least for 240 secs.
	 * Well, 240 consumes too much of resources 8)                  ]
	 * This interval is not reduced to catch old duplicate and
	 * responces to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if peer understands PAWS, we
	 * kill tw bucket after 3.5*RTO (it is important that this number
	 * is greater than TS tick!) and detect old duplicates with help
	 * of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			/* recycle wheel idle: start it at this slot */
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);

/* Fine-grained ("recycle") timer handler: sweeps expired wheel slots and
 * re-arms the timer at the earliest still-pending slot. */
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);

/* Kill every timewait socket of @family whose netns is dying (count == 0),
 * walking the established table locklessly under RCU. */
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int slot;

	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->twchain) {
			tw = inet_twsk(sk);
			if ((tw->tw_family != family) ||
				atomic_read(&twsk_net(tw)->count))
				continue;

			if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
				continue;

			/* re-check after taking the reference: the object may
			 * have been recycled for another connection meanwhile */
			if (unlikely((tw->tw_family != family) ||
				     atomic_read(&twsk_net(tw)->count))) {
				inet_twsk_put(tw);
				goto restart;
			}

			rcu_read_unlock();
			local_bh_disable();
			inet_twsk_deschedule(tw, twdr);
			local_bh_enable();
			inet_twsk_put(tw);
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart lookup.
		 * We probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);
gpl-2.0
FrancescoCG/Crazy-Kernel1-TW-Kernel
drivers/cpufreq/exynos5250-cpufreq.c
4699
9390
/*
 * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS5250 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>

#include <mach/map.h>
#include <mach/regs-clock.h>
#include <mach/cpufreq.h>

#define CPUFREQ_LEVEL_END	(L15 + 1)

static int max_support_idx;
static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;

/* Pre-computed register images for one OPP (operating point). */
struct cpufreq_clkdiv {
	unsigned int	index;		/* frequency level (L0..L15) */
	unsigned int	clkdiv;		/* value for EXYNOS5_CLKDIV_CPU0 */
	unsigned int	clkdiv1;	/* value for EXYNOS5_CLKDIV_CPU1 */
};

static unsigned int exynos5250_volt_table[CPUFREQ_LEVEL_END];

static struct cpufreq_frequency_table exynos5250_freq_table[] = {
	{L0, 1700 * 1000},
	{L1, 1600 * 1000},
	{L2, 1500 * 1000},
	{L3, 1400 * 1000},
	{L4, 1300 * 1000},
	{L5, 1200 * 1000},
	{L6, 1100 * 1000},
	{L7, 1000 * 1000},
	{L8, 900 * 1000},
	{L9, 800 * 1000},
	{L10, 700 * 1000},
	{L11, 600 * 1000},
	{L12, 500 * 1000},
	{L13, 400 * 1000},
	{L14, 300 * 1000},
	{L15, 200 * 1000},
	{0, CPUFREQ_TABLE_END},
};

static struct cpufreq_clkdiv exynos5250_clkdiv_table[CPUFREQ_LEVEL_END];

static unsigned int clkdiv_cpu0_5250[CPUFREQ_LEVEL_END][8] = {
	/*
	 * Clock divider value for following
	 * { ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2 }
	 */
	{ 0, 3, 7, 7, 6, 1, 3, 0 },	/* 1700 MHz - N/A */
	{ 0, 3, 7, 7, 6, 1, 3, 0 },	/* 1600 MHz - N/A */
	{ 0, 3, 7, 7, 5, 1, 3, 0 },	/* 1500 MHz - N/A */
	{ 0, 3, 7, 7, 6, 1, 3, 0 },	/* 1400 MHz */
	{ 0, 3, 7, 7, 6, 1, 3, 0 },	/* 1300 MHz */
	{ 0, 3, 7, 7, 5, 1, 3, 0 },	/* 1200 MHz */
	{ 0, 2, 7, 7, 5, 1, 2, 0 },	/* 1100 MHz */
	{ 0, 2, 7, 7, 4, 1, 2, 0 },	/* 1000 MHz */
	{ 0, 2, 7, 7, 4, 1, 2, 0 },	/* 900 MHz */
	{ 0, 2, 7, 7, 3, 1, 1, 0 },	/* 800 MHz */
	{ 0, 1, 7, 7, 3, 1, 1, 0 },	/* 700 MHz */
	{ 0, 1, 7, 7, 2, 1, 1, 0 },	/* 600 MHz */
	{ 0, 1, 7, 7, 2, 1, 1, 0 },	/* 500 MHz */
	{ 0, 1, 7, 7, 1, 1, 1, 0 },	/* 400 MHz */
	{ 0, 1, 7, 7, 1, 1, 1, 0 },	/* 300 MHz */
	{ 0, 1, 7, 7, 1, 1, 1, 0 },	/* 200 MHz */
};

static unsigned int clkdiv_cpu1_5250[CPUFREQ_LEVEL_END][2] = {
	/* Clock divider value for following
	 * { COPY, HPM }
	 */
	{ 0, 2 },	/* 1700 MHz - N/A */
	{ 0, 2 },	/* 1600 MHz - N/A */
	{ 0, 2 },	/* 1500 MHz - N/A */
	{ 0, 2 },	/* 1400 MHz */
	{ 0, 2 },	/* 1300 MHz */
	{ 0, 2 },	/* 1200 MHz */
	{ 0, 2 },	/* 1100 MHz */
	{ 0, 2 },	/* 1000 MHz */
	{ 0, 2 },	/* 900 MHz */
	{ 0, 2 },	/* 800 MHz */
	{ 0, 2 },	/* 700 MHz */
	{ 0, 2 },	/* 600 MHz */
	{ 0, 2 },	/* 500 MHz */
	{ 0, 2 },	/* 400 MHz */
	{ 0, 2 },	/* 300 MHz */
	{ 0, 2 },	/* 200 MHz */
};

/* APLL M/P/S values packed as (M << 16) | (P << 8) | S, per level. */
static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = {
	(0),	/* 1700 MHz - N/A */
	(0),	/* 1600 MHz - N/A */
	(0),	/* 1500 MHz - N/A */
	(0),	/* 1400 MHz */
	((325 << 16) | (6 << 8) | 0),	/* 1300 MHz */
	((200 << 16) | (4 << 8) | 0),	/* 1200 MHz */
	((275 << 16) | (6 << 8) | 0),	/* 1100 MHz */
	((125 << 16) | (3 << 8) | 0),	/* 1000 MHz */
	((150 << 16) | (4 << 8) | 0),	/* 900 MHz */
	((100 << 16) | (3 << 8) | 0),	/* 800 MHz */
	((175 << 16) | (3 << 8) | 1),	/* 700 MHz */
	((200 << 16) | (4 << 8) | 1),	/* 600 MHz */
	((125 << 16) | (3 << 8) | 1),	/* 500 MHz */
	((100 << 16) | (3 << 8) | 1),	/* 400 MHz */
	((200 << 16) | (4 << 8) | 2),	/* 300 MHz */
	((100 << 16) | (3 << 8) | 2),	/* 200 MHz */
};

/* ASV group voltage table (uV per level) */
static const unsigned int asv_voltage_5250[CPUFREQ_LEVEL_END] = {
	0, 0, 0, 0, 0, 0, 0,	/* 1700 MHz ~ 1100 MHz Not supported */
	1175000, 1125000, 1075000, 1050000, 1000000,
	950000, 925000, 925000, 900000
};

/* Program the CPU0/CPU1 clock dividers for @div_index and busy-wait on
 * the status registers until the new ratios have latched. */
static void set_clkdiv(unsigned int div_index)
{
	unsigned int tmp;

	/* Change Divider - CPU0 */

	tmp = exynos5250_clkdiv_table[div_index].clkdiv;

	__raw_writel(tmp, EXYNOS5_CLKDIV_CPU0);

	while (__raw_readl(EXYNOS5_CLKDIV_STATCPU0) & 0x11111111)
		cpu_relax();

	/* Change Divider - CPU1 */
	tmp = exynos5250_clkdiv_table[div_index].clkdiv1;

	__raw_writel(tmp, EXYNOS5_CLKDIV_CPU1);

	while (__raw_readl(EXYNOS5_CLKDIV_STATCPU1) & 0x11)
		cpu_relax();
}

/* Re-lock the APLL at the rate for @new_index.  The core is temporarily
 * parented to MPLL while the APLL re-locks; the steps below must run in
 * this exact order (hardware-mandated sequence). */
static void set_apll(unsigned int new_index,
		     unsigned int old_index)
{
	unsigned int tmp, pdiv;

	/* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
	clk_set_parent(moutcore, mout_mpll);

	do {
		cpu_relax();
		tmp = (__raw_readl(EXYNOS5_CLKMUX_STATCPU) >> 16);
		tmp &= 0x7;
	} while (tmp != 0x2);

	/* 2. Set APLL Lock time */
	pdiv = ((exynos5_apll_pms_table[new_index] >> 8) & 0x3f);

	__raw_writel((pdiv * 250), EXYNOS5_APLL_LOCK);

	/* 3. Change PLL PMS values */
	tmp = __raw_readl(EXYNOS5_APLL_CON0);
	tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
	tmp |= exynos5_apll_pms_table[new_index];
	__raw_writel(tmp, EXYNOS5_APLL_CON0);

	/* 4. wait_lock_time */
	do {
		cpu_relax();
		tmp = __raw_readl(EXYNOS5_APLL_CON0);
	} while (!(tmp & (0x1 << 29)));

	/* 5. MUX_CORE_SEL = APLL */
	clk_set_parent(moutcore, mout_apll);

	do {
		cpu_relax();
		tmp = __raw_readl(EXYNOS5_CLKMUX_STATCPU);
		tmp &= (0x7 << 16);
	} while (tmp != (0x1 << 16));

}

/* Return true when switching between @old_index and @new_index requires a
 * full APLL re-lock (M or P differ), false when only S changes. */
bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index)
{
	unsigned int old_pm = (exynos5_apll_pms_table[old_index] >> 8);
	unsigned int new_pm = (exynos5_apll_pms_table[new_index] >> 8);

	return (old_pm == new_pm) ? 0 : 1;
}

/* Transition from @old_index to @new_index.  Dividers are changed before
 * the PLL when speeding up and after it when slowing down, so the core is
 * never clocked out of spec during the switch. */
static void exynos5250_set_frequency(unsigned int old_index,
				     unsigned int new_index)
{
	unsigned int tmp;

	if (old_index > new_index) {
		if (!exynos5250_pms_change(old_index, new_index)) {
			/* 1. Change the system clock divider values */
			set_clkdiv(new_index);
			/* 2. Change just s value in apll m,p,s value */
			tmp = __raw_readl(EXYNOS5_APLL_CON0);
			tmp &= ~(0x7 << 0);
			tmp |= (exynos5_apll_pms_table[new_index] & 0x7);
			__raw_writel(tmp, EXYNOS5_APLL_CON0);

		} else {
			/* Clock Configuration Procedure */
			/* 1. Change the system clock divider values */
			set_clkdiv(new_index);
			/* 2. Change the apll m,p,s value */
			set_apll(new_index, old_index);
		}
	} else if (old_index < new_index) {
		if (!exynos5250_pms_change(old_index, new_index)) {
			/* 1. Change just s value in apll m,p,s value */
			tmp = __raw_readl(EXYNOS5_APLL_CON0);
			tmp &= ~(0x7 << 0);
			tmp |= (exynos5_apll_pms_table[new_index] & 0x7);
			__raw_writel(tmp, EXYNOS5_APLL_CON0);
			/* 2. Change the system clock divider values */
			set_clkdiv(new_index);
		} else {
			/* Clock Configuration Procedure */
			/* 1. Change the apll m,p,s value */
			set_apll(new_index, old_index);
			/* 2. Change the system clock divider values */
			set_clkdiv(new_index);
		}
	}
}

/* Populate the voltage table from the ASV data and mark the levels this
 * SoC cannot reach (L0..L6, i.e. > 1000 MHz) as invalid. */
static void __init set_volt_table(void)
{
	unsigned int i;

	exynos5250_freq_table[L0].frequency = CPUFREQ_ENTRY_INVALID;
	exynos5250_freq_table[L1].frequency = CPUFREQ_ENTRY_INVALID;
	exynos5250_freq_table[L2].frequency = CPUFREQ_ENTRY_INVALID;
	exynos5250_freq_table[L3].frequency = CPUFREQ_ENTRY_INVALID;
	exynos5250_freq_table[L4].frequency = CPUFREQ_ENTRY_INVALID;
	exynos5250_freq_table[L5].frequency = CPUFREQ_ENTRY_INVALID;
	exynos5250_freq_table[L6].frequency = CPUFREQ_ENTRY_INVALID;

	max_support_idx = L7;

	for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++)
		exynos5250_volt_table[i] = asv_voltage_5250[i];
}

/* Driver entry point: acquire the clocks, pre-compute the divider register
 * images for every level, and fill in the generic exynos_dvfs_info
 * callbacks/tables.  Returns 0 or a negative error code. */
int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
{
	int i;
	unsigned int tmp;
	unsigned long rate;

	set_volt_table();

	cpu_clk = clk_get(NULL, "armclk");
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	moutcore = clk_get(NULL, "mout_cpu");
	if (IS_ERR(moutcore))
		goto err_moutcore;

	mout_mpll = clk_get(NULL, "mout_mpll");
	if (IS_ERR(mout_mpll))
		goto err_mout_mpll;

	rate = clk_get_rate(mout_mpll) / 1000;

	mout_apll = clk_get(NULL, "mout_apll");
	if (IS_ERR(mout_apll))
		goto err_mout_apll;

	for (i = L0; i < CPUFREQ_LEVEL_END; i++) {

		exynos5250_clkdiv_table[i].index = i;

		tmp = __raw_readl(EXYNOS5_CLKDIV_CPU0);

		tmp &= ~((0x7 << 0) | (0x7 << 4) | (0x7 << 8) | (0x7 << 12) |
			(0x7 << 16) | (0x7 << 20) | (0x7 << 24) | (0x7 << 28));

		tmp |= ((clkdiv_cpu0_5250[i][0] << 0) |
			(clkdiv_cpu0_5250[i][1] << 4) |
			(clkdiv_cpu0_5250[i][2] << 8) |
			(clkdiv_cpu0_5250[i][3] << 12) |
			(clkdiv_cpu0_5250[i][4] << 16) |
			(clkdiv_cpu0_5250[i][5] << 20) |
			(clkdiv_cpu0_5250[i][6] << 24) |
			(clkdiv_cpu0_5250[i][7] << 28));

		exynos5250_clkdiv_table[i].clkdiv = tmp;

		tmp = __raw_readl(EXYNOS5_CLKDIV_CPU1);

		tmp &= ~((0x7 << 0) | (0x7 << 4));

		tmp |= ((clkdiv_cpu1_5250[i][0] << 0) |
			(clkdiv_cpu1_5250[i][1] << 4));

		exynos5250_clkdiv_table[i].clkdiv1 = tmp;
	}

	info->mpll_freq_khz = rate;
	/* 1000Mhz */
	info->pm_lock_idx = L7;
	/* 800Mhz */
	info->pll_safe_idx = L9;
	info->max_support_idx = max_support_idx;
	info->min_support_idx = min_support_idx;
	info->cpu_clk = cpu_clk;
	info->volt_table = exynos5250_volt_table;
	info->freq_table = exynos5250_freq_table;
	info->set_freq = exynos5250_set_frequency;
	info->need_apll_change = exynos5250_pms_change;

	return 0;

err_mout_apll:
	clk_put(mout_mpll);
err_mout_mpll:
	clk_put(moutcore);
err_moutcore:
	clk_put(cpu_clk);

	pr_err("%s: failed initialization\n", __func__);
	return -EINVAL;
}
EXPORT_SYMBOL(exynos5250_cpufreq_init);
gpl-2.0
javelinanddart/android_kernel_sense_ville
drivers/net/wireless/ath/ath6kl/hif.c
4955
19048
/* * Copyright (c) 2007-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "hif.h" #include <linux/export.h> #include "core.h" #include "target.h" #include "hif-ops.h" #include "debug.h" #define MAILBOX_FOR_BLOCK_SIZE 1 #define ATH6KL_TIME_QUANTUM 10 /* in ms */ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma) { u8 *buf; int i; buf = req->virt_dma_buf; for (i = 0; i < req->scat_entries; i++) { if (from_dma) memcpy(req->scat_list[i].buf, buf, req->scat_list[i].len); else memcpy(buf, req->scat_list[i].buf, req->scat_list[i].len); buf += req->scat_list[i].len; } return 0; } int ath6kl_hif_rw_comp_handler(void *context, int status) { struct htc_packet *packet = context; ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n", packet, status); packet->status = status; packet->completion(packet->context, packet); return 0; } EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler); #define REG_DUMP_COUNT_AR6003 60 #define REGISTER_DUMP_LEN_MAX 60 static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar) { __le32 regdump_val[REGISTER_DUMP_LEN_MAX]; u32 i, address, regdump_addr = 0; int ret; if (ar->target_type != TARGET_TYPE_AR6003) return; /* the reg dump pointer is copied to the host interest 
area */ address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state)); address = TARG_VTOP(ar->target_type, address); /* read RAM location through diagnostic window */ ret = ath6kl_diag_read32(ar, address, &regdump_addr); if (ret || !regdump_addr) { ath6kl_warn("failed to get ptr to register dump area: %d\n", ret); return; } ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n", regdump_addr); regdump_addr = TARG_VTOP(ar->target_type, regdump_addr); /* fetch register dump data */ ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0], REG_DUMP_COUNT_AR6003 * (sizeof(u32))); if (ret) { ath6kl_warn("failed to get register dump: %d\n", ret); return; } ath6kl_info("crash dump:\n"); ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version, ar->wiphy->fw_version); BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4); for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) { ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n", i, le32_to_cpu(regdump_val[i]), le32_to_cpu(regdump_val[i + 1]), le32_to_cpu(regdump_val[i + 2]), le32_to_cpu(regdump_val[i + 3])); } } static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev) { u32 dummy; int ret; ath6kl_warn("firmware crashed\n"); /* * read counter to clear the interrupt, the debug error interrupt is * counter 0. 
*/ ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS, (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC); if (ret) ath6kl_warn("Failed to clear debug interrupt: %d\n", ret); ath6kl_hif_dump_fw_crash(dev->ar); ath6kl_read_fwlogs(dev->ar); return ret; } /* mailbox recv message polling */ int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, int timeout) { struct ath6kl_irq_proc_registers *rg; int status = 0, i; u8 htc_mbox = 1 << HTC_MAILBOX; for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) { /* this is the standard HIF way, load the reg table */ status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS, (u8 *) &dev->irq_proc_reg, sizeof(dev->irq_proc_reg), HIF_RD_SYNC_BYTE_INC); if (status) { ath6kl_err("failed to read reg table\n"); return status; } /* check for MBOX data and valid lookahead */ if (dev->irq_proc_reg.host_int_status & htc_mbox) { if (dev->irq_proc_reg.rx_lkahd_valid & htc_mbox) { /* * Mailbox has a message and the look ahead * is valid. */ rg = &dev->irq_proc_reg; *lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]); break; } } /* delay a little */ mdelay(ATH6KL_TIME_QUANTUM); ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i); } if (i == 0) { ath6kl_err("timeout waiting for recv message\n"); status = -ETIME; /* check if the target asserted */ if (dev->irq_proc_reg.counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK) /* * Target failure handler will be called in case of * an assert. */ ath6kl_hif_proc_dbg_intr(dev); } return status; } /* * Disable packet reception (used in case the host runs out of buffers) * using the interrupt enable registers through the host I/F */ int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx) { struct ath6kl_irq_enable_reg regs; int status = 0; ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n", enable_rx ? 
"enable" : "disable"); /* take the lock to protect interrupt enable shadows */ spin_lock_bh(&dev->lock); if (enable_rx) dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01); else dev->irq_en_reg.int_status_en &= ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01); memcpy(&regs, &dev->irq_en_reg, sizeof(regs)); spin_unlock_bh(&dev->lock); status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, &regs.int_status_en, sizeof(struct ath6kl_irq_enable_reg), HIF_WR_SYNC_BYTE_INC); return status; } int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev, struct hif_scatter_req *scat_req, bool read) { int status = 0; if (read) { scat_req->req = HIF_RD_SYNC_BLOCK_FIX; scat_req->addr = dev->ar->mbox_info.htc_addr; } else { scat_req->req = HIF_WR_ASYNC_BLOCK_INC; scat_req->addr = (scat_req->len > HIF_MBOX_WIDTH) ? dev->ar->mbox_info.htc_ext_addr : dev->ar->mbox_info.htc_addr; } ath6kl_dbg(ATH6KL_DBG_HIF, "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n", scat_req->scat_entries, scat_req->len, scat_req->addr, !read ? "async" : "sync", (read) ? 
"rd" : "wr"); if (!read && scat_req->virt_scat) { status = ath6kl_hif_cp_scat_dma_buf(scat_req, false); if (status) { scat_req->status = status; scat_req->complete(dev->ar->htc_target, scat_req); return 0; } } status = ath6kl_hif_scat_req_rw(dev->ar, scat_req); if (read) { /* in sync mode, we can touch the scatter request */ scat_req->status = status; if (!status && scat_req->virt_scat) scat_req->status = ath6kl_hif_cp_scat_dma_buf(scat_req, true); } return status; } static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev) { u8 counter_int_status; ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n"); counter_int_status = dev->irq_proc_reg.counter_int_status & dev->irq_en_reg.cntr_int_status_en; ath6kl_dbg(ATH6KL_DBG_IRQ, "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n", counter_int_status); /* * NOTE: other modules like GMBOX may use the counter interrupt for * credit flow control on other counters, we only need to check for * the debug assertion counter interrupt. */ if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK) return ath6kl_hif_proc_dbg_intr(dev); return 0; } static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev) { int status; u8 error_int_status; u8 reg_buf[4]; ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n"); error_int_status = dev->irq_proc_reg.error_int_status & 0x0F; if (!error_int_status) { WARN_ON(1); return -EIO; } ath6kl_dbg(ATH6KL_DBG_IRQ, "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n", error_int_status); if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status)) ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n"); if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status)) ath6kl_err("rx underflow\n"); if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status)) ath6kl_err("tx overflow\n"); /* Clear the interrupt */ dev->irq_proc_reg.error_int_status &= ~error_int_status; /* set W1C value to clear the interrupt, this hits the register first */ reg_buf[0] = error_int_status; reg_buf[1] = 0; reg_buf[2] = 0; reg_buf[3] = 0; status = 
hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS, reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); if (status) WARN_ON(1); return status; } static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev) { int status; u8 cpu_int_status; u8 reg_buf[4]; ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n"); cpu_int_status = dev->irq_proc_reg.cpu_int_status & dev->irq_en_reg.cpu_int_status_en; if (!cpu_int_status) { WARN_ON(1); return -EIO; } ath6kl_dbg(ATH6KL_DBG_IRQ, "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n", cpu_int_status); /* Clear the interrupt */ dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status; /* * Set up the register transfer buffer to hit the register 4 times , * this is done to make the access 4-byte aligned to mitigate issues * with host bus interconnects that restrict bus transfer lengths to * be a multiple of 4-bytes. */ /* set W1C value to clear the interrupt, this hits the register first */ reg_buf[0] = cpu_int_status; /* the remaining are set to zero which have no-effect */ reg_buf[1] = 0; reg_buf[2] = 0; reg_buf[3] = 0; status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS, reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); if (status) WARN_ON(1); return status; } /* process pending interrupts synchronously */ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done) { struct ath6kl_irq_proc_registers *rg; int status = 0; u8 host_int_status = 0; u32 lk_ahd = 0; u8 htc_mbox = 1 << HTC_MAILBOX; ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev); /* * NOTE: HIF implementation guarantees that the context of this * call allows us to perform SYNCHRONOUS I/O, that is we can block, * sleep or call any API that can block or switch thread/task * contexts. This is a fully schedulable context. */ /* * Process pending intr only when int_status_en is clear, it may * result in unnecessary bus transaction otherwise. Target may be * unresponsive at the time. 
*/ if (dev->irq_en_reg.int_status_en) { /* * Read the first 28 bytes of the HTC register table. This * will yield us the value of different int status * registers and the lookahead registers. * * length = sizeof(int_status) + sizeof(cpu_int_status) * + sizeof(error_int_status) + * sizeof(counter_int_status) + * sizeof(mbox_frame) + sizeof(rx_lkahd_valid) * + sizeof(hole) + sizeof(rx_lkahd) + * sizeof(int_status_en) + * sizeof(cpu_int_status_en) + * sizeof(err_int_status_en) + * sizeof(cntr_int_status_en); */ status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS, (u8 *) &dev->irq_proc_reg, sizeof(dev->irq_proc_reg), HIF_RD_SYNC_BYTE_INC); if (status) goto out; ath6kl_dump_registers(dev, &dev->irq_proc_reg, &dev->irq_en_reg); /* Update only those registers that are enabled */ host_int_status = dev->irq_proc_reg.host_int_status & dev->irq_en_reg.int_status_en; /* Look at mbox status */ if (host_int_status & htc_mbox) { /* * Mask out pending mbox value, we use "lookAhead as * the real flag for mbox processing. */ host_int_status &= ~htc_mbox; if (dev->irq_proc_reg.rx_lkahd_valid & htc_mbox) { rg = &dev->irq_proc_reg; lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]); if (!lk_ahd) ath6kl_err("lookAhead is zero!\n"); } } } if (!host_int_status && !lk_ahd) { *done = true; goto out; } if (lk_ahd) { int fetched = 0; ath6kl_dbg(ATH6KL_DBG_IRQ, "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd); /* * Mailbox Interrupt, the HTC layer may issue async * requests to empty the mailbox. When emptying the recv * mailbox we use the async handler above called from the * completion routine of the callers read request. This can * improve performance by reducing context switching when * we rapidly pull packets. */ status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt, lk_ahd, &fetched); if (status) goto out; if (!fetched) /* * HTC could not pull any messages out due to lack * of resources. 
*/ dev->htc_cnxt->chk_irq_status_cnt = 0; } /* now handle the rest of them */ ath6kl_dbg(ATH6KL_DBG_IRQ, "valid interrupt source(s) for other interrupts: 0x%x\n", host_int_status); if (MS(HOST_INT_STATUS_CPU, host_int_status)) { /* CPU Interrupt */ status = ath6kl_hif_proc_cpu_intr(dev); if (status) goto out; } if (MS(HOST_INT_STATUS_ERROR, host_int_status)) { /* Error Interrupt */ status = ath6kl_hif_proc_err_intr(dev); if (status) goto out; } if (MS(HOST_INT_STATUS_COUNTER, host_int_status)) /* Counter Interrupt */ status = ath6kl_hif_proc_counter_intr(dev); out: /* * An optimization to bypass reading the IRQ status registers * unecessarily which can re-wake the target, if upper layers * determine that we are in a low-throughput mode, we can rely on * taking another interrupt rather than re-checking the status * registers which can re-wake the target. * * NOTE : for host interfaces that makes use of detecting pending * mbox messages at hif can not use this optimization due to * possible side effects, SPI requires the host to drain all * messages from the mailbox before exiting the ISR routine. */ ath6kl_dbg(ATH6KL_DBG_IRQ, "bypassing irq status re-check, forcing done\n"); if (!dev->htc_cnxt->chk_irq_status_cnt) *done = true; ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (done:%d, status=%d\n", *done, status); return status; } /* interrupt handler, kicks off all interrupt processing */ int ath6kl_hif_intr_bh_handler(struct ath6kl *ar) { struct ath6kl_device *dev = ar->htc_target->dev; unsigned long timeout; int status = 0; bool done = false; /* * Reset counter used to flag a re-scan of IRQ status registers on * the target. */ dev->htc_cnxt->chk_irq_status_cnt = 0; /* * IRQ processing is synchronous, interrupt status registers can be * re-read. 
*/ timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT); while (time_before(jiffies, timeout) && !done) { status = proc_pending_irqs(dev, &done); if (status) break; } return status; } EXPORT_SYMBOL(ath6kl_hif_intr_bh_handler); static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev) { struct ath6kl_irq_enable_reg regs; int status; spin_lock_bh(&dev->lock); /* Enable all but ATH6KL CPU interrupts */ dev->irq_en_reg.int_status_en = SM(INT_STATUS_ENABLE_ERROR, 0x01) | SM(INT_STATUS_ENABLE_CPU, 0x01) | SM(INT_STATUS_ENABLE_COUNTER, 0x01); /* * NOTE: There are some cases where HIF can do detection of * pending mbox messages which is disabled now. */ dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01); /* Set up the CPU Interrupt status Register */ dev->irq_en_reg.cpu_int_status_en = 0; /* Set up the Error Interrupt status Register */ dev->irq_en_reg.err_int_status_en = SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) | SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1); /* * Enable Counter interrupt status register to get fatal errors for * debugging. 
*/ dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT, ATH6KL_TARGET_DEBUG_INTR_MASK); memcpy(&regs, &dev->irq_en_reg, sizeof(regs)); spin_unlock_bh(&dev->lock); status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, &regs.int_status_en, sizeof(regs), HIF_WR_SYNC_BYTE_INC); if (status) ath6kl_err("failed to update interrupt ctl reg err: %d\n", status); return status; } int ath6kl_hif_disable_intrs(struct ath6kl_device *dev) { struct ath6kl_irq_enable_reg regs; spin_lock_bh(&dev->lock); /* Disable all interrupts */ dev->irq_en_reg.int_status_en = 0; dev->irq_en_reg.cpu_int_status_en = 0; dev->irq_en_reg.err_int_status_en = 0; dev->irq_en_reg.cntr_int_status_en = 0; memcpy(&regs, &dev->irq_en_reg, sizeof(regs)); spin_unlock_bh(&dev->lock); return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, &regs.int_status_en, sizeof(regs), HIF_WR_SYNC_BYTE_INC); } /* enable device interrupts */ int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev) { int status = 0; /* * Make sure interrupt are disabled before unmasking at the HIF * layer. The rationale here is that between device insertion * (where we clear the interrupts the first time) and when HTC * is finally ready to handle interrupts, other software can perform * target "soft" resets. The ATH6KL interrupt enables reset back to an * "enabled" state when this happens. */ ath6kl_hif_disable_intrs(dev); /* unmask the host controller interrupts */ ath6kl_hif_irq_enable(dev->ar); status = ath6kl_hif_enable_intrs(dev); return status; } /* disable all device interrupts */ int ath6kl_hif_mask_intrs(struct ath6kl_device *dev) { /* * Mask the interrupt at the HIF layer to avoid any stray interrupt * taken while we zero out our shadow registers in * ath6kl_hif_disable_intrs(). 
*/ ath6kl_hif_irq_disable(dev->ar); return ath6kl_hif_disable_intrs(dev); } int ath6kl_hif_setup(struct ath6kl_device *dev) { int status = 0; spin_lock_init(&dev->lock); /* * NOTE: we actually get the block size of a mailbox other than 0, * for SDIO the block size on mailbox 0 is artificially set to 1. * So we use the block size that is set for the other 3 mailboxes. */ dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size; /* must be a power of 2 */ if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) { WARN_ON(1); status = -EINVAL; goto fail_setup; } /* assemble mask, used for padding to a block */ dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1; ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n", dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr); /* usb doesn't support enabling interrupts */ /* FIXME: remove check once USB support is implemented */ if (dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) return 0; status = ath6kl_hif_disable_intrs(dev); fail_setup: return status; }
gpl-2.0
lollipop-og/bricked-geehrc
drivers/spi/spi-fsl-lib.c
5467
5933
/* * Freescale SPI/eSPI controller driver library. * * Maintainer: Kumar Gala * * Copyright (C) 2006 Polycom, Inc. * * CPM SPI and QE buffer descriptors mode support: * Copyright (c) 2009 MontaVista Software, Inc. * Author: Anton Vorontsov <avorontsov@ru.mvista.com> * * Copyright 2010 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/fsl_devices.h> #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/of_platform.h> #include <linux/of_spi.h> #include <sysdev/fsl_soc.h> #include "spi-fsl-lib.h" #define MPC8XXX_SPI_RX_BUF(type) \ void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ { \ type *rx = mpc8xxx_spi->rx; \ *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ mpc8xxx_spi->rx = rx; \ } #define MPC8XXX_SPI_TX_BUF(type) \ u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ { \ u32 data; \ const type *tx = mpc8xxx_spi->tx; \ if (!tx) \ return 0; \ data = *tx++ << mpc8xxx_spi->tx_shift; \ mpc8xxx_spi->tx = tx; \ return data; \ } MPC8XXX_SPI_RX_BUF(u8) MPC8XXX_SPI_RX_BUF(u16) MPC8XXX_SPI_RX_BUF(u32) MPC8XXX_SPI_TX_BUF(u8) MPC8XXX_SPI_TX_BUF(u16) MPC8XXX_SPI_TX_BUF(u32) struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata) { return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); } void mpc8xxx_spi_work(struct work_struct *work) { struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, work); spin_lock_irq(&mpc8xxx_spi->lock); while (!list_empty(&mpc8xxx_spi->queue)) { struct spi_message *m = container_of(mpc8xxx_spi->queue.next, struct spi_message, queue); list_del_init(&m->queue); spin_unlock_irq(&mpc8xxx_spi->lock); if (mpc8xxx_spi->spi_do_one_msg) 
mpc8xxx_spi->spi_do_one_msg(m); spin_lock_irq(&mpc8xxx_spi->lock); } spin_unlock_irq(&mpc8xxx_spi->lock); } int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); unsigned long flags; m->actual_length = 0; m->status = -EINPROGRESS; spin_lock_irqsave(&mpc8xxx_spi->lock, flags); list_add_tail(&m->queue, &mpc8xxx_spi->queue); queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); return 0; } void mpc8xxx_spi_cleanup(struct spi_device *spi) { kfree(spi->controller_state); } const char *mpc8xxx_spi_strmode(unsigned int flags) { if (flags & SPI_QE_CPU_MODE) { return "QE CPU"; } else if (flags & SPI_CPM_MODE) { if (flags & SPI_QE) return "QE"; else if (flags & SPI_CPM2) return "CPM2"; else return "CPM1"; } return "CPU"; } int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) { struct fsl_spi_platform_data *pdata = dev->platform_data; struct spi_master *master; struct mpc8xxx_spi *mpc8xxx_spi; int ret = 0; master = dev_get_drvdata(dev); /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST | SPI_LOOP; master->transfer = mpc8xxx_spi_transfer; master->cleanup = mpc8xxx_spi_cleanup; master->dev.of_node = dev->of_node; mpc8xxx_spi = spi_master_get_devdata(master); mpc8xxx_spi->dev = dev; mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; mpc8xxx_spi->flags = pdata->flags; mpc8xxx_spi->spibrg = pdata->sysclk; mpc8xxx_spi->irq = irq; mpc8xxx_spi->rx_shift = 0; mpc8xxx_spi->tx_shift = 0; init_completion(&mpc8xxx_spi->done); master->bus_num = pdata->bus_num; master->num_chipselect = pdata->max_chipselect; spin_lock_init(&mpc8xxx_spi->lock); init_completion(&mpc8xxx_spi->done); INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); INIT_LIST_HEAD(&mpc8xxx_spi->queue); mpc8xxx_spi->workqueue = 
create_singlethread_workqueue( dev_name(master->dev.parent)); if (mpc8xxx_spi->workqueue == NULL) { ret = -EBUSY; goto err; } return 0; err: return ret; } int __devexit mpc8xxx_spi_remove(struct device *dev) { struct mpc8xxx_spi *mpc8xxx_spi; struct spi_master *master; master = dev_get_drvdata(dev); mpc8xxx_spi = spi_master_get_devdata(master); flush_workqueue(mpc8xxx_spi->workqueue); destroy_workqueue(mpc8xxx_spi->workqueue); spi_unregister_master(master); free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); if (mpc8xxx_spi->spi_remove) mpc8xxx_spi->spi_remove(mpc8xxx_spi); return 0; } int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev) { struct device *dev = &ofdev->dev; struct device_node *np = ofdev->dev.of_node; struct mpc8xxx_spi_probe_info *pinfo; struct fsl_spi_platform_data *pdata; const void *prop; int ret = -ENOMEM; pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); if (!pinfo) return -ENOMEM; pdata = &pinfo->pdata; dev->platform_data = pdata; /* Allocate bus num dynamically. */ pdata->bus_num = -1; /* SPI controller is either clocked from QE or SoC clock. */ pdata->sysclk = get_brgfreq(); if (pdata->sysclk == -1) { pdata->sysclk = fsl_get_sys_freq(); if (pdata->sysclk == -1) { ret = -ENODEV; goto err; } } prop = of_get_property(np, "mode", NULL); if (prop && !strcmp(prop, "cpu-qe")) pdata->flags = SPI_QE_CPU_MODE; else if (prop && !strcmp(prop, "qe")) pdata->flags = SPI_CPM_MODE | SPI_QE; else if (of_device_is_compatible(np, "fsl,cpm2-spi")) pdata->flags = SPI_CPM_MODE | SPI_CPM2; else if (of_device_is_compatible(np, "fsl,cpm1-spi")) pdata->flags = SPI_CPM_MODE | SPI_CPM1; return 0; err: kfree(pinfo); return ret; }
gpl-2.0
mtshima/Victara-CM-kernel
arch/xtensa/lib/pci-auto.c
9563
9268
/*
 * arch/xtensa/lib/pci-auto.c
 *
 * PCI autoconfiguration library
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel <zankel@tensilica.com, cez@zankel.net>
 *
 * Based on work from Matt Porter <mporter@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>

#include <asm/pci-bridge.h>

/*
 *
 * Setting up a PCI
 *
 * pci_ctrl->first_busno = <first bus number (0)>
 * pci_ctrl->last_busno = <last bus number (0xff)>
 * pci_ctrl->ops = <PCI config operations>
 * pci_ctrl->map_irq = <function to return the interrupt number for a device>
 *
 * pci_ctrl->io_space.start = <IO space start address (PCI view)>
 * pci_ctrl->io_space.end = <IO space end address (PCI view)>
 * pci_ctrl->io_space.base = <IO space offset: address 0 from CPU space>
 * pci_ctrl->mem_space.start = <MEM space start address (PCI view)>
 * pci_ctrl->mem_space.end = <MEM space end address (PCI view)>
 * pci_ctrl->mem_space.base = <MEM space offset: address 0 from CPU space>
 *
 * pcibios_init_resource(&pci_ctrl->io_resource, <IO space start>,
 *			 <IO space end>, IORESOURCE_IO, "PCI host bridge");
 * pcibios_init_resource(&pci_ctrl->mem_resources[0], <MEM space start>,
 *			 <MEM space end>, IORESOURCE_MEM, "PCI host bridge");
 *
 * pci_ctrl->last_busno = pciauto_bus_scan(pci_ctrl,pci_ctrl->first_busno);
 *
 * int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
 *
 */

/* define DEBUG to print some debugging messages. */

#undef DEBUG

#ifdef DEBUG
# define DBG(x...) printk(x)
#else
# define DBG(x...)
#endif

/* Bump-down allocators: next free address in PCI I/O and memory space.
 * Allocation proceeds downward from the controller's resource end. */
static int pciauto_upper_iospc;
static int pciauto_upper_memspc;

/* Scratch pci_dev/pci_bus reused for every config access during the scan */
static struct pci_dev pciauto_dev;
static struct pci_bus pciauto_bus;

/*
 * Helper functions
 */

/* Initialize the bars of a PCI device.
 * Sizes each BAR up to bar_limit by writing all-ones, then assigns it an
 * address carved out of the matching (I/O or memory) allocator.
 */

static void __init
pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
{
	/* NOTE(review): bar_size is 'int' but pci_read_config_dword() takes
	 * a u32 *; the signed type is relied on for the "(limit + size) &
	 * size" alignment trick below — confirm before changing. */
	int bar_size;
	int bar, bar_nr;
	int *upper_limit;
	int found_mem64 = 0;

	for (bar = PCI_BASE_ADDRESS_0, bar_nr = 0;
	     bar <= bar_limit;
	     bar+=4, bar_nr++) {

		/* Tickle the BAR and get the size */

		pci_write_config_dword(dev, bar, 0xffffffff);
		pci_read_config_dword(dev, bar, &bar_size);

		/* If BAR is not implemented go to the next BAR */

		if (!bar_size)
			continue;

		/* Check the BAR type and set our address mask */

		if (bar_size & PCI_BASE_ADDRESS_SPACE_IO) {
			bar_size &= PCI_BASE_ADDRESS_IO_MASK;
			upper_limit = &pciauto_upper_iospc;
			DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
		} else {
			if ((bar_size & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
			    PCI_BASE_ADDRESS_MEM_TYPE_64)
				found_mem64 = 1;

			bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
			upper_limit = &pciauto_upper_memspc;
			DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
		}

		/* Allocate a base address (bar_size is negative!) */
		*upper_limit = (*upper_limit + bar_size) & bar_size;

		/* Write it out and update our limit */
		pci_write_config_dword(dev, bar, *upper_limit);

		/*
		 * If we are a 64-bit decoder then increment to the
		 * upper 32 bits of the bar and force it to locate
		 * in the lower 4GB of memory.
		 */

		if (found_mem64)
			pci_write_config_dword(dev, (bar+=4), 0x00000000);

		DBG("size=0x%x, address=0x%x\n",
		    ~bar_size + 1, *upper_limit);
	}
}

/* Initialize the interrupt number.
 * Reads the device's interrupt pin, maps it to an IRQ via the
 * controller's map_irq callback, and writes PCI_INTERRUPT_LINE. */

static void __init
pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn)
{
	u8 pin;
	int irq = 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);

	/* Fix illegal pin numbers.
	 */

	if (pin == 0 || pin > 4)
		pin = 1;

	if (pci_ctrl->map_irq)
		irq = pci_ctrl->map_irq(dev, PCI_SLOT(devfn), pin);

	if (irq == -1)
		irq = 0;

	DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);

	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}

/* Prepare a P2P bridge for scanning its secondary bus: program the bus
 * number registers (subordinate temporarily maxed at 0xff), align both
 * allocators, and record their values in *iosave/*memsave so the
 * post-scan pass can tell whether any space was consumed. */
static void __init
pciauto_prescan_setup_bridge(struct pci_dev *dev, int current_bus,
			     int sub_bus, int *iosave, int *memsave)
{
	/* Configure bus number registers */
	pci_write_config_byte(dev, PCI_PRIMARY_BUS, current_bus);
	pci_write_config_byte(dev, PCI_SECONDARY_BUS, sub_bus + 1);
	pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, 0xff);

	/* Round memory allocator to 1MB boundary */
	pciauto_upper_memspc &= ~(0x100000 - 1);
	*memsave = pciauto_upper_memspc;

	/* Round I/O allocator to 4KB boundary */
	pciauto_upper_iospc &= ~(0x1000 - 1);
	*iosave = pciauto_upper_iospc;

	/* Set up memory and I/O filter limits, assume 32-bit I/O space */
	pci_write_config_word(dev, PCI_MEMORY_LIMIT,
			      ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
	pci_write_config_byte(dev, PCI_IO_LIMIT,
			      ((pciauto_upper_iospc - 1) & 0x0000f000) >> 8);
	pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
			      ((pciauto_upper_iospc - 1) & 0xffff0000) >> 16);
}

/* Finish bridge setup after its secondary bus was scanned: fix the
 * subordinate bus number, program the memory/prefetch/I/O windows from
 * the amount the allocators moved (allocating a minimum window if the
 * scan consumed nothing), and enable I/O, memory and bus mastering. */
static void __init
pciauto_postscan_setup_bridge(struct pci_dev *dev, int current_bus,
			      int sub_bus, int *iosave, int *memsave)
{
	/* NOTE(review): 'int' passed to pci_read_config_dword() (expects
	 * u32 *) — kept as-is; confirm against the arch's config ops. */
	int cmdstat;

	/* Configure bus number registers */
	pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, sub_bus);

	/*
	 * Round memory allocator to 1MB boundary.
	 * If no space used, allocate minimum.
	 */
	pciauto_upper_memspc &= ~(0x100000 - 1);
	if (*memsave == pciauto_upper_memspc)
		pciauto_upper_memspc -= 0x00100000;

	pci_write_config_word(dev, PCI_MEMORY_BASE, pciauto_upper_memspc >> 16);

	/* Allocate 1MB for pre-fretch */
	pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT,
			      ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);

	pciauto_upper_memspc -= 0x100000;

	pci_write_config_word(dev, PCI_PREF_MEMORY_BASE,
			      pciauto_upper_memspc >> 16);

	/* Round I/O allocator to 4KB boundary */
	pciauto_upper_iospc &= ~(0x1000 - 1);
	if (*iosave == pciauto_upper_iospc)
		pciauto_upper_iospc -= 0x1000;

	pci_write_config_byte(dev, PCI_IO_BASE,
			      (pciauto_upper_iospc & 0x0000f000) >> 8);
	pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
			      pciauto_upper_iospc >> 16);

	/* Enable memory and I/O accesses, enable bus master */
	pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
	pci_write_config_dword(dev, PCI_COMMAND,
			       cmdstat |
			       PCI_COMMAND_IO |
			       PCI_COMMAND_MEMORY |
			       PCI_COMMAND_MASTER);
}

/*
 * Scan the current PCI bus.
 * Walks every devfn on current_bus, recursing through P2P bridges and
 * assigning BARs/IRQs to each found device. Returns the highest
 * (subordinate) bus number reached.
 */


int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
{
	int sub_bus, pci_devfn, pci_class, cmdstat, found_multi=0;
	unsigned short vid;
	unsigned char header_type;
	struct pci_dev *dev = &pciauto_dev;

	pciauto_dev.bus = &pciauto_bus;
	pciauto_dev.sysdata = pci_ctrl;
	pciauto_bus.ops = pci_ctrl->ops;

	/*
	 * Fetch our I/O and memory space upper boundaries used
	 * to allocated base addresses on this pci_controller.
	 */

	if (current_bus == pci_ctrl->first_busno) {
		pciauto_upper_iospc = pci_ctrl->io_resource.end + 1;
		pciauto_upper_memspc = pci_ctrl->mem_resources[0].end + 1;
	}

	sub_bus = current_bus;

	/* NOTE(review): '< 0xff' never visits devfn 0xff (dev 31 func 7) —
	 * presumably intentional/harmless here, but verify if ported. */
	for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) {

		/* Skip our host bridge */
		if ((current_bus == pci_ctrl->first_busno) && (pci_devfn == 0))
			continue;

		/* Only probe functions >0 on multi-function devices */
		if (PCI_FUNC(pci_devfn) && !found_multi)
			continue;

		pciauto_bus.number = current_bus;
		pciauto_dev.devfn = pci_devfn;

		/* If config space read fails from this device, move on */
		if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type))
			continue;

		if (!PCI_FUNC(pci_devfn))
			found_multi = header_type & 0x80;

		pci_read_config_word(dev, PCI_VENDOR_ID, &vid);

		if (vid == 0xffff || vid == 0x0000) {
			found_multi = 0;
			continue;
		}

		pci_read_config_dword(dev, PCI_CLASS_REVISION, &pci_class);

		if ((pci_class >> 16) == PCI_CLASS_BRIDGE_PCI) {

			int iosave, memsave;

			DBG("PCI Autoconfig: Found P2P bridge, device %d\n",
			    PCI_SLOT(pci_devfn));

			/* Allocate PCI I/O and/or memory space */
			pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);

			pciauto_prescan_setup_bridge(dev, current_bus, sub_bus,
					&iosave, &memsave);
			/* Recurse into the secondary bus behind the bridge */
			sub_bus = pciauto_bus_scan(pci_ctrl, sub_bus+1);
			pciauto_postscan_setup_bridge(dev, current_bus, sub_bus,
					&iosave, &memsave);
			/* Restore the bus number clobbered by the recursion */
			pciauto_bus.number = current_bus;

			continue;

		}

#if 0
		/* Skip legacy mode IDE controller */

		if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) {

			unsigned char prg_iface;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface);

			if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) {
				DBG("PCI Autoconfig: Skipping legacy mode "
				    "IDE controller\n");
				continue;
			}
		}
#endif

		/*
		 * Found a peripheral, enable some standard
		 * settings
		 */

		pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
		pci_write_config_dword(dev, PCI_COMMAND,
				       cmdstat |
					PCI_COMMAND_IO |
					PCI_COMMAND_MEMORY |
					PCI_COMMAND_MASTER);
		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);

		/* Allocate PCI I/O and/or memory space */
		DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
		    current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) );

		pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
		pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
	}

	return sub_bus;
}
gpl-2.0